Dataset columns (with observed value ranges):

    content            string     lengths 7 – 928k
    avg_line_length    float64    3.5 – 33.8k
    max_line_length    int64      6 – 139k
    alphanum_fraction  float64    0.08 – 0.96
    licenses           sequence
    repository_name    string     lengths 7 – 104
    path               string     lengths 4 – 230
    size               int64      7 – 928k
    lang               string     1 unique value (Python)
from unittest import TestCase

from block import source, conjunction, negation, operator_of, nor
from components import rs_flip_flop
from simulation import Simulation


def sr_simulation(initial_s, initial_q):
    nor1, nor2, source_r, source_s, _, _ = rs_flip_flop(initial_q, initial_s)
    simulation = Simulation([source_s, source_r, nor1, nor2])
    return source_s, source_r, nor2.outputs[0], nor1.outputs[0], simulation


class SimulationTest(TestCase):
    def test_step_pushes(self):
        block = source()
        simulation = Simulation([block])
        simulation.run()
        self.assertEqual(1, block.outputs[0].value)

    def test_step_three_blocks(self):
        block1 = source()
        block2 = source()
        block3 = operator_of(conjunction, block1, block2)
        simulation = Simulation([block3, block2, block1])
        simulation.run()
        self.assertEqual(1, block3.outputs[0].value)

    def test_step_four_blocks(self):
        source1 = source()
        source2 = source()
        conj = operator_of(conjunction, source1, source2)
        neg = operator_of(negation, conj)
        simulation = Simulation([neg, conj, source2, source1])
        simulation.run()
        self.assertEqual(0, neg.outputs[0].value)

    def test_disconnected(self):
        block = nor()
        simulation = Simulation([block])
        simulation.run()
        self.assertEqual(None, block.outputs[0].value)

    def test_flip_flop_initial(self):
        self.assert_sr_flip_flop(1, 0, 1)  # set
        self.assert_sr_flip_flop(0, 1, 0)  # reset

    def test_flip_flop_multiple_steps(self):
        source_s, _, q, not_q, simulation = sr_simulation(1, 0)
        simulation.run()
        source_s.switch(1)
        simulation.run()
        self.assertEqual(1, q.value)
        self.assertEqual(0, not_q.value)

    def assert_sr_flip_flop(self, signal_s, signal_r, expected_q):
        _, _, q, not_q, simulation = sr_simulation(signal_s, signal_r)
        simulation.run()
        self.assertEqual(expected_q, q.value)
        self.assertEqual(not expected_q, not_q.value)
30.911765
77
0.662226
[ "MIT" ]
mjoniak/adder
test_simulation.py
2,102
Python
import dataclasses import re import textwrap from typing import Optional, Iterable, List, Match, Pattern, Tuple, Type, TypeVar, Union def add_line_prefix(s: str, prefix: str, /, empty_lines=False) -> str: if empty_lines: predicate = lambda line: True else: predicate = None return textwrap.indent(s, prefix, predicate=predicate) def add_indent(s: str, levels=1) -> str: level_prefix = 4 * " " return add_line_prefix(s, levels * level_prefix) def remove_indent(s: str) -> str: return textwrap.dedent(s) def split_trim(s: str, delim: Optional[str]) -> List[str]: return [s.strip() for s in s.split(delim)] def join_nonempty_lines(lines: Iterable[Optional[str]]) -> str: return "\n".join(filter(None, (line.strip() for line in lines if line))) def read_until_closing(s: str, open: str, close: str) -> Tuple[str, str, str]: open_pattern = re.compile(open) pattern = re.compile(f"(?:{open_pattern.pattern})|(?:{close})") start_pos = end_pos = 0 depth = 1 while depth: match = pattern.search(s, end_pos) if not match: raise ValueError(f"missing closing bracket (expected {depth} closing)") start_pos, end_pos = match.start(), match.end() if open_pattern.match(match[0]): depth += 1 else: depth -= 1 return s[:start_pos], s[start_pos:end_pos], s[end_pos:] def read_until_closing_bracket( s: str, *, skip_non_content_after=True ) -> Tuple[str, str]: (a, _, b) = read_until_closing(s, r"\{", r"\}") if skip_non_content_after: b = skip_non_content(b) return a, b def build_wasm_bindgen_attr( *args: Union[str, None], **kwargs: Union[str, Iterable[str], None] ) -> str: args = list(args) for key, value in kwargs.items(): if not value: continue if isinstance(value, str): args.append(f"{key} = {value}") else: args.extend(f"{key} = {v}" for v in value) return f"#[wasm_bindgen({', '.join(filter(None, args))})]" _PATTERN_COMMENT = re.compile(r"^ *\/\/.*\n") def consume_comments(s: str) -> str: while match := _PATTERN_COMMENT.match(s): s = s[match.end() :] return s _PATTERN_EMPTY_LINES = re.compile(r"^ *(?:\n|$)") def consume_empty_lines(s: str) -> str: while match := _PATTERN_EMPTY_LINES.match(s): s = s[match.end() :] if not s: break return s def skip_non_content(s: str) -> str: while True: new = consume_comments(consume_empty_lines(s)) if new == s: break s = new return s @dataclasses.dataclass() class MatchError(Exception): s: str pattern: Optional[Pattern] = None info: Optional[str] = None def __str__(self) -> str: s = self.preview_s() if info := self.info: return f"failed to parse: {info}:\n{s}" elif pattern := self.pattern: return f"didn't match pattern: `{pattern.pattern}`:\n{s}" else: return "{s}" def preview_s(self) -> str: lines = self.s.splitlines() if len(lines) > 8: lines = lines[:8] lines.append("... 
TRUNCATED") s = "\n".join(lines) hor_line = 80 * "=" return f"{hor_line}\n{s}\n{hor_line}" def consume_match( pattern: Pattern, s: str, *, skip_non_content_after=True, info: str = None, ) -> Tuple[Match, str]: if match := pattern.match(s): remainder = s[match.end() :] if skip_non_content_after: remainder = skip_non_content(remainder) return match, remainder raise MatchError(s=s, pattern=pattern, info=info) T = TypeVar("T") def consume_first(s: str, *consumers: Type[T], args=None) -> Tuple[T, str]: assert consumers, "need at least one consumer" if args is None: args = () error = None for consumer in consumers: try: return consumer.consume(s, *args) except MatchError as e: e.__context__ = error error = e raise MatchError( s=s, info=" | ".join(f"`{consumer.__qualname__}`" for consumer in consumers) ) from error class ModSet: def __init__(self, mods: Iterable[str]) -> None: self._mods = set(mods) @classmethod def create(cls, s: str): return cls(s.split()) def pop(self, mod: str) -> bool: try: self._mods.remove(mod) except KeyError: return False else: return True def assert_empty(self): if self._mods: raise ValueError(f"unhandled modifiers: {self._mods}")
24.46875
88
0.58791
[ "Apache-2.0", "MIT" ]
ctron/rust-monaco
ts2rs/ts2rs/helpers.py
4,698
Python
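The helpers module in the record above is built around read_until_closing, which scans text that follows an already-consumed opening bracket (depth starts at 1) and splits it at the matching closing bracket. A minimal usage sketch follows; the input string is an assumption for illustration, and the import path is guessed from the record's metadata.

from ts2rs.helpers import read_until_closing  # module path assumed from the record metadata

# The text passed in is everything *after* an opening "{".
before, closer, rest = read_until_closing("inner { nested } } tail", r"\{", r"\}")

assert before == "inner { nested } "   # content up to the matching brace
assert closer == "}"                   # the matching closing brace itself
assert rest == " tail"                 # whatever follows it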
# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg api_paste_config_opt = cfg.StrOpt('api_paste_config', default="api-paste.ini", help='File name for the paste.deploy config for nova-api') wsgi_log_format_opt = cfg.StrOpt('wsgi_log_format', default='%(client_ip)s "%(request_line)s" status: %(status_code)s' ' len: %(body_length)s time: %(wall_seconds).7f', help='A python format string that is used as the template to ' 'generate log lines. The following values can be formatted ' 'into it: client_ip, date_time, request_line, status_code, ' 'body_length, wall_seconds.') secure_proxy_ssl_header_opt = cfg.StrOpt('secure_proxy_ssl_header', help='The HTTP header used to determine the scheme for the ' 'original request, even if it was removed by an SSL ' 'terminating proxy. Typical value is ' '"HTTP_X_FORWARDED_PROTO".') ssl_ca_file_opt = cfg.StrOpt('ssl_ca_file', help="CA certificate file to use to verify " "connecting clients") ssl_cert_file_opt = cfg.StrOpt('ssl_cert_file', help="SSL certificate of API server") ssl_key_file_opt = cfg.StrOpt('ssl_key_file', help="SSL private key of API server") tcp_keepidle_opt = cfg.IntOpt('tcp_keepidle', default=600, help="Sets the value of TCP_KEEPIDLE in seconds for each " "server socket. Not supported on OS X.") wsgi_default_pool_size_opt = cfg.IntOpt('wsgi_default_pool_size', default=1000, help="Size of the pool of greenthreads used by wsgi") max_header_line_opt = cfg.IntOpt('max_header_line', default=16384, help="Maximum line size of message headers to be accepted. " "max_header_line may need to be increased when using " "large tokens (typically those generated by the " "Keystone v3 API with big service catalogs).") wsgi_keep_alive_opt = cfg.BoolOpt('wsgi_keep_alive', default=True, help="If False, closes the client socket connection " "explicitly.") client_socket_timeout_opt = cfg.IntOpt('client_socket_timeout', default=900, help="Timeout for client connections' socket operations. " "If an incoming connection is idle for this number of " "seconds it will be closed. A value of '0' means " "wait forever.") ALL_OPTS = [api_paste_config_opt, wsgi_log_format_opt, secure_proxy_ssl_header_opt, ssl_ca_file_opt, ssl_cert_file_opt, ssl_key_file_opt, tcp_keepidle_opt, wsgi_default_pool_size_opt, max_header_line_opt, wsgi_keep_alive_opt, client_socket_timeout_opt ] def register_opts(conf): conf.register_opts(ALL_OPTS) def list_opts(): return {"DEFAULT": ALL_OPTS}
38.462366
78
0.662007
[ "Apache-2.0" ]
HybridF5/nova
nova/conf/wsgi.py
3,577
Python
import pickle
import threading

import pymongo
import zmq

from constants import (
    MONGO_DEFAULT_HOST,
    MONGO_DEFAULT_PORT,
    ZMQ_DEFAULT_HOST,
    ZMQ_DEFAULT_PORT
)


class QueryExecutor(object):
    """A query executor"""

    def __init__(self, dbconfig={}, zmqconfig={}):
        """Initialize executor"""
        self.dbconfig = dbconfig
        self.zmqconfig = zmqconfig
        self._context = zmq.Context()
        self._socket = self._context.socket(zmq.REP)
        self._conn = None

    def _connectdb(self):
        """Connects Mongo"""
        # Mongo connection settings come from dbconfig, not zmqconfig
        self._conn = pymongo.MongoClient(self.dbconfig.get('host', MONGO_DEFAULT_HOST),
                                         self.dbconfig.get('port', MONGO_DEFAULT_PORT))

    def _listen(self):
        """Start listening for queries"""
        # start server
        self._socket.bind('tcp://{}:{}'.format(self.zmqconfig.get('host', ZMQ_DEFAULT_HOST),
                                               self.zmqconfig.get('port', ZMQ_DEFAULT_PORT)))
        while True:
            query = self._socket.recv_pyobj()
            self._socket.send_pyobj('QUERY_RECEIVED')
            callback_string = self._socket.recv_pyobj()
            callback = pickle.loads(callback_string)
            self._socket.send_pyobj('CALLBACK_RECEIVED')
            self._execute(query, callback)

    def _execute(self, query, callback):
        """Return the query result"""
        if not self._conn:
            self._connectdb()
        # index the client by database name, then collection name
        query_result = self._conn[query['dbname']][query['colname']]
        return callback(query_result)

    def start(self):
        """Start executor thread"""
        thread = threading.Thread(target=self._listen, args=())
        thread.daemon = True
        thread.start()
32.705882
139
0.63729
[ "Apache-2.0" ]
voidabhi/mongomq
mongomq/queryexecutor.py
1,668
Python
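The QueryExecutor above expects a fixed two-step exchange on its REP socket: the client sends a query dict, waits for 'QUERY_RECEIVED', then sends a pickled callback and waits for 'CALLBACK_RECEIVED'. A minimal client sketch of that handshake follows; the endpoint, database and collection names, and the count_documents callback are illustrative assumptions, not part of the record, and the callback must be importable on the executor side for unpickling to succeed.

import pickle

import zmq


def count_documents(collection):
    # Hypothetical callback: the executor applies it to the pymongo
    # collection selected from the query dict.
    return collection.count_documents({})


context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect('tcp://127.0.0.1:5555')  # assumed ZMQ_DEFAULT_HOST/PORT values

# Step 1: send the query object and wait for the acknowledgement.
socket.send_pyobj({'dbname': 'mydb', 'colname': 'mycol'})
assert socket.recv_pyobj() == 'QUERY_RECEIVED'

# Step 2: send the pickled callback; the executor unpickles it and calls
# callback(collection). Note the executor does not reply with the result.
socket.send_pyobj(pickle.dumps(count_documents))
assert socket.recv_pyobj() == 'CALLBACK_RECEIVED'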
import math

from pyvolution.EvolutionManager import *
from pyvolution.GeneLibrary import *

"""
This example attempts to find a solution to the following system of equations:
    a + b + c + d - 17 = 0
    a^2 + b^2 - 5 = 0
    sin(a) + c - d - 20 = 0
"""


def fitnessFunction(chromosome):
    """
    Given a "chromosome", this function must determine its fitness score
    The fitness score should be a floating point value.
    If the fitness is zero or smaller then the chromosome will not be allowed to "reproduce"
    """
    # you can access the attributes of a chromosome using square brackets
    # the key is the description of the gene
    a = chromosome["a"]
    b = chromosome["b"]
    c = chromosome["c"]
    d = chromosome["d"]

    # for a perfect solution each of the values will be zero
    val1 = math.fabs(a + b + c + d - 17)
    val2 = math.fabs(math.pow(a, 2) + math.pow(b, 2) - 5)
    val3 = math.sin(a) + c - d - 20

    # minimize the "distance", this gives a better fitness estimate than summing the values
    dist = math.sqrt(math.pow(val1, 2) + math.pow(val2, 2) + math.pow(val3, 2))

    # number returned must be a positive floating point value
    if dist != 0:
        return 1 / dist  # lower dist means better fitness, the closer to a good solution the higher this will be
    else:
        return None  # returning None indicates that a perfect solution has been found


# configure the evolution manager as you see fit
# see EvolutionManager.py for documentation on the arguments for this class
em = EvolutionManager(fitnessFunction,
                      individualsPerGeneration=100,
                      mutationRate=0.2,  # a mutation rate of 0.2 means that 20% of the genes will be mutated each round
                      maxGenerations=1000)

# standard floating point genes
# The values of the genes in the first generation are chosen randomly in a gaussian distribution.
# generatorAverage and generatorSTDEV describe the gaussian distribution
# When a gene mutates, the amount that it changes by is also chosen from a gaussian distribution with
# a standard deviation of mutationSTDEV
atype = FloatGeneType("a", generatorAverage=0, generatorSTDEV=100, mutationSTDEV=1.0)
btype = FloatGeneType("b", generatorAverage=0, generatorSTDEV=100, mutationSTDEV=1.0)
ctype = FloatGeneType("c", generatorAverage=0, generatorSTDEV=100, mutationSTDEV=1.0)
dtype = FloatGeneType("d", generatorAverage=0, generatorSTDEV=100, mutationSTDEV=1.0)

em.addGeneType(atype)
em.addGeneType(btype)
em.addGeneType(ctype)
em.addGeneType(dtype)

result = em.run()
38.878788
119
0.708885
[ "Apache-2.0" ]
littley/pyvolution
examples/EquationSolver_simple.py
2,566
Python
import json import argparse from time import sleep import psycopg2 import subprocess import os import signal def connTemp(puser, phost, pport, stmt): conn = psycopg2.connect(database = 'postgres', user = puser, host = phost, port = pport) conn.autocommit = True cur = conn.cursor() cur.execute(stmt) #res = cur.fetchall() conn.commit() cur.close() conn.close() def makeTemp(host, path): stmt = 'ssh ' + defuser + '@' + host + " 'mkdir -p " + path + "'" subprocess.run(stmt, shell = True) print('ssh mkdir : ' + stmt) print() def remoTemp(host, path): stmt = 'ssh ' + defuser + '@' + host + " 'rm -rf " + path + "'" subprocess.run(stmt, shell = True) print('ssh rm : ' + stmt) print() def sshTemp(host, stmt, time): stmt = 'ssh ' + defuser + '@' + host + " 'cd " + defbase + ' && source env.sh && ' + stmt + "'" print('ssh common : ' + stmt) p = subprocess.Popen(stmt, shell = True) os.kill(p.pid, signal.SIGCONT) sleep(time) print() def readJsonFile(): Of = open(File,encoding='utf-8') Ofload = json.loads(Of.read()) gtm = Ofload['gtm'] gtms = Ofload['gtm_slave'] cn = Ofload['coordinator'] dn = Ofload['datanode'] dns = Ofload['datanode_slave'] global gtmhost, gtmport, gtmdata, gtmuser, gtmname global gtmshost, gtmsport, gtmsdata, gtmsuser, gtmsname global cnhost, cnport, cndata, cnuser, cnname, cnpooler global dnhost, dnport, dndata, dnuser, dnname, dnpooler global dnsname, dnshost, dnsport, dnsdata, dnsuser, dnspooler, dnsmname, dnsmuser, dnsmport, dnsmhost gtmhost, gtmport, gtmdata, gtmuser, gtmname = [], [], [], [], [] gtmshost, gtmsport, gtmsdata, gtmsuser, gtmsname = [], [], [], [], [] cnhost, cnport, cndata, cnuser, cnname, cnpooler = [], [], [], [], [], [] dnhost, dnport, dndata, dnuser, dnname, dnpooler = [], [], [], [], [], [] dnsname, dnshost, dnsport, dnsdata, dnsuser, dnspooler, dnsmname, dnsmuser, dnsmport, dnsmhost = [], [], [], [], [], [], [], [], [], [] for i in gtm: Gtmhost = i["host"] Gtmport = i["port"] Gtmdata = i["datadir"] Gtmuser = i["user"] Gtmname = i["name"] gtmhost.append(Gtmhost) gtmport.append(Gtmport) gtmdata.append(Gtmdata) gtmuser.append(Gtmuser) gtmname.append(Gtmname) for i in gtms: Gtmshost = i["host"] Gtmsport = i["port"] Gtmsdata = i["datadir"] Gtmsuser = i["user"] Gtmsname = i["name"] gtmshost.append(Gtmshost) gtmsport.append(Gtmsport) gtmsdata.append(Gtmsdata) gtmsuser.append(Gtmsuser) gtmsname.append(Gtmsname) for i in cn: Cnhost = i["host"] Cnport = i["port"] Cndata = i["datadir"] Cnuser = i["user"] Cnname = i["name"] Cnpooler = i["pooler_port"] cnhost.append(Cnhost) cnport.append(Cnport) cnuser.append(Cnuser) cndata.append(Cndata) cnname.append(Cnname) cnpooler.append(Cnpooler) for i in dn: Dnhost = i["host"] Dnport = i["port"] Dndata = i["datadir"] Dnuser = i["user"] Dnname = i["name"] Dnpooler = i["pooler_port"] dnhost.append(Dnhost) dnport.append(Dnport) dndata.append(Dndata) dnuser.append(Dnuser) dnname.append(Dnname) dnpooler.append(Dnpooler) for i in dns: Dnshost = i["host"] Dnsport = i["port"] Dnsdata = i["datadir"] Dnsuser = i["user"] Dnspooler = i["pooler_port"] Dnsname = i["name"] Dnsmport = i["Master_port"] Dnsmuser = i["Master_user"] Dnsmname = i["Master_name"] Dnsmhost = i["Master_host"] dnshost.append(Dnshost) dnsport.append(Dnsport) dnsdata.append(Dnsdata) dnsuser.append(Dnsuser) dnspooler.append(Dnspooler) dnsname.append(Dnsname) dnsmport.append(Dnsmport) dnsmuser.append(Dnsmuser) dnsmname.append(Dnsmname) dnsmhost.append(Dnsmhost) def install(): allhost = [] allhost.extend(gtmhost) allhost.extend(gtmshost) allhost.extend(dnhost) 
allhost.extend(cnhost) allhost.extend(dnshost) ahost = list(set(allhost)) #print(ahost) pakname = package.replace('./','') pakname = pakname.replace('.tgz','') # create env file print('======== deploy packages ========') f = open('./env.sh', 'w') stmt1 = 'export PATH=' + defbase + '/' + pakname + '/bin:$PATH' stmt2 = 'export LD_LIBRARY_PATH=' + defbase + '/' + pakname + '/lib:$LD_LIBRARY_PATH' f.write(stmt1 + '\n' + stmt2 + '\n') f.close() # scp package & env file to each instance ====================== for i in ahost: stmt = 'scp' + ' ' + package + ' ' + defuser + '@' + i + ':' + defbase stmt3 = 'scp' + ' ./env.sh ' + defuser + '@' + i + ':' + defbase stmt5 = 'scp' + ' ./install.sh ' + defuser + '@' + i + ':' + defbase subprocess.run(stmt, shell = True) print(stmt) subprocess.run(stmt3, shell = True) print(stmt3) subprocess.run(stmt5, shell = True) print(stmt5) stmt7 = 'tar -zxf ' + defbase + '/' + package sshTemp(i, stmt7, 1) print() # -------------------------- gtm ------------------------------------- # init gtm master node ================================== print('\n ======== creating gtm master node ======== \n') makeTemp(gtmhost[0], gtmdata[0]) initgtm = 'initgtm -Z gtm -D ' + gtmdata[0] sshTemp(gtmhost[0], initgtm, 1) # change gtm configuration =================================== gtmconf = '/bin/bash ' + defbase + '/install.sh gtm ' + gtmhost[0] + ' ' + str(gtmport[0]) + ' ' + gtmname[0] + ' ' + gtmdata[0] + ' ' + gtmuser[0] sshTemp(gtmhost[0], gtmconf, 1) # start gtm ============================= startgtm = 'gtm_ctl -Z gtm -D ' + gtmdata[0] + ' start' sshTemp(gtmhost[0], startgtm, 1) # -------------------------- gtm slave ----------------------------- n = 0 print('\n ======== creating gtm slave node ========') for i in gtmshost: makeTemp(i, gtmsdata[n]) print('\n creating gtm slave node ' + gtmsname[n]) # init gtm slave node ==================== initgtms = 'initgtm -Z gtm -D ' + gtmsdata[n] sshTemp(i, initgtms, 1) # change gtm slave configuration ===================== gtmsconf = '/bin/bash ' + defbase + '/install.sh gtm_slave ' + gtmhost[0] + ' ' + str(gtmsport[n]) + ' ' + gtmsname[n] + ' ' + gtmsdata[n] + ' ' + str(gtmport[0]) sshTemp(i, gtmsconf, 1) #start gtm slave ================== startgtms = 'gtm_ctl -Z gtm_standby -D ' + gtmsdata[n] + ' start' sshTemp(i, startgtms, 1) n = n + 1 # ------------------------- cn node -------------------------------- n = 0 print('\n ======== creating cn node ========') #initdb --locale=zh_CN.UTF-8 -U kunlun -E utf8 -D /home/kunlun/TPC/postgres-xz/data/cn01 --nodename=cn01 --nodetype=coordinator --master_gtm_nodename gtm --master_gtm_ip 192.168.0.134 --master_gtm_port 23001 for i in cnhost: makeTemp(i, cndata[n]) print('\n ==========creating cn node ' + cnname[n]) # init cn node =============== if types == 'pgxz': initcn = 'initdb --locale=en_US.UTF-8 -U ' + cnuser[n] + ' -E utf8 -D ' + cndata[n] + ' --nodename=' + cnname[n] + ' --nodetype=coordinator --master_gtm_nodename ' + gtmname[0] + ' --master_gtm_ip ' + gtmhost[0] + ' --master_gtm_port ' + str(gtmport[0]) sshTemp(i, initcn, 3) elif types == 'pgxc': initcn = 'initdb -D ' + cndata[n] + ' --nodename ' + cnname[n] sshTemp(i, initcn, 3) # change cn node configuration ============= if types == 'pgxz': cnconf = '/bin/bash ' + defbase + '/install.sh cn ' + str(cnport[n]) + ' ' + str(cnpooler[n]) + ' ' + cndata[n] + ' ' + gtmhost[0] + ' ' + str(gtmport[0]) sshTemp(i, cnconf, 1) elif types == 'pgxc': cnconf = '/bin/bash ' + defbase + '/install.sh cn ' + str(cnport[n]) + ' ' + str(cnpooler[n]) + ' 
' + cndata[n] + ' ' + gtmhost[0] + ' ' + str(gtmport[0]) + ' ' + types sshTemp(i, cnconf, 2) # start cn node ================= startcn = 'pg_ctl -Z coordinator -D ' + cndata[n] + ' start' reloadcn = 'pg_ctl -D ' + cndata[n] + ' reload' sshTemp(i, startcn, 3) if types == 'pgxc': restartcn = 'pg_ctl -Z coordinator restart -m f -D ' + cndata[n] sshTemp(i, restartcn, 2) sshTemp(i, reloadcn, 1) n = n + 1 # ------------------------- dn node -------------------------------- n = 0 print('\n ======== creating dn master node ========') for i in dnhost: makeTemp(i, dndata[n]) print('\n ================creating dn node ' + dnname[n]) # init dn node =============== if types == 'pgxz': initdn = 'initdb --locale=en_US.UTF-8 -U ' + dnuser[n] + ' -E utf8 -D ' + dndata[n] + ' --nodename=' + dnname[n] + ' --nodetype=datanode --master_gtm_nodename ' + gtmname[0] + ' --master_gtm_ip ' + gtmhost[0] + ' --master_gtm_port ' + str(gtmport[0]) sshTemp(i, initdn, 3) elif types == 'pgxc': initdn = 'initdb -D ' + dndata[n] + ' --nodename ' + dnname[n] sshTemp(i, initdn, 5) # change dn configuration ==================== if types == 'pgxz': dnconf = '/bin/bash ' + defbase + '/install.sh dn ' + str(dnport[n]) + ' ' + str(dnpooler[n]) + ' ' + dndata[n] + ' ' + gtmhost[0] + ' ' + str(gtmport[0]) sshTemp(i, dnconf, 1) elif types == 'pgxc': dnconf = '/bin/bash ' + defbase + '/install.sh dn ' + str(dnport[n]) + ' ' + str(dnpooler[n]) + ' ' + dndata[n] + ' ' + gtmhost[0] + ' ' + str(gtmport[0]) + ' ' + types sshTemp(i, dnconf, 2) # start dn node ================= startdn = 'pg_ctl -Z datanode -D ' + dndata[n] + ' start' reloaddn = 'pg_ctl -D ' + dndata[n] + ' reload' sshTemp(i, startdn, 3) if types == 'pgxc': restartdn = 'pg_ctl -Z datanode restart -m f -D ' + dndata[n] sshTemp(i, restartdn, 2) sshTemp(i, reloaddn, 1) n = n + 1 # ----------------------- dn slave node -------------------------- # pg_basebackup -p 23003 -h 192.168.0.132 -U kunlun -D /home/kunlun/TPC/postgres-xz/data/dn01s1 -X f -P -v n = 0 print('\n ======== creating dn slave node ========') for i in dnshost: makeTemp(i, dnsdata[n]) print('\n ==============creating dns node ' + dnsname[n]) # init dns node =============== initdns = 'pg_basebackup -p ' + dnsmport[n] + ' -h ' + dnsmhost[n] + ' -U ' + dnsuser[n] + ' -D ' + dnsdata[n] + ' -X f -P -v' sshTemp(i, initdns, 5) # change dns configuration ================== dnsconf = '/bin/bash ' + defbase + '/install.sh dn_slave ' + str(dnsport[n]) + ' ' + str(dnspooler[n]) + ' ' + dnsdata[n] + ' ' + dnsmhost[n] + ' ' + dnsmport[n] + ' ' + dnsmuser[n] + ' ' + dnsmname[n] changedir = 'chmod 700 ' + dnsdata[n] sshTemp(i, dnsconf, 2) sshTemp(i, changedir, 1) # start dns node ================== startdns = 'pg_ctl -Z datanode -D ' + dnsdata[n] + ' start' reloaddns = 'pg_ctl -D ' + dnsdata[n] + ' reload' sshTemp(i, startdns, 3) if types == 'pgxc': restartdns = 'pg_ctl -Z datanode restart -m f -D ' + dnsdata[n] sshTemp(i, restartdns, 2) sshTemp(i, reloaddns, 1) n = n + 1 def ConfigRoute(): # 配置路由 print('\n======== Configration Route ========') cof = ['cn','dn'] for i in cof: if i == 'cn': ns = 0 for a in cnhost: print('\npsql -h '+ cnhost[ns] + ' -d postgres -p ' + str(cnport[ns])) for b in cof: if b == 'cn': n = 0 for c in cnhost: if c == a: stmt = 'alter node ' + cnname[n] + " with(host='" + c + "',port=" + str(cnport[n]) + ')' print(cnuser[ns],cnhost[ns], cnport[ns],stmt) connTemp(cnuser[ns], cnhost[ns], cnport[ns], stmt) n = n + 1 else: stmt = 'create node ' + cnname[n] + " with(type=coordinator,host='" + c + "',port=" + 
str(cnport[n]) + ',primary=false,preferred=false)' print(cnuser[ns], cnhost[ns], cnport[ns],stmt) connTemp(cnuser[ns], cnhost[ns], cnport[ns], stmt) n = n + 1 else: n = 0 for c in dnhost: stmt = 'create node ' + dnname[n] + " with(type=datanode,host='" + c + "',port=" + str(dnport[n]) + ',primary=false,preferred=false)' print(cnuser[ns], cnhost[ns], cnport[ns],stmt) connTemp(cnuser[ns], cnhost[ns], cnport[ns], stmt) n = n + 1 ns = ns + 1 if i == 'dn': nn = 0 for a in dnhost: print('\npsql -h '+ dnhost[nn] + ' -d postgres -p ' + str(dnport[nn])) for b in cof: if b == 'dn': n = 0 for c in dnhost: if c == a: stmt = 'alter node ' + dnname[n] + " with(host='" + c + "',port=" + str(dnport[n]) + ')' print(dnuser[nn], dnhost[nn], dnport[nn],stmt) connTemp(dnuser[nn], dnhost[nn], dnport[nn], stmt) n = n + 1 else: stmt = 'create node ' + dnname[n] + " with(type=datanode,host='" + c + "',port=" + str(dnport[n]) + ',primary=false,preferred=false)' print(dnuser[nn], dnhost[nn], dnport[nn], stmt) connTemp(dnuser[nn], dnhost[nn], dnport[nn], stmt) n = n + 1 else: n = 0 for c in cnhost: stmt = 'create node ' + cnname[n] + " with(type=coordinator,host='" + c + "',port=" + str(cnport[n]) + ',primary=false,preferred=false)' print(cnuser[nn], cnhost[nn], cnport[nn], stmt) connTemp(dnuser[nn], dnhost[nn], dnport[nn], stmt) n = n + 1 nn = nn + 1 print('\ncreating sharding') alldn = '' for i in dnname: if i == dnname[0]: alldn = i else: alldn = alldn + ',' + i stmt1 = 'create default node group default_group with(' + alldn + ')' connTemp(cnuser[0], cnhost[0], cnport[0], stmt1) stmt2 = 'create sharding group to group default_group' connTemp(cnuser[0], cnhost[0], cnport[0], stmt2) stmt3 = 'clean sharding' connTemp(cnuser[0], cnhost[0], cnport[0], stmt3) def clean(): #pg_ctl -D /home/charles/data/pgdatadir stop -m immediate gtmclean = 'gtm_ctl -Z gtm -m immediate -D ' + gtmdata[0] + '\n' sshTemp(gtmhost[0], gtmclean, 1) n = 0 for i in gtmshost: gtmsclean = 'gtm_ctl -Z gtm_standby stop -m immediate -D ' + gtmsdata[n] + '\n' sshTemp(i, gtmsclean, 1) n = n +1 n = 0 for i in cnhost: cnclean = 'pg_ctl stop -m immediate -D ' + cndata[n] + '\n' sshTemp(i, cnclean, 1) n = n + 1 n = 0 for i in dnhost: dnclean = 'pg_ctl stop -m immediate -D ' + dndata[n] + '\n' sshTemp(i, dnclean, 1) n = n + 1 n = 0 for i in dnshost: dnsclean = 'pg_ctl stop -m immediate -D ' + dnsdata[n] + '\n' sshTemp(i, dnsclean, 1) n = n + 1 remoTemp(gtmhost[0], gtmdata[0]) n = 0 for i in gtmshost: remoTemp(i, gtmsdata[n]) n = n +1 n = 0 for i in cnhost: remoTemp(i, cndata[n]) n = n + 1 n = 0 for i in dnhost: remoTemp(i, dndata[n]) n = n + 1 n = 0 for i in dnshost: remoTemp(i, dnsdata[n]) n = n + 1 if __name__ == '__main__': parser = argparse.ArgumentParser(description = 'the pgxz/pgxl/pgxc install script.') parser.add_argument('--type', default='pgxc', help = 'pgxc, pgxz, pgxl') parser.add_argument('--config', default='install.json', help = 'the config json file') parser.add_argument('--defbase', default='/home/kunlun/compare/postgres-xc/base', help = 'default basedir') parser.add_argument('--defuser', default='kunlun', help = 'default user') parser.add_argument('--package', default='package', help = 'the package of pgxz/xl/xc') parser.add_argument('--opt', default='install', help = 'can be "i" or "c", "i" = "install" \n "c" = "clean"') args = parser.parse_args() File = args.config defbase = args.defbase defuser = args.defuser package = args.package types = args.type opt = args.opt print(args) readJsonFile() if opt == 'i': install() ConfigRoute() elif 
opt == 'c': clean() #print('gtm\n', gtmhost,'\n', gtmport, '\n', gtmdata, '\n',gtmuser, '\n', gtmname, '\n' , '\ngtm_slave \n',gtmshost, '\n', gtmsport, '\n', gtmsdata, '\n', gtmsuser, '\n', gtmsname, '\n', '\ncn\n', cnhost, '\n', cnport, '\n', cndata, '\n', cnuser, '\n', cnname, '\n', '\ndn\n', dnhost, '\n', dnport, '\n', dndata, '\n', dnuser, '\n', dnname, '\n', dnpooler, '\n', '\ndn_slave \n', dnshost, '\n', dnsport, '\n', dnsdata, '\n', dnsuser, '\n', dnspooler, '\n', dnsname, '\n', dnsmport, '\n', dnsmname, '\n', dnsmhost)
39.696312
517
0.488033
[ "Apache-2.0" ]
zettadb/cloudnative
person/charles/PGX/pgx_install.py
18,308
Python
# -*- coding: utf-8 -*- """ Created on Tue Jun 07 2016 @author: Matthew Carse """ #@ Class containing methods for machine learning. #@ Chromosomes must be preprocessed through feature scaling standardisation prior to being #@ used in machine learning. The class implements Scikit-learn to apply feature scaling to the #@ training chromosomes. The scaling factors are retained for use with the validation #@ and testing datasets. #@ The class is also responsible for reading in groupings for model generation and prediction #@ validation. The prediction validation (fitness) can be accuracy, precision, recall or f-statistic. #@ Outputs HTML confusion matrices for split mode (lda and rfc on held-out data) and confusion matrix (lda) #@ and ROC curve for cross-validation. import numpy, os import vpGenList as vpGenList from sklearn import preprocessing from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.metrics import precision_recall_fscore_support from sklearn.ensemble import RandomForestClassifier from sklearn import cross_validation from sklearn.pipeline import make_pipeline from sklearn.metrics import confusion_matrix import plotly as py from plotly.graph_objs import Heatmap, Figure import warnings warnings.filterwarnings("ignore") # warnings about collinearity are ignored as not relating to number of vpGens class machineLearning: def __init__(self, mode): self.mode = mode # call function to read groupings for validation of classification # the file reading will only be performed once with the results # saved to class variables self.groupingsRead() self.cm = False # variable to toggle plotting of confusion matrix for highest-scoring model # use scikit-learn preprocessing to scale features using standardisation ((X-mean)/s.d.) # feature scaling results in 0 or near-0 mean and unit variance for non-0 features def preprocess(self): # scaling values created using training set # list of training feature set dictionaries decomposed to obtain feature sets self.scaler = preprocessing.StandardScaler().fit(numpy.asarray(self.getFSets(vpGenList.allTrainFSets))) # keys used to remake dictionary (both allvpGens and all*FSets in same order) # saved as class variable if len(keys) == 0: for k in vpGenList.allTrainFSets[0]: keys.append(k) # list of dictionaries of scaled feature sets to be sent back to vpGenList allFSetsScaled = list() x = self.scaler.transform(numpy.asarray(self.getFSets(vpGenList.allTrainFSets))) # use list of dictionaries for seq in x: d = {keys[i]: list(seq[i*6:(i+1)*6]) for i in range(400)} allFSetsScaled.append(d) return allFSetsScaled # read through each dictionary in the list of feature sets # reforming the feature sets into 2400-feature list # results in list of length # sequences * 2400 (400*6) # used to calculate feature scaling values def getFSets(self, fsets): l = list() for d in fsets: subl = list() for fset in d.values(): subl += fset l.append(subl) return l # read in protein groupings for use with classification validation # use hard-coded file names, as checked for with program call def groupingsRead(self): if len(trainGroupings) == 0: if self.mode == "split": f = open(("{0}/training_groupings.txt").format(os.getcwd())).read() else: f = open(("{0}/all_groupings.txt").format(os.getcwd())).read() # split using whitespace delimiter for g in f.split(): trainGroupings.append(g) #print trainGroupings print 'Training groupings created!' 
if len(validGroupings) == 0: if self.mode == "split": f = open(("{0}/validation_groupings.txt").format(os.getcwd())).read() # split using whitespace delimiter for g in f.split(): validGroupings.append(g) #print validGroupings print 'Validation groupings created!' if len(testGroupings) == 0: if self.mode == "split": f = open(("{0}/testing_groupings.txt").format(os.getcwd())).read() # split using whitespace delimiter for g in f.split(): testGroupings.append(g) #print testGroupings print 'Testing groupings created!' # function which uses the scaling factors derived from the training dataset # to scale the validation and test datasets' chromosomes def scale(self, dataset): # list of dictionaries of scaled feature sets to be sent back to vpGenList allFSetsScaled = list() # scale feature sets if dataset == 'valid': x = self.scaler.transform(numpy.asarray(self.getFSets(vpGenList.allValidFSets))) if dataset == 'test': x = self.scaler.transform(numpy.asarray(self.getFSets(vpGenList.allTestFSets))) # use list of dictionaries # for each scaled sequence (2400 features, 400 feature sets) # decompose this into feature sets, incrementally taking the next 6 features # each feature set is added to a dictionary with the appropriate id key # each dictionary (one per sequence) is then added to a list and returned for seq in x: d = {keys[i]: list(seq[i*6:(i+1)*6]) for i in range(400)} allFSetsScaled.append(d) return allFSetsScaled # function to create an lda model using the training chromosome list # of the defined number of chromosomes and the correct peptide groupings # the same identifiers are used to create the validation chromosome list # the lda model predicts the classifications of the validation set # the predictions, along with the correct groupings, are used to calculate # several fitness metrics - precision, accuracy, f-statistic, accuracy def classifyLDA(self, tCList, vCList): if self.mode == "cv": # LDA object clf = make_pipeline(preprocessing.StandardScaler(), LinearDiscriminantAnalysis()) predicted = cross_validation.cross_val_predict(clf, tCList, trainGroupings, cv=3) if self.cm: self.confusionMatrix(trainGroupings, predicted, 'lda_cv') return precision_recall_fscore_support(trainGroupings, predicted, average = 'weighted')[2] else: clf = LinearDiscriminantAnalysis() # fit lda model using training chromosomes clf.fit(numpy.asarray(tCList), numpy.asarray(trainGroupings)) if self.cm: self.confusionMatrix(validGroupings, predicted, 'lda_valid') # return precision ([0]), recall ([1]) or f1 score ([2]), replace with clf.score(numpy.asarray(vCList), validGroupings) for accuracy return precision_recall_fscore_support(validGroupings, clf.predict(numpy.asarray(vCList)), average = 'weighted')[2] # fitness for validation set # return clf.score(numpy.asarray(vCList), validGroupings) # accuracy for validation set # create a random forest model using the training chromosomes # predict the groupings of the validation set ### CHANGE: n_estimators = NUMBER OF DECISION TREES ### def classifyRFC( self, tCList, vCList): rfc = RandomForestClassifier(n_estimators=5, random_state=1, max_features=None) # fit model using training chromosomes rfc.fit(numpy.asarray(tCList), numpy.asarray(trainGroupings)) predicted = rfc.predict(vCList) self.confusionMatrix(validGroupings, predicted, 'rfc_valid') # return precision ([0]), recall ([1]) or f1 score ([2]), replace with rfc.score(numpy.asarray(vCList), validGroupings) for accuracy return precision_recall_fscore_support(validGroupings, predicted, average = 'weighted')[2] # 
fitness for validation set # return rfc.score(vCList, numpy.asarray(validGroupings)) # accuracy for validation set # create an lda model using the training chromosomes # predict the groupings of the test set def testEvaluateLDA(self, trCList, teCList): # LDA object clf = LinearDiscriminantAnalysis() # fit lda model using training chromosomes clf.fit(numpy.asarray(trCList), numpy.asarray(trainGroupings)) predicted = clf.predict(teCList) self.confusionMatrix(testGroupings, predicted, 'lda_test') # return precision ([0]), recall ([1]) or f1 score ([2]), replace with clf.score(numpy.asarray(teCList), testGroupings) for accuracy return precision_recall_fscore_support(testGroupings, predicted, average = 'weighted')[2] # fitness for test set # return clf.score(numpy.asarray(teCList), testGroupings) # accuracy for test set # create a random forest model using the training chromosomes # predict the groupings of the test set ### CHANGE: n_estimators = NUMBER OF DECISION TREES ### def testEvaluateRFC(self, trCList, teCList): rfc = RandomForestClassifier(n_estimators=10, random_state=1, max_features=None) # fit model using training chromosomes rfc.fit(numpy.asarray(trCList), numpy.asarray(trainGroupings)) predicted = rfc.predict(teCList) self.confusionMatrix(testGroupings, predicted, 'rfc_test') # return precision ([0]), recall ([1]) or f1 score ([2]), replace with rfc.score(numpy.asarray(teCList), testGroupings) for accuracy return precision_recall_fscore_support(testGroupings, predicted, average = 'weighted')[2] # fitness for test set # return rfc.score(teCList, numpy.asarray(testGroupings)) # accuracy for test set # function to plot a confusion matrix heatmap # takes as parameters: # the correct groupings for the test set # the predicted groupings # the classifier type - lda or rfc (to change title/filename) # outputs an html file def confusionMatrix(self, groupings, predicted, clf): cm = confusion_matrix(groupings, predicted) z = cm x=["Hydrolase","Mem. Trans","Structural"] # change as necessary y=["Hydrolase","Mem. 
Trans","Structural"] # change as necessary data = [Heatmap(z=z,x=x,y=y)] annotations = [] for n, row in enumerate(z): for m, val in enumerate(row): annotations.append( dict( text="{:4.2f}% ({})".format((float(val)/len(groupings)*100),str(val)), x=x[m], y=y[n], xref='x1', yref='y1', font=dict(color='white'), showarrow=False) ) xaxis = dict(title='Predicted class',ticks='') yaxis = dict(title='Actual class',ticks='') if clf == 'lda_test': title = "Confusion matrix - linear discriminant analysis classification" filename = "Confusion matrix_test_lda" if clf == 'rfc_test': title = "Confusion matrix - random forest classification" filename = "Confusion matrix_test_rfc" if clf == 'lda_cv': title = "Confusion matrix - linear discriminant analysis classification" filename = "Confusion matrix_cv_lda" if clf == 'lda_valid': title = "Confusion matrix - linear discriminant analysis classification" filename = "Confusion matrix_valid_lda" if clf == 'rfc_valid': title = "Confusion matrix - random forest classification" filename = "Confusion matrix_valid_rfc" fig = Figure(data=data) fig['layout'].update(title=title, xaxis=xaxis, yaxis=yaxis, annotations=annotations) py.offline.plot(fig, filename=filename) # function to plot roc curve for a cross-validation model # outputs an html file def rocCurve(self, tCList): from sklearn.cross_validation import KFold from sklearn.metrics import roc_curve, auc from sklearn.preprocessing import label_binarize from sklearn.multiclass import OneVsRestClassifier from plotly.graph_objs import Scatter cv = KFold(len(trainGroupings)+1, n_folds=6) mean_tpr = 0.0 fig = py.tools.make_subplots(shared_xaxes=True, shared_yaxes=True, print_grid=False) for index, (train, test) in enumerate(cv): classes = list(set(trainGroupings)) classes.sort() y = label_binarize(trainGroupings, classes=classes) n_classes = len(classes) trainL, testL = [e for e in train], [e for e in test] del trainL[-1] # delete last element otherwise too many del testL[-1] trainL, testL = numpy.asarray(trainL), numpy.asarray(testL) # split dataset for fold into training and testing sets and groupings X_train, X_test, y_train, y_test = numpy.asarray(tCList)[trainL], numpy.asarray(tCList)[testL], numpy.asarray(y)[trainL], numpy.asarray(y)[testL] scaler = preprocessing.StandardScaler().fit(X_train) # create feature scaling values using training set X_train_transformed = scaler.transform(X_train) # scale training set X_test_transformed = scaler.transform(X_test) # scale test set classifier = OneVsRestClassifier(LinearDiscriminantAnalysis()) # one vs rest classifier for multi-class y_score = classifier.fit(X_train_transformed, y_train).decision_function(X_test_transformed) fpr = dict() tpr = dict() roc_auc = dict() # roc area under curve for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) all_fpr = numpy.unique(numpy.concatenate([fpr[i] for i in range(n_classes)])) mean_tpr = numpy.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += numpy.interp(all_fpr, fpr[i], tpr[i]) mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) # add trace for micro and macro averages trace1 = Scatter(x=fpr["micro"],y=tpr["micro"],name=('Micro-average ROC curve fold {} (area = {:03.2f})').format(index, roc_auc["micro"]),mode='lines') fig.append_trace(trace1, 1, 1) trace2 = 
Scatter(x=fpr["macro"],y=tpr["macro"],name=('Macro-average ROC curve fold {} (area = {:03.2f})').format(index, roc_auc["macro"]),mode='lines') fig.append_trace(trace2, 1, 1) trace1 = Scatter(x=[0,1],y=[0,1],name='Chance',mode='lines') # add diagonal line for change fig.append_trace(trace1, 1, 1) title = 'Cross-validation Receiver Operating Characteristic' xaxis = dict(title='False Positive Rate') yaxis = dict(title='True Positive Rate') fig['layout'].update(title=title, xaxis=xaxis, yaxis=yaxis) saveName = '6fold_cv_roc' py.offline.plot(fig, filename=saveName) # class variables trainGroupings = list() validGroupings = list() testGroupings = list() keys = list()
49.278932
180
0.613898
[ "MIT" ]
MatthewCarse/evolve
machineLearning.py
16,607
Python
""" In this exercise you are going to apply what you learned about stacks with a real world problem. We will be using stacks to make sure the parentheses are balanced in mathematical expressions such as: ((3^2 + 8)*(5/2))/(2+6) In real life you can see this extend to many things such as text editor plugins and interactive development environments for all sorts of bracket completion checks. Take a string as an input and return `True` if it's parentheses are balanced or `False` if it is not. """ from typing import List class Stack: def __init__(self): self.items = [] def size(self): return len(self.items) def push(self, item): self.items.append(item) def pop(self): if self.size() == 0: return None else: return self.items.pop() def equation_checker(equation): """ Check equation for balanced parentheses Args: equation(string): String form of equation Returns: bool: Return if parentheses are balanced or not """ opening_parenthesis = Stack() closing_parenthesis = Stack() for _ in equation: if _ == "(": opening_parenthesis.push(_) elif _ == ")": closing_parenthesis.push(_) return opening_parenthesis.size() == closing_parenthesis.size() def _equation_checker(equation): """ Check equation for balanced parentheses """ # not in the the spirit return equation.count("(") == equation.count(")") def udacity_equation_checker(equation): stack = Stack() for char in equation: if char == "(": stack.push(char) elif char == ")": if stack.pop() == None: return False return stack.size() == 0
24.067568
103
0.623807
[ "MIT" ]
m-01101101/udacity-datastructures-algorithms
3. data_structures/stack/balanced_parantheses.py
1,781
Python
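A quick comparison (illustrative, not part of the record) shows where the count-based checkers and the stack-based checker disagree: a string such as ")(" has matching counts but is not balanced.

# Compare the three checkers defined in the record above.
cases = ["((3^2 + 8)*(5/2))/(2+6)", "(()", ")("]

for expression in cases:
    print(
        expression,
        equation_checker(expression),          # count-based
        _equation_checker(expression),         # count-based
        udacity_equation_checker(expression),  # order-aware
    )

# Expected output:
# ((3^2 + 8)*(5/2))/(2+6) True True True
# (() False False False
# )( True True False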
import os import re import uuid import typing as t import logging import pathlib import functools from typing import TYPE_CHECKING from distutils.dir_util import copy_tree from simple_di import inject from simple_di import Provide import bentoml from bentoml import Tag from bentoml.exceptions import BentoMLException from bentoml.exceptions import MissingDependencyException from ..types import LazyType from ..runner.utils import Params from ..utils.tensorflow import get_tf_version from ..utils.tensorflow import is_gpu_available from ..utils.tensorflow import hook_loaded_model from .common.model_runner import BaseModelRunner from ..configuration.containers import BentoMLContainer logger = logging.getLogger(__name__) try: import tensorflow as tf # type: ignore except ImportError: # pragma: no cover raise MissingDependencyException( """\ `tensorflow` is required in order to use `bentoml.tensorflow`. Instruction: `pip install tensorflow` """ ) try: import tensorflow_hub as hub # type: ignore from tensorflow_hub import resolve # type: ignore from tensorflow_hub import native_module # type: ignore except ImportError: # pragma: no cover logger.warning( """\ If you want to use `bentoml.tensorflow.import_from_tfhub(), make sure to `pip install --upgrade tensorflow_hub` before using. """ ) hub = None try: import importlib.metadata as importlib_metadata except ImportError: import importlib_metadata if TYPE_CHECKING: from tensorflow_hub import Module as HubModule # type: ignore from tensorflow_hub import KerasLayer # type: ignore from .. import external_typing as ext from ..types import PathType from ..models import ModelStore from ..external_typing import tensorflow as tf_ext TFArgType = t.Union[t.List[t.Union[int, float]], ext.NpNDArray, tf_ext.Tensor] MODULE_NAME = "bentoml.tensorflow_v2" def _clean_name(name: str) -> str: # pragma: no cover if name.startswith(("http://", "https://")): name = name.split("/", maxsplit=3)[-1] else: name = name.split("/")[-1] return re.sub(r"\W|^(?=\d)-", "_", name) @inject def load( bento_tag: t.Union[str, Tag], tags: t.Optional[t.List[str]] = None, options: t.Optional["tf_ext.SaveOptions"] = None, load_as_hub_module: t.Optional[bool] = None, model_store: "ModelStore" = Provide[BentoMLContainer.model_store], ) -> t.Union["tf_ext.AutoTrackable", "tf_ext.Module", "HubModule", "KerasLayer"]: """ Load a model from BentoML local modelstore with given name. Args: bento_tag (:code:`Union[str, Tag]`): Tag of a saved model in BentoML local modelstore. tags (:code:`str`, `optional`, defaults to `None`): A set of strings specifying the graph variant to use, if loading from a v1 module. options (:code:`tensorflow.saved_model.SaveOptions`, `optional`, default to :code:`None`): :code:`tensorflow.saved_model.LoadOptions` object that specifies options for loading. This argument can only be used from TensorFlow 2.3 onwards. load_as_hub_module (`bool`, `optional`, default to :code:`True`): Load the given weight that is saved from tfhub as either `hub.KerasLayer` or `hub.Module`. The latter only applies for TF1. model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`): BentoML modelstore, provided by DI Container. Returns: :obj:`SavedModel`: an instance of :obj:`SavedModel` format from BentoML modelstore. Examples: .. 
code-block:: python import bentoml # load a model back into memory model = bentoml.tensorflow.load("my_tensorflow_model") """ # noqa: LN001 model = model_store.get(bento_tag) if model.info.module not in (MODULE_NAME, __name__): raise BentoMLException( f"Model {bento_tag} was saved with module {model.info.module}, failed loading with {MODULE_NAME}." ) if model.info.context["import_from_tfhub"]: assert load_as_hub_module is not None, ( "You have to specified `load_as_hub_module=True | False`" " to load a `tensorflow_hub` module. If True is chosen," " then BentoML will return either an instance of `hub.KerasLayer`" " or `hub.Module` depending on your TF version. For most usecase," " we recommend to keep `load_as_hub_module=True`. If you wish to extend" " the functionalities of the given model, set `load_as_hub_module=False`" " will return a SavedModel object." ) if hub is None: raise MissingDependencyException( """\ `tensorflow_hub` does not exists. Make sure to `pip install --upgrade tensorflow_hub` before using. """ ) module_path = model.path_of(model.info.options["local_path"]) if load_as_hub_module: return ( hub.Module(module_path) if get_tf_version().startswith("1") else hub.KerasLayer(module_path) ) # In case users want to load as a SavedModel file object. # https://github.com/tensorflow/hub/blob/master/tensorflow_hub/module_v2.py#L93 is_hub_module_v1: bool = tf.io.gfile.exists( # type: ignore native_module.get_module_proto_path(module_path) ) if tags is None and is_hub_module_v1: tags = [] if options is not None: if not LazyType( "tensorflow.python.saved_model.save_options.SaveOptions" ).isinstance(options): raise BentoMLException( f"`options` has to be of type `tf.saved_model.SaveOptions`, got {type(options)} instead." ) if not hasattr(getattr(tf, "saved_model", None), "LoadOptions"): raise NotImplementedError( "options are not supported for TF < 2.3.x," f" Current version: {get_tf_version()}" ) tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2( # type: ignore module_path, tags=tags, options=options, # type: ignore ) else: tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2( # type: ignore module_path, tags=tags, ) tf_model._is_hub_module_v1 = ( is_hub_module_v1 # pylint: disable=protected-access # noqa ) return tf_model else: tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2(model.path) # type: ignore return hook_loaded_model(tf_model, MODULE_NAME) @inject def import_from_tfhub( identifier: t.Union[str, "HubModule", "KerasLayer"], name: t.Optional[str] = None, labels: t.Optional[t.Dict[str, str]] = None, custom_objects: t.Optional[t.Dict[str, t.Any]] = None, metadata: t.Optional[t.Dict[str, t.Any]] = None, model_store: "ModelStore" = Provide[BentoMLContainer.model_store], ) -> Tag: """ Import a model from `Tensorflow Hub <https://tfhub.dev/>`_ to BentoML modelstore. Args: identifier (:code:`Union[str, tensorflow_hub.Module, tensorflow_hub.KerasLayer]`): Identifier accepts two type of inputs: - if `type` of :code:`identifier` either of type :code:`tensorflow_hub.Module` (**legacy** `tensorflow_hub`) or :code:`tensorflow_hub.KerasLayer` (`tensorflow_hub`), then we will save the given model to a :code:`SavedModel` format. - if `type` of :code:`identifier` is a :obj:`str`, we assume that this is the URI retrieved from Tensorflow Hub. We then clean the given URI, and get a local copy of a given model to BentoML modelstore. name (:code:`str`, `optional`, defaults to `None`): An optional name for the model. 
If :code:`identifier` is a :obj:`str`, then name can be autogenerated from the given URI. name (:code:`str`, `optional`, default to `None`): Optional name for the saved model. If None, then name will be generated from :code:`identifier`. labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`): user-defined labels for managing models, e.g. team=nlp, stage=dev custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`): user-defined additional python objects to be saved alongside the model, e.g. a tokenizer instance, preprocessor function, model configuration json metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`): Custom metadata for given model. model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`): BentoML modelstore, provided by DI Container. Returns: :obj:`~bentoml.Tag`: A :obj:`~bentoml.Tag` object that can be used to retrieve the model with :func:`bentoml.tensorflow.load`: Example for importing a model from Tensorflow Hub: .. code-block:: python import tensorflow_text as text # noqa # pylint: disable import bentoml tag = bentoml.tensorflow.import_from_tfhub("https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3") # load model back with `load`: model = bentoml.tensorflow.load(tag, load_as_hub_module=True) Example for importing a custom Tensorflow Hub model: .. code-block:: python import tensorflow as tf import tensorflow_hub as hub import bentoml def _plus_one_model_tf2(): obj = tf.train.Checkpoint() @tf.function(input_signature=[tf.TensorSpec(None, dtype=tf.float32)]) def plus_one(x): return x + 1 obj.__call__ = plus_one return obj # then save the given model to BentoML modelstore: model = _plus_one_model_tf2() tag = bentoml.tensorflow.import_from_tfhub(model) """ # noqa if hub is None: raise MissingDependencyException( """\ `tensorflow_hub` does not exists. Make sure to `pip install --upgrade tensorflow_hub` before using. 
""" ) context: t.Dict[str, t.Any] = { "framework_name": "tensorflow", "pip_dependencies": [ f"tensorflow=={get_tf_version()}", f"tensorflow_hub=={importlib_metadata.version('tensorflow_hub')}", ], "import_from_tfhub": True, } if name is None: if isinstance(identifier, str): name = _clean_name(identifier) else: name = f"{identifier.__class__.__name__}_{uuid.uuid4().hex[:5].upper()}" with bentoml.models.create( name, module=MODULE_NAME, options=None, context=context, metadata=metadata, labels=labels, custom_objects=custom_objects, ) as _model: if isinstance(identifier, str): current_cache_dir = os.environ.get("TFHUB_CACHE_DIR") os.environ["TFHUB_CACHE_DIR"] = _model.path fpath: str = resolve(identifier) folder = fpath.split("/")[-1] _model.info.options = {"model": identifier, "local_path": folder} if current_cache_dir is not None: os.environ["TFHUB_CACHE_DIR"] = current_cache_dir else: if hasattr(identifier, "export"): # hub.Module.export() with tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph()) as sess: # type: ignore sess.run(tf.compat.v1.global_variables_initializer()) # type: ignore identifier.export(_model.path, sess) # type: ignore else: tf.saved_model.save(identifier, _model.path) _model.info.options = { "model": identifier.__class__.__name__, "local_path": ".", } return _model.tag @inject def save( name: str, model: t.Union["PathType", "tf_ext.KerasModel", "tf_ext.Module"], *, signatures: t.Optional["tf_ext.ConcreteFunction"] = None, options: t.Optional["tf_ext.SaveOptions"] = None, labels: t.Optional[t.Dict[str, str]] = None, custom_objects: t.Optional[t.Dict[str, t.Any]] = None, metadata: t.Optional[t.Dict[str, t.Any]] = None, model_store: "ModelStore" = Provide[BentoMLContainer.model_store], ) -> Tag: """ Save a model instance to BentoML modelstore. Args: name (:code:`str`): Name for given model instance. This should pass Python identifier check. model (:code:`Union[keras.Model, tf.Module, path-like objects]`): Instance of model to be saved labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`): user-defined labels for managing models, e.g. team=nlp, stage=dev custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`): user-defined additional python objects to be saved alongside the model, e.g. a tokenizer instance, preprocessor function, model configuration json metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`): Custom metadata for given model. model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`): BentoML modelstore, provided by DI Container. signatures (:code:`Union[Callable[..., Any], dict]`, `optional`, default to :code:`None`): Refers to `Signatures explanation <https://www.tensorflow.org/api_docs/python/tf/saved_model/save>`_ from Tensorflow documentation for more information. options (`tf.saved_model.SaveOptions`, `optional`, default to :code:`None`): :obj:`tf.saved_model.SaveOptions` object that specifies options for saving. Raises: ValueError: If :obj:`obj` is not trackable. Returns: :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML. Examples: .. 
code-block:: python import tensorflow as tf import numpy as np import bentoml class NativeModel(tf.Module): def __init__(self): super().__init__() self.weights = np.asfarray([[1.0], [1.0], [1.0], [1.0], [1.0]]) self.dense = lambda inputs: tf.matmul(inputs, self.weights) @tf.function( input_signature=[tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="inputs")] ) def __call__(self, inputs): return self.dense(inputs) # then save the given model to BentoML modelstore: model = NativeModel() tag = bentoml.tensorflow.save("native_toy", model) .. note:: :code:`bentoml.tensorflow.save` API also support saving `RaggedTensor <https://www.tensorflow.org/guide/ragged_tensor>`_ model and Keras model. If you choose to save a Keras model with :code:`bentoml.tensorflow.save`, then the model will be saved under a :obj:`SavedModel` format instead of :obj:`.h5`. """ # noqa context: t.Dict[str, t.Any] = { "framework_name": "tensorflow", "pip_dependencies": [f"tensorflow=={get_tf_version()}"], "import_from_tfhub": False, } with bentoml.models.create( name, module=MODULE_NAME, options=None, context=context, labels=labels, custom_objects=custom_objects, metadata=metadata, ) as _model: if isinstance(model, (str, bytes, os.PathLike, pathlib.Path)): # type: ignore[reportUnknownMemberType] assert os.path.isdir(model) copy_tree(str(model), _model.path) else: if options: logger.warning( f"Parameter 'options: {str(options)}' is ignored when " f"using tensorflow {get_tf_version()}" ) tf.saved_model.save( model, _model.path, signatures=signatures, options=options ) return _model.tag class _TensorflowRunner(BaseModelRunner): def __init__( self, tag: t.Union[str, Tag], predict_fn_name: str, device_id: str, partial_kwargs: t.Optional[t.Dict[str, t.Any]], name: t.Optional[str] = None, ): super().__init__(tag, name=name) self._device_id = device_id self._configure(device_id) self._predict_fn_name = predict_fn_name self._partial_kwargs: t.Dict[str, t.Any] = ( partial_kwargs if partial_kwargs is not None else dict() ) def _configure(self, device_id: str) -> None: if "GPU" in device_id: tf.config.set_visible_devices(device_id, "GPU") self._config_proto = dict( allow_soft_placement=True, log_device_placement=False, intra_op_parallelism_threads=self._num_threads, inter_op_parallelism_threads=self._num_threads, ) @property def _num_threads(self) -> int: if is_gpu_available() and self.resource_quota.on_gpu: return 1 return int(round(self.resource_quota.cpu)) @property def num_replica(self) -> int: if is_gpu_available() and self.resource_quota.on_gpu: return len(self.resource_quota.gpus) return 1 def _setup(self) -> None: self._model = load(self._tag, model_store=self.model_store) raw_predict_fn = getattr(self._model, self._predict_fn_name) # type: ignore self._predict_fn = functools.partial(raw_predict_fn, **self._partial_kwargs) def _run_batch(self, *args: "TFArgType", **kwargs: "TFArgType") -> "ext.NpNDArray": params = Params["TFArgType"](*args, **kwargs) with tf.device(self._device_id): # type: ignore def _mapping(item: "TFArgType") -> "tf_ext.TensorLike": if not LazyType["tf_ext.TensorLike"]("tf.Tensor").isinstance(item): return t.cast("tf_ext.TensorLike", tf.convert_to_tensor(item)) else: return item params = params.map(_mapping) tf.compat.v1.global_variables_initializer() # type: ignore res = self._predict_fn(*params.args, **params.kwargs) return t.cast("ext.NpNDArray", res.numpy()) def load_runner( tag: t.Union[str, Tag], *, predict_fn_name: str = "__call__", device_id: str = "CPU:0", name: t.Optional[str] = None, 
    partial_kwargs: t.Optional[t.Dict[str, t.Any]] = None,
) -> "_TensorflowRunner":
    """
    Runner represents a unit of serving logic that can be scaled horizontally to
    maximize throughput. `bentoml.tensorflow.load_runner` implements a Runner class that
    wraps around a Tensorflow model, which optimizes it for the BentoML runtime.

    Args:
        tag (:code:`Union[str, Tag]`):
            Tag of a saved model in BentoML local modelstore.
        predict_fn_name (:code:`str`, default to :code:`__call__`):
            Inference function to be used.
        partial_kwargs (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Dictionary of partial kwargs that can be shared across different models.
        device_id (:code:`str`, `optional`, default to the first CPU):
            Optional devices to put the given model on. Refers to `Logical Devices <https://www.tensorflow.org/api_docs/python/tf/config/list_logical_devices>`_ from TF documentation.

    Returns:
        :obj:`~bentoml._internal.runner.Runner`: Runner instances for :mod:`bentoml.tensorflow` model

    Examples:

    .. code-block:: python

        import bentoml

        # load a runner from a given tag
        runner = bentoml.tensorflow.load_runner(tag)

        # load a runner on GPU:0
        runner = bentoml.tensorflow.load_runner(tag, resource_quota=dict(gpus=0), device_id="GPU:0")

    """
    return _TensorflowRunner(
        tag=tag,
        predict_fn_name=predict_fn_name,
        device_id=device_id,
        partial_kwargs=partial_kwargs,
        name=name,
    )
39.56262
392
0.631617
[ "Apache-2.0" ]
almirb/BentoML
bentoml/_internal/frameworks/tensorflow_v2.py
20,533
Python
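A short end-to-end sketch combining the `save` and `load_runner` APIs documented in the entry above. The toy module mirrors the `NativeModel` example from the docstring; the final `run_batch` call assumes the `BaseModelRunner` interface, which is not shown in this file.

import numpy as np
import tensorflow as tf
import bentoml

# same toy module as in the save() docstring above
class NativeModel(tf.Module):
    def __init__(self):
        super().__init__()
        self.weights = np.asfarray([[1.0], [1.0], [1.0], [1.0], [1.0]])
        self.dense = lambda inputs: tf.matmul(inputs, self.weights)

    @tf.function(
        input_signature=[tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="inputs")]
    )
    def __call__(self, inputs):
        return self.dense(inputs)

tag = bentoml.tensorflow.save("native_toy", NativeModel())

# default predict_fn_name="__call__", CPU placement, per the load_runner docstring
runner = bentoml.tensorflow.load_runner(tag, device_id="CPU:0")

# run_batch is assumed from the BaseModelRunner interface (not shown in this file)
result = runner.run_batch(np.array([[1.0, 2.0, 3.0, 4.0, 5.0]]))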
""" Django settings for modelos project. Generated by 'django-admin startproject' using Django 1.9.7. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'yxb@kgu3r$vjy8lfl0gti)+l5j6mvpt2r6xl^$(i-@iyttnd!!' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # #nuestras apps 'myapp', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'modelos.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'modelos.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/'
25.862903
91
0.697225
[ "MIT" ]
probardjango/Modelos-de-Django
src/modelos/settings.py
3,207
Python
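For context, a minimal sketch of the standard Django bootstrap that would point at the settings module above; `modelos.settings` follows the project name used in this entry, and the rest is stock Django, not code from the repository.

# manage.py-style bootstrap (standard Django, shown here for context only)
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "modelos.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)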
"""nflDAs URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path urlpatterns = [ path('admin/', admin.site.urls), ]
34
77
0.708556
[ "MIT" ]
BARarch/NFL-Topics
nflDAs/nflDAs/urls.py
748
Python
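A sketch that follows the URLconf docstring above by wiring a function view; `my_app` and `views.home` are hypothetical names taken from the docstring's own instructions.

from django.contrib import admin
from django.urls import path

from my_app import views  # hypothetical app, as in the docstring example

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
]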
import numpy as np from gym.envs.mujoco import mujoco_env from gym import utils def mass_center(model, sim): mass = np.expand_dims(model.body_mass, 1) xpos = sim.data.xipos return (np.sum(mass * xpos, 0) / np.sum(mass))[0] class PoppyHumanoidKeepStandingEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self): mujoco_env.MujocoEnv.__init__(self, 'poppy_humanoid/poppy_keep_standing.xml', 5) utils.EzPickle.__init__(self) def _get_obs(self): data = self.sim.data return np.concatenate([data.qpos.flat[2:], data.qvel.flat, data.cinert.flat, data.cvel.flat, data.qfrc_actuator.flat, data.cfrc_ext.flat]) def step(self, a): pos_before = mass_center(self.model, self.sim) self.do_simulation(a, self.frame_skip) pos_after = mass_center(self.model, self.sim) alive_bonus = 5.0 data = self.sim.data lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum() quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum() quad_impact_cost = min(quad_impact_cost, 10) reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus qpos = self.sim.data.qpos done = bool((qpos[2] < 0.2) or (qpos[2] > 2.0)) return self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost, reward_quadctrl=-quad_ctrl_cost, reward_alive=alive_bonus, reward_impact=-quad_impact_cost) def reset_model(self): c = 0.01 self.set_state( self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq), self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv,) ) return self._get_obs() def viewer_setup(self): self.viewer.cam.trackbodyid = 1 self.viewer.cam.distance = self.model.stat.extent * 1.0 self.viewer.cam.lookat[2] = 0.8 self.viewer.cam.elevation = -20
41.096154
170
0.614881
[ "MIT" ]
garrettkatz/poppy-simulations
ambulation/envs/poppy_humanoid_keep_standing/poppy_humanoid_keep_standing.py
2,137
Python
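A minimal rollout sketch for the environment above; the import path is hypothetical, and the MuJoCo assets referenced by `poppy_humanoid/poppy_keep_standing.xml` are assumed to be installed where gym can find them.

import numpy as np
from poppy_humanoid_keep_standing import PoppyHumanoidKeepStandingEnv  # hypothetical import path

env = PoppyHumanoidKeepStandingEnv()
obs = env.reset()
for _ in range(100):
    action = np.zeros(env.action_space.shape)   # zero torques; alive_bonus dominates the reward
    obs, reward, done, info = env.step(action)
    if done:                                    # torso height left the [0.2, 2.0] band
        obs = env.reset()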
''' Simple tool to find big functions in a js or ll file ''' import os, sys, re filename = sys.argv[1] i = 0 start = -1 curr = None data = [] for line in open(filename): i += 1 if line.startswith(('function ', 'define ')): start = i curr = line elif line.startswith('}') and curr: size = i - start data.append([curr, size]) curr = None data.sort(lambda x, y: x[1] - y[1]) print ''.join(['%6d : %s' % (x[1], x[0]) for x in data])
19.083333
56
0.576419
[ "MIT" ]
Cloudef/emscripten
tools/find_bigfuncs.py
458
Python
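The script above is a stand-alone CLI written in Python 2 syntax (print statement, cmp-style sort), so it is invoked rather than imported; the interpreter name and `out.js` below are placeholders.

import subprocess

# equivalent to running the tool by hand on an Emscripten output file
subprocess.run(["python2", "tools/find_bigfuncs.py", "out.js"], check=True)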
from spn.structure.Base import Product, Sum, get_nodes_by_type from spn.structure.leaves.cltree.CLTree import CLTree from spn.algorithms.Validity import is_consistent from scipy.sparse.csgraph import minimum_spanning_tree from scipy.sparse.csgraph import depth_first_order from error import RootVarError import numpy as np ROOT = -1 class VTreeNode: """ used to model a Vtree """ def __init__(self, var_id=None, children=[]): self.var_id = var_id self.parent = None self.innerNode = True if self.var_id is None else False self.set_children(children) self.scopes = None def get_var_id(self): return self.var_id def get_parent(self): return self.parent def get_children(self): return self.children def set_children(self, children): self.children = children for child in children: child.parent = self def set_scopes(self, scopes): self.scopes = scopes def get_scopes(self): return self.scopes def is_leaf(self): return len(self.children) == 0 def is_inner(self): return len(self.children) != 0 class DTreeNode: """ used to model a dependency tree """ def __init__(self, var_id, parent=None): self.var_id = var_id self.set_parent(parent) self.children = [] self.tree = None def get_var_id(self): return self.var_id def get_parent(self): return self.parent def get_children(self): return self.children def set_parent(self, parent): if parent is not None: self.parent = parent self.parent.children.append(self) def set_tree(self, scope, tree): self.tree = [scope, tree] def get_tree(self): return self.tree def is_leaf(self): return len(self.children) == 0 def compute_probs(data, alpha): # # for fast np dot if data.dtype != np.float32: data = data.astype(np.float32) n_features = data.shape[1] n_samples = data.shape[0] j_ones = np.dot(data.T, data) # # to avoid normalization errors (weights of sum nodes have to sum up to 1) j_ones = j_ones.astype(np.float64) j_ones_diag = np.diag(j_ones) cols_diag = j_ones_diag * np.ones((n_features, n_features)) rows_diag = cols_diag.transpose() probs = np.zeros((n_features, 2)) j_probs = np.zeros((n_features, n_features, 2, 2)) probs[:, 1] = (j_ones_diag + 2 * alpha) / (n_samples + 4 * alpha) probs[:, 0] = 1 - probs[:, 1] j_probs[:, :, 0, 0] = n_samples - cols_diag - rows_diag + j_ones + alpha j_probs[:, :, 0, 1] = cols_diag - j_ones + alpha j_probs[:, :, 1, 0] = rows_diag - j_ones + alpha j_probs[:, :, 1, 1] = j_ones + alpha j_probs = j_probs / (n_samples + 4 * alpha) return probs, j_probs def compute_factors(probs, j_probs, tree): factors = np.zeros((probs.shape[0], 2, 2)) root_id = tree.index(ROOT) features = np.arange(probs.shape[0]).tolist() features.remove(root_id) parents = tree.copy() parents.pop(root_id) factors[root_id, 0, 0] = factors[root_id, 1, 0] = probs[root_id, 0] factors[root_id, 0, 1] = factors[root_id, 1, 1] = probs[root_id, 1] factors[features, 0, 0] = j_probs[features, parents, 0, 0] / probs[parents, 0] factors[features, 1, 0] = j_probs[features, parents, 0, 1] / probs[parents, 1] factors[features, 0, 1] = j_probs[features, parents, 1, 0] / probs[parents, 0] factors[features, 1, 1] = j_probs[features, parents, 1, 1] / probs[parents, 1] return factors def compute_mi_by_probs(probs, j_probs, log_j_probs): prod_probs = np.zeros((probs.shape[0], probs.shape[0], 2, 2)) prod_probs[:, :, 0, 0] = np.outer(probs[:, 0], probs[:, 0]) prod_probs[:, :, 0, 1] = np.outer(probs[:, 0], probs[:, 1]) prod_probs[:, :, 1, 0] = np.outer(probs[:, 1], probs[:, 0]) prod_probs[:, :, 1, 1] = np.outer(probs[:, 1], probs[:, 1]) mut_info = np.sum(j_probs * (log_j_probs - np.log(prod_probs)), 
axis=(2, 3)) np.fill_diagonal(mut_info, 0) return mut_info def compute_mi(data, alpha): probs, j_probs = compute_probs(data, alpha) return compute_mi_by_probs(probs, j_probs, np.log(j_probs)) def build_dependency_tree_from_mi(mut_info, scope, root_var=None): if root_var is None: root_var = np.random.choice(scope) else: if root_var not in scope: raise RootVarError() root_id = scope.index(root_var) mst = minimum_spanning_tree(-(mut_info + 1)) dfs_tree = depth_first_order(mst, directed=False, i_start=root_id) tree = dfs_tree[1].tolist() tree[root_id] = ROOT dtree_nodes = {var_id: DTreeNode(var_id) for var_id in scope} parents = np.arange(mut_info.shape[0]).tolist() parents.remove(root_id) for p in parents: dtree_nodes[scope[p]].set_parent(dtree_nodes[scope[tree[p]]]) dtree_nodes[scope[root_id]].set_tree(scope, tree) return dtree_nodes[scope[root_id]] def learn_cltree(data, scope, root_var, alpha): if root_var not in scope: raise RootVarError() probs, j_probs = compute_probs(data, alpha) mut_info = compute_mi_by_probs(probs, j_probs, np.log(j_probs)) dtree_root = build_dependency_tree_from_mi(mut_info, scope, root_var) factors = compute_factors(probs, j_probs, dtree_root.get_tree()[1]) factors_dict = {var: factors[scope.index(var)] for var in scope} return dtree_root, factors_dict def create_factors_dict(data, dtree_root, alpha): probs, j_probs = compute_probs(data, alpha) scope = dtree_root.get_tree()[0] tree = dtree_root.get_tree()[1] factors = compute_factors(probs, j_probs, tree) factors_dict = {var: factors[scope.index(var)] for var in scope} return factors_dict def create_dtree_dict(data_l, cl_parts_l, conj_vars_l, alpha): n_vars = data_l[0].shape[1] sum_mut_info = np.zeros((n_vars, n_vars)) for i in range(len(data_l)): for part in cl_parts_l[i]: mi = compute_mi(part.get_slice(data_l[i]), alpha) sum_mut_info[part.col_ids[:, None], part.col_ids] += mi # # create a dependency tree for each scope in scopes free_vars = list(set(np.arange(n_vars))-set([var for conj_vars in conj_vars_l for var in conj_vars])) if free_vars: scopes = conj_vars_l + [free_vars] else: scopes = conj_vars_l dtrees = [] for scope in scopes: dtrees.append(build_dependency_tree_from_mi(sum_mut_info[scope][:, scope], scope)) scope = dtrees[-1].get_tree()[0].copy() tree = dtrees[-1].get_tree()[1].copy() # # concatenate dtrees for k in reversed(range(0, len(dtrees) - 1)): tree += [t + len(scope) if t != ROOT else t for t in dtrees[k].get_tree()[1]] tree[tree.index(ROOT)] = tree.index(ROOT, len(scope)) scope += dtrees[k].get_tree()[0] dtrees[k].set_tree(scope.copy(), tree.copy()) dtrees[k + 1].set_parent(dtrees[k]) # # return a dictionary of dtrees where keys are scope lengths return {len(dtree.get_tree()[0]): dtree for dtree in dtrees} def is_structured_decomposable(spn, verbose=False): if not is_consistent(spn): return False nodes = get_nodes_by_type(spn) scope_set = set() for n in nodes: if isinstance(n, Product): scope_set.add(tuple(n.scope)) elif isinstance(n, CLTree): vtree = from_dtree_to_vtree(n.dtree_root) scope_set.update([tuple(s) for s in vtree.scopes]) scopes = list(scope_set) scopes = [set(t) for t in scopes] # # ordering is not needed, but useful for printing if verbose: scopes.sort(key=len) for s in scopes: print(s) for i in range(len(scopes)): for j in range(len(scopes)): int_len = len(scopes[i].intersection(scopes[j])) if int_len != 0 and int_len != min(len(scopes[i]), len(scopes[j])): return False return True def circuit_size(spn): nodes = get_nodes_by_type(spn) size = 0 for n in nodes: if isinstance(n, 
Product) or isinstance(n, Sum): size += len(n.children) elif isinstance(n, CLTree): queue = [n.dtree_root] clt_size = 0 while queue: peek_node = queue.pop(0) queue.extend(peek_node.children) if not peek_node.is_leaf(): clt_size += (1 + len(peek_node.children)) * 2 clt_size += 4 size += clt_size - 2 return size def from_dtree_to_vtree(dtree_root): if len(dtree_root.get_tree()[0]) == 1: vtree_root = VTreeNode(var_id=dtree_root.get_tree()[0][0]) vtree_root.set_scopes([dtree_root.get_tree()[0]]) return vtree_root scopes = [] scopes_t = [] last_node_visited = None dtree_stack = [dtree_root] buffer = [] while dtree_stack: peek_dnode = dtree_stack[-1] if not peek_dnode.children or (last_node_visited in peek_dnode.children): if not peek_dnode.is_leaf(): leaves = [] n_parents = 0 for child in peek_dnode.children: if child.is_leaf(): leaves.append(VTreeNode(child.var_id)) else: n_parents += 1 if n_parents: temp_buffer = buffer[-n_parents:] del buffer[-n_parents:] else: temp_buffer = [] vtree_root = VTreeNode(children=leaves + temp_buffer + [VTreeNode(peek_dnode.var_id)]) # # this piece of code store all the scopes of the vtree # ------ scope = [] n_inner_children = 0 for n in vtree_root.children: if not n.is_inner(): scope.append(n.var_id) else: n_inner_children += 1 if n_inner_children: prev_scopes = scopes_t[-n_inner_children:] del scopes_t[-n_inner_children:] scope.extend([v for s in prev_scopes for v in s]) scopes_t.append(scope) scopes.append(scope) # ------ buffer.append(vtree_root) dtree_stack.pop() last_node_visited = peek_dnode else: dtree_stack.extend(peek_dnode.children) vtree_root.set_scopes(scopes) return vtree_root
27.627249
105
0.602773
[ "Apache-2.0" ]
gengala/Random-Probabilistic-Circuits
utils.py
10,747
Python
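A small sketch exercising the Chow-Liu helpers defined above on synthetic binary data; it assumes the repository root is on the import path so that `utils.py` and its `spn`/`error` dependencies resolve.

import numpy as np
from utils import compute_mi, build_dependency_tree_from_mi, learn_cltree

rng = np.random.default_rng(0)
data = rng.integers(0, 2, size=(500, 4)).astype(np.float32)  # binary samples, 4 variables
scope = [0, 1, 2, 3]

mi = compute_mi(data, alpha=0.1)                          # pairwise mutual information matrix
dtree = build_dependency_tree_from_mi(mi, scope, root_var=0)
print(dtree.get_tree())                                   # [scope, parent list] with ROOT (-1) at the chosen root

dtree_root, factors = learn_cltree(data, scope, root_var=0, alpha=0.1)
print(factors[0].shape)                                   # (2, 2) conditional factor for variable 0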
import hashlib import json from datetime import datetime, timedelta import wordai.models as models from flask import Blueprint from flask_jwt_extended import (JWTManager, create_access_token, create_refresh_token, get_jwt_identity, get_raw_jwt, jwt_refresh_token_required, jwt_required) from flask_restful import Api, Resource, abort, reqparse, request from jsonschema import validate blueprint = Blueprint('profile', __name__, template_folder='templates', static_folder='static') api = Api(blueprint) class api_register(object): def __init__(self, path): self.path = path def __call__(self, cls): api.add_resource(cls, self.path) return cls def admin_required(f): def __inner__(self, *args, **kwargs): identify = get_jwt_identity() user = models.User.find_by_username(identify) if user and user.role == 'admin': return f(self, user, *args, **kwargs) return { 'message': 'Not found', }, 404 return jwt_required(__inner__) def user_required(f): def __inner__(self, *args, **kwargs): identify = get_jwt_identity() user = models.User.find_by_username(identify) if user and user.role in ['admin', 'user'] : return f(self, user, *args, **kwargs) return { 'message': 'Not found', }, 404 return jwt_required(__inner__) user_parser = reqparse.RequestParser() user_parser.add_argument('username', help='This username cannot be blank', required=True) user_parser.add_argument('password', help='This password cannot be blank', required=True) @api_register("/registration") class UserRegistration(Resource): def post(self): return {'message': 'User registration'} @api_register("/login") class UserLogin(Resource): def post(self): data = user_parser.parse_args() current_user = models.User.check_user(data['username'], data['password']) if not current_user: abort(401) return { 'message': 'User {} doesn\'t exist'.format(data['username']), } access_token = create_access_token(identity=data['username']) refresh_token = create_refresh_token(identity=data['username']) return { 'message': 'Logged in as {}'.format(current_user.username), 'role': current_user.role, 'access_token': access_token, 'refresh_token': refresh_token } @api_register("/token/refresh") class TokenRefresh(Resource): @jwt_refresh_token_required def post(self): current_user = get_jwt_identity() if current_user: access_token = create_access_token(identity=current_user) return { 'access_token': access_token} abort(401) return {'message': 'invalid refresh token'} @api_register("/wordlist") class WordListList(Resource): @user_required def get(self, user): return [json.loads(x.to_json()) for x in user.wordlists()] @user_required def put(self, user): schema = { "type": "array", "items": {"type": "string"}, "uniqueItems": True } try: body = request.json validate(instance=body, schema=schema) wordok, not_has, wnot_has = models.WordList.check_word(*body) defines = models.Word.search_words(*wordok) return { "defines": {w['word']: w for w in json.loads(defines.to_json())}, "not_dict": wnot_has, "not_sentence": not_has, } except Exception as err: return { "message": "invalid request body", "error": str(err) }, 422 @user_required def post(self, user): schema = { "type": "object", "properties": { "name": {"type": "string"}, "description": {"type": "string"}, "words": { "type": "array", "items": {"type": "string"}, "uniqueItems": True } } } try: body = request.json validate(instance=body, schema=schema) wordok, not_has, wnot_has = models.WordList.check_word(*body['words']) body['words'] = list(wordok) wordlist = models.WordList(**body) wordlist.user = user wordlist.save() return { 
"message": "ok", "has": list(wordok), "not_dict": wnot_has, "not_sentence": not_has, } except Exception as err: return { "message": "invalid request body", "error": str(err) }, 422 @api_register("/wordlist/<string:lid>") class WordListItem(Resource): @user_required def get(self, user, lid): print(lid) return json.loads(user.wordlists().filter(id=lid).first().to_json()) @user_required def put(self, user, lid): wordlist = models.WordList.objects(user=user, id=lid).first() if not wordlist: return { "message": "wordlist not exists", }, 404 schema = { "type": "object", "properties": { "name": {"type": "string"}, "description": {"type": "string"}, "words": { "type": "array", "items": {"type": "string"}, "uniqueItems": True } } } try: body = request.json validate(instance=body, schema=schema) wordok, not_has, wnot_has = models.WordList.check_word(*body['words']) wordlist.words = wordok wordlist.name = body['name'] wordlist.description = body['description'] wordlist.user = user wordlist.save() return { "message": "ok", "has": list(wordok), "not_dict": wnot_has, "not_sentence": not_has, } except Exception as err: return { "message": "invalid request body", "error": str(err) }, 422 @user_required def delete(self, user, lid): wordlist = models.WordList.objects(user=user, id=lid).first() if not wordlist: return { "message": "wordlist not exists", }, 404 wordlist.delete() @api_register("/user/wordlist") class UserWordList(Resource): @user_required def get(self, user): if not user.wordlist: return { "message", "wordlist not set" }, 404 data = json.loads(user.wordlist.to_json()) return { "message": "ok", "wordlist": data['id'], "wordlist_name": data['name'] } @user_required def post(self, user): parser = reqparse.RequestParser() parser.add_argument('wordlist', help='This wordlist cannot be blank', required=True) wordlist_id = parser.parse_args() wordlist = models.WordList.objects(id=wordlist_id['wordlist']).first() user.wordlist = wordlist user.save() return { "message": "ok", "wordlist": wordlist.name } @api_register("/learn/word") class LearnNext(Resource): @user_required def get(self, user): ex = user.next_exercise() if ex: sentence_id = json.loads(ex.sentence.to_json())['id'] word_id = json.loads(ex.word.to_json())['id'] return { "id": word_id, "word": ex.word.word, "message": "ok", "cloze": ex.cloze, "cn": ex.sentence.chn, "sid": sentence_id, "answers": [a for a in ex.answers], "check": [hashlib.sha1((a+sentence_id+word_id).encode()).hexdigest() for a in ex.answers] } else: return { "message": "no word need exercise" }, 404 @user_required def post(self, user): parser = reqparse.RequestParser() parser.add_argument('id', help='This answers cannot be blank', required=True) parser.add_argument('sid', help='This answers cannot be blank', required=True) parser.add_argument('answers', help='This answers cannot be blank', required=True,action='append') parser.add_argument('check', help='This answer_check cannot be blank', required=True, action='append') data = parser.parse_args() word_id = data['id'] word = models.Word.objects(id=word_id).first() if not word: return { "message": "word not exist" }, 404 sentence_id = data['sid'] answers = data['answers'] check = data['check'] check_res = [hashlib.sha1((a+sentence_id+word_id).encode()).hexdigest() for a in answers] result = check == check_res slog = models.SentenceLog(sentence=sentence_id, result=result, time=datetime.utcnow()) models.ExerciseLog.objects(user=user, word=word).update_one( push__sentences=slog, wordname=word.word, upsert=True) log = 
models.ExerciseLog.objects(user=user, word=word).first() log.calucate_review() log.save() return { "message": "ok", "result": result, } @api_register("/dictionary/<string:word>") class Dictionary(Resource): @user_required def get(self, user, word): define = models.Word.objects(word=word).first() if define: return json.loads(define.to_json()) else: return {"message": "not found"}, 404 @api_register("/wordlist/learned") class WordlistLearned(Resource): @user_required def get(self, user): words = user.wordlist.user_learned(user).only("wordname", "review") return json.loads(words.to_json()) @api_register("/wordlist/to_learn") class WordlistToLearn(Resource): @user_required def get(self, user): words = user.wordlist.user_to_learn(user) return words @api_register("/statistic/learn") class StatisticLearn(Resource): @user_required def get(self, user): return { 'exercise': models.ExerciseLog.exercise_count( user, datetime.now()-timedelta(days=7), datetime.now()+timedelta(days=7) ), 'review': models.ExerciseLog.review_count( user, datetime.now()-timedelta(days=7), datetime.now()+timedelta(days=7) ) }
33.101493
110
0.546307
[ "MIT" ]
archichen/wordai
wordai/api/apis.py
11,089
Python
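A sketch of mounting the blueprint above on a Flask application; the secret key and URL prefix are placeholders, and the database setup required by `wordai.models` is assumed to be configured elsewhere.

from flask import Flask
from flask_jwt_extended import JWTManager
from wordai.api.apis import blueprint  # module path per the entry above

app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = "change-me"            # placeholder secret
JWTManager(app)
app.register_blueprint(blueprint, url_prefix="/api")  # prefix is an assumption

if __name__ == "__main__":
    app.run(debug=True)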
# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ import argparse import os import glob import numpy as np import PIL.Image as Image parser = argparse.ArgumentParser() parser.add_argument('--out_dir', type=str, required=True, help='directory to store the image with noise') parser.add_argument('--image_path', type=str, required=True, help='directory of image to add noise') parser.add_argument('--channel', type=int, default=3 , help='image channel, 3 for color, 1 for gray') parser.add_argument('--sigma', type=int, default=15, help='level of noise') args = parser.parse_args() def add_noise(out_dir, image_path, channel, sigma): file_list = glob.glob(image_path+'*') # image_path must end by '/' if not os.path.exists(out_dir): os.makedirs(out_dir) for file in file_list: print("Adding noise to: ", file) # read image if channel == 3: img_clean = np.array(Image.open(file), dtype='float32') / 255.0 else: img_clean = np.expand_dims(np.array(Image.open(file).convert('L'), dtype='float32') / 255.0, axis=2) np.random.seed(0) #obtain the same random data when it is in the test phase img_test = img_clean + np.random.normal(0, sigma/255.0, img_clean.shape).astype(np.float32)#HWC img_test = np.expand_dims(img_test.transpose((2, 0, 1)), 0)#NCHW #img_test = np.clip(img_test, 0, 1) filename = file.split('/')[-1].split('.')[0] # get the name of image file img_test.tofile(os.path.join(out_dir, filename+'_noise.bin')) if __name__ == "__main__": add_noise(args.out_dir, args.image_path, args.channel, args.sigma)
43.074074
112
0.656492
[ "Apache-2.0" ]
Li-kewei/models
official/cv/brdnet/preprocess.py
2,411
Python
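Equivalent to the CLI above but calling `add_noise` directly; the directory names are placeholders (note that `image_path` must end with a slash, per the comment in the source).

from preprocess import add_noise   # assumes preprocess.py is on the path

# same as: python preprocess.py --out_dir ./noisy_bins --image_path ./test_images/ --channel 3 --sigma 15
add_noise(out_dir="./noisy_bins", image_path="./test_images/", channel=3, sigma=15)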
from __future__ import absolute_import # Copyright (c) 2010-2019 openpyxl import pytest from io import BytesIO from zipfile import ZipFile from openpyxl.xml.functions import fromstring, tostring from openpyxl.tests.helper import compare_xml from ..manifest import WORKSHEET_TYPE @pytest.fixture def FileExtension(): from ..manifest import FileExtension return FileExtension class TestFileExtension: def test_ctor(self, FileExtension): ext = FileExtension( ContentType="application/xml", Extension="xml" ) xml = tostring(ext.to_tree()) expected = """ <Default ContentType="application/xml" Extension="xml"/> """ diff = compare_xml(xml, expected) assert diff is None, diff def test_from_xml(self, FileExtension): src = """ <Default ContentType="application/xml" Extension="xml"/> """ node = fromstring(src) ext = FileExtension.from_tree(node) assert ext == FileExtension(ContentType="application/xml", Extension="xml") @pytest.fixture def Override(): from ..manifest import Override return Override class TestOverride: def test_ctor(self, Override): override = Override( ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml", PartName="/xl/workbook.xml" ) xml = tostring(override.to_tree()) expected = """ <Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml" PartName="/xl/workbook.xml"/> """ diff = compare_xml(xml, expected) assert diff is None, diff def test_from_xml(self, Override): src = """ <Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml" PartName="/xl/workbook.xml"/> """ node = fromstring(src) override = Override.from_tree(node) assert override == Override( ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml", PartName="/xl/workbook.xml" ) @pytest.fixture def Manifest(): from ..manifest import Manifest return Manifest class TestManifest: def test_ctor(self, Manifest): manifest = Manifest() xml = tostring(manifest.to_tree()) expected = """ <Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types"> <Default ContentType="application/vnd.openxmlformats-package.relationships+xml" Extension="rels" /> <Default ContentType="application/xml" Extension="xml" /> <Override ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml" PartName="/xl/styles.xml"/> <Override ContentType="application/vnd.openxmlformats-officedocument.theme+xml" PartName="/xl/theme/theme1.xml"/> <Override ContentType="application/vnd.openxmlformats-package.core-properties+xml" PartName="/docProps/core.xml"/> <Override ContentType="application/vnd.openxmlformats-officedocument.extended-properties+xml" PartName="/docProps/app.xml"/> </Types> """ diff = compare_xml(xml, expected) assert diff is None, diff def test_from_xml(self, datadir, Manifest): datadir.chdir() with open("manifest.xml") as src: node = fromstring(src.read()) manifest = Manifest.from_tree(node) assert len(manifest.Default) == 2 defaults = [ ("application/xml", 'xml'), ("application/vnd.openxmlformats-package.relationships+xml", 'rels'), ] assert [(ct.ContentType, ct.Extension) for ct in manifest.Default] == defaults overrides = [ ('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml', '/xl/workbook.xml'), ('application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml', '/xl/worksheets/sheet1.xml'), ('application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml', '/xl/chartsheets/sheet1.xml'), 
('application/vnd.openxmlformats-officedocument.theme+xml', '/xl/theme/theme1.xml'), ('application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml', '/xl/styles.xml'), ('application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml', '/xl/sharedStrings.xml'), ('application/vnd.openxmlformats-officedocument.drawing+xml', '/xl/drawings/drawing1.xml'), ('application/vnd.openxmlformats-officedocument.drawingml.chart+xml', '/xl/charts/chart1.xml'), ('application/vnd.openxmlformats-package.core-properties+xml', '/docProps/core.xml'), ('application/vnd.openxmlformats-officedocument.extended-properties+xml', '/docProps/app.xml') ] assert [(ct.ContentType, ct.PartName) for ct in manifest.Override] == overrides def test_filenames(self, datadir, Manifest): datadir.chdir() with open("manifest.xml") as src: node = fromstring(src.read()) manifest = Manifest.from_tree(node) assert manifest.filenames == [ '/xl/workbook.xml', '/xl/worksheets/sheet1.xml', '/xl/chartsheets/sheet1.xml', '/xl/theme/theme1.xml', '/xl/styles.xml', '/xl/sharedStrings.xml', '/xl/drawings/drawing1.xml', '/xl/charts/chart1.xml', '/docProps/core.xml', '/docProps/app.xml', ] def test_exts(self, datadir, Manifest): datadir.chdir() with open("manifest.xml") as src: node = fromstring(src.read()) manifest = Manifest.from_tree(node) assert manifest.extensions == [ ('xml', 'application/xml'), ] def test_no_dupe_overrides(self, Manifest): manifest = Manifest() assert len(manifest.Override) == 4 manifest.Override.append("a") manifest.Override.append("a") assert len(manifest.Override) == 5 def test_no_dupe_types(self, Manifest): manifest = Manifest() assert len(manifest.Default) == 2 manifest.Default.append("a") manifest.Default.append("a") assert len(manifest.Default) == 3 def test_append(self, Manifest): from openpyxl import Workbook wb = Workbook() ws = wb.active manifest = Manifest() manifest.append(ws) assert len(manifest.Override) == 5 def test_write(self, Manifest): mf = Manifest() from openpyxl import Workbook wb = Workbook() archive = ZipFile(BytesIO(), "w") mf._write(archive, wb) assert "/xl/workbook.xml" in mf.filenames @pytest.mark.parametrize("file, registration", [ ('xl/media/image1.png', '<Default ContentType="image/png" Extension="png" />'), ('xl/drawings/commentsDrawing.vml', '<Default ContentType="application/vnd.openxmlformats-officedocument.vmlDrawing" Extension="vml" />'), ] ) def test_media(self, Manifest, file, registration): from openpyxl import Workbook wb = Workbook() manifest = Manifest() manifest._register_mimetypes([file]) xml = tostring(manifest.Default[-1].to_tree()) diff = compare_xml(xml, registration) assert diff is None, diff def test_vba(self, datadir, Manifest): datadir.chdir() from openpyxl import load_workbook wb = load_workbook('sample.xlsm', keep_vba=True) manifest = Manifest() manifest._write_vba(wb) partnames = set([t.PartName for t in manifest.Override]) expected = set([ '/xl/workbook.xml', '/xl/worksheets/sheet1.xml', '/xl/worksheets/sheet2.xml', '/xl/worksheets/sheet3.xml', '/xl/theme/theme1.xml', '/xl/styles.xml', '/docProps/core.xml', '/docProps/app.xml', ]) assert partnames == expected def test_no_defaults(self, Manifest): """ LibreOffice does not use the Default element """ xml = """ <Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types"> <Override PartName="/_rels/.rels" ContentType="application/vnd.openxmlformats-package.relationships+xml"/> </Types> """ node = fromstring(xml) manifest = Manifest.from_tree(node) exts = manifest.extensions assert 
exts == [] def test_find(self, datadir, Manifest): datadir.chdir() with open("manifest.xml", "rb") as src: xml = src.read() tree = fromstring(xml) manifest = Manifest.from_tree(tree) ws = manifest.find(WORKSHEET_TYPE) assert ws.PartName == "/xl/worksheets/sheet1.xml" def test_find_none(self, Manifest): manifest = Manifest() assert manifest.find(WORKSHEET_TYPE) is None def test_findall(self, datadir, Manifest): datadir.chdir() with open("manifest.xml", "rb") as src: xml = src.read() tree = fromstring(xml) manifest = Manifest.from_tree(tree) sheets = manifest.findall(WORKSHEET_TYPE) assert len(list(sheets)) == 1
34.188153
135
0.603139
[ "MIT" ]
chenc2/openpyxl
openpyxl/packaging/tests/test_manifest.py
9,812
Python
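A tiny sketch of the behaviour the tests above exercise, using the public `openpyxl.packaging.manifest` module; the expected counts follow `test_no_dupe_types`, `test_no_dupe_overrides` and `test_append`.

from openpyxl import Workbook
from openpyxl.packaging.manifest import Manifest

wb = Workbook()
manifest = Manifest()
manifest.append(wb.active)        # registers an Override for the worksheet, as in test_append
print(len(manifest.Default))      # 2 default extensions (xml, rels)
print(len(manifest.Override))     # 4 built-in overrides + the appended worksheet = 5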
from .engine_input import EngineInput from ..announcements import gen_victim_prefix_ann class ValidPrefix(EngineInput): __slots__ = () def _get_announcements(self, **extra_ann_kwargs): return [gen_victim_prefix_ann(self.AnnCls, self.victim_asn, **extra_ann_kwargs)]
30.083333
58
0.612188
[ "BSD-3-Clause" ]
jfuruness/lib_bgp_simulator
lib_bgp_simulator/engine_input/valid_prefix.py
361
Python
import asyncio import logging import ssl import time import traceback from ipaddress import IPv6Address, ip_address, ip_network, IPv4Network, IPv6Network from pathlib import Path from secrets import token_bytes from typing import Any, Callable, Dict, List, Optional, Union, Set, Tuple from aiohttp import ClientSession, ClientTimeout, ServerDisconnectedError, WSCloseCode, client_exceptions, web from aiohttp.web_app import Application from aiohttp.web_runner import TCPSite from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from greenberry.protocols.protocol_message_types import ProtocolMessageTypes from greenberry.protocols.shared_protocol import protocol_version from greenberry.server.introducer_peers import IntroducerPeers from greenberry.server.outbound_message import Message, NodeType from greenberry.server.ssl_context import private_ssl_paths, public_ssl_paths from greenberry.server.ws_connection import WSGreenBerryConnection from greenberry.types.blockchain_format.sized_bytes import bytes32 from greenberry.types.peer_info import PeerInfo from greenberry.util.errors import Err, ProtocolError from greenberry.util.ints import uint16 from greenberry.util.network import is_localhost, is_in_network def ssl_context_for_server( ca_cert: Path, ca_key: Path, private_cert_path: Path, private_key_path: Path ) -> Optional[ssl.SSLContext]: ssl_context = ssl._create_unverified_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=str(ca_cert)) ssl_context.check_hostname = False ssl_context.load_cert_chain(certfile=str(private_cert_path), keyfile=str(private_key_path)) ssl_context.verify_mode = ssl.CERT_REQUIRED return ssl_context def ssl_context_for_root( ca_cert_file: str, ) -> Optional[ssl.SSLContext]: ssl_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_cert_file) return ssl_context def ssl_context_for_client( ca_cert: Path, ca_key: Path, private_cert_path: Path, private_key_path: Path, ) -> Optional[ssl.SSLContext]: ssl_context = ssl._create_unverified_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=str(ca_cert)) ssl_context.check_hostname = False ssl_context.load_cert_chain(certfile=str(private_cert_path), keyfile=str(private_key_path)) ssl_context.verify_mode = ssl.CERT_REQUIRED return ssl_context class GreenBerryServer: def __init__( self, port: int, node: Any, api: Any, local_type: NodeType, ping_interval: int, network_id: str, inbound_rate_limit_percent: int, outbound_rate_limit_percent: int, root_path: Path, config: Dict, private_ca_crt_key: Tuple[Path, Path], greenberry_ca_crt_key: Tuple[Path, Path], name: str = None, introducer_peers: Optional[IntroducerPeers] = None, ): # Keeps track of all connections to and from this node. 
logging.basicConfig(level=logging.DEBUG) self.all_connections: Dict[bytes32, WSGreenBerryConnection] = {} self.tasks: Set[asyncio.Task] = set() self.connection_by_type: Dict[NodeType, Dict[bytes32, WSGreenBerryConnection]] = { NodeType.FULL_NODE: {}, NodeType.WALLET: {}, NodeType.HARVESTER: {}, NodeType.FARMER: {}, NodeType.TIMELORD: {}, NodeType.INTRODUCER: {}, } self._port = port # TCP port to identify our node self._local_type: NodeType = local_type self._ping_interval = ping_interval self._network_id = network_id self._inbound_rate_limit_percent = inbound_rate_limit_percent self._outbound_rate_limit_percent = outbound_rate_limit_percent # Task list to keep references to tasks, so they don't get GCd self._tasks: List[asyncio.Task] = [] if name: self.log = logging.getLogger(name) else: self.log = logging.getLogger(__name__) # Our unique random node id that we will send to other peers, regenerated on launch self.api = api self.node = node self.root_path = root_path self.config = config self.on_connect: Optional[Callable] = None self.incoming_messages: asyncio.Queue = asyncio.Queue() self.shut_down_event = asyncio.Event() if self._local_type is NodeType.INTRODUCER: self.introducer_peers = IntroducerPeers() if self._local_type is not NodeType.INTRODUCER: self._private_cert_path, self._private_key_path = private_ssl_paths(root_path, config) if self._local_type is not NodeType.HARVESTER: self.p2p_crt_path, self.p2p_key_path = public_ssl_paths(root_path, config) else: self.p2p_crt_path, self.p2p_key_path = None, None self.ca_private_crt_path, self.ca_private_key_path = private_ca_crt_key self.greenberry_ca_crt_path, self.greenberry_ca_key_path = greenberry_ca_crt_key self.node_id = self.my_id() self.incoming_task = asyncio.create_task(self.incoming_api_task()) self.gc_task: asyncio.Task = asyncio.create_task(self.garbage_collect_connections_task()) self.app: Optional[Application] = None self.runner: Optional[web.AppRunner] = None self.site: Optional[TCPSite] = None self.connection_close_task: Optional[asyncio.Task] = None self.site_shutdown_task: Optional[asyncio.Task] = None self.app_shut_down_task: Optional[asyncio.Task] = None self.received_message_callback: Optional[Callable] = None self.api_tasks: Dict[bytes32, asyncio.Task] = {} self.execute_tasks: Set[bytes32] = set() self.tasks_from_peer: Dict[bytes32, Set[bytes32]] = {} self.banned_peers: Dict[str, float] = {} self.invalid_protocol_ban_seconds = 10 self.api_exception_ban_seconds = 10 self.exempt_peer_networks: List[Union[IPv4Network, IPv6Network]] = [ ip_network(net, strict=False) for net in config.get("exempt_peer_networks", []) ] def my_id(self) -> bytes32: """If node has public cert use that one for id, if not use private.""" if self.p2p_crt_path is not None: pem_cert = x509.load_pem_x509_certificate(self.p2p_crt_path.read_bytes(), default_backend()) else: pem_cert = x509.load_pem_x509_certificate(self._private_cert_path.read_bytes(), default_backend()) der_cert_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER) der_cert = x509.load_der_x509_certificate(der_cert_bytes, default_backend()) return bytes32(der_cert.fingerprint(hashes.SHA256())) def set_received_message_callback(self, callback: Callable): self.received_message_callback = callback async def garbage_collect_connections_task(self) -> None: """ Periodically checks for connections with no activity (have not sent us any data), and removes them, to allow room for other peers. 
""" while True: await asyncio.sleep(600) to_remove: List[WSGreenBerryConnection] = [] for connection in self.all_connections.values(): if self._local_type == NodeType.FULL_NODE and connection.connection_type == NodeType.FULL_NODE: if time.time() - connection.last_message_time > 1800: to_remove.append(connection) for connection in to_remove: self.log.debug(f"Garbage collecting connection {connection.peer_host} due to inactivity") await connection.close() # Also garbage collect banned_peers dict to_remove_ban = [] for peer_ip, ban_until_time in self.banned_peers.items(): if time.time() > ban_until_time: to_remove_ban.append(peer_ip) for peer_ip in to_remove_ban: del self.banned_peers[peer_ip] async def start_server(self, on_connect: Callable = None): if self._local_type in [NodeType.WALLET, NodeType.HARVESTER, NodeType.TIMELORD]: return None self.app = web.Application() self.on_connect = on_connect routes = [ web.get("/ws", self.incoming_connection), ] self.app.add_routes(routes) self.runner = web.AppRunner(self.app, access_log=None, logger=self.log) await self.runner.setup() authenticate = self._local_type not in (NodeType.FULL_NODE, NodeType.INTRODUCER) if authenticate: ssl_context = ssl_context_for_server( self.ca_private_crt_path, self.ca_private_key_path, self._private_cert_path, self._private_key_path ) else: self.p2p_crt_path, self.p2p_key_path = public_ssl_paths(self.root_path, self.config) ssl_context = ssl_context_for_server( self.greenberry_ca_crt_path, self.greenberry_ca_key_path, self.p2p_crt_path, self.p2p_key_path ) self.site = web.TCPSite( self.runner, port=self._port, shutdown_timeout=3, ssl_context=ssl_context, ) await self.site.start() self.log.info(f"Started listening on port: {self._port}") async def incoming_connection(self, request): if request.remote in self.banned_peers and time.time() < self.banned_peers[request.remote]: self.log.warning(f"Peer {request.remote} is banned, refusing connection") return None ws = web.WebSocketResponse(max_msg_size=50 * 1024 * 1024) await ws.prepare(request) close_event = asyncio.Event() cert_bytes = request.transport._ssl_protocol._extra["ssl_object"].getpeercert(True) der_cert = x509.load_der_x509_certificate(cert_bytes) peer_id = bytes32(der_cert.fingerprint(hashes.SHA256())) if peer_id == self.node_id: return ws connection: Optional[WSGreenBerryConnection] = None try: connection = WSGreenBerryConnection( self._local_type, ws, self._port, self.log, False, False, request.remote, self.incoming_messages, self.connection_closed, peer_id, self._inbound_rate_limit_percent, self._outbound_rate_limit_percent, close_event, ) handshake = await connection.perform_handshake( self._network_id, protocol_version, self._port, self._local_type, ) assert handshake is True # Limit inbound connections to config's specifications. if not self.accept_inbound_connections(connection.connection_type) and not is_in_network( connection.peer_host, self.exempt_peer_networks ): self.log.info(f"Not accepting inbound connection: {connection.get_peer_info()}.Inbound limit reached.") await connection.close() close_event.set() else: await self.connection_added(connection, self.on_connect) if self._local_type is NodeType.INTRODUCER and connection.connection_type is NodeType.FULL_NODE: self.introducer_peers.add(connection.get_peer_info()) except ProtocolError as e: if connection is not None: await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code) if e.code == Err.INVALID_HANDSHAKE: self.log.warning("Invalid handshake with peer. 
Maybe the peer is running old software.") close_event.set() elif e.code == Err.INCOMPATIBLE_NETWORK_ID: self.log.warning("Incompatible network ID. Maybe the peer is on another network") close_event.set() elif e.code == Err.SELF_CONNECTION: close_event.set() else: error_stack = traceback.format_exc() self.log.error(f"Exception {e}, exception Stack: {error_stack}") close_event.set() except Exception as e: if connection is not None: await connection.close(ws_close_code=WSCloseCode.PROTOCOL_ERROR, error=Err.UNKNOWN) error_stack = traceback.format_exc() self.log.error(f"Exception {e}, exception Stack: {error_stack}") close_event.set() await close_event.wait() return ws async def connection_added(self, connection: WSGreenBerryConnection, on_connect=None): # If we already had a connection to this peer_id, close the old one. This is secure because peer_ids are based # on TLS public keys if connection.peer_node_id in self.all_connections: con = self.all_connections[connection.peer_node_id] await con.close() self.all_connections[connection.peer_node_id] = connection if connection.connection_type is not None: self.connection_by_type[connection.connection_type][connection.peer_node_id] = connection if on_connect is not None: await on_connect(connection) else: self.log.error(f"Invalid connection type for connection {connection}") def is_duplicate_or_self_connection(self, target_node: PeerInfo) -> bool: if is_localhost(target_node.host) and target_node.port == self._port: # Don't connect to self self.log.debug(f"Not connecting to {target_node}") return True for connection in self.all_connections.values(): if connection.host == target_node.host and connection.peer_server_port == target_node.port: self.log.debug(f"Not connecting to {target_node}, duplicate connection") return True return False async def start_client( self, target_node: PeerInfo, on_connect: Callable = None, auth: bool = False, is_feeler: bool = False, ) -> bool: """ Tries to connect to the target node, adding one connection into the pipeline, if successful. An on connect method can also be specified, and this will be saved into the instance variables. """ if self.is_duplicate_or_self_connection(target_node): return False if target_node.host in self.banned_peers and time.time() < self.banned_peers[target_node.host]: self.log.warning(f"Peer {target_node.host} is still banned, not connecting to it") return False if auth: ssl_context = ssl_context_for_client( self.ca_private_crt_path, self.ca_private_key_path, self._private_cert_path, self._private_key_path ) else: ssl_context = ssl_context_for_client( self.greenberry_ca_crt_path, self.greenberry_ca_key_path, self.p2p_crt_path, self.p2p_key_path ) session = None connection: Optional[WSGreenBerryConnection] = None try: timeout = ClientTimeout(total=30) session = ClientSession(timeout=timeout) try: if type(ip_address(target_node.host)) is IPv6Address: target_node = PeerInfo(f"[{target_node.host}]", target_node.port) except ValueError: pass url = f"wss://{target_node.host}:{target_node.port}/ws" self.log.debug(f"Connecting: {url}, Peer info: {target_node}") try: ws = await session.ws_connect( url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=50 * 1024 * 1024 ) except ServerDisconnectedError: self.log.debug(f"Server disconnected error connecting to {url}. 
Perhaps we are banned by the peer.") await session.close() return False except asyncio.TimeoutError: self.log.debug(f"Timeout error connecting to {url}") await session.close() return False if ws is not None: assert ws._response.connection is not None and ws._response.connection.transport is not None transport = ws._response.connection.transport # type: ignore cert_bytes = transport._ssl_protocol._extra["ssl_object"].getpeercert(True) # type: ignore der_cert = x509.load_der_x509_certificate(cert_bytes, default_backend()) peer_id = bytes32(der_cert.fingerprint(hashes.SHA256())) if peer_id == self.node_id: raise RuntimeError(f"Trying to connect to a peer ({target_node}) with the same peer_id: {peer_id}") connection = WSGreenBerryConnection( self._local_type, ws, self._port, self.log, True, False, target_node.host, self.incoming_messages, self.connection_closed, peer_id, self._inbound_rate_limit_percent, self._outbound_rate_limit_percent, session=session, ) handshake = await connection.perform_handshake( self._network_id, protocol_version, self._port, self._local_type, ) assert handshake is True await self.connection_added(connection, on_connect) connection_type_str = "" if connection.connection_type is not None: connection_type_str = connection.connection_type.name.lower() self.log.info(f"Connected with {connection_type_str} {target_node}") if is_feeler: asyncio.create_task(connection.close()) return True else: await session.close() return False except client_exceptions.ClientConnectorError as e: self.log.info(f"{e}") except ProtocolError as e: if connection is not None: await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code) if e.code == Err.INVALID_HANDSHAKE: self.log.warning(f"Invalid handshake with peer {target_node}. Maybe the peer is running old software.") elif e.code == Err.INCOMPATIBLE_NETWORK_ID: self.log.warning("Incompatible network ID. 
Maybe the peer is on another network") elif e.code == Err.SELF_CONNECTION: pass else: error_stack = traceback.format_exc() self.log.error(f"Exception {e}, exception Stack: {error_stack}") except Exception as e: if connection is not None: await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN) error_stack = traceback.format_exc() self.log.error(f"Exception {e}, exception Stack: {error_stack}") if session is not None: await session.close() return False def connection_closed(self, connection: WSGreenBerryConnection, ban_time: int): if is_localhost(connection.peer_host) and ban_time != 0: self.log.warning(f"Trying to ban localhost for {ban_time}, but will not ban") ban_time = 0 self.log.info(f"Connection closed: {connection.peer_host}, node id: {connection.peer_node_id}") if ban_time > 0: ban_until: float = time.time() + ban_time self.log.warning(f"Banning {connection.peer_host} for {ban_time} seconds") if connection.peer_host in self.banned_peers: if ban_until > self.banned_peers[connection.peer_host]: self.banned_peers[connection.peer_host] = ban_until else: self.banned_peers[connection.peer_host] = ban_until if connection.peer_node_id in self.all_connections: self.all_connections.pop(connection.peer_node_id) if connection.connection_type is not None: if connection.peer_node_id in self.connection_by_type[connection.connection_type]: self.connection_by_type[connection.connection_type].pop(connection.peer_node_id) else: # This means the handshake was enver finished with this peer self.log.debug( f"Invalid connection type for connection {connection.peer_host}," f" while closing. Handshake never finished." ) on_disconnect = getattr(self.node, "on_disconnect", None) if on_disconnect is not None: on_disconnect(connection) self.cancel_tasks_from_peer(connection.peer_node_id) def cancel_tasks_from_peer(self, peer_id: bytes32): if peer_id not in self.tasks_from_peer: return None task_ids = self.tasks_from_peer[peer_id] for task_id in task_ids: if task_id in self.execute_tasks: continue task = self.api_tasks[task_id] task.cancel() async def incoming_api_task(self) -> None: self.tasks = set() while True: payload_inc, connection_inc = await self.incoming_messages.get() if payload_inc is None or connection_inc is None: continue async def api_call(full_message: Message, connection: WSGreenBerryConnection, task_id): start_time = time.time() try: if self.received_message_callback is not None: await self.received_message_callback(connection) connection.log.debug( f"<- {ProtocolMessageTypes(full_message.type).name} from peer " f"{connection.peer_node_id} {connection.peer_host}" ) message_type: str = ProtocolMessageTypes(full_message.type).name f = getattr(self.api, message_type, None) if f is None: self.log.error(f"Non existing function: {message_type}") raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [message_type]) if not hasattr(f, "api_function"): self.log.error(f"Peer trying to call non api function {message_type}") raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [message_type]) # If api is not ready ignore the request if hasattr(self.api, "api_ready"): if self.api.api_ready is False: return None timeout: Optional[int] = 600 if hasattr(f, "execute_task"): # Don't timeout on methods with execute_task decorator, these need to run fully self.execute_tasks.add(task_id) timeout = None if hasattr(f, "peer_required"): coroutine = f(full_message.data, connection) else: coroutine = f(full_message.data) async def wrapped_coroutine() -> Optional[Message]: try: result = 
await coroutine return result except asyncio.CancelledError: pass except Exception as e: tb = traceback.format_exc() connection.log.error(f"Exception: {e}, {connection.get_peer_info()}. {tb}") raise e return None response: Optional[Message] = await asyncio.wait_for(wrapped_coroutine(), timeout=timeout) connection.log.debug( f"Time taken to process {message_type} from {connection.peer_node_id} is " f"{time.time() - start_time} seconds" ) if response is not None: response_message = Message(response.type, full_message.id, response.data) await connection.reply_to_request(response_message) except Exception as e: if self.connection_close_task is None: tb = traceback.format_exc() connection.log.error( f"Exception: {e} {type(e)}, closing connection {connection.get_peer_info()}. {tb}" ) else: connection.log.debug(f"Exception: {e} while closing connection") # TODO: actually throw one of the errors from errors.py and pass this to close await connection.close(self.api_exception_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN) finally: if task_id in self.api_tasks: self.api_tasks.pop(task_id) if task_id in self.tasks_from_peer[connection.peer_node_id]: self.tasks_from_peer[connection.peer_node_id].remove(task_id) if task_id in self.execute_tasks: self.execute_tasks.remove(task_id) task_id = token_bytes() api_task = asyncio.create_task(api_call(payload_inc, connection_inc, task_id)) self.api_tasks[task_id] = api_task if connection_inc.peer_node_id not in self.tasks_from_peer: self.tasks_from_peer[connection_inc.peer_node_id] = set() self.tasks_from_peer[connection_inc.peer_node_id].add(task_id) async def send_to_others( self, messages: List[Message], node_type: NodeType, origin_peer: WSGreenBerryConnection, ): for node_id, connection in self.all_connections.items(): if node_id == origin_peer.peer_node_id: continue if connection.connection_type is node_type: for message in messages: await connection.send_message(message) async def send_to_all(self, messages: List[Message], node_type: NodeType): for _, connection in self.all_connections.items(): if connection.connection_type is node_type: for message in messages: await connection.send_message(message) async def send_to_all_except(self, messages: List[Message], node_type: NodeType, exclude: bytes32): for _, connection in self.all_connections.items(): if connection.connection_type is node_type and connection.peer_node_id != exclude: for message in messages: await connection.send_message(message) async def send_to_specific(self, messages: List[Message], node_id: bytes32): if node_id in self.all_connections: connection = self.all_connections[node_id] for message in messages: await connection.send_message(message) def get_outgoing_connections(self) -> List[WSGreenBerryConnection]: result = [] for _, connection in self.all_connections.items(): if connection.is_outbound: result.append(connection) return result def get_full_node_outgoing_connections(self) -> List[WSGreenBerryConnection]: result = [] connections = self.get_full_node_connections() for connection in connections: if connection.is_outbound: result.append(connection) return result def get_full_node_connections(self) -> List[WSGreenBerryConnection]: return list(self.connection_by_type[NodeType.FULL_NODE].values()) def get_connections(self) -> List[WSGreenBerryConnection]: result = [] for _, connection in self.all_connections.items(): result.append(connection) return result async def close_all_connections(self) -> None: keys = [a for a, b in self.all_connections.items()] for node_id in keys: try: if 
node_id in self.all_connections: connection = self.all_connections[node_id] await connection.close() except Exception as e: self.log.error(f"Exception while closing connection {e}") def close_all(self) -> None: self.connection_close_task = asyncio.create_task(self.close_all_connections()) if self.runner is not None: self.site_shutdown_task = asyncio.create_task(self.runner.cleanup()) if self.app is not None: self.app_shut_down_task = asyncio.create_task(self.app.shutdown()) for task_id, task in self.api_tasks.items(): task.cancel() self.shut_down_event.set() self.incoming_task.cancel() self.gc_task.cancel() async def await_closed(self) -> None: self.log.debug("Await Closed") await self.shut_down_event.wait() if self.connection_close_task is not None: await self.connection_close_task if self.app_shut_down_task is not None: await self.app_shut_down_task if self.site_shutdown_task is not None: await self.site_shutdown_task async def get_peer_info(self) -> Optional[PeerInfo]: ip = None port = self._port try: async with ClientSession() as session: async with session.get("https://checkip.amazonaws.com/") as resp: if resp.status == 200: ip = str(await resp.text()) ip = ip.rstrip() except Exception: ip = None if ip is None: return None peer = PeerInfo(ip, uint16(port)) if not peer.is_valid(): return None return peer def accept_inbound_connections(self, node_type: NodeType) -> bool: if not self._local_type == NodeType.FULL_NODE: return True inbound_count = len([conn for _, conn in self.connection_by_type[node_type].items() if not conn.is_outbound]) if node_type == NodeType.FULL_NODE: return inbound_count < self.config["target_peer_count"] - self.config["target_outbound_peer_count"] if node_type == NodeType.WALLET: return inbound_count < self.config["max_inbound_wallet"] if node_type == NodeType.FARMER: return inbound_count < self.config["max_inbound_farmer"] if node_type == NodeType.TIMELORD: return inbound_count < self.config["max_inbound_timelord"] return True def is_trusted_peer(self, peer: WSGreenBerryConnection, trusted_peers: Dict) -> bool: if trusted_peers is None: return False for trusted_peer in trusted_peers: cert = self.root_path / trusted_peers[trusted_peer] pem_cert = x509.load_pem_x509_certificate(cert.read_bytes()) cert_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER) der_cert = x509.load_der_x509_certificate(cert_bytes) peer_id = bytes32(der_cert.fingerprint(hashes.SHA256())) if peer_id == peer.peer_node_id: self.log.debug(f"trusted node {peer.peer_node_id} {peer.peer_host}") return True return False
45.655319
119
0.623047
[ "Apache-2.0" ]
GreenBerry-Network/greenberry-blockchain
greenberry/server/server.py
32,187
Python
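A minimal, hedged sketch of the peer-id derivation used by is_trusted_peer() in the server file above: load a PEM certificate, re-encode it as DER, and take its SHA-256 fingerprint as the node id. The certificate path is a placeholder, not something from the original file.

from pathlib import Path

from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization


def peer_id_from_pem(cert_path: Path) -> bytes:
    """Return the 32-byte SHA-256 fingerprint used as the peer node id."""
    pem_cert = x509.load_pem_x509_certificate(cert_path.read_bytes())
    cert_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER)
    der_cert = x509.load_der_x509_certificate(cert_bytes)
    return der_cert.fingerprint(hashes.SHA256())


# hypothetical path; compare the result with connection.peer_node_id, as is_trusted_peer does
print(peer_id_from_pem(Path("config/ssl/crt/trusted_peer.crt")).hex())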
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND # NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS # BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo # (either separately or in combination, "QuantumBlack Trademarks") are # trademarks of QuantumBlack. The License does not grant you any right or # license to the QuantumBlack Trademarks. You may not use the QuantumBlack # Trademarks or any confusingly similar mark as a trademark for your product, # or use the QuantumBlack Trademarks in any other manner that might cause # confusion in the marketplace, including but not limited to in advertising, # on websites, or on software. # # See the License for the specific language governing permissions and # limitations under the License. """ Tools to learn a ``StructureModel`` which describes the conditional dependencies between variables in a dataset. """ import logging from copy import deepcopy from typing import Dict, Iterable, List, Tuple, Union import numpy as np import pandas as pd from sklearn.utils import check_array from causalnex.structure.pytorch.core import NotearsMLP from causalnex.structure.pytorch.dist_type import DistTypeContinuous, dist_type_aliases from causalnex.structure.structuremodel import StructureModel __all__ = ["from_numpy", "from_pandas"] # pylint: disable=too-many-locals # pylint: disable=too-many-arguments def from_numpy( X: np.ndarray, dist_type_schema: Dict[int, str] = None, lasso_beta: float = 0.0, ridge_beta: float = 0.0, use_bias: bool = False, hidden_layer_units: Iterable[int] = None, w_threshold: float = None, max_iter: int = 100, tabu_edges: List[Tuple[int, int]] = None, tabu_parent_nodes: List[int] = None, tabu_child_nodes: List[int] = None, **kwargs ) -> StructureModel: """ Learn the `StructureModel`, the graph structure with lasso regularisation describing conditional dependencies between variables in data presented as a numpy array. Based on DAGs with NO TEARS. @inproceedings{zheng2018dags, author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.}, booktitle = {Advances in Neural Information Processing Systems}, title = {{DAGs with NO TEARS: Continuous Optimization for Structure Learning}}, year = {2018}, codebase = {https://github.com/xunzheng/notears} } Args: X: 2d input data, axis=0 is data rows, axis=1 is data columns. Data must be row oriented. dist_type_schema: The dist type schema corresponding to the passed in data X. It maps the positional column in X to the string alias of a dist type. A list of alias names can be found in ``dist_type/__init__.py``. If None, assumes that all data in X is continuous. lasso_beta: Constant that multiplies the lasso term (l1 regularisation). NOTE when using nonlinearities, the l1 loss only applies to the dag_layer. use_bias: Whether to fit a bias parameter in the NOTEARS algorithm. 
ridge_beta: Constant that multiplies the ridge term (l2 regularisation). When using nonlinear layers use of this parameter is recommended. hidden_layer_units: An iterable where its length determine the number of layers used, and the numbers determine the number of nodes used for the layer in order. w_threshold: fixed threshold for absolute edge weights. max_iter: max number of dual ascent steps during optimisation. tabu_edges: list of edges(from, to) not to be included in the graph. tabu_parent_nodes: list of nodes banned from being a parent of any other nodes. tabu_child_nodes: list of nodes banned from being a child of any other nodes. **kwargs: additional arguments for NOTEARS MLP model Returns: StructureModel: a graph of conditional dependencies between data variables. Raises: ValueError: If X does not contain data. ValueError: If schema does not correspond to columns. """ # n examples, d properties if not X.size: raise ValueError("Input data X is empty, cannot learn any structure") logging.info("Learning structure using 'NOTEARS' optimisation.") # Check array for NaN or inf values check_array(X) if dist_type_schema is not None: # make sure that there is one provided key per column if set(range(X.shape[1])).symmetric_difference(set(dist_type_schema.keys())): raise ValueError( "Difference indices and expected indices. Got {} schema".format( dist_type_schema ) ) # if dist_type_schema is None, assume all columns are continuous, else ini dist_types = ( [DistTypeContinuous(idx=idx) for idx in np.arange(X.shape[1])] if dist_type_schema is None else [ dist_type_aliases[alias](idx=idx) for idx, alias in dist_type_schema.items() ] ) _, d = X.shape # if None or empty, convert into a list with single item if hidden_layer_units is None: hidden_layer_units = [0] elif isinstance(hidden_layer_units, list) and not hidden_layer_units: hidden_layer_units = [0] # if no hidden layer units, still take 1 iteration step with bounds hidden_layer_bnds = hidden_layer_units[0] if hidden_layer_units[0] else 1 # Flip i and j because Pytorch flattens the vector in another direction bnds = [ (0, 0) if i == j else (0, 0) if tabu_edges is not None and (i, j) in tabu_edges else (0, 0) if tabu_parent_nodes is not None and i in tabu_parent_nodes else (0, 0) if tabu_child_nodes is not None and j in tabu_child_nodes else (None, None) for j in range(d) for _ in range(hidden_layer_bnds) for i in range(d) ] model = NotearsMLP( n_features=d, dist_types=dist_types, hidden_layer_units=hidden_layer_units, lasso_beta=lasso_beta, ridge_beta=ridge_beta, bounds=bnds, use_bias=use_bias, **kwargs ) model.fit(X, max_iter=max_iter) sm = StructureModel(model.adj) if w_threshold: sm.remove_edges_below_threshold(w_threshold) mean_effect = model.adj_mean_effect # extract the mean effect and add as edge attribute for u, v, edge_dict in sm.edges.data(True): sm.add_edge( u, v, origin="learned", weight=edge_dict["weight"], mean_effect=mean_effect[u, v], ) # set bias as node attribute bias = model.bias for node in sm.nodes(): value = None if bias is not None: value = bias[node] sm.nodes[node]["bias"] = value for dist_type in dist_types: # attach each dist_type object to corresponding node sm.nodes[dist_type.idx]["dist_type"] = dist_type # preserve the structure_learner as a graph attribute sm.graph["structure_learner"] = model return sm # pylint: disable=too-many-locals # pylint: disable=too-many-arguments def from_pandas( X: pd.DataFrame, dist_type_schema: Dict[Union[str, int], str] = None, lasso_beta: float = 0.0, ridge_beta: float = 0.0, 
use_bias: bool = False, hidden_layer_units: Iterable[int] = None, max_iter: int = 100, w_threshold: float = None, tabu_edges: List[Tuple[str, str]] = None, tabu_parent_nodes: List[str] = None, tabu_child_nodes: List[str] = None, **kwargs ) -> StructureModel: """ Learn the `StructureModel`, the graph structure describing conditional dependencies between variables in data presented as a pandas dataframe. The optimisation is to minimise a score function :math:`F(W)` over the graph's weighted adjacency matrix, :math:`W`, subject to the a constraint function :math:`h(W)`, where :math:`h(W) == 0` characterises an acyclic graph. :math:`h(W) > 0` is a continuous, differentiable function that encapsulated how acyclic the graph is (less == more acyclic). Full details of this approach to structure learning are provided in the publication: Based on DAGs with NO TEARS. @inproceedings{zheng2018dags, author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P.}, booktitle = {Advances in Neural Information Processing Systems}, title = {{DAGs with NO TEARS: Continuous Optimization for Structure Learning}}, year = {2018}, codebase = {https://github.com/xunzheng/notears} } Args: X: 2d input data, axis=0 is data rows, axis=1 is data columns. Data must be row oriented. dist_type_schema: The dist type schema corresponding to the passed in data X. It maps the pandas column name in X to the string alias of a dist type. A list of alias names can be found in ``dist_type/__init__.py``. If None, assumes that all data in X is continuous. lasso_beta: Constant that multiplies the lasso term (l1 regularisation). NOTE when using nonlinearities, the l1 loss only applies to the dag_layer. use_bias: Whether to fit a bias parameter in the NOTEARS algorithm. ridge_beta: Constant that multiplies the ridge term (l2 regularisation). When using nonlinear layers use of this parameter is recommended. hidden_layer_units: An iterable where its length determine the number of layers used, and the numbers determine the number of nodes used for the layer in order. w_threshold: fixed threshold for absolute edge weights. max_iter: max number of dual ascent steps during optimisation. tabu_edges: list of edges(from, to) not to be included in the graph. tabu_parent_nodes: list of nodes banned from being a parent of any other nodes. tabu_child_nodes: list of nodes banned from being a child of any other nodes. **kwargs: additional arguments for NOTEARS MLP model Returns: StructureModel: graph of conditional dependencies between data variables. Raises: ValueError: If X does not contain data. """ data = deepcopy(X) # if dist_type_schema is not None, convert dist_type_schema from cols to idx dist_type_schema = ( dist_type_schema if dist_type_schema is None else {X.columns.get_loc(col): alias for col, alias in dist_type_schema.items()} ) non_numeric_cols = data.select_dtypes(exclude="number").columns if len(non_numeric_cols) > 0: raise ValueError( "All columns must have numeric data. 
" "Consider mapping the following columns to int {non_numeric_cols}".format( non_numeric_cols=non_numeric_cols ) ) col_idx = {c: i for i, c in enumerate(data.columns)} idx_col = {i: c for c, i in col_idx.items()} if tabu_edges: tabu_edges = [(col_idx[u], col_idx[v]) for u, v in tabu_edges] if tabu_parent_nodes: tabu_parent_nodes = [col_idx[n] for n in tabu_parent_nodes] if tabu_child_nodes: tabu_child_nodes = [col_idx[n] for n in tabu_child_nodes] g = from_numpy( X=data.values, dist_type_schema=dist_type_schema, lasso_beta=lasso_beta, ridge_beta=ridge_beta, use_bias=use_bias, hidden_layer_units=hidden_layer_units, w_threshold=w_threshold, max_iter=max_iter, tabu_edges=tabu_edges, tabu_parent_nodes=tabu_parent_nodes, tabu_child_nodes=tabu_child_nodes, **kwargs ) sm = StructureModel() sm.add_nodes_from(data.columns) # recover the edge weights from g for u, v, edge_dict in g.edges.data(True): sm.add_edge( idx_col[u], idx_col[v], origin="learned", weight=edge_dict["weight"], mean_effect=edge_dict["mean_effect"], ) # retrieve all graphs attrs for key, val in g.graph.items(): sm.graph[key] = val # recover the node biases from g for node in g.nodes(data=True): node_name = idx_col[node[0]] sm.nodes[node_name]["bias"] = node[1]["bias"] # recover and preseve the node dist_types for node in g.nodes(data=True): node_name = idx_col[node[0]] sm.nodes[node_name]["dist_type"] = node[1]["dist_type"] return sm
36.782123
112
0.679982
[ "Apache-2.0" ]
mkretsch327/causalnex
causalnex/structure/pytorch/notears.py
13,168
Python
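A hedged usage sketch for the from_pandas() learner defined above; the toy DataFrame, column names, threshold, and iteration count are illustrative assumptions, not part of the module.

import numpy as np
import pandas as pd

from causalnex.structure.pytorch.notears import from_pandas

rng = np.random.default_rng(0)
x = rng.normal(size=500)
df = pd.DataFrame({"x": x, "y": 2.0 * x + rng.normal(scale=0.1, size=500)})

# learn the structure; a small max_iter keeps the sketch quick
sm = from_pandas(df, w_threshold=0.3, max_iter=50)
print(list(sm.edges(data=True)))  # typically recovers an x -> y edge with its weight and mean_effect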
import re from . import inlinepatterns from . import util from . import odict def build_treeprocessors(md_instance, **kwargs): """ Build the default treeprocessors for Markdown. """ treeprocessors = odict.OrderedDict() treeprocessors["inline"] = InlineProcessor(md_instance) treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance) return treeprocessors def isString(s): """ Check if it's string """ if not isinstance(s, util.AtomicString): return isinstance(s, str) return False class Processor: def __init__(self, markdown_instance=None): if markdown_instance: self.markdown = markdown_instance class Treeprocessor(Processor): """ Treeprocessors are run on the ElementTree object before serialization. Each Treeprocessor implements a "run" method that takes a pointer to an ElementTree, modifies it as necessary and returns an ElementTree object. Treeprocessors must extend markdown.Treeprocessor. """ def run(self, root): """ Subclasses of Treeprocessor should implement a `run` method, which takes a root ElementTree. This method can return another ElementTree object, and the existing root ElementTree will be replaced, or it can modify the current tree and return None. """ pass class InlineProcessor(Treeprocessor): """ A Treeprocessor that traverses a tree, applying inline patterns. """ def __init__(self, md): self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX self.__placeholder_suffix = util.ETX self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + len(self.__placeholder_suffix) self.__placeholder_re = util.INLINE_PLACEHOLDER_RE self.markdown = md def __makePlaceholder(self, type): """ Generate a placeholder """ id = "%04d" % len(self.stashed_nodes) hash = util.INLINE_PLACEHOLDER % id return hash, id def __findPlaceholder(self, data, index): """ Extract id from data string, start from index Keyword arguments: * data: string * index: index, from which we start search Returns: placeholder id and string index, after the found placeholder. """ m = self.__placeholder_re.search(data, index) if m: return m.group(1), m.end() else: return None, index + 1 def __stashNode(self, node, type): """ Add node to stash """ placeholder, id = self.__makePlaceholder(type) self.stashed_nodes[id] = node return placeholder def __handleInline(self, data, patternIndex=0): """ Process string with inline patterns and replace it with placeholders Keyword arguments: * data: A line of Markdown text * patternIndex: The index of the inlinePattern to start with Returns: String with placeholders. """ if not isinstance(data, util.AtomicString): startIndex = 0 while patternIndex < len(self.markdown.inlinePatterns): data, matched, startIndex = self.__applyPattern( self.markdown.inlinePatterns.value_for_index(patternIndex), data, patternIndex, startIndex) if not matched: patternIndex += 1 return data def __processElementText(self, node, subnode, isText=True): """ Process placeholders in Element.text or Element.tail of Elements popped from self.stashed_nodes. 
Keywords arguments: * node: parent node * subnode: processing node * isText: bool variable, True - it's text, False - it's tail Returns: None """ if isText: text = subnode.text subnode.text = None else: text = subnode.tail subnode.tail = None childResult = self.__processPlaceholders(text, subnode) if not isText and node is not subnode: pos = node.getchildren().index(subnode) node.remove(subnode) else: pos = 0 childResult.reverse() for newChild in childResult: node.insert(pos, newChild) def __processPlaceholders(self, data, parent): """ Process string with placeholders and generate ElementTree tree. Keyword arguments: * data: string with placeholders instead of ElementTree elements. * parent: Element, which contains processing inline data Returns: list with ElementTree elements with applied inline patterns. """ def linkText(text): if text: if result: if result[-1].tail: result[-1].tail += text else: result[-1].tail = text else: if parent.text: parent.text += text else: parent.text = text result = [] strartIndex = 0 while data: index = data.find(self.__placeholder_prefix, strartIndex) if index != -1: id, phEndIndex = self.__findPlaceholder(data, index) if id in self.stashed_nodes: node = self.stashed_nodes.get(id) if index > 0: text = data[strartIndex:index] linkText(text) if not isString(node): # it's Element for child in [node] + node.getchildren(): if child.tail: if child.tail.strip(): self.__processElementText(node, child,False) if child.text: if child.text.strip(): self.__processElementText(child, child) else: # it's just a string linkText(node) strartIndex = phEndIndex continue strartIndex = phEndIndex result.append(node) else: # wrong placeholder end = index + len(self.__placeholder_prefix) linkText(data[strartIndex:end]) strartIndex = end else: text = data[strartIndex:] if isinstance(data, util.AtomicString): # We don't want to loose the AtomicString text = util.AtomicString(text) linkText(text) data = "" return result def __applyPattern(self, pattern, data, patternIndex, startIndex=0): """ Check if the line fits the pattern, create the necessary elements, add it to stashed_nodes. Keyword arguments: * data: the text to be processed * pattern: the pattern to be checked * patternIndex: index of current pattern * startIndex: string index, from which we start searching Returns: String with placeholders instead of ElementTree elements. """ match = pattern.getCompiledRegExp().match(data[startIndex:]) leftData = data[:startIndex] if not match: return data, False, 0 node = pattern.handleMatch(match) if node is None: return data, True, len(leftData)+match.span(len(match.groups()))[0] if not isString(node): if not isinstance(node.text, util.AtomicString): # We need to process current node too for child in [node] + node.getchildren(): if not isString(node): if child.text: child.text = self.__handleInline(child.text, patternIndex + 1) if child.tail: child.tail = self.__handleInline(child.tail, patternIndex) placeholder = self.__stashNode(node, pattern.type()) return "%s%s%s%s" % (leftData, match.group(1), placeholder, match.groups()[-1]), True, 0 def run(self, tree): """Apply inline patterns to a parsed Markdown tree. Iterate over ElementTree, find elements with inline tag, apply inline patterns and append newly created Elements to tree. If you don't want to process your data with inline paterns, instead of normal string, use subclass AtomicString: node.text = markdown.AtomicString("This will not be processed.") Arguments: * tree: ElementTree object, representing Markdown tree. 
Returns: ElementTree object with applied inline patterns. """ self.stashed_nodes = {} stack = [tree] while stack: currElement = stack.pop() insertQueue = [] for child in currElement.getchildren(): if child.text and not isinstance(child.text, util.AtomicString): text = child.text child.text = None lst = self.__processPlaceholders(self.__handleInline( text), child) stack += lst insertQueue.append((child, lst)) if child.tail: tail = self.__handleInline(child.tail) dumby = util.etree.Element('d') tailResult = self.__processPlaceholders(tail, dumby) if dumby.text: child.tail = dumby.text else: child.tail = None pos = currElement.getchildren().index(child) + 1 tailResult.reverse() for newChild in tailResult: currElement.insert(pos, newChild) if child.getchildren(): stack.append(child) for element, lst in insertQueue: if self.markdown.enable_attributes: if element.text: element.text = \ inlinepatterns.handleAttributes(element.text, element) i = 0 for newChild in lst: if self.markdown.enable_attributes: # Processing attributes if newChild.tail: newChild.tail = \ inlinepatterns.handleAttributes(newChild.tail, element) if newChild.text: newChild.text = \ inlinepatterns.handleAttributes(newChild.text, newChild) element.insert(i, newChild) i += 1 return tree class PrettifyTreeprocessor(Treeprocessor): """ Add linebreaks to the html document. """ def _prettifyETree(self, elem): """ Recursively add linebreaks to ElementTree children. """ i = "\n" if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']: if (not elem.text or not elem.text.strip()) \ and len(elem) and util.isBlockLevel(elem[0].tag): elem.text = i for e in elem: if util.isBlockLevel(e.tag): self._prettifyETree(e) if not elem.tail or not elem.tail.strip(): elem.tail = i if not elem.tail or not elem.tail.strip(): elem.tail = i def run(self, root): """ Add linebreaks to ElementTree root object. """ self._prettifyETree(root) # Do <br />'s seperately as they are often in the middle of # inline content and missed by _prettifyETree. brs = root.getiterator('br') for br in brs: if not br.tail or not br.tail.strip(): br.tail = '\n' else: br.tail = '\n%s' % br.tail
35.038781
80
0.528579
[ "MIT" ]
Con-Mi/lambda-packs
Tensorflow_LightGBM_Scipy_nightly/source/markdown/treeprocessors.py
12,649
Python
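A hedged sketch of a Treeprocessor subclass built on the base class above, assuming the upstream Python-Markdown 2.x layout that this vendored copy mirrors; registration with a Markdown instance is omitted.

from markdown import util
from markdown.treeprocessors import Treeprocessor


class UppercaseEmphasis(Treeprocessor):
    """Upper-case the text of every <em> element in the parsed tree."""

    def run(self, root):
        for elem in root.iter("em"):
            if elem.text:
                elem.text = elem.text.upper()
        return root


root = util.etree.fromstring("<div><p>some <em>important</em> text</p></div>")
UppercaseEmphasis().run(root)
print(util.etree.tostring(root))  # b'<div><p>some <em>IMPORTANT</em> text</p></div>'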
from selenium import webdriver
import time

url = "http://localhost/litecart/admin/"

browser = webdriver.Chrome()
browser.implicitly_wait(1)

without_title = 0

try:
    browser.get(url)

    # log in
    login = browser.find_element_by_css_selector("[name='username']")
    login.send_keys("admin")
    password = browser.find_element_by_css_selector("[name='password']")
    password.send_keys("admin")
    button = browser.find_element_by_css_selector("[name='login']")
    button.click()
    time.sleep(1)  # without this sleep the script stops working; I would really like to discuss this point

    # read the main menu
    main_menu = browser.find_elements_by_css_selector("#box-apps-menu > li")
    for i in range(len(main_menu)):
        main_menu_temp = browser.find_elements_by_css_selector("#box-apps-menu > li")
        main_menu_temp[i].click()
        # read the submenu
        sub_menu = browser.find_elements_by_css_selector(".docs > li")
        # condition for menu items that have no submenu
        if len(sub_menu) < 1:
            title = browser.find_element_by_css_selector("#content > h1").text
            if len(title) == 0:
                without_title += 1
        for j in range(len(sub_menu)):
            sub_menu_temp = browser.find_elements_by_css_selector(".docs > li")
            sub_menu_temp[j].click()
            title = browser.find_element_by_css_selector("#content > h1").text
            if len(title) == 0:
                without_title += 1

    if without_title > 0:
        print('BUG!')
    else:
        print('NO BUG')

finally:
    browser.quit()
30.018519
106
0.650833
[ "Apache-2.0" ]
aminzin-1990/software-testing-repository
selenium/find_elements/app_main_menu.py
1,780
Python
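A hedged variation on the login step of the script above: an explicit wait for the admin menu instead of the fixed time.sleep(1) the comment asks about. URL and selectors come from the script; the 10-second timeout is an arbitrary choice.

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

browser = webdriver.Chrome()
try:
    browser.get("http://localhost/litecart/admin/")
    browser.find_element_by_css_selector("[name='username']").send_keys("admin")
    browser.find_element_by_css_selector("[name='password']").send_keys("admin")
    browser.find_element_by_css_selector("[name='login']").click()
    # wait until the admin main menu is present instead of sleeping a fixed second
    WebDriverWait(browser, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "#box-apps-menu > li"))
    )
finally:
    browser.quit()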
""" Considerando duas listas de inteiros ou floats (lista A e lista B) Some os valores nas listas retornando uma nova lista com os valores somados: Se uma lista for maior que a outra, a soma só vai considerar o tamanho da menor. Exemplo: lista_a = [1, 2, 3, 4, 5, 6, 7] lista_b = [1, 2, 3, 4] =================== resultado lista_soma = [2, 4, 6, 8] """ lista_a = [10, 2, 3, 40, 5, 6, 7] lista_b = [1, 2, 3, 4] lista_soma = [v1 + v2 for v1, v2 in zip(lista_a, lista_b)] print(lista_soma)
29.235294
76
0.633803
[ "MIT" ]
lel352/Curso-Python
aulaspythonintermediario/exercicios06/exercicio01.py
498
Python
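A quick, hedged check of the same comprehension against the example values given in the docstring above.

lista_a = [1, 2, 3, 4, 5, 6, 7]
lista_b = [1, 2, 3, 4]
# zip stops at the shorter list, so the result matches the expected output
assert [v1 + v2 for v1, v2 in zip(lista_a, lista_b)] == [2, 4, 6, 8]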
import functools from teamiclink.slack.model import GoalContent from typing import Any, Dict from slack_bolt import Ack from slack_bolt.context import BoltContext from pydantic import ValidationError CREATE_GOAL_CALLBACK_ID = "create_goal_view_id" CREATE_GOAL_INPUT = "create_goal_action" CREATE_GOAL_INPUT_BLOCK = "create_goal_input_block" CREATE_GOAL = { "type": "modal", "callback_id": CREATE_GOAL_CALLBACK_ID, "title": {"type": "plain_text", "text": "Teamiclink"}, "submit": {"type": "plain_text", "text": "Submit"}, "close": {"type": "plain_text", "text": "Cancel"}, "blocks": [ { "type": "input", "block_id": CREATE_GOAL_INPUT_BLOCK, "element": {"type": "plain_text_input", "action_id": CREATE_GOAL_INPUT}, "label": {"type": "plain_text", "text": "Create goal"}, } ], } def add_goal_to_payload(func): """Adds a goal to payload for 't-goal' key.""" @functools.wraps(func) def wrapper_inject_goal(ack: Ack, payload: Dict[str, Any], context: BoltContext): try: content = GoalContent( content=payload["state"]["values"][CREATE_GOAL_INPUT_BLOCK][ CREATE_GOAL_INPUT ]["value"] ).content except ValidationError as error: return ack( response_action="errors", errors={CREATE_GOAL_INPUT_BLOCK: error.errors()[0]["msg"]}, ) payload["t-goal"] = content return func(ack=ack, payload=payload, context=context) return wrapper_inject_goal
32.816327
85
0.620647
[ "Apache-2.0" ]
e1004/teamiclink
teamiclink/slack/view_goal_create.py
1,608
Python
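A hedged sketch of how the view module above might be wired into a slack_bolt App; the handler body and the token setup (environment variables) are assumptions, not part of the module.

from slack_bolt import Ack, App
from slack_bolt.context import BoltContext

from teamiclink.slack.view_goal_create import CREATE_GOAL_CALLBACK_ID, add_goal_to_payload

app = App()  # assumes SLACK_BOT_TOKEN and SLACK_SIGNING_SECRET in the environment


@app.view(CREATE_GOAL_CALLBACK_ID)
@add_goal_to_payload
def handle_goal_submission(ack: Ack, payload, context: BoltContext):
    # the decorator has already validated the input and stored it under "t-goal"
    ack()
    print("validated goal:", payload["t-goal"])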
import concurrent.futures import contextlib import http.client import json import math import os import time from .common import FileDownloader from .http import HttpFD from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7 from ..compat import compat_os_name, compat_struct_pack, compat_urllib_error from ..utils import ( DownloadError, encodeFilename, error_to_compat_str, sanitized_Request, traverse_obj, ) class HttpQuietDownloader(HttpFD): def to_screen(self, *args, **kargs): pass console_title = to_screen class FragmentFD(FileDownloader): """ A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests). Available options: fragment_retries: Number of times to retry a fragment for HTTP error (DASH and hlsnative only) skip_unavailable_fragments: Skip unavailable fragments (DASH and hlsnative only) keep_fragments: Keep downloaded fragments on disk after downloading is finished concurrent_fragment_downloads: The number of threads to use for native hls and dash downloads _no_ytdl_file: Don't use .ytdl file For each incomplete fragment download yt-dlp keeps on disk a special bookkeeping file with download state and metadata (in future such files will be used for any incomplete download handled by yt-dlp). This file is used to properly handle resuming, check download file consistency and detect potential errors. The file has a .ytdl extension and represents a standard JSON file of the following format: extractor: Dictionary of extractor related data. TBD. downloader: Dictionary of downloader related data. May contain following data: current_fragment: Dictionary with current (being downloaded) fragment data: index: 0-based index of current fragment among all fragments fragment_count: Total count of fragments This feature is experimental and file format may change in future. """ def report_retry_fragment(self, err, frag_index, count, retries): self.to_screen( '\r[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s) ...' 
% (error_to_compat_str(err), frag_index, count, self.format_retries(retries))) self.sleep_retry('fragment', count) def report_skip_fragment(self, frag_index, err=None): err = f' {err};' if err else '' self.to_screen(f'[download]{err} Skipping fragment {frag_index:d} ...') def _prepare_url(self, info_dict, url): headers = info_dict.get('http_headers') return sanitized_Request(url, None, headers) if headers else url def _prepare_and_start_frag_download(self, ctx, info_dict): self._prepare_frag_download(ctx) self._start_frag_download(ctx, info_dict) def __do_ytdl_file(self, ctx): return ctx['live'] is not True and ctx['tmpfilename'] != '-' and not self.params.get('_no_ytdl_file') def _read_ytdl_file(self, ctx): assert 'ytdl_corrupt' not in ctx stream, _ = self.sanitize_open(self.ytdl_filename(ctx['filename']), 'r') try: ytdl_data = json.loads(stream.read()) ctx['fragment_index'] = ytdl_data['downloader']['current_fragment']['index'] if 'extra_state' in ytdl_data['downloader']: ctx['extra_state'] = ytdl_data['downloader']['extra_state'] except Exception: ctx['ytdl_corrupt'] = True finally: stream.close() def _write_ytdl_file(self, ctx): frag_index_stream, _ = self.sanitize_open(self.ytdl_filename(ctx['filename']), 'w') try: downloader = { 'current_fragment': { 'index': ctx['fragment_index'], }, } if 'extra_state' in ctx: downloader['extra_state'] = ctx['extra_state'] if ctx.get('fragment_count') is not None: downloader['fragment_count'] = ctx['fragment_count'] frag_index_stream.write(json.dumps({'downloader': downloader})) finally: frag_index_stream.close() def _download_fragment(self, ctx, frag_url, info_dict, headers=None, request_data=None): fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index']) fragment_info_dict = { 'url': frag_url, 'http_headers': headers or info_dict.get('http_headers'), 'request_data': request_data, 'ctx_id': ctx.get('ctx_id'), } success, _ = ctx['dl'].download(fragment_filename, fragment_info_dict) if not success: return False if fragment_info_dict.get('filetime'): ctx['fragment_filetime'] = fragment_info_dict.get('filetime') ctx['fragment_filename_sanitized'] = fragment_filename return True def _read_fragment(self, ctx): if not ctx.get('fragment_filename_sanitized'): return None try: down, frag_sanitized = self.sanitize_open(ctx['fragment_filename_sanitized'], 'rb') except FileNotFoundError: if ctx.get('live'): return None raise ctx['fragment_filename_sanitized'] = frag_sanitized frag_content = down.read() down.close() return frag_content def _append_fragment(self, ctx, frag_content): try: ctx['dest_stream'].write(frag_content) ctx['dest_stream'].flush() finally: if self.__do_ytdl_file(ctx): self._write_ytdl_file(ctx) if not self.params.get('keep_fragments', False): self.try_remove(encodeFilename(ctx['fragment_filename_sanitized'])) del ctx['fragment_filename_sanitized'] def _prepare_frag_download(self, ctx): if 'live' not in ctx: ctx['live'] = False if not ctx['live']: total_frags_str = '%d' % ctx['total_frags'] ad_frags = ctx.get('ad_frags', 0) if ad_frags: total_frags_str += ' (not including %d ad)' % ad_frags else: total_frags_str = 'unknown (live)' self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}') self.report_destination(ctx['filename']) dl = HttpQuietDownloader(self.ydl, { **self.params, 'noprogress': True, 'test': False, }) tmpfilename = self.temp_name(ctx['filename']) open_mode = 'wb' resume_len = 0 # Establish possible resume length if os.path.isfile(encodeFilename(tmpfilename)): open_mode = 'ab' resume_len = 
os.path.getsize(encodeFilename(tmpfilename)) # Should be initialized before ytdl file check ctx.update({ 'tmpfilename': tmpfilename, 'fragment_index': 0, }) if self.__do_ytdl_file(ctx): if os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename']))): self._read_ytdl_file(ctx) is_corrupt = ctx.get('ytdl_corrupt') is True is_inconsistent = ctx['fragment_index'] > 0 and resume_len == 0 if is_corrupt or is_inconsistent: message = ( '.ytdl file is corrupt' if is_corrupt else 'Inconsistent state of incomplete fragment download') self.report_warning( '%s. Restarting from the beginning ...' % message) ctx['fragment_index'] = resume_len = 0 if 'ytdl_corrupt' in ctx: del ctx['ytdl_corrupt'] self._write_ytdl_file(ctx) else: self._write_ytdl_file(ctx) assert ctx['fragment_index'] == 0 dest_stream, tmpfilename = self.sanitize_open(tmpfilename, open_mode) ctx.update({ 'dl': dl, 'dest_stream': dest_stream, 'tmpfilename': tmpfilename, # Total complete fragments downloaded so far in bytes 'complete_frags_downloaded_bytes': resume_len, }) def _start_frag_download(self, ctx, info_dict): resume_len = ctx['complete_frags_downloaded_bytes'] total_frags = ctx['total_frags'] ctx_id = ctx.get('ctx_id') # This dict stores the download progress, it's updated by the progress # hook state = { 'status': 'downloading', 'downloaded_bytes': resume_len, 'fragment_index': ctx['fragment_index'], 'fragment_count': total_frags, 'filename': ctx['filename'], 'tmpfilename': ctx['tmpfilename'], } start = time.time() ctx.update({ 'started': start, 'fragment_started': start, # Amount of fragment's bytes downloaded by the time of the previous # frag progress hook invocation 'prev_frag_downloaded_bytes': 0, }) def frag_progress_hook(s): if s['status'] not in ('downloading', 'finished'): return if not total_frags and ctx.get('fragment_count'): state['fragment_count'] = ctx['fragment_count'] if ctx_id is not None and s.get('ctx_id') != ctx_id: return state['max_progress'] = ctx.get('max_progress') state['progress_idx'] = ctx.get('progress_idx') time_now = time.time() state['elapsed'] = time_now - start frag_total_bytes = s.get('total_bytes') or 0 s['fragment_info_dict'] = s.pop('info_dict', {}) if not ctx['live']: estimated_size = ( (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes) / (state['fragment_index'] + 1) * total_frags) state['total_bytes_estimate'] = estimated_size if s['status'] == 'finished': state['fragment_index'] += 1 ctx['fragment_index'] = state['fragment_index'] state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes'] ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes'] ctx['speed'] = state['speed'] = self.calc_speed( ctx['fragment_started'], time_now, frag_total_bytes) ctx['fragment_started'] = time.time() ctx['prev_frag_downloaded_bytes'] = 0 else: frag_downloaded_bytes = s['downloaded_bytes'] state['downloaded_bytes'] += frag_downloaded_bytes - ctx['prev_frag_downloaded_bytes'] if not ctx['live']: state['eta'] = self.calc_eta( start, time_now, estimated_size - resume_len, state['downloaded_bytes'] - resume_len) ctx['speed'] = state['speed'] = self.calc_speed( ctx['fragment_started'], time_now, frag_downloaded_bytes) ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes self._hook_progress(state, info_dict) ctx['dl'].add_progress_hook(frag_progress_hook) return start def _finish_frag_download(self, ctx, info_dict): ctx['dest_stream'].close() if self.__do_ytdl_file(ctx): ytdl_filename = encodeFilename(self.ytdl_filename(ctx['filename'])) if 
os.path.isfile(ytdl_filename): self.try_remove(ytdl_filename) elapsed = time.time() - ctx['started'] if ctx['tmpfilename'] == '-': downloaded_bytes = ctx['complete_frags_downloaded_bytes'] else: self.try_rename(ctx['tmpfilename'], ctx['filename']) if self.params.get('updatetime', True): filetime = ctx.get('fragment_filetime') if filetime: with contextlib.suppress(Exception): os.utime(ctx['filename'], (time.time(), filetime)) downloaded_bytes = os.path.getsize(encodeFilename(ctx['filename'])) self._hook_progress({ 'downloaded_bytes': downloaded_bytes, 'total_bytes': downloaded_bytes, 'filename': ctx['filename'], 'status': 'finished', 'elapsed': elapsed, 'ctx_id': ctx.get('ctx_id'), 'max_progress': ctx.get('max_progress'), 'progress_idx': ctx.get('progress_idx'), }, info_dict) def _prepare_external_frag_download(self, ctx): if 'live' not in ctx: ctx['live'] = False if not ctx['live']: total_frags_str = '%d' % ctx['total_frags'] ad_frags = ctx.get('ad_frags', 0) if ad_frags: total_frags_str += ' (not including %d ad)' % ad_frags else: total_frags_str = 'unknown (live)' self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}') tmpfilename = self.temp_name(ctx['filename']) # Should be initialized before ytdl file check ctx.update({ 'tmpfilename': tmpfilename, 'fragment_index': 0, }) def decrypter(self, info_dict): _key_cache = {} def _get_key(url): if url not in _key_cache: _key_cache[url] = self.ydl.urlopen(self._prepare_url(info_dict, url)).read() return _key_cache[url] def decrypt_fragment(fragment, frag_content): decrypt_info = fragment.get('decrypt_info') if not decrypt_info or decrypt_info['METHOD'] != 'AES-128': return frag_content iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', fragment['media_sequence']) decrypt_info['KEY'] = decrypt_info.get('KEY') or _get_key(info_dict.get('_decryption_key_url') or decrypt_info['URI']) # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded, # not what it decrypts to. if self.params.get('test', False): return frag_content return unpad_pkcs7(aes_cbc_decrypt_bytes(frag_content, decrypt_info['KEY'], iv)) return decrypt_fragment def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_func=None): ''' @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ... 
all args must be either tuple or list ''' interrupt_trigger = [True] max_progress = len(args) if max_progress == 1: return self.download_and_append_fragments(*args[0], pack_func=pack_func, finish_func=finish_func) max_workers = self.params.get('concurrent_fragment_downloads', 1) if max_progress > 1: self._prepare_multiline_status(max_progress) is_live = any(traverse_obj(args, (..., 2, 'is_live'), default=[])) def thread_func(idx, ctx, fragments, info_dict, tpe): ctx['max_progress'] = max_progress ctx['progress_idx'] = idx return self.download_and_append_fragments( ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func, tpe=tpe, interrupt_trigger=interrupt_trigger) class FTPE(concurrent.futures.ThreadPoolExecutor): # has to stop this or it's going to wait on the worker thread itself def __exit__(self, exc_type, exc_val, exc_tb): pass if compat_os_name == 'nt': def future_result(future): while True: try: return future.result(0.1) except KeyboardInterrupt: raise except concurrent.futures.TimeoutError: continue else: def future_result(future): return future.result() def interrupt_trigger_iter(fg): for f in fg: if not interrupt_trigger[0]: break yield f spins = [] for idx, (ctx, fragments, info_dict) in enumerate(args): tpe = FTPE(math.ceil(max_workers / max_progress)) job = tpe.submit(thread_func, idx, ctx, interrupt_trigger_iter(fragments), info_dict, tpe) spins.append((tpe, job)) result = True for tpe, job in spins: try: result = result and future_result(job) except KeyboardInterrupt: interrupt_trigger[0] = False finally: tpe.shutdown(wait=True) if not interrupt_trigger[0] and not is_live: raise KeyboardInterrupt() # we expect the user wants to stop and DO WANT the preceding postprocessors to run; # so returning a intermediate result here instead of KeyboardInterrupt on live return result def download_and_append_fragments( self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None, tpe=None, interrupt_trigger=None): if not interrupt_trigger: interrupt_trigger = (True, ) fragment_retries = self.params.get('fragment_retries', 0) is_fatal = ( ((lambda _: False) if info_dict.get('is_live') else (lambda idx: idx == 0)) if self.params.get('skip_unavailable_fragments', True) else (lambda _: True)) if not pack_func: pack_func = lambda frag_content, _: frag_content def download_fragment(fragment, ctx): if not interrupt_trigger[0]: return frag_index = ctx['fragment_index'] = fragment['frag_index'] ctx['last_error'] = None headers = info_dict.get('http_headers', {}).copy() byte_range = fragment.get('byte_range') if byte_range: headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1) # Never skip the first fragment fatal, count = is_fatal(fragment.get('index') or (frag_index - 1)), 0 while count <= fragment_retries: try: ctx['fragment_count'] = fragment.get('fragment_count') if self._download_fragment(ctx, fragment['url'], info_dict, headers): break return except (compat_urllib_error.HTTPError, http.client.IncompleteRead) as err: # Unavailable (possibly temporary) fragments may be served. # First we try to retry then either skip or abort. # See https://github.com/ytdl-org/youtube-dl/issues/10165, # https://github.com/ytdl-org/youtube-dl/issues/10448). 
count += 1 ctx['last_error'] = err if count <= fragment_retries: self.report_retry_fragment(err, frag_index, count, fragment_retries) except DownloadError: # Don't retry fragment if error occurred during HTTP downloading # itself since it has own retry settings if not fatal: break raise if count > fragment_retries and fatal: ctx['dest_stream'].close() self.report_error('Giving up after %s fragment retries' % fragment_retries) def append_fragment(frag_content, frag_index, ctx): if frag_content: self._append_fragment(ctx, pack_func(frag_content, frag_index)) elif not is_fatal(frag_index - 1): self.report_skip_fragment(frag_index, 'fragment not found') else: ctx['dest_stream'].close() self.report_error(f'fragment {frag_index} not found, unable to continue') return False return True decrypt_fragment = self.decrypter(info_dict) max_workers = math.ceil( self.params.get('concurrent_fragment_downloads', 1) / ctx.get('max_progress', 1)) if max_workers > 1: def _download_fragment(fragment): ctx_copy = ctx.copy() download_fragment(fragment, ctx_copy) return fragment, fragment['frag_index'], ctx_copy.get('fragment_filename_sanitized') self.report_warning('The download speed shown is only of one thread. This is a known issue and patches are welcome') with tpe or concurrent.futures.ThreadPoolExecutor(max_workers) as pool: try: for fragment, frag_index, frag_filename in pool.map(_download_fragment, fragments): ctx.update({ 'fragment_filename_sanitized': frag_filename, 'fragment_index': frag_index, }) if not append_fragment(decrypt_fragment(fragment, self._read_fragment(ctx)), frag_index, ctx): return False except KeyboardInterrupt: self._finish_multiline_status() self.report_error( 'Interrupted by user. Waiting for all threads to shutdown...', is_error=False, tb=False) pool.shutdown(wait=False) raise else: for fragment in fragments: if not interrupt_trigger[0]: break try: download_fragment(fragment, ctx) result = append_fragment( decrypt_fragment(fragment, self._read_fragment(ctx)), fragment['frag_index'], ctx) except KeyboardInterrupt: if info_dict.get('is_live'): break raise if not result: return False if finish_func is not None: ctx['dest_stream'].write(finish_func()) ctx['dest_stream'].flush() self._finish_frag_download(ctx, info_dict) return True
42.242086
130
0.582525
[ "Unlicense" ]
9Fork/yt-dlp
yt_dlp/downloader/fragment.py
22,684
Python
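A hedged illustration of the .ytdl bookkeeping format described in the FragmentFD docstring above; only the documented fields are shown, and the values are made up.

import json

state = {
    "downloader": {
        "current_fragment": {"index": 3},  # 0-based index of the fragment being downloaded
        "fragment_count": 120,             # total fragment count, when known
    }
}
print(json.dumps(state))  # roughly what _write_ytdl_file() persists next to the download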
from functools import partial from typing import Any, Callable, List, Optional, Sequence, Type, Union from torch import nn from torchvision.prototype.transforms import VideoClassificationEval from torchvision.transforms.functional import InterpolationMode from ....models.video.resnet import ( BasicBlock, BasicStem, Bottleneck, Conv2Plus1D, Conv3DSimple, Conv3DNoTemporal, R2Plus1dStem, VideoResNet, ) from .._api import WeightsEnum, Weights from .._meta import _KINETICS400_CATEGORIES from .._utils import handle_legacy_interface, _ovewrite_named_param __all__ = [ "VideoResNet", "R3D_18_Weights", "MC3_18_Weights", "R2Plus1D_18_Weights", "r3d_18", "mc3_18", "r2plus1d_18", ] def _video_resnet( block: Type[Union[BasicBlock, Bottleneck]], conv_makers: Sequence[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]], layers: List[int], stem: Callable[..., nn.Module], weights: Optional[WeightsEnum], progress: bool, **kwargs: Any, ) -> VideoResNet: if weights is not None: _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"])) model = VideoResNet(block, conv_makers, layers, stem, **kwargs) if weights is not None: model.load_state_dict(weights.get_state_dict(progress=progress)) return model _COMMON_META = { "task": "video_classification", "publication_year": 2017, "size": (112, 112), "min_size": (1, 1), "categories": _KINETICS400_CATEGORIES, "interpolation": InterpolationMode.BILINEAR, "recipe": "https://github.com/pytorch/vision/tree/main/references/video_classification", } class R3D_18_Weights(WeightsEnum): KINETICS400_V1 = Weights( url="https://download.pytorch.org/models/r3d_18-b3b3357e.pth", transforms=partial(VideoClassificationEval, crop_size=(112, 112), resize_size=(128, 171)), meta={ **_COMMON_META, "architecture": "R3D", "num_params": 33371472, "acc@1": 52.75, "acc@5": 75.45, }, ) DEFAULT = KINETICS400_V1 class MC3_18_Weights(WeightsEnum): KINETICS400_V1 = Weights( url="https://download.pytorch.org/models/mc3_18-a90a0ba3.pth", transforms=partial(VideoClassificationEval, crop_size=(112, 112), resize_size=(128, 171)), meta={ **_COMMON_META, "architecture": "MC3", "num_params": 11695440, "acc@1": 53.90, "acc@5": 76.29, }, ) DEFAULT = KINETICS400_V1 class R2Plus1D_18_Weights(WeightsEnum): KINETICS400_V1 = Weights( url="https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth", transforms=partial(VideoClassificationEval, crop_size=(112, 112), resize_size=(128, 171)), meta={ **_COMMON_META, "architecture": "R(2+1)D", "num_params": 31505325, "acc@1": 57.50, "acc@5": 78.81, }, ) DEFAULT = KINETICS400_V1 @handle_legacy_interface(weights=("pretrained", R3D_18_Weights.KINETICS400_V1)) def r3d_18(*, weights: Optional[R3D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet: weights = R3D_18_Weights.verify(weights) return _video_resnet( BasicBlock, [Conv3DSimple] * 4, [2, 2, 2, 2], BasicStem, weights, progress, **kwargs, ) @handle_legacy_interface(weights=("pretrained", MC3_18_Weights.KINETICS400_V1)) def mc3_18(*, weights: Optional[MC3_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet: weights = MC3_18_Weights.verify(weights) return _video_resnet( BasicBlock, [Conv3DSimple] + [Conv3DNoTemporal] * 3, # type: ignore[list-item] [2, 2, 2, 2], BasicStem, weights, progress, **kwargs, ) @handle_legacy_interface(weights=("pretrained", R2Plus1D_18_Weights.KINETICS400_V1)) def r2plus1d_18(*, weights: Optional[R2Plus1D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet: weights = R2Plus1D_18_Weights.verify(weights) return 
_video_resnet( BasicBlock, [Conv2Plus1D] * 4, [2, 2, 2, 2], R2Plus1dStem, weights, progress, **kwargs, )
28.431373
119
0.644828
[ "BSD-3-Clause" ]
Bethhhh/vision
torchvision/prototype/models/video/resnet.py
4,350
Python
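A hedged usage sketch for the prototype video builders above; loading the Kinetics-400 weights downloads them on first use, and the 5-D input shape is only an illustration.

import torch

from torchvision.prototype.models.video.resnet import R3D_18_Weights, r3d_18

model = r3d_18(weights=R3D_18_Weights.KINETICS400_V1).eval()

clip = torch.randn(1, 3, 16, 112, 112)  # batch, channels, frames, height, width
with torch.no_grad():
    logits = model(clip)
print(logits.shape)  # torch.Size([1, 400]), one score per Kinetics-400 class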
from __future__ import print_function from __future__ import absolute_import from builtins import range from abc import ABCMeta, abstractmethod, abstractproperty import os import re import json from . import globalDictionaries from . import configTemplates from .dataset import Dataset from .helperFunctions import replaceByMap, addIndex, getCommandOutput2, boolfromstring, pythonboolstring from .TkAlExceptions import AllInOneError from six import with_metaclass class ValidationMetaClass(ABCMeta): sets = ["mandatories", "optionals", "needpackages"] dicts = ["defaults"] def __new__(cls, clsname, bases, dct): for setname in cls.sets: if setname not in dct: dct[setname] = set() dct[setname] = set.union(dct[setname], *(getattr(base, setname) for base in bases if hasattr(base, setname))) for dictname in cls.dicts: if dictname not in dct: dct[dictname] = {} for base in bases: if not hasattr(base, dictname): continue newdict = getattr(base, dictname) for key in set(newdict) & set(dct[dictname]): if newdict[key] != dct[dictname][key]: raise ValueError("Inconsistent values of defaults[{}]: {}, {}".format(key, newdict[key], dct[dictname][key])) dct[dictname].update(newdict) for setname in cls.sets: #e.g. removemandatories, used in preexistingvalidation #use with caution if "remove"+setname not in dct: dct["remove"+setname] = set() dct["remove"+setname] = set.union(dct["remove"+setname], *(getattr(base, "remove"+setname) for base in bases if hasattr(base, "remove"+setname))) dct[setname] -= dct["remove"+setname] return super(ValidationMetaClass, cls).__new__(cls, clsname, bases, dct) class GenericValidation(with_metaclass(ValidationMetaClass,object)): defaultReferenceName = "DEFAULT" mandatories = set() defaults = { "cmssw": os.environ['CMSSW_BASE'], "parallelJobs": "1", "jobid": "", "needsproxy": "false", } needpackages = {"Alignment/OfflineValidation"} optionals = {"jobmode"} def __init__(self, valName, alignment, config): import random self.name = valName self.alignmentToValidate = alignment self.general = config.getGeneral() self.randomWorkdirPart = "%0i"%random.randint(1,10e9) self.configFiles = [] self.config = config self.jobid = "" theUpdate = config.getResultingSection(self.valType+":"+self.name, defaultDict = self.defaults, demandPars = self.mandatories) self.general.update(theUpdate) self.jobmode = self.general["jobmode"] self.NJobs = int(self.general["parallelJobs"]) self.needsproxy = boolfromstring(self.general["needsproxy"], "needsproxy") # limit maximum number of parallel jobs to 40 # (each output file is approximately 20MB) maximumNumberJobs = 40 if self.NJobs > maximumNumberJobs: msg = ("Maximum allowed number of parallel jobs " +str(maximumNumberJobs)+" exceeded!!!") raise AllInOneError(msg) if self.NJobs > 1 and not isinstance(self, ParallelValidation): raise AllInOneError("Parallel jobs not implemented for {}!\n" "Please set parallelJobs = 1.".format(type(self).__name__)) self.jobid = self.general["jobid"] if self.jobid: try: #make sure it's actually a valid jobid output = getCommandOutput2("bjobs %(jobid)s 2>&1"%self.general) if "is not found" in output: raise RuntimeError except RuntimeError: raise AllInOneError("%s is not a valid jobid.\nMaybe it finished already?"%self.jobid) self.cmssw = self.general["cmssw"] badcharacters = r"\'" for character in badcharacters: if character in self.cmssw: raise AllInOneError("The bad characters " + badcharacters + " are not allowed in the cmssw\n" "path name. 
If you really have it in such a ridiculously named location,\n" "try making a symbolic link somewhere with a decent name.") try: os.listdir(self.cmssw) except OSError: raise AllInOneError("Your cmssw release " + self.cmssw + ' does not exist') if self.cmssw == os.environ["CMSSW_BASE"]: self.scramarch = os.environ["SCRAM_ARCH"] self.cmsswreleasebase = os.environ["CMSSW_RELEASE_BASE"] else: command = ("cd '" + self.cmssw + "' && eval `scramv1 ru -sh 2> /dev/null`" ' && echo "$CMSSW_BASE\n$SCRAM_ARCH\n$CMSSW_RELEASE_BASE"') commandoutput = getCommandOutput2(command).split('\n') self.cmssw = commandoutput[0] self.scramarch = commandoutput[1] self.cmsswreleasebase = commandoutput[2] self.packages = {} for package in self.needpackages: for placetolook in self.cmssw, self.cmsswreleasebase: pkgpath = os.path.join(placetolook, "src", package) if os.path.exists(pkgpath): self.packages[package] = pkgpath break else: raise AllInOneError("Package {} does not exist in {} or {}!".format(package, self.cmssw, self.cmsswreleasebase)) self.AutoAlternates = True if config.has_option("alternateTemplates","AutoAlternates"): try: self.AutoAlternates = json.loads(config.get("alternateTemplates","AutoAlternates").lower()) except ValueError: raise AllInOneError("AutoAlternates needs to be true or false, not %s" % config.get("alternateTemplates","AutoAlternates")) knownOpts = set(self.defaults.keys())|self.mandatories|self.optionals ignoreOpts = [] config.checkInput(self.valType+":"+self.name, knownSimpleOptions = knownOpts, ignoreOptions = ignoreOpts) def getRepMap(self, alignment = None): from .plottingOptions import PlottingOptions if alignment == None: alignment = self.alignmentToValidate try: result = PlottingOptions(self.config, self.valType) except KeyError: result = {} result.update(alignment.getRepMap()) result.update(self.general) result.update({ "workdir": os.path.join(self.general["workdir"], self.randomWorkdirPart), "datadir": self.general["datadir"], "logdir": self.general["logdir"], "CommandLineTemplate": ("#run configfile and post-proccess it\n" "cmsRun %(cfgFile)s\n" "%(postProcess)s "), "CMSSW_BASE": self.cmssw, "SCRAM_ARCH": self.scramarch, "CMSSW_RELEASE_BASE": self.cmsswreleasebase, "alignmentName": alignment.name, "condLoad": alignment.getConditions(), "LoadGlobalTagTemplate": configTemplates.loadGlobalTagTemplate, }) result.update(self.packages) return result @abstractproperty def filesToCompare(self): pass def getCompareStrings( self, requestId = None, plain = False ): result = {} repMap = self.getRepMap().copy() for validationId in self.filesToCompare: repMap["file"] = self.filesToCompare[ validationId ] if repMap["file"].startswith( "/castor/" ): repMap["file"] = "rfio:%(file)s"%repMap elif repMap["file"].startswith( "/store/" ): repMap["file"] = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if plain: result[validationId]=repMap["file"] else: result[validationId]= "%(file)s=%(title)s|%(color)s|%(style)s"%repMap if requestId == None: return result else: if not "." in requestId: requestId += ".%s"%self.defaultReferenceName if not requestId.split(".")[-1] in result: msg = ("could not find %s in reference Objects!" 
%requestId.split(".")[-1]) raise AllInOneError(msg) return result[ requestId.split(".")[-1] ] def createFiles(self, fileContents, path, repMap = None, repMaps = None): """repMap: single map for all files repMaps: a dict, with the filenames as the keys""" if repMap is not None and repMaps is not None: raise AllInOneError("createFiles can only take repMap or repMaps (or neither), not both") result = [] for fileName in fileContents: filePath = os.path.join(path, fileName) result.append(filePath) for (i, filePathi) in enumerate(addIndex(filePath, self.NJobs)): theFile = open( filePathi, "w" ) fileContentsi = fileContents[ fileName ] if repMaps is not None: repMap = repMaps[fileName] if repMap is not None: repMap.update({"nIndex": str(i)}) fileContentsi = replaceByMap(fileContentsi, repMap) theFile.write( fileContentsi ) theFile.close() return result def createConfiguration(self, fileContents, path, schedule = None, repMap = None, repMaps = None): self.configFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) if not schedule == None: schedule = [os.path.join( path, cfgName) for cfgName in schedule] for cfgName in schedule: if not cfgName in self.configFiles: msg = ("scheduled %s missing in generated configfiles: %s" %(cfgName, self.configFiles)) raise AllInOneError(msg) for cfgName in self.configFiles: if not cfgName in schedule: msg = ("generated configuration %s not scheduled: %s" %(cfgName, schedule)) raise AllInOneError(msg) self.configFiles = schedule return self.configFiles def createScript(self, fileContents, path, downloadFiles=[], repMap = None, repMaps = None): self.scriptFiles = self.createFiles(fileContents, path, repMap = repMap, repMaps = repMaps) for script in self.scriptFiles: for scriptwithindex in addIndex(script, self.NJobs): os.chmod(scriptwithindex,0o755) return self.scriptFiles def createCrabCfg(self, fileContents, path ): if self.NJobs > 1: msg = ("jobmode 'crab' not supported for parallel validation." " Please set parallelJobs = 1.") raise AllInOneError(msg) self.crabConfigFiles = self.createFiles(fileContents, path) return self.crabConfigFiles class GenericValidationData(GenericValidation): """ Subclass of `GenericValidation` which is the base for validations using datasets. """ needParentFiles = False mandatories = {"dataset", "maxevents"} defaults = { "runRange": "", "firstRun": "", "lastRun": "", "begin": "", "end": "", "JSON": "", "dasinstance": "prod/global", "ttrhbuilder":"WithAngleAndTemplate", "usepixelqualityflag": "True", } optionals = {"magneticfield"} def __init__(self, valName, alignment, config): """ This method adds additional items to the `self.general` dictionary which are only needed for validations using datasets. 
Arguments: - `valName`: String which identifies individual validation instances - `alignment`: `Alignment` instance to validate - `config`: `BetterConfigParser` instance which includes the configuration of the validations """ super(GenericValidationData, self).__init__(valName, alignment, config) # if maxevents is not specified, cannot calculate number of events for # each parallel job, and therefore running only a single job if int( self.general["maxevents"] ) < 0 and self.NJobs > 1: msg = ("Maximum number of events (maxevents) not specified: " "cannot use parallel jobs.") raise AllInOneError(msg) if int( self.general["maxevents"] ) / self.NJobs != float( self.general["maxevents"] ) / self.NJobs: msg = ("maxevents has to be divisible by parallelJobs") raise AllInOneError(msg) tryPredefinedFirst = (not self.jobmode.split( ',' )[0] == "crab" and self.general["JSON"] == "" and self.general["firstRun"] == "" and self.general["lastRun"] == "" and self.general["begin"] == "" and self.general["end"] == "") if self.general["dataset"] not in globalDictionaries.usedDatasets: globalDictionaries.usedDatasets[self.general["dataset"]] = {} if self.cmssw not in globalDictionaries.usedDatasets[self.general["dataset"]]: if globalDictionaries.usedDatasets[self.general["dataset"]] != {}: print(("Warning: you use the same dataset '%s' in multiple cmssw releases.\n" "This is allowed, but make sure it's not a mistake") % self.general["dataset"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw] = {False: None, True: None} Bfield = self.general.get("magneticfield", None) if globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] is None: dataset = Dataset( self.general["dataset"], tryPredefinedFirst = tryPredefinedFirst, cmssw = self.cmssw, cmsswrelease = self.cmsswreleasebase, magneticfield = Bfield, dasinstance = self.general["dasinstance"]) globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] = dataset if tryPredefinedFirst and not dataset.predefined(): #No point finding the data twice in that case globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][False] = dataset self.dataset = globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] self.general["magneticField"] = self.dataset.magneticField() self.general["defaultMagneticField"] = "MagneticField" if self.general["magneticField"] == "unknown": print("Could not get the magnetic field for this dataset.") print("Using the default: ", self.general["defaultMagneticField"]) self.general["magneticField"] = '.oO[defaultMagneticField]Oo.' if not self.jobmode.split( ',' )[0] == "crab": try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], parent = self.needParentFiles ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str(e) raise AllInOneError(msg) else: if self.dataset.predefined(): msg = ("For jobmode 'crab' you cannot use predefined datasets " "(in your case: '%s')."%( self.dataset.name() )) raise AllInOneError( msg ) try: theUpdate = config.getResultingSection(self.valType+":"+self.name, demandPars = ["parallelJobs"]) except AllInOneError as e: msg = str(e)[:-1]+" when using 'jobmode: crab'." 
raise AllInOneError(msg) self.general.update(theUpdate) if self.general["begin"] or self.general["end"]: ( self.general["begin"], self.general["end"], self.general["firstRun"], self.general["lastRun"] ) = self.dataset.convertTimeToRun( firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], shortTuple = False) if self.general["begin"] == None: self.general["begin"] = "" if self.general["end"] == None: self.general["end"] = "" self.general["firstRun"] = str( self.general["firstRun"] ) self.general["lastRun"] = str( self.general["lastRun"] ) if ( not self.general["firstRun"] ) and \ ( self.general["end"] or self.general["lastRun"] ): self.general["firstRun"] = str( self.dataset.runList()[0]["run_number"]) if ( not self.general["lastRun"] ) and \ ( self.general["begin"] or self.general["firstRun"] ): self.general["lastRun"] = str( self.dataset.runList()[-1]["run_number"]) if self.general["firstRun"] and self.general["lastRun"]: if int(self.general["firstRun"]) > int(self.general["lastRun"]): msg = ( "The lower time/runrange limit ('begin'/'firstRun') " "chosen is greater than the upper time/runrange limit " "('end'/'lastRun').") raise AllInOneError( msg ) self.general["runRange"] = (self.general["firstRun"] + '-' + self.general["lastRun"]) try: self.general["datasetDefinition"] = self.dataset.datasetSnippet( jsonPath = self.general["JSON"], firstRun = self.general["firstRun"], lastRun = self.general["lastRun"], begin = self.general["begin"], end = self.general["end"], crab = True ) except AllInOneError as e: msg = "In section [%s:%s]: "%(self.valType, self.name) msg += str( e ) raise AllInOneError( msg ) self.general["usepixelqualityflag"] = pythonboolstring(self.general["usepixelqualityflag"], "usepixelqualityflag") def getRepMap(self, alignment = None): result = super(GenericValidationData, self).getRepMap(alignment) outputfile = os.path.expandvars(replaceByMap( "%s_%s_.oO[name]Oo..root" % (self.outputBaseName, self.name) , result)) resultfile = os.path.expandvars(replaceByMap(("/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./" + "%s_%s_.oO[name]Oo..root" % (self.resultBaseName, self.name)) , result)) result.update({ "resultFile": ".oO[resultFiles[.oO[nIndex]Oo.]]Oo.", "resultFiles": addIndex(resultfile, self.NJobs), "finalResultFile": resultfile, "outputFile": ".oO[outputFiles[.oO[nIndex]Oo.]]Oo.", "outputFiles": addIndex(outputfile, self.NJobs), "finalOutputFile": outputfile, "ProcessName": self.ProcessName, "Bookkeeping": self.Bookkeeping, "LoadBasicModules": self.LoadBasicModules, "TrackSelectionRefitting": self.TrackSelectionRefitting, "ValidationConfig": self.ValidationTemplate, "FileOutputTemplate": self.FileOutputTemplate, "DefinePath": self.DefinePath, }) return result @property def cfgName(self): return "%s.%s.%s_cfg.py"%( self.configBaseName, self.name, self.alignmentToValidate.name ) @abstractproperty def ProcessName(self): pass @property def cfgTemplate(self): return configTemplates.cfgTemplate @abstractproperty def ValidationTemplate(self): pass @property def filesToCompare(self): return {self.defaultReferenceName: self.getRepMap()["finalResultFile"]} def createConfiguration(self, path ): repMap = self.getRepMap() cfgs = {self.cfgName: self.cfgTemplate} super(GenericValidationData, self).createConfiguration(cfgs, path, repMap=repMap) def createScript(self, path, template = configTemplates.scriptTemplate, downloadFiles=[], repMap = None, repMaps = None): scriptName = 
"%s.%s.%s.sh"%(self.scriptBaseName, self.name, self.alignmentToValidate.name ) if repMap is None and repMaps is None: repMap = self.getRepMap() repMap["CommandLine"]="" for cfg in self.configFiles: repMap["CommandLine"]+= repMap["CommandLineTemplate"]%{"cfgFile":addIndex(cfg, self.NJobs, ".oO[nIndex]Oo."), "postProcess":"" } scripts = {scriptName: template} return super(GenericValidationData, self).createScript(scripts, path, downloadFiles = downloadFiles, repMap = repMap, repMaps = repMaps) def createCrabCfg(self, path, crabCfgBaseName): """ Method which creates a `crab.cfg` for a validation on datasets. Arguments: - `path`: Path at which the file will be stored. - `crabCfgBaseName`: String which depends on the actual type of validation calling this method. """ crabCfgName = "crab.%s.%s.%s.cfg"%( crabCfgBaseName, self.name, self.alignmentToValidate.name ) repMap = self.getRepMap() repMap["script"] = "dummy_script.sh" # repMap["crabOutputDir"] = os.path.basename( path ) repMap["crabWorkingDir"] = crabCfgName.split( '.cfg' )[0] self.crabWorkingDir = repMap["crabWorkingDir"] repMap["numberOfJobs"] = self.general["parallelJobs"] repMap["cfgFile"] = self.configFiles[0] repMap["queue"] = self.jobmode.split( ',' )[1].split( '-q' )[1] if self.dataset.dataType() == "mc": repMap["McOrData"] = "events = .oO[nEvents]Oo." elif self.dataset.dataType() == "data": repMap["McOrData"] = "lumis = -1" if self.jobmode.split( ',' )[0] == "crab": print ("For jobmode 'crab' the parameter 'maxevents' will be " "ignored and all events will be processed.") else: raise AllInOneError("Unknown data type! Can't run in crab mode") crabCfg = {crabCfgName: replaceByMap( configTemplates.crabCfgTemplate, repMap ) } return super(GenericValidationData, self).createCrabCfg( crabCfg, path ) @property def Bookkeeping(self): return configTemplates.Bookkeeping @property def LoadBasicModules(self): return configTemplates.LoadBasicModules @abstractproperty def TrackSelectionRefitting(self): pass @property def FileOutputTemplate(self): return configTemplates.FileOutputTemplate @abstractproperty def DefinePath(self): pass class GenericValidationData_CTSR(GenericValidationData): #common track selection and refitting defaults = { "momentumconstraint": "None", "openmasswindow": "False", "cosmicsdecomode": "True", "removetrackhitfiltercommands": "", "appendtrackhitfiltercommands": "", } def getRepMap(self, alignment=None): result = super(GenericValidationData_CTSR, self).getRepMap(alignment) from .trackSplittingValidation import TrackSplittingValidation result.update({ "ValidationSequence": self.ValidationSequence, "istracksplitting": str(isinstance(self, TrackSplittingValidation)), "cosmics0T": str(self.cosmics0T), "use_d0cut": str(self.use_d0cut), "ispvvalidation": str(self.isPVValidation) }) commands = [] for removeorappend in "remove", "append": optionname = removeorappend + "trackhitfiltercommands" if result[optionname]: for command in result[optionname].split(","): command = command.strip() commands.append('process.TrackerTrackHitFilter.commands.{}("{}")'.format(removeorappend, command)) result["trackhitfiltercommands"] = "\n".join(commands) return result @property def use_d0cut(self): return "Cosmics" not in self.general["trackcollection"] #use it for collisions only @property def isPVValidation(self): return False # only for PV Validation sequence @property def TrackSelectionRefitting(self): return configTemplates.CommonTrackSelectionRefitting @property def DefinePath(self): return 
configTemplates.DefinePath_CommonSelectionRefitting @abstractproperty def ValidationSequence(self): pass @property def cosmics0T(self): if "Cosmics" not in self.general["trackcollection"]: return False Bfield = self.dataset.magneticFieldForRun() if Bfield < 0.5: return True if isinstance(Bfield, str): if "unknown " in Bfield: msg = Bfield.replace("unknown ","",1) elif Bfield == "unknown": msg = "Can't get the B field for %s." % self.dataset.name() else: msg = "B field = {}???".format(Bfield) raise AllInOneError(msg + "\n" "To use this dataset, specify magneticfield = [value] in your .ini config file.") return False class ParallelValidation(GenericValidation): @classmethod def initMerge(cls): return "" @abstractmethod def appendToMerge(self): pass @classmethod def doInitMerge(cls): from .plottingOptions import PlottingOptions result = cls.initMerge() result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result def doMerge(self): result = self.appendToMerge() if result[-1] != "\n": result += "\n" result += ("if [[ tmpMergeRetCode -eq 0 ]]; then\n" " xrdcp -f .oO[finalOutputFile]Oo. root://eoscms//eos/cms.oO[finalResultFile]Oo.\n" "fi\n" "if [[ ${tmpMergeRetCode} -gt ${mergeRetCode} ]]; then\n" " mergeRetCode=${tmpMergeRetCode}\n" "fi\n") result = replaceByMap(result, self.getRepMap()) return result class ValidationWithPlots(GenericValidation): @classmethod def runPlots(cls, validations): return ("cp .oO[plottingscriptpath]Oo. .\n" "root -x -b -q .oO[plottingscriptname]Oo.++") @abstractmethod def appendToPlots(self): pass @abstractmethod def plottingscriptname(cls): """override with a classmethod""" @abstractmethod def plottingscripttemplate(cls): """override with a classmethod""" @abstractmethod def plotsdirname(cls): """override with a classmethod""" @classmethod def doRunPlots(cls, validations): from .plottingOptions import PlottingOptions cls.createPlottingScript(validations) result = cls.runPlots(validations) result = replaceByMap(result, PlottingOptions(None, cls)) if result and result[-1] != "\n": result += "\n" return result @classmethod def createPlottingScript(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() filename = replaceByMap(".oO[plottingscriptpath]Oo.", repmap) repmap["PlottingInstantiation"] = "\n".join( replaceByMap(v.appendToPlots(), v.getRepMap()).rstrip("\n") for v in validations ) plottingscript = replaceByMap(cls.plottingscripttemplate(), repmap) with open(filename, 'w') as f: f.write(plottingscript) class ValidationWithPlotsSummaryBase(ValidationWithPlots): class SummaryItem(object): def __init__(self, name, values, format=None, latexname=None, latexformat=None): """ name: name of the summary item, goes on top of the column values: value for each alignment (in order of rows) format: python format string (default: {:.3g}, meaning up to 3 significant digits) latexname: name in latex form, e.g. 
if name=sigma you might want latexname=\sigma (default: name) latexformat: format for latex (default: format) """ if format is None: format = "{:.3g}" if latexname is None: latexname = name if latexformat is None: latexformat = format self.__name = name self.__values = values self.__format = format self.__latexname = latexname self.__latexformat = latexformat def name(self, latex=False): if latex: return self.__latexname else: return self.__name def format(self, value, latex=False): if latex: fmt = self.__latexformat else: fmt = self.__format if re.match(".*[{][^}]*[fg][}].*", fmt): value = float(value) return fmt.format(value) def values(self, latex=False): result = [self.format(v, latex=latex) for v in self.__values] return result def value(self, i, latex): return self.values(latex)[i] @abstractmethod def getsummaryitems(cls, folder): """override with a classmethod that returns a list of SummaryItems based on the plots saved in folder""" __summaryitems = None __lastfolder = None @classmethod def summaryitemsstring(cls, folder=None, latex=False, transpose=True): if folder is None: folder = cls.plotsdirname() if folder.startswith( "/castor/" ): folder = "rfio:%(file)s"%repMap elif folder.startswith( "/store/" ): folder = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap if cls.__summaryitems is None or cls.__lastfolder != folder: cls.__lastfolder = folder cls.__summaryitems = cls.getsummaryitems(folder) summaryitems = cls.__summaryitems if not summaryitems: raise AllInOneError("No summary items!") size = {len(_.values(latex)) for _ in summaryitems} if len(size) != 1: raise AllInOneError("Some summary items have different numbers of values\n{}".format(size)) size = size.pop() if transpose: columnwidths = ([max(len(_.name(latex)) for _ in summaryitems)] + [max(len(_.value(i, latex)) for _ in summaryitems) for i in range(size)]) else: columnwidths = [max(len(entry) for entry in [_.name(latex)] + _.values(latex)) for _ in summaryitems] if latex: join = " & " else: join = " " row = join.join("{{:{}}}".format(width) for width in columnwidths) if transpose: rows = [row.format(*[_.name(latex)]+_.values(latex)) for _ in summaryitems] else: rows = [] rows.append(row.format(*(_.name for _ in summaryitems))) for i in range(size): rows.append(row.format(*(_.value(i, latex) for _ in summaryitems))) if latex: join = " \\\\\n" else: join = "\n" result = join.join(rows) if latex: result = (r"\begin{{tabular}}{{{}}}".format("|" + "|".join("c"*(len(columnwidths))) + "|") + "\n" + result + "\n" + r"\end{tabular}") return result @classmethod def printsummaryitems(cls, *args, **kwargs): print(cls.summaryitemsstring(*args, **kwargs)) @classmethod def writesummaryitems(cls, filename, *args, **kwargs): with open(filename, "w") as f: f.write(cls.summaryitemsstring(*args, **kwargs)+"\n") class ValidationWithPlotsSummary(ValidationWithPlotsSummaryBase): @classmethod def getsummaryitems(cls, folder): result = [] with open(os.path.join(folder, "{}Summary.txt".format(cls.__name__))) as f: for line in f: split = line.rstrip("\n").split("\t") kwargs = {} for thing in split[:]: if thing.startswith("format="): kwargs["format"] = thing.replace("format=", "", 1) split.remove(thing) if thing.startswith("latexname="): kwargs["latexname"] = thing.replace("latexname=", "", 1) split.remove(thing) if thing.startswith("latexformat="): kwargs["latexformat"] = thing.replace("latexformat=", "", 1) split.remove(thing) name = split[0] values = split[1:] result.append(cls.SummaryItem(name, values, **kwargs)) return result class 
ValidationWithComparison(GenericValidation): @classmethod def doComparison(cls, validations): from .plottingOptions import PlottingOptions repmap = PlottingOptions(None, cls).copy() repmap["compareStrings"] = " , ".join(v.getCompareStrings("OfflineValidation") for v in validations) repmap["compareStringsPlain"] = " , ".join(v.getCompareStrings("OfflineValidation", True) for v in validations) comparison = replaceByMap(cls.comparisontemplate(), repmap) return comparison @classmethod def comparisontemplate(cls): return configTemplates.compareAlignmentsExecution @classmethod def comparealignmentspath(cls): return ".oO[Alignment/OfflineValidation]Oo./scripts/.oO[compareAlignmentsName]Oo." @abstractmethod def comparealignmentsname(cls): """classmethod""" class ValidationForPresentation(ValidationWithPlots): @abstractmethod def presentationsubsections(cls): """classmethod"""
45.268844
157
0.568852
[ "Apache-2.0" ]
4quarks/cmssw
Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py
36,034
Python
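# Illustrative sketch: ValidationWithPlotsSummaryBase.summaryitemsstring above lays out a
# plain-text table by first measuring a width per column and then building a row format
# string from those widths. The stripped-down helper below shows only that idea; the
# function name and the sample values are made up for this example and are not part of
# the TkAlAllInOneTool code.
def format_table(headers, rows):
    # One width per column: the longest cell (or header) in that column.
    columns = list(zip(headers, *rows))
    widths = [max(len(str(cell)) for cell in column) for column in columns]
    fmt = "  ".join("{{:{}}}".format(width) for width in widths)
    lines = [fmt.format(*headers)]
    lines += [fmt.format(*row) for row in rows]
    return "\n".join(lines)

print(format_table(["name", "sigma"], [["align_A", "0.12"], ["align_B", "0.034"]]))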
# ---------------------------------------------------------------------- # # Brad T. Aagaard, U.S. Geological Survey # Charles A. Williams, GNS Science # Matthew G. Knepley, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # Application for searching PyLith .cfg files. import sys import argparse import pathlib import textwrap import os from pylith.utils.converters import string_to_list from pylith.utils.SimulationMetadata import fromFile class ConfigSearchApp(): """Application for searching PyLith .cfg files. """ def __init__(self): """Constructor. """ self.filters = {} def main(self, **kwargs): """Main entry point. Keyword arguments: searchpath (str), default: "." Search path for .cfg files. display (str), default: "all" List of metadata to display in search results. keywords (str), default: None Comma delimited list of keywords for filtering search results. features (str), default: None Comma delimited list of features for filtering search results. authors (str), default: None Comma delimited list of authors for filtering search results. version (str), default: None PyLith version for filtering search results. """ args = argparse.Namespace( **kwargs) if kwargs else self._parse_command_line() self._set_filters(args) for filename in sorted(pathlib.Path(args.searchpath).glob("**/*.cfg")): metadata = fromFile(filename) if metadata: if not len(metadata.arguments): if args.verbose: print(f"INFO: Skipping file {filename} with only base metadata.") continue filter_fn = self._apply_filters_incompatible if args.incompatible else self._apply_filters if filter_fn(metadata): self._display_metadata(filename, metadata, args.display) elif args.verbose: print(f"MISMATCH: File {filename} did not pass metadata filter.") elif args.verbose: print(f"INFO: File {filename} missing simulation metadata.") def _set_filters(self, options): """Set filters for display from command line option. Args: options (argsparse.Namespace) Command line options. """ if options.keywords: self.filters["keywords"] = string_to_list(options.keywords) if options.features: self.filters["features"] = string_to_list(options.features) if options.authors: self.filters["authors"] = string_to_list(options.authors) if options.version: self.filters["version"] = options.version def _apply_filters(self, metadata): """Apply filters to metadata. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata meets filter requirements, False otherwise. 
""" if "keywords" in self.filters: if not metadata.keywords: return False if not all(keyword in metadata.keywords for keyword in self.filters["keywords"]): return False if "features" in self.filters: if not metadata.features: return False if not all(feature in metadata.features for feature in self.filters["features"]): return False if "authors" in self.filters: if not metadata.authors: return False if not all(author in metadata.authors for author in self.filters["authors"]): return False if "version" in self.filters: if not metadata.pylith_version: return False for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return False return True def _apply_filters_incompatible(self, metadata): """Apply filters to metadata to find incompatible parameter files. Args: metadata (pylith.utils.SimulationMetadata) Simulation metadata. Returns: (bool) True if metadata is incompatible with filter requirements, False otherwise. """ if "keywords" in self.filters: if not metadata.keywords: return True if "features" in self.filters: if not "features" in metadata: return True if "authors" in self.filters: if not "authors" in metadata: return True if "version" in self.filters: if not metadata.pylith_version: return True for verMeta in metadata.pylith_version: if not eval("{ver} {verMeta}".format(ver=self.filters["version"], verMeta=verMeta)): return True return False def _display_metadata(self, filename, metadata, options): """Print metadata to stdout. Args: filename (str) Name of simulation .cfg file. metadata (pylith.utils.SimulationMetadata) Simulation metadata. options (list of str) List of metadata to display. """ INDENT = " "*4 show_all = "all" in options options = string_to_list(options) line0 = f"{filename}" if "version" in options or show_all: if metadata.version: line0 += f" v{metadata.version}" else: line0 += " missing 'version'" if "pylith_version" in options or show_all: if metadata.pylith_version: line0 += "; requires PyLith " + " and ".join(metadata.pylith_version) else: line0 += "; missing 'pylith_version'" lines = [] if "description" in options or show_all: if metadata.description: lines += [metadata.description] else: lines += ["missing 'description'"] if "authors" in options or show_all: if metadata.authors: lines += ["Authors: " + ", ".join(metadata.authors)] else: lines += ["missing 'authors'"] if "keywords" in options or show_all: if metadata.keywords: lines += ["Keywords: " + ", ".join(metadata.keywords)] else: lines += ["missing 'keywords'"] if "features" in options or show_all: if metadata.features: features = textwrap.fill(", ".join(metadata.features), width=120) lines += ["Features:"] + textwrap.indent(features, INDENT).split("\n") else: lines += ["missing 'features'"] if "arguments" in options or show_all: if metadata.arguments: lines += ["pylith " + " ".join(metadata.arguments)] else: lines += ["missing 'arguments'"] print(line0) if len(lines): print(textwrap.indent("\n".join(lines), INDENT)) def _parse_command_line(self): """Parse command line arguments. Returns (argsparse.Namespace) Command line arguments. """ DESCRIPTION = ( "Application for searching PyLith .cfg parameter files." 
) parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--path", action="store", dest="searchpath", default=".", help="Search path for .cfg files.") parser.add_argument("--display", action="store", dest="display", default="all", help="List of metadata to display in search results.") parser.add_argument("--verbose", action="store_true", dest="verbose", help="Report missing metadata.") parser.add_argument("--keywords", action="store", dest="keywords", help="Comma delimited list of keywords for filtering search results.") parser.add_argument("--features", action="store", dest="features", help="Comma delimited list of features for filtering search results.") parser.add_argument("--authors", action="store", dest="authors", help="Comma delimited list of authors for filtering search results.") parser.add_argument("--version", action="store", dest="version", help="PyLith version for filtering search results.") parser.add_argument("--incompatible", action="store_true", dest="incompatible", help="Filter search results to show incompatible parameter files.") args = parser.parse_args() return args # End of file
39.260331
113
0.564362
[ "MIT" ]
Shengduo/pylith
pylith/apps/ConfigSearchApp.py
9,501
Python
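# Illustrative sketch: the keyword/feature/author filters in _apply_filters above all use
# the same "every requested item must appear in the file's metadata" test. A minimal
# standalone version of that check (the function name and the sample keyword values are
# invented for this example, not taken from PyLith):
def passes_filter(metadata_values, required):
    # Reject files with no metadata at all, otherwise require every requested item.
    if not metadata_values:
        return False
    return all(item in metadata_values for item in required)

print(passes_filter(["quasistatic", "fault"], ["fault"]))         # True
print(passes_filter(["quasistatic", "fault"], ["viscoelastic"]))  # False
print(passes_filter([], ["fault"]))                               # False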
# Interview Questions
"""
Given the following list of objects {user, loginTime, logoutTime}.
What is the maximum number of concurrent users logged in at the same time?

Input: [
    {user: A, login: 1, logout: 3},
    {user: B, login: 3, logout: 4},
    {user: C, login: 1, logout: 2},
    {user: D, login: 123123123, logout: 987987987},
    {user: E, login: 1, logout: 3}
]
Output: 3
"""

from collections import Counter

datas = [
    {'user': 'A', 'login': 1, 'logout': 3},
    {'user': 'B', 'login': 3, 'logout': 4},
    {'user': 'C', 'login': 1, 'logout': 2},
    {'user': 'D', 'login': 123123123, 'logout': 987987987},
    {'user': 'E', 'login': 1, 'logout': 3}
]


def c(data):
    # Count logins and logouts per timestamp, then sweep the sorted timestamps,
    # tracking how many users are online after each one.
    v = [[e['login'] for e in data], [e['logout'] for e in data]]
    t = [Counter(v[0]), Counter(v[1])]
    allt = sorted(set(v[0] + v[1]))
    ret = []
    cp = 0
    for e in allt:
        cp += t[0][e]   # users logging in at this time
        cp -= t[1][e]   # users logging out at this time
        ret.append(cp)
    return max(ret)


max_users = c(datas)
print(max_users)
18.45614
141
0.520913
[ "MIT" ]
BizShuk/code_concept
interview/booking.com/max_concurrent.py
1,052
Python
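# Illustrative sketch: the interview solution above is a sweep over sorted timestamps,
# adding the number of logins and subtracting the number of logouts at each time. The
# rewrite below keeps a single Counter of +1/-1 deltas instead of two Counters; the
# function name is invented for this example. It reproduces the expected output of 3.
from collections import Counter

def max_concurrent_users(sessions):
    delta = Counter()
    for s in sessions:
        delta[s['login']] += 1    # one more user online from this time
        delta[s['logout']] -= 1   # one fewer user online from this time
    best = current = 0
    for t in sorted(delta):
        current += delta[t]
        best = max(best, current)
    return best

sessions = [
    {'user': 'A', 'login': 1, 'logout': 3},
    {'user': 'B', 'login': 3, 'logout': 4},
    {'user': 'C', 'login': 1, 'logout': 2},
    {'user': 'D', 'login': 123123123, 'logout': 987987987},
    {'user': 'E', 'login': 1, 'logout': 3},
]
print(max_concurrent_users(sessions))  # 3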
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import paddle
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard


class TestRollOp(OpTest):
    def setUp(self):
        self.op_type = "roll"
        self.init_dtype_type()
        self.inputs = {'X': np.random.random(self.x_shape).astype(self.dtype)}
        self.attrs = {'shifts': self.shifts, 'dims': self.dims}
        self.outputs = {
            'Out': np.roll(self.inputs['X'], self.attrs['shifts'],
                           self.attrs['dims'])
        }

    def init_dtype_type(self):
        self.dtype = np.float64
        self.x_shape = (100, 4, 5)
        self.shifts = [101, -1]
        self.dims = [0, -2]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out')


class TestRollOpCase2(TestRollOp):
    def init_dtype_type(self):
        self.dtype = np.float32
        self.x_shape = (100, 10, 5)
        self.shifts = [8, -1]
        self.dims = [-1, -2]


class TestRollAPI(unittest.TestCase):
    def input_data(self):
        self.data_x = np.array(
            [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])

    def test_roll_op_api(self):
        self.input_data()

        # case 1:
        with program_guard(Program(), Program()):
            x = fluid.layers.data(name='x', shape=[-1, 3])
            z = paddle.roll(x, shifts=1)
            exe = fluid.Executor(fluid.CPUPlace())
            res, = exe.run(feed={'x': self.data_x},
                           fetch_list=[z.name],
                           return_numpy=False)
            expect_out = np.array([[9.0, 1.0, 2.0], [3.0, 4.0, 5.0],
                                   [6.0, 7.0, 8.0]])
            self.assertTrue(np.allclose(expect_out, np.array(res)))

        # case 2:
        with program_guard(Program(), Program()):
            x = fluid.layers.data(name='x', shape=[-1, 3])
            z = paddle.roll(x, shifts=1, dims=0)
            exe = fluid.Executor(fluid.CPUPlace())
            res, = exe.run(feed={'x': self.data_x},
                           fetch_list=[z.name],
                           return_numpy=False)
            expect_out = np.array([[7.0, 8.0, 9.0], [1.0, 2.0, 3.0],
                                   [4.0, 5.0, 6.0]])
            self.assertTrue(np.allclose(expect_out, np.array(res)))

    def test_dygraph_api(self):
        self.input_data()
        # case 1:
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(self.data_x)
            z = paddle.roll(x, shifts=1)
            np_z = z.numpy()
            expect_out = np.array([[9.0, 1.0, 2.0], [3.0, 4.0, 5.0],
                                   [6.0, 7.0, 8.0]])
            self.assertTrue(np.allclose(expect_out, np_z))

        # case 2:
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(self.data_x)
            z = paddle.roll(x, shifts=1, dims=0)
            np_z = z.numpy()
            expect_out = np.array([[7.0, 8.0, 9.0], [1.0, 2.0, 3.0],
                                   [4.0, 5.0, 6.0]])
            self.assertTrue(np.allclose(expect_out, np_z))


if __name__ == "__main__":
    unittest.main()
34.300885
78
0.557792
[ "Apache-2.0" ]
WuHaobo/Paddle
python/paddle/fluid/tests/unittests/test_roll_op.py
3,876
Python
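# Illustrative sketch: the expected arrays hard-coded in TestRollAPI above can be
# cross-checked with NumPy alone, since np.roll with no axis flattens the array first
# (the analogue of calling paddle.roll without dims) and axis=0 shifts whole rows.
import numpy as np

x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])

print(np.roll(x, 1))          # [[9. 1. 2.] [3. 4. 5.] [6. 7. 8.]] -> matches case 1
print(np.roll(x, 1, axis=0))  # [[7. 8. 9.] [1. 2. 3.] [4. 5. 6.]] -> matches case 2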
import importlib
import os

from datasets.hdf5 import get_test_loaders
from unet3d import utils
from unet3d.config import load_config
from unet3d.model import get_model

logger = utils.get_logger('UNet3DPredictor')


def _get_predictor(model, loader, output_file, config):
    predictor_config = config.get('predictor', {})
    class_name = predictor_config.get('name', 'StandardPredictor')

    m = importlib.import_module('unet3d.predictor')
    predictor_class = getattr(m, class_name)
    # model: UNet3D, loader: test_loader, output_file: data.h5, config: config.yaml
    return predictor_class(model, loader, output_file, config, **predictor_config)


def main():
    # Load configuration
    config = load_config()

    # Create the model
    model = get_model(config)

    # Load model state
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)

    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])

    logger.info('Loading HDF5 datasets...')
    test_loader = get_test_loaders(config)['test']
    for i, data_pair in enumerate(test_loader):
        output_file = 'predict_' + str(i) + '.h5'
        predictor = _get_predictor(model, data_pair, output_file, config)
        predictor.predict()


if __name__ == '__main__':
    main()
30.869565
84
0.687324
[ "MIT" ]
stonebegin/Promise12-3DUNet
predict.py
1,420
Python
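# Illustrative sketch: _get_predictor above picks the predictor class by name at runtime
# with importlib.import_module + getattr. The same pattern, shown against the standard
# library so it runs anywhere (the helper name load_class is invented for this example):
import importlib

def load_class(module_path, class_name):
    # Import the module by its dotted path, then look the class up by name.
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

OrderedDict = load_class('collections', 'OrderedDict')
print(OrderedDict([('a', 1), ('b', 2)]))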
# -*- coding: utf-8 -*- # # django-staticbuilder documentation build configuration file, created by # sphinx-quickstart on Wed Jan 30 22:32:51 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import re, sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.todo'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'django-staticbuilder' copyright = u'2013, Matthew Tretter' pkgmeta = {} execfile(os.path.join(os.path.dirname(__file__), '..', '..', 'staticbuilder', 'pkgmeta.py'), pkgmeta) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = re.match('\d+\.\d+', pkgmeta['__version__']).group() # The full version, including alpha/beta/rc tags. release = pkgmeta['__version__'] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
#html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'django-staticbuilderdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'django-staticbuilder.tex', u'django-staticbuilder Documentation', u'Matthew Tretter', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ ('index', 'django-staticbuilder', u'django-staticbuilder Documentation', [u'Matthew Tretter'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'django-staticbuilder', u'django-staticbuilder Documentation', u'Matthew Tretter', 'django-staticbuilder', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
32.708502
82
0.714692
[ "MIT" ]
hzdg/django-staticbuilder
docs/source/conf.py
8,079
Python
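# Illustrative sketch: conf.py above derives the short X.Y version by matching the start
# of the full release string. The same one-liner with a made-up version value standing in
# for pkgmeta['__version__']:
import re

release = "1.4.2b3"                               # stand-in value, not the real package version
version = re.match(r"\d+\.\d+", release).group()  # -> "1.4"
print(version)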
# coding: utf-8 """ Purity//FB REST Client Client for Purity//FB REST API (1.0 - 1.6), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/). OpenAPI spec version: 1.6 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class UsageUsersApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def list_user_usage(self, **kwargs): """ A list of usage user entries This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_user_usage(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A list of names. :param str filter: The filter to be used for query. :param int limit: limit, should be >= 0 :param str sort: The way to order the results. :param int start: start :param str token: token :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned. :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. This cannot be provided together with user_names query parameter. :return: QuotasUserResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.list_user_usage_with_http_info(**kwargs) else: (data) = self.list_user_usage_with_http_info(**kwargs) return data def list_user_usage_with_http_info(self, **kwargs): """ A list of usage user entries This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_user_usage_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A list of names. :param str filter: The filter to be used for query. :param int limit: limit, should be >= 0 :param str sort: The way to order the results. :param int start: start :param str token: token :param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned. :param list[str] uids: A comma-separated list of user IDs. If after filtering, there is not at least one resource that matches each of the elements of user IDs, then an error is returned. 
This cannot be provided together with user_names query parameter. :return: QuotasUserResponse If the method is called asynchronously, returns the request thread. """ all_params = ['names', 'filter', 'limit', 'sort', 'start', 'token', 'file_system_names', 'uids'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_user_usage" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'sort' in params: query_params.append(('sort', params['sort'])) if 'start' in params: query_params.append(('start', params['start'])) if 'token' in params: query_params.append(('token', params['token'])) if 'file_system_names' in params: query_params.append(('file_system_names', params['file_system_names'])) collection_formats['file_system_names'] = 'csv' if 'uids' in params: query_params.append(('uids', params['uids'])) collection_formats['uids'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = ['AuthTokenHeader'] return self.api_client.call_api('/1.6/usage/users', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='QuotasUserResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
42.758824
261
0.601458
[ "Apache-2.0" ]
unixtreme/purity_fb_python_client
purity_fb/purity_fb_1dot6/apis/usage_users_api.py
7,269
Python
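# Illustrative sketch: list_user_usage above marks 'names', 'file_system_names' and 'uids'
# with the 'csv' collection format, which the generated api_client serializes as
# comma-separated query values. A simplified stand-in for that serialization step
# (function name invented for the example, not part of the purity_fb client):
def serialize_csv_params(query_params, csv_fields):
    out = []
    for key, value in query_params:
        if key in csv_fields and isinstance(value, (list, tuple)):
            value = ",".join(str(v) for v in value)
        out.append((key, value))
    return out

print(serialize_csv_params([('names', ['fs1', 'fs2']), ('limit', 5)], {'names'}))
# [('names', 'fs1,fs2'), ('limit', 5)]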
from django.conf import settings

MINIJS_MODE = getattr(settings, "MINIJS_MODE", "development")
MINIJS_OUTPUT_DIR = getattr(settings, "MINIJS_OUTPUT_DIR", "minijs")
MINIJS_BYPASS = getattr(settings, "MINIJS_BYPASS", False)
MINIJS_ALWAYS_MINIFY = getattr(settings, "MINIJS_ALWAYS_MINIFY", False)
MINIJS_ALWAYS_COMPILE_COFFEESCRIPT_DURING_BYPASS = getattr(settings, "MINIJS_ALWAYS_COMPILE_COFFEESCRIPT_DURING_BYPASS", False)
MINIJS_COMPILE_COFFEESCRIPTS_TOGETHER = getattr(settings, "MINIJS_COMPILE_COFFEESCRIPTS_TOGETHER", True)
COFFEESCRIPT_EXECUTABLE = getattr(settings, "COFFEESCRIPT_EXECUTABLE", "coffee")
67.555556
127
0.84375
[ "MIT" ]
tombenner/minijs
settings.py
608
Python
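# Illustrative sketch: each setting above falls back to a default when the attribute is
# missing from django.conf.settings. The same getattr fallback with a plain object
# standing in for Django's settings (the FakeSettings class is invented for this example):
class FakeSettings:
    MINIJS_MODE = "production"   # only this value is overridden

fake = FakeSettings()
print(getattr(fake, "MINIJS_MODE", "development"))  # production (found on the object)
print(getattr(fake, "MINIJS_BYPASS", False))        # False (falls back to the default)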
""" Unit tests for the structured metamodel component. """ import unittest import inspect import numpy as np from numpy.testing import assert_almost_equal import openmdao.api as om from openmdao.utils.assert_utils import assert_near_equal, assert_warning, assert_check_partials from openmdao.utils.general_utils import set_pyoptsparse_opt scipy_gte_019 = True try: from scipy.interpolate._bsplines import make_interp_spline except ImportError: scipy_gte_019 = False # check that pyoptsparse is installed # if it is, try to use SNOPT but fall back to SLSQP OPT, OPTIMIZER = set_pyoptsparse_opt('SNOPT') if OPTIMIZER: from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver x = np.array([-0.97727788, -0.15135721, -0.10321885, 0.40015721, 0.4105985, 0.95008842, 0.97873798, 1.76405235, 1.86755799, 2.2408932 ]) y = np.array([ 0.12167502, 0.14404357, 0.44386323, 0.76103773, 1.45427351]) z = np.array([-2.55298982, -1.45436567, -0.85409574, -0.74216502, -0.20515826, 0.04575852, 0.3130677, 0.33367433, 0.6536186, 0.8644362, 1.49407907, 2.26975462]) f = np.array([ [[-0.18718385, 1.53277921, 1.46935877, 0.15494743, 0.37816252, -0.88778575, -1.98079647, -0.34791215, 0.15634897, 1.23029068, 1.20237985, -0.38732682], [-0.30230275, -1.04855297, -1.42001794, -1.70627019, 1.9507754, -0.50965218, -0.4380743, -1.25279536, 0.77749036, -1.61389785, -0.21274028, -0.89546656], [ 0.3869025, -0.51080514, -1.18063218, -0.02818223, 0.42833187, 0.06651722, 0.3024719, -0.63432209, -0.36274117, -0.67246045, -0.35955316, -0.81314628], [-1.7262826, 0.17742614, -0.40178094, -1.63019835, 0.46278226, -0.90729836, 0.0519454, 0.72909056, 0.12898291, 1.13940068, -1.23482582, 0.40234164], [-0.68481009, -0.87079715, -0.57884966, -0.31155253, 0.05616534, -1.16514984, 0.90082649, 0.46566244, -1.53624369, 1.48825219, 1.89588918, 1.17877957]], [[-0.17992484, -1.07075262, 1.05445173, -0.40317695, 1.22244507, 0.20827498, 0.97663904, 0.3563664, 0.70657317, 0.01050002, 1.78587049, 0.12691209], [ 0.40198936, 1.8831507, -1.34775906, -1.270485, 0.96939671, -1.17312341, 1.94362119, -0.41361898, -0.74745481, 1.92294203, 1.48051479, 1.86755896], [ 0.90604466, -0.86122569, 1.91006495, -0.26800337, 0.8024564, 0.94725197, -0.15501009, 0.61407937, 0.92220667, 0.37642553, -1.09940079, 0.29823817], [ 1.3263859, -0.69456786, -0.14963454, -0.43515355, 1.84926373, 0.67229476, 0.40746184, -0.76991607, 0.53924919, -0.67433266, 0.03183056, -0.63584608], [ 0.67643329, 0.57659082, -0.20829876, 0.39600671, -1.09306151, -1.49125759, 0.4393917, 0.1666735, 0.63503144, 2.38314477, 0.94447949, -0.91282223]], [[ 1.11701629, -1.31590741, -0.4615846, -0.06824161, 1.71334272, -0.74475482, -0.82643854, -0.09845252, -0.66347829, 1.12663592, -1.07993151, -1.14746865], [-0.43782004, -0.49803245, 1.92953205, 0.94942081, 0.08755124, -1.22543552, 0.84436298, -1.00021535, -1.5447711, 1.18802979, 0.31694261, 0.92085882], [ 0.31872765, 0.85683061, -0.65102559, -1.03424284, 0.68159452, -0.80340966, -0.68954978, -0.4555325, 0.01747916, -0.35399391, -1.37495129, -0.6436184 ], [-2.22340315, 0.62523145, -1.60205766, -1.10438334, 0.05216508, -0.739563, 1.5430146, -1.29285691, 0.26705087, -0.03928282, -1.1680935, 0.52327666], [-0.17154633, 0.77179055, 0.82350415, 2.16323595, 1.33652795, -0.36918184, -0.23937918, 1.0996596, 0.65526373, 0.64013153, -1.61695604, -0.02432612]], [[-0.73803091, 0.2799246, -0.09815039, 0.91017891, 0.31721822, 0.78632796, -0.4664191, -0.94444626, -0.41004969, -0.01702041, 0.37915174, 2.25930895], [-0.04225715, -0.955945 , -0.34598178, -0.46359597, 
0.48148147, -1.54079701, 0.06326199, 0.15650654, 0.23218104, -0.59731607, -0.23792173, -1.42406091], [-0.49331988, -0.54286148, 0.41605005, -1.15618243, 0.7811981, 1.49448454, -2.06998503, 0.42625873, 0.67690804, -0.63743703, -0.39727181, -0.13288058], [-0.29779088, -0.30901297, -1.67600381, 1.15233156, 1.07961859, -0.81336426, -1.46642433, 0.52106488, -0.57578797, 0.14195316, -0.31932842, 0.69153875], [ 0.69474914, -0.72559738, -1.38336396, -1.5829384, 0.61037938, -1.18885926, -0.50681635, -0.59631404, -0.0525673, -1.93627981, 0.1887786, 0.52389102]], [[ 0.08842209, -0.31088617, 0.09740017, 0.39904635, -2.77259276, 1.95591231, 0.39009332, -0.65240858, -0.39095338, 0.49374178, -0.11610394, -2.03068447], [ 2.06449286, -0.11054066, 1.02017271, -0.69204985, 1.53637705, 0.28634369, 0.60884383, -1.04525337, 1.21114529, 0.68981816, 1.30184623, -0.62808756], [-0.48102712, 2.3039167, -1.06001582, -0.1359497, 1.13689136, 0.09772497, 0.58295368, -0.39944903, 0.37005589, -1.30652685, 1.65813068, -0.11816405], [-0.6801782, 0.66638308, -0.46071979, -1.33425847, -1.34671751, 0.69377315, -0.15957344, -0.13370156, 1.07774381, -1.12682581, -0.73067775, -0.38487981], [ 0.09435159, -0.04217145, -0.28688719, -0.0616264, -0.10730528, -0.71960439, -0.81299299, 0.27451636, -0.89091508, -1.15735526, -0.31229225, -0.15766702]], [[ 2.2567235, -0.70470028, 0.94326072, 0.74718833, -1.18894496, 0.77325298, -1.18388064, -2.65917224, 0.60631952, -1.75589058, 0.45093446, -0.6840109 ], [ 1.6595508, 1.0685094, -0.4533858, -0.68783761, -1.2140774, -0.44092263, -0.2803555, -0.36469354, 0.15670386, 0.5785215, 0.34965446, -0.76414392], [-1.43779147, 1.36453185, -0.68944918, -0.6522936, -0.52118931, -1.84306955, -0.477974 , -0.47965581, 0.6203583, 0.69845715, 0.00377089, 0.93184837], [ 0.33996498, -0.01568211, 0.16092817, -0.19065349, -0.39484951, -0.26773354, -1.12801133, 0.28044171, -0.99312361, 0.84163126, -0.24945858, 0.04949498], [ 0.49383678, 0.64331447, -1.57062341, -0.20690368, 0.88017891, -1.69810582, 0.38728048, -2.25556423, -1.02250684, 0.03863055, -1.6567151, -0.98551074]], [[-1.47183501, 1.64813493, 0.16422776, 0.56729028, -0.2226751, -0.35343175, -1.61647419, -0.29183736, -0.76149221, 0.85792392, 1.14110187, 1.46657872], [ 0.85255194, -0.59865394, -1.11589699, 0.76666318, 0.35629282, -1.76853845, 0.35548179, 0.81451982, 0.05892559, -0.18505367, -0.80764849, -1.4465347 ], [ 0.80029795, -0.30911444, -0.23346666, 1.73272119, 0.68450111, 0.370825 , 0.14206181, 1.51999486, 1.71958931, 0.92950511, 0.58222459, -2.09460307], [ 0.12372191, -0.13010695, 0.09395323, 0.94304609, -2.73967717, -0.56931205, 0.26990435, -0.46684555, -1.41690611, 0.86896349, 0.27687191, -0.97110457], [ 0.3148172, 0.82158571, 0.00529265, 0.8005648, 0.07826018, -0.39522898, -1.15942052, -0.08593077, 0.19429294, 0.87583276, -0.11510747, 0.45741561]], [[-0.96461201, -0.78262916, -0.1103893, -1.05462846, 0.82024784, 0.46313033, 0.27909576, 0.33890413, 2.02104356, -0.46886419, -2.20144129, 0.1993002 ], [-0.05060354, -0.51751904, -0.97882986, -0.43918952, 0.18133843, -0.5028167, 2.41245368, -0.96050438, -0.79311736, -2.28862004, 0.25148442, -2.01640663], [-0.53945463, -0.27567053, -0.70972797, 1.73887268, 0.99439439, 1.31913688, -0.88241882, 1.12859406, 0.49600095, 0.77140595, 1.02943883, -0.90876325], [-0.42431762, 0.86259601, -2.65561909, 1.51332808, 0.55313206, -0.04570396, 0.22050766, -1.02993528, -0.34994336, 1.10028434, 1.29802197, 2.69622405], [-0.07392467, -0.65855297, -0.51423397, -1.01804188, -0.07785476, 0.38273243, -0.03424228, 1.09634685, 
-0.2342158, -0.34745065, -0.58126848, -1.63263453]], [[-1.56776772, -1.17915793, 1.30142807, 0.89526027, 1.37496407, -1.33221165, -1.96862469, -0.66005632, 0.17581895, 0.49869027, 1.04797216, 0.28427967], [ 1.74266878, -0.22260568, -0.91307922, -1.68121822, -0.88897136, 0.24211796, -0.88872026, 0.93674246, 1.41232771, -2.36958691, 0.8640523, -2.23960406], [ 0.40149906, 1.22487056, 0.06485611, -1.27968917, -0.5854312, -0.26164545, -0.18224478, -0.20289684, -0.10988278, 0.21348005, -1.20857365, -0.24201983], [ 1.51826117, -0.38464542, -0.44383609, 1.0781973, -2.55918467, 1.1813786, -0.63190376, 0.16392857, 0.09632136, 0.94246812, -0.26759475, -0.67802578], [ 1.29784579, -2.36417382, 0.02033418, -1.34792542, -0.76157339, 2.01125668, -0.04459543, 0.1950697, -1.78156286, -0.72904466, 0.1965574, 0.35475769]], [[ 0.61688655, 0.0086279, 0.52700421, 0.45378191, -1.82974041, 0.03700572, 0.76790241, 0.58987982, -0.36385881, -0.80562651, -1.11831192, -0.13105401], [ 1.13307988, -1.9518041, -0.65989173, -1.13980246, 0.78495752, -0.55430963, -0.47063766, -0.21694957, 0.44539325, -0.392389, -3.04614305, 0.54331189], [ 0.43904296, -0.21954103, -1.08403662, 0.35178011, 0.37923553, -0.47003288, -0.21673147, -0.9301565, -0.17858909, -1.55042935, 0.41731882, -0.94436849], [ 0.23810315, -1.40596292, -0.59005765, -0.11048941, -1.66069981, 0.11514787, -0.37914756, -1.7423562, -1.30324275, 0.60512008, 0.89555599, -0.13190864], [ 0.40476181, 0.22384356, 0.32962298, 1.28598401, -1.5069984, 0.67646073, -0.38200896, -0.22425893, -0.30224973, -0.37514712, -1.22619619, 0.1833392 ] ]]) g = np.array([ [[ 1.67094303e+00, -5.61330204e-02, -1.38504274e-03, -6.87299037e-01, -1.17474546e-01, 4.66166426e-01, -3.70242441e-01, -4.53804041e-01, 4.03264540e-01, -9.18004770e-01, 2.52496627e-01, 8.20321797e-01], [ 1.35994854e+00, -9.03820073e-02, 1.36759724e+00, 1.03440989e+00, -9.96212640e-01, -1.21793851e+00, -3.04963638e-01, 1.02893549e+00, -7.22870076e-02, -6.00657558e-01, 1.55224318e+00, 2.86904488e-01], [-2.32059428e+00, 3.17160626e-01, 5.20040615e-01, 2.25608654e-01, 4.49712100e-01, -6.72756089e-02, -1.31839587e+00, -3.70704003e-01, -9.45615796e-01, -9.32740911e-01, -1.26306835e+00, 4.52489093e-01], [ 9.78961454e-02, -4.48165363e-01, -6.49337928e-01, -2.34231050e-02, 1.07919473e+00, -2.00421572e+00, 3.76876521e-01, -5.45711974e-01, -1.88458584e+00, -1.94570308e+00, -9.12783494e-01, 2.19509556e-01], [ 3.93062934e-01, -9.38981573e-01, 1.01702099e+00, 1.42298350e+00, 3.96086585e-01, -5.91402668e-01, 1.12441918e+00, 7.55395696e-01, 8.67407411e-01, -6.56463675e-01, -2.83455451e+00, 2.11679102e+00]], [[-1.61087840e+00, -3.57680719e-02, 2.38074535e+00, 3.30576756e-01, 9.49246474e-01, -1.50239657e+00, -1.77766695e+00, -5.32702792e-01, 1.09074973e+00, -3.46249448e-01, -7.94636321e-01, 1.97967290e-01], [ 1.08193522e+00, -1.44494020e+00, -1.21054299e+00, -7.88669255e-01, 1.09463837e+00, 2.34821526e-01, 2.13215341e+00, 9.36445726e-01, -3.50951769e-02, 1.26507784e+00, 2.11497013e-01, -7.04921353e-01], [ 6.79974844e-01, -6.96326654e-01, -2.90397101e-01, 1.32778270e+00, -1.01281486e-01, -8.03141387e-01, -4.64337691e-01, 1.02179059e+00, -5.52540673e-01, -3.86870847e-01, -5.10292740e-01, 1.83925494e-01], [-3.85489760e-01, -1.60183605e+00, -8.87180942e-01, -9.32789042e-01, 1.24331938e+00, 8.12674042e-01, 5.87259379e-01, -5.05358317e-01, -8.15791542e-01, -5.07517602e-01, -1.05188010e+00, 2.49720039e+00], [-2.24532165e+00, 5.64008535e-01, -1.28455230e+00, -1.04343491e-01, -9.88001942e-01, -1.17762896e+00, -1.14019630e+00, 1.75498615e+00, 
-1.32988422e-01, -7.65702194e-01, 5.55786964e-01, 1.03493146e-02]], [[ 7.20033759e-01, -1.82425666e+00, 3.03603904e-01, 7.72694837e-01, -1.66159829e+00, 4.48195284e-01, 1.69618157e+00, -1.48577034e-02, 8.21405937e-01, 6.70570450e-01, -7.07505698e-01, 3.97667346e-02], [-1.56699471e+00, -4.51303037e-01, 2.65687975e-01, 7.23100494e-01, 2.46121252e-02, 7.19983730e-01, -1.10290621e+00, -1.01697275e-01, 1.92793845e-02, 1.84959125e+00, -2.14166656e-01, -4.99016638e-01], [ 2.13512238e-02, -9.19113445e-01, 1.92753849e-01, -3.65055217e-01, -1.79132755e+00, -5.85865511e-02, -3.17543094e-01, -1.63242330e+00, -6.71341546e-02, 1.48935596e+00, 5.21303748e-01, 6.11927193e-01], [-1.34149673e+00, 4.76898369e-01, 1.48449581e-01, 5.29045238e-01, 4.22628622e-01, -1.35978073e+00, -4.14008116e-02, -7.57870860e-01, -5.00840943e-02, -8.97400927e-01, 1.31247037e+00, -8.58972388e-01], [-8.98942156e-01, 7.45864065e-02, -1.07709907e+00, -4.24663302e-01, -8.29964598e-01, 1.41117206e+00, 7.85803827e-01, -5.74695185e-02, -3.91217052e-01, 9.40917615e-01, 4.05204080e-01, 4.98052405e-01]], [[-2.61922373e-02, -1.68823003e+00, -1.12465983e-01, -5.32489919e-01, 6.45055273e-01, 1.01184243e+00, -6.57951045e-01, 4.68385234e-01, 1.73587900e+00, -6.67712721e-01, 1.68192174e+00, -8.52585847e-01], [ 2.29597556e-02, -1.11456118e-02, 1.14988999e-02, -8.37678042e-01, -5.91183104e-01, -6.67720286e-01, 3.26962595e-01, 3.30035115e-01, 2.22594433e+00, 1.37098901e+00, -5.09843242e-01, 3.24869616e-01], [ 9.97117981e-01, 3.06018243e-02, -6.96415784e-02, 5.15749428e-02, 8.67276629e-01, -8.48320523e-01, -3.25669469e-01, 4.70433145e-01, 3.11447072e-01, 2.39582760e-01, -3.69801166e-01, 9.72535789e-01], [ 2.13386825e+00, 4.06415494e-01, -1.93176702e-01, 7.55740289e-01, -5.39132637e-01, -7.49690345e-01, 3.28087476e-02, -2.58279663e+00, -1.15395036e+00, -3.47961856e-01, -1.35338886e+00, -1.03264310e+00], [-4.36748337e-01, -1.64296529e+00, -4.06071796e-01, -5.35270165e-01, 2.54052084e-02, 1.15418403e+00, 1.72504416e-01, 2.10620213e-02, 9.94544570e-02, 2.27392775e-01, -1.01673865e+00, -1.14775325e-01]], [[ 3.08751242e-01, -1.37075998e+00, 8.65652923e-01, 1.08137603e+00, -6.31375988e-01, -2.41337791e-01, -8.78190343e-01, 6.99380484e-01, -1.06122229e+00, -2.22477010e-01, -8.58919908e-01, 5.09542770e-02], [-1.79422927e+00, 1.32646164e+00, -9.64606424e-01, 5.98946831e-02, -2.12523045e-01, -7.62114512e-01, -8.87780137e-01, 9.36398544e-01, -5.25640593e-01, 2.71170185e-01, -8.01496885e-01, -6.47181432e-01], [ 4.72247150e-01, 9.30408496e-01, -1.75316402e-01, -1.42191987e+00, 1.99795608e+00, -8.56549308e-01, -1.54158740e+00, 2.59442459e+00, -4.04032294e-01, -1.46173269e+00, -6.83439767e-01, 3.67544896e-01], [ 1.90311558e-01, -8.51729197e-01, 1.82272360e+00, -5.21579678e-01, -1.18468659e+00, 9.60693398e-01, 1.32906285e+00, -8.17493098e-01, -1.40134729e+00, 1.03043827e+00, -2.04732361e+00, -1.22662166e+00], [ 9.67446150e-01, -5.53525480e-02, -2.63937349e-01, 3.52816606e-01, -1.52774424e-01, -1.29868672e+00, 1.27607535e+00, 1.32501405e+00, 2.05332564e-01, 4.51340154e-02, 2.33962481e+00, -2.76432845e-01]], [[-2.59576982e-01, 3.64481249e-01, 1.47132196e+00, 1.59277075e+00, -2.58572632e-01, 3.08331246e-01, -1.37808347e+00, -3.11976108e-01, -8.40290395e-01, -1.00683175e+00, 1.68157672e+00, -7.92286662e-01], [-5.31605908e-01, 3.65848788e-01, 1.29782527e+00, 4.81115126e-01, 2.75935511e+00, -7.46679783e-02, 2.58716440e-01, 2.75600674e-01, 1.43504939e+00, 5.07238951e-01, -1.16229700e-01, -9.47488595e-01], [ 2.44443456e-01, 1.40134483e+00, -4.10381794e-01, 5.28943618e-01, 
2.46147789e-01, 8.63519658e-01, -8.04753741e-01, 2.34664703e+00, -1.27916111e+00, -3.65551090e-01, 9.38092541e-01, 2.96733172e-01], [ 8.29986159e-01, -4.96102334e-01, -7.48049827e-02, 1.22319836e-02, 1.56925961e+00, 6.90429024e-01, 7.96672108e-01, -6.57926093e-01, 9.68882639e-01, 2.25581664e-01, 1.38914532e+00, 2.01406015e+00], [-3.06765776e-01, -4.06303130e-01, -8.64044991e-01, -1.43579512e-01, -3.82025449e-01, 3.59504400e-01, -1.44566817e-01, -3.61599281e-01, 1.06458514e+00, -9.37880231e-01, 4.33107953e-01, -4.05941727e-01]], [[ 7.24368505e-01, 1.38526155e+00, -3.03098253e-01, 4.41032907e-01, 1.78792866e-01, -7.99422400e-01, 2.40787510e-01, 2.89120505e-01, 4.12870820e-01, -1.98398897e-01, 9.41923003e-02, -1.14761094e+00], [-3.58114075e-01, 5.55962680e-01, 8.92473887e-01, -4.22314824e-01, 1.04714029e-01, 2.28053325e-01, 2.01479947e-01, 5.40773585e-01, -1.81807763e+00, -4.93240701e-02, 2.39033601e-01, -1.00033035e+00], [ 1.67398571e+00, 1.61559267e-01, 1.56340475e+00, -7.90523022e-01, -9.07300122e-01, 2.24252221e-01, -1.67868836e+00, 2.14965591e-01, 9.72192320e-02, 1.01566528e+00, 7.01041341e-01, -4.17477350e-01], [-1.09749665e+00, 1.71230522e+00, -7.92115021e-01, -1.04552456e+00, -1.08485606e+00, 1.11730532e+00, -5.18900204e-01, -7.53704466e-01, 1.37689826e-01, -2.06944711e-01, -6.78095461e-01, 7.53991467e-01], [ 1.06531549e+00, 9.85317509e-01, 7.66919670e-01, 4.02625531e-01, -1.77588800e+00, 1.66925081e+00, 3.01989210e-01, 6.08156428e-01, 1.11496232e+00, 1.43335250e+00, 4.18398011e-01, 4.35546159e-01]], [[-5.99224277e-01, 3.30897511e-02, -8.54161261e-01, -7.19940532e-01, -8.93574402e-01, -1.56023891e-01, 1.04909319e+00, 3.17097477e+00, 1.89499638e-01, -1.34841309e+00, 1.26498333e+00, -3.00783876e-01], [-6.60608594e-01, 2.09849478e-01, -1.24062460e+00, 2.22463164e-01, -8.83755232e-02, 9.83779068e-02, 3.81416254e-01, 6.74922572e-02, 1.63380841e-02, 2.84314519e-01, 4.15400626e-01, -1.03148246e+00], [-1.42999126e+00, -6.16380522e-02, -1.43273549e+00, 8.75314709e-02, 9.38746876e-01, 6.07111672e-01, -1.04817041e+00, -8.60262452e-01, 3.28301295e-01, -4.01297805e-01, -3.16655295e-01, 5.96906481e-01], [-9.87286693e-01, -4.01234710e-01, -8.00082476e-01, -1.04312950e+00, -8.57078189e-01, 6.77462169e-01, 5.18203895e-02, -8.79160629e-01, -2.31101608e-01, -1.63880731e+00, -7.33312808e-01, 2.14957453e+00], [-9.02438497e-02, 7.31658927e-01, -6.54883751e-02, 3.48169235e-01, 6.63258090e-01, -1.10461660e+00, -3.09362573e-02, 1.57886519e+00, -7.95500550e-01, -5.66439854e-01, -3.07691277e-01, 2.69024073e-01]], [[ 5.24917864e-01, 1.26741165e+00, 4.99498233e-01, -6.20531258e-02, 1.25916713e+00, 7.04111022e-01, -1.49567952e+00, 2.52636824e+00, 1.76992139e+00, -1.68214223e-01, 3.77910102e-01, 1.32435875e+00], [-1.72200793e-01, 7.30351790e-01, 1.10457847e+00, -1.01482591e+00, -6.02331854e-01, 9.21408398e-01, 4.60814477e-01, 9.23796560e-01, -1.32568015e-01, -2.89005211e-01, -1.99863948e+00, -1.14600043e+00], [ 4.70660947e-02, 8.24557220e-01, 5.31178367e-01, -1.28241974e-01, -2.71771566e-01, 2.17179633e-01, 7.82111811e-02, 1.40454551e+00, 1.46440770e-01, -1.48124596e+00, -1.27255814e+00, 1.51875934e+00], [-1.17116046e+00, 7.64497453e-01, -2.68372735e-01, -1.69758294e-01, -1.34132783e-01, 1.22138496e+00, -1.92841829e-01, -3.33192828e-02, -1.53080350e+00, 2.06690512e-01, 5.31042507e-01, 2.39145581e-01], [ 1.39789626e+00, 5.51713548e-02, 2.98977456e-01, 1.64850401e+00, -1.55001419e+00, -4.55825348e-01, 1.42615875e+00, 9.36129148e-01, 6.78380099e-01, 8.32650739e-01, 3.27066209e-01, 1.63159743e+00]], [[ 3.77759170e-01, 
2.39867106e-01, 1.58958674e-01, 1.92863956e-01, -1.15701728e+00, 7.70673054e-01, -1.30439734e-01, 1.82191510e+00, -7.56504706e-02, 4.20918284e-01, 2.46602186e-01, -6.25557035e-01], [ 9.92136829e-01, 1.90506364e+00, -1.47772197e-02, -3.00478786e-01, -3.55028731e-01, -1.89236189e+00, -1.77813144e-01, 2.50998116e-01, 1.05475793e+00, 9.60047741e-01, -4.16499082e-01, -2.76822995e-01], [ 1.12390531e+00, -1.73463897e-01, -5.10029540e-01, 1.39251845e+00, 1.03758567e+00, 1.87917918e-02, -5.93777448e-01, -2.01188032e+00, 5.89703606e-01, -8.96369723e-01, -1.96273201e+00, 1.58482053e+00], [ 6.47967791e-01, -1.13900819e+00, -1.21440138e+00, 8.70961782e-01, -8.77970617e-01, 1.29614987e+00, 6.16459313e-01, 5.36596521e-01, 4.04695456e-01, 1.91450872e-01, 8.80511199e-01, -4.54080363e-01], [ 8.59519734e-02, 7.51946588e-01, 5.62989719e-01, -1.19498681e+00, -5.00409667e-01, 2.52803505e-01, -4.08014709e-01, 1.77465856e+00, -3.93153195e-01, -1.62218448e-01, 7.69430178e-01, 3.30532743e-01]] ]) class SampleMap(object): param_data = [] np.random.seed(0) param_data.append({'name': 'x', 'units': None, 'default': 0, 'values': x}) param_data.append({'name': 'y', 'units': None, 'default': 0, 'values': y}) param_data.append({'name': 'z', 'units': None, 'default': 0, 'values': z}) output_data = [] output_data.append({'name': 'f', 'units': None, 'default': 0, 'values': f}) output_data.append({'name': 'g', 'units': None, 'default': 0, 'values': g}) @unittest.skipIf(not scipy_gte_019, "only run if scipy>=0.19.") class TestMetaModelStructuredScipy(unittest.TestCase): """ Tests the regular grid map component. specifically the analytic derivatives vs. finite difference estimates. """ def setUp(self): model = om.Group() ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data x, y, z = params outs = mapdata.output_data z = outs[0] ivc.add_output('x', x['default'], units=x['units']) ivc.add_output('y', y['default'], units=y['units']) ivc.add_output('z', z['default'], units=z['units']) model.add_subsystem('des_vars', ivc, promotes=["*"]) comp = om.MetaModelStructuredComp(method='scipy_slinear', extrapolate=True) for param in params: comp.add_input(param['name'], param['default'], param['values']) for out in outs: comp.add_output(out['name'], out['default'], out['values']) model.add_subsystem('comp', comp, promotes=["*"]) self.prob = om.Problem(model) self.prob.setup() self.prob['x'] = 1.0 self.prob['y'] = 0.75 self.prob['z'] = -1.7 def test_deriv1(self): # run at default pt self.run_and_check_derivs(self.prob) # test output values f, g = self.prob['comp.f'], self.prob['comp.g'] tol = 1e-6 assert_near_equal(f, -0.05624571, tol) assert_near_equal(g, 1.02068754, tol) def test_deriv1_swap(self): # Bugfix test that we can add outputs before inputs. 
model = om.Group() ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data x, y, z = params outs = mapdata.output_data z = outs[0] ivc.add_output('x', x['default'], units=x['units']) ivc.add_output('y', y['default'], units=y['units']) ivc.add_output('z', z['default'], units=z['units']) model.add_subsystem('des_vars', ivc, promotes=["*"]) comp = om.MetaModelStructuredComp(method='scipy_slinear', extrapolate=True) for out in outs: comp.add_output(out['name'], out['default'], out['values']) for param in params: comp.add_input(param['name'], param['default'], param['values']) model.add_subsystem('comp', comp, promotes=["*"]) prob = om.Problem(model) prob.setup() prob['x'] = 1.0 prob['y'] = 0.75 prob['z'] = -1.7 # run at default pt self.run_and_check_derivs(prob) def test_deriv2(self): self.prob['x'] = 10.0 self.prob['y'] = 0.81 self.prob['z'] = 1.1 self.run_and_check_derivs(self.prob) def test_deriv3(self): self.prob['x'] = 90.0 self.prob['y'] = 1.2 self.prob['z'] = 2.1 self.run_and_check_derivs(self.prob) def test_deriv4(self): # Tests extrapolation. self.prob['x'] = 65.0 self.prob['y'] = 0.951 self.prob['z'] = 2.5 self.run_and_check_derivs(self.prob) def test_raise_out_of_bounds_error(self): model = om.Group() ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data x, y, z = params outs = mapdata.output_data z = outs[0] ivc.add_output('x', x['default'], units=x['units']) ivc.add_output('y', y['default'], units=y['units']) ivc.add_output('z', z['default'], units=z['units']) model.add_subsystem('des_vars', ivc, promotes=["*"]) # Need to make sure extrapolate is False for bounds to be checked comp = om.MetaModelStructuredComp(method='scipy_slinear', extrapolate=False) for param in params: comp.add_input(param['name'], param['default'], param['values']) for out in outs: comp.add_output(out['name'], out['default'], out['values']) model.add_subsystem('comp', comp, promotes=["*"]) self.prob = om.Problem(model) self.prob.setup() self.prob['x'] = 1.0 self.prob['y'] = 0.75 self.prob['z'] = 9.0 # intentionally set to be out of bounds # The interpolating output name is given as a regexp because the exception could # happen with f or g first. The order those are evaluated comes from the keys of # dict so no guarantee on the order except for Python 3.6 ! 
msg = "'comp' <class MetaModelStructuredComp>: Error interpolating output '[f|g]' because input 'comp.z' was " \ "out of bounds \('.*', '.*'\) with value '9.0'" with self.assertRaisesRegex(om.AnalysisError, msg): self.run_and_check_derivs(self.prob) def test_training_gradient(self): model = om.Group() ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data outs = mapdata.output_data ivc.add_output('x', np.array([-0.3, 0.7, 1.2])) ivc.add_output('y', np.array([0.14, 0.313, 1.41])) ivc.add_output('z', np.array([-2.11, -1.2, 2.01])) ivc.add_output('f_train', outs[0]['values']) ivc.add_output('g_train', outs[1]['values']) comp = om.MetaModelStructuredComp(training_data_gradients=True, method='scipy_cubic', vec_size=3) for param in params: comp.add_input(param['name'], param['default'], param['values']) for out in outs: comp.add_output(out['name'], out['default'], out['values']) model.add_subsystem('ivc', ivc, promotes=["*"]) model.add_subsystem('comp', comp, promotes=["*"]) prob = om.Problem(model) prob.setup() prob.run_model() val0 = np.array([ 50.26787317, 49.76106232, 19.66117913]) val1 = np.array([-32.62094041, -31.67449135, -27.46959668]) tol = 1e-5 assert_near_equal(prob['f'], val0, tol) assert_near_equal(prob['g'], val1, tol) self.run_and_check_derivs(prob) def test_training_gradient_setup_called_twice(self): model = om.Group() ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data outs = mapdata.output_data ivc.add_output('x', np.array([-0.3, 0.7, 1.2])) ivc.add_output('y', np.array([0.14, 0.313, 1.41])) ivc.add_output('z', np.array([-2.11, -1.2, 2.01])) ivc.add_output('f_train', outs[0]['values']) ivc.add_output('g_train', outs[1]['values']) comp = om.MetaModelStructuredComp(training_data_gradients=True, method='scipy_cubic', vec_size=3) for param in params: comp.add_input(param['name'], param['default'], param['values']) for out in outs: comp.add_output(out['name'], out['default'], out['values']) model.add_subsystem('ivc', ivc, promotes=["*"]) model.add_subsystem('comp', comp, promotes=["*"]) prob = om.Problem(model) prob.setup() prob.run_model() val0 = np.array([ 50.26787317, 49.76106232, 19.66117913]) val1 = np.array([-32.62094041, -31.67449135, -27.46959668]) tol = 1e-5 assert_near_equal(prob['f'], val0, tol) assert_near_equal(prob['g'], val1, tol) self.run_and_check_derivs(prob) # Setup and run again prob.setup() prob.run_model() val0 = np.array([ 50.26787317, 49.76106232, 19.66117913]) val1 = np.array([-32.62094041, -31.67449135, -27.46959668]) tol = 1e-5 assert_near_equal(prob['f'], val0, tol) assert_near_equal(prob['g'], val1, tol) self.run_and_check_derivs(prob) def run_and_check_derivs(self, prob, tol=1e-5, verbose=False): """Runs check_partials and compares to analytic derivatives.""" prob.run_model() derivs = prob.check_partials(out_stream=None) for i in derivs['comp'].keys(): if verbose: print("Checking derivative pair:", i) if derivs['comp'][i]['J_fwd'].sum() != 0.0: rel_err = derivs['comp'][i]['rel error'][0] self.assertLessEqual(rel_err, tol) def test_error_msg_vectorized(self): # Tests bug in error message where it doesn't give the correct node value. 
x_bp = np.array([0., 1.]) y_data = np.array([0., 4.]) nn = 5 class MMComp(om.MetaModelStructuredComp): def setup(self): nn = self.options['vec_size'] self.add_input(name='x', val=np.ones(nn), units=None, training_data=x_bp) self.add_output(name='y', val=np.zeros(nn), units=None, training_data=y_data) p = om.Problem() ivc = om.IndepVarComp() ivc.add_output('x', val=np.linspace(.5, 1.1, nn)) p.model.add_subsystem('ivc', ivc, promotes=['x']) p.model.add_subsystem('MM', MMComp(vec_size=nn), promotes=['x', 'y']) p.setup() with self.assertRaises(om.AnalysisError) as cm: p.run_model() msg = ("'MM' <class MMComp>: Error interpolating output 'y' because input 'MM.x' was out of bounds ('0.0', '1.0') with value '1.1'") self.assertEqual(str(cm.exception), msg) class TestMetaModelStructuredPython(unittest.TestCase): """ Tests the regular grid map component. specifically the analytic derivatives vs. finite difference estimates. """ def setUp(self): model = om.Group() ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data x, y, z = params outs = mapdata.output_data z = outs[0] ivc.add_output('x', x['default'], units=x['units']) ivc.add_output('y', y['default'], units=y['units']) ivc.add_output('z', z['default'], units=z['units']) model.add_subsystem('des_vars', ivc, promotes=["*"]) comp = om.MetaModelStructuredComp(method='slinear', extrapolate=True) for param in params: comp.add_input(param['name'], param['default'], param['values']) for out in outs: comp.add_output(out['name'], out['default'], out['values']) model.add_subsystem('comp', comp, promotes=["*"]) self.prob = om.Problem(model) self.prob.setup() self.prob['x'] = 1.0 self.prob['y'] = 0.75 self.prob['z'] = -1.7 def run_and_check_derivs(self, prob, tol=1e-5, verbose=False): """Runs check_partials and compares to analytic derivatives.""" prob.run_model() derivs = prob.check_partials(method='cs', out_stream=None) for i in derivs['comp'].keys(): if verbose: print("Checking derivative pair:", i) if derivs['comp'][i]['J_fwd'].sum() != 0.0: rel_err = derivs['comp'][i]['rel error'][0] self.assertLessEqual(rel_err, tol) def test_deriv1(self): # run at default pt self.run_and_check_derivs(self.prob) # test output values f, g = self.prob['comp.f'], self.prob['comp.g'] tol = 1e-6 assert_near_equal(f, -0.05624571, tol) assert_near_equal(g, 1.02068754, tol) def test_deriv1_swap(self): # Bugfix test that we can add outputs before inputs. model = om.Group() ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data x, y, z = params outs = mapdata.output_data z = outs[0] ivc.add_output('x', x['default'], units=x['units']) ivc.add_output('y', y['default'], units=y['units']) ivc.add_output('z', z['default'], units=z['units']) model.add_subsystem('des_vars', ivc, promotes=["*"]) comp = om.MetaModelStructuredComp(method='slinear', extrapolate=True) for out in outs: comp.add_output(out['name'], out['default'], out['values']) for param in params: comp.add_input(param['name'], param['default'], param['values']) model.add_subsystem('comp', comp, promotes=["*"]) prob = om.Problem(model) prob.setup() prob['x'] = 1.0 prob['y'] = 0.75 prob['z'] = -1.7 # run at default pt self.run_and_check_derivs(prob) def test_deriv2(self): self.prob['x'] = 10.0 self.prob['y'] = 0.81 self.prob['z'] = 1.1 self.run_and_check_derivs(self.prob) def test_deriv3(self): self.prob['x'] = 90.0 self.prob['y'] = 1.2 self.prob['z'] = 2.1 self.run_and_check_derivs(self.prob) def test_deriv4(self): # Tests extrapolation. 
self.prob['x'] = 65.0 self.prob['y'] = 0.951 self.prob['z'] = 2.5 self.run_and_check_derivs(self.prob) def test_vectorized_linear(self): prob = om.Problem() model = prob.model ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data x, y, _ = params outs = mapdata.output_data z = outs[0] ivc.add_output('x', np.array([x['default'], x['default'], x['default']]), units=x['units']) ivc.add_output('y', np.array([y['default'], y['default'], y['default']]), units=x['units']) ivc.add_output('z', np.array([z['default'], z['default'], z['default']]), units=x['units']) model.add_subsystem('des_vars', ivc, promotes=["*"]) comp = om.MetaModelStructuredComp(method='slinear', extrapolate=True, vec_size=3) for param in params: comp.add_input(param['name'], np.array([param['default'], param['default'], param['default']]), param['values']) for out in outs: comp.add_output(out['name'], np.array([out['default'], out['default'], out['default']]), out['values']) model.add_subsystem('comp', comp, promotes=["*"]) prob.setup(force_alloc_complex=True) prob['x'] = np.array([1.0, 10.0, 90.0]) prob['y'] = np.array([0.75, 0.81, 1.2]) prob['z'] = np.array([-1.7, 1.1, 2.1]) prob.run_model() partials = prob.check_partials(method='cs', out_stream=None) assert_check_partials(partials, rtol=1e-10) def test_vectorized_lagrange2(self): prob = om.Problem() model = prob.model ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data x, y, _ = params outs = mapdata.output_data z = outs[0] ivc.add_output('x', np.array([x['default'], x['default'], x['default']]), units=x['units']) ivc.add_output('y', np.array([y['default'], y['default'], y['default']]), units=x['units']) ivc.add_output('z', np.array([z['default'], z['default'], z['default']]), units=x['units']) model.add_subsystem('des_vars', ivc, promotes=["*"]) comp = om.MetaModelStructuredComp(method='lagrange2', extrapolate=True, vec_size=3) for param in params: comp.add_input(param['name'], np.array([param['default'], param['default'], param['default']]), param['values']) for out in outs: comp.add_output(out['name'], np.array([out['default'], out['default'], out['default']]), out['values']) model.add_subsystem('comp', comp, promotes=["*"]) prob.setup(force_alloc_complex=True) prob['x'] = np.array([1.0, 10.0, 90.0]) prob['y'] = np.array([0.75, 0.81, 1.2]) prob['z'] = np.array([-1.7, 1.1, 2.1]) prob.run_model() partials = prob.check_partials(method='cs', out_stream=None) # Derivs are large, so ignore atol. assert_check_partials(partials, atol=1e10, rtol=1e-10) def test_vectorized_lagrange3(self): prob = om.Problem() model = prob.model mapdata = SampleMap() params = mapdata.param_data x, y, _ = params outs = mapdata.output_data comp = om.MetaModelStructuredComp(method='lagrange3', extrapolate=True, vec_size=3) for param in params: comp.add_input(param['name'], np.array([param['default'], param['default'], param['default']]), param['values'], units=param['units']) for out in outs: comp.add_output(out['name'], np.array([out['default'], out['default'], out['default']]), out['values']) model.add_subsystem('comp', comp, promotes=["*"]) prob.setup(force_alloc_complex=True) prob.set_val('x', np.array([1.0, 10.0, 90.0])) prob.set_val('y', np.array([0.75, 0.81, 1.2])) prob.set_val('z', np.array([-1.7, 1.1, 2.1])) prob.run_model() partials = prob.check_partials(method='cs', out_stream=None) # Derivs are large, so ignore atol. 
assert_check_partials(partials, atol=1e10, rtol=1e-10) def test_vectorized_akima(self): prob = om.Problem() model = prob.model ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data x, y, _ = params outs = mapdata.output_data z = outs[0] ivc.add_output('x', np.array([x['default'], x['default'], x['default']]), units=x['units']) ivc.add_output('y', np.array([y['default'], y['default'], y['default']]), units=y['units']) ivc.add_output('z', np.array([z['default'], z['default'], z['default']]), units=z['units']) model.add_subsystem('des_vars', ivc, promotes=["*"]) comp = om.MetaModelStructuredComp(method='akima', extrapolate=True, vec_size=3) for param in params: comp.add_input(param['name'], np.array([param['default'], param['default'], param['default']]), param['values']) for out in outs: comp.add_output(out['name'], np.array([out['default'], out['default'], out['default']]), out['values']) model.add_subsystem('comp', comp, promotes=["*"]) prob.setup(force_alloc_complex=True) prob['x'] = np.array([1.0, 10.0, 90.0]) prob['y'] = np.array([0.75, 0.81, 1.2]) prob['z'] = np.array([-1.7, 1.1, 2.1]) prob.run_model() partials = prob.check_partials(method='cs', out_stream=None) # Derivs are large, so ignore atol. assert_check_partials(partials, atol=1e10, rtol=1e-10) def test_vectorized_cubic(self): prob = om.Problem() model = prob.model ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data x, y, _ = params outs = mapdata.output_data z = outs[0] ivc.add_output('x', np.array([x['default'], x['default'], x['default']]), units=x['units']) ivc.add_output('y', np.array([y['default'], y['default'], y['default']]), units=x['units']) ivc.add_output('z', np.array([z['default'], z['default'], z['default']]), units=x['units']) model.add_subsystem('des_vars', ivc, promotes=["*"]) comp = om.MetaModelStructuredComp(method='cubic', extrapolate=True, vec_size=3) for param in params: comp.add_input(param['name'], np.array([param['default'], param['default'], param['default']]), param['values']) for out in outs: comp.add_output(out['name'], np.array([out['default'], out['default'], out['default']]), out['values']) model.add_subsystem('comp', comp, promotes=["*"]) prob.setup(force_alloc_complex=True) prob['x'] = np.array([1.0, 10.0, 90.0]) prob['y'] = np.array([0.75, 0.81, 1.2]) prob['z'] = np.array([-1.7, 1.1, 2.1]) prob.run_model() partials = prob.check_partials(method='cs', out_stream=None) # Derivs are large, so ignore atol. 
assert_check_partials(partials, atol=1e10, rtol=1e-10) def test_training_gradient_lagrange3(self): model = om.Group() ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data outs = mapdata.output_data ivc.add_output('x', np.array([-0.3, 0.7, 1.2])) ivc.add_output('y', np.array([0.14, 0.313, 1.41])) ivc.add_output('z', np.array([-2.11, -1.2, 2.01])) ivc.add_output('f_train', outs[0]['values']) ivc.add_output('g_train', outs[1]['values']) comp = om.MetaModelStructuredComp(training_data_gradients=True, method='lagrange3', vec_size=3) for param in params: comp.add_input(param['name'], param['default'], param['values']) for out in outs: comp.add_output(out['name'], out['default'], out['values']) model.add_subsystem('ivc', ivc, promotes=["*"]) model.add_subsystem('comp', comp, promotes=["*"]) prob = om.Problem(model) prob.setup() prob.run_model() self.run_and_check_derivs(prob) def test_training_gradient_akima(self): model = om.Group() ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data outs = mapdata.output_data ivc.add_output('x', np.array([-0.3, 0.7, 1.2])) ivc.add_output('y', np.array([0.14, 0.313, 1.41])) ivc.add_output('z', np.array([-2.11, -1.2, 2.01])) ivc.add_output('f_train', outs[0]['values']) ivc.add_output('g_train', outs[1]['values']) comp = om.MetaModelStructuredComp(training_data_gradients=True, method='akima', vec_size=3) for param in params: comp.add_input(param['name'], param['default'], param['values']) for out in outs: comp.add_output(out['name'], out['default'], out['values']) model.add_subsystem('ivc', ivc, promotes=["*"]) model.add_subsystem('comp', comp, promotes=["*"]) prob = om.Problem(model) prob.setup(force_alloc_complex=True) prob.run_model() self.run_and_check_derivs(prob) def test_training_gradient_akima_basic(self): # Mimics usage as an order-reducing interpolating polynomial. 
model = om.Group() ivc = om.IndepVarComp() mapdata = SampleMap() params = mapdata.param_data outs = mapdata.output_data ivc.add_output('x', np.array([.33])) ivc.add_output('f_train', np.array([.3, .7, .5, .6, .3, .4, .2])) comp = om.MetaModelStructuredComp(training_data_gradients=True, method='akima', vec_size=1) comp.add_input('x', 0.0, np.array([.1, .2, .3, .4, .5, .6, .7])) comp.add_output('f', 0.0, np.array([.3, .7, .5, .6, .3, .4, .2])) model.add_subsystem('ivc', ivc, promotes=["*"]) model.add_subsystem('comp', comp, promotes=["*"]) prob = om.Problem(model) prob.setup(force_alloc_complex=True) prob.run_model() self.run_and_check_derivs(prob) @unittest.skipIf(OPT is None or OPTIMIZER is None, "only run if pyoptsparse is installed.") def test_analysis_error_warning_msg(self): x_tr = np.linspace(0, 2*np.pi, 100) y_tr = np.sin(x_tr) p = om.Problem(model=om.Group()) p.driver = om.pyOptSparseDriver(optimizer=OPTIMIZER) mm = om.MetaModelStructuredComp(extrapolate=False) mm.add_input('x', val=1.0, training_data=x_tr) mm.add_output('y', val=1.0, training_data=y_tr) p.model.add_subsystem('interp', mm, promotes_inputs=['x'], promotes_outputs=['y']) p.model.add_objective('y', scaler=-1) p.model.add_design_var('x', lower=6, upper=10) p.set_solver_print(level=0) p.setup() p.set_val('x', 0.75) msg = "Analysis Error: 'interp' <class MetaModelStructuredComp> " \ "Line 203 of file {}".format(inspect.getsourcefile(om.MetaModelStructuredComp)) with assert_warning(UserWarning, msg): p.run_driver() @unittest.skipIf(not scipy_gte_019, "only run if scipy>=0.19.") class TestMetaModelStructuredCompFeature(unittest.TestCase): @unittest.skipIf(not scipy_gte_019, "only run if scipy>=0.19.") def test_xor(self): import numpy as np import openmdao.api as om # Create regular grid interpolator instance xor_interp = om.MetaModelStructuredComp(method='scipy_slinear') # set up inputs and outputs xor_interp.add_input('x', 0.0, training_data=np.array([0.0, 1.0]), units=None) xor_interp.add_input('y', 1.0, training_data=np.array([0.0, 1.0]), units=None) xor_interp.add_output('xor', 1.0, training_data=np.array([[0.0, 1.0], [1.0, 0.0]]), units=None) # Set up the OpenMDAO model model = om.Group() model.add_subsystem('comp', xor_interp, promotes=["*"]) prob = om.Problem(model) prob.setup() prob.set_val('x', 0) # Now test out a 'fuzzy' XOR prob.set_val('x', 0.9) prob.set_val('y', 0.001242) prob.run_model() computed = prob.get_val('xor') actual = 0.8990064 assert_almost_equal(computed, actual) # we can verify all gradients by checking against finite-difference prob.check_partials(compact_print=True) @unittest.skipIf(not scipy_gte_019, "only run if scipy>=0.19.") def test_shape(self): import numpy as np import openmdao.api as om # create input param training data, of sizes 25, 5, and 10 points resp. 
p1 = np.linspace(0, 100, 25) p2 = np.linspace(-10, 10, 5) p3 = np.linspace(0, 1, 10) # can use meshgrid to create a 3D array of test data P1, P2, P3 = np.meshgrid(p1, p2, p3, indexing='ij') f = np.sqrt(P1) + P2 * P3 # verify the shape matches the order and size of the input params print(f.shape) # Create regular grid interpolator instance interp = om.MetaModelStructuredComp(method='scipy_cubic') interp.add_input('p1', 0.5, training_data=p1) interp.add_input('p2', 0.0, training_data=p2) interp.add_input('p3', 3.14, training_data=p3) interp.add_output('f', 0.0, training_data=f) # Set up the OpenMDAO model model = om.Group() model.add_subsystem('comp', interp, promotes=["*"]) prob = om.Problem(model) prob.setup() # set inputs prob.set_val('p1', 55.12) prob.set_val('p2', -2.14) prob.set_val('p3', 0.323) prob.run_model() computed = prob.get_val('f') actual = 6.73306472 assert_almost_equal(computed, actual) # we can verify all gradients by checking against finite-difference prob.check_partials(compact_print=True) @unittest.skipIf(not scipy_gte_019, "only run if scipy>=0.19.") def test_vectorized(self): import numpy as np import openmdao.api as om # create input param training data, of sizes 25, 5, and 10 points resp. p1 = np.linspace(0, 100, 25) p2 = np.linspace(-10, 10, 5) p3 = np.linspace(0, 1, 10) # can use meshgrid to create a 3D array of test data P1, P2, P3 = np.meshgrid(p1, p2, p3, indexing='ij') f = np.sqrt(P1) + P2 * P3 # Create regular grid interpolator instance interp = om.MetaModelStructuredComp(method='scipy_cubic', vec_size=2) interp.add_input('p1', 0.5, training_data=p1) interp.add_input('p2', 0.0, training_data=p2) interp.add_input('p3', 3.14, training_data=p3) interp.add_output('f', 0.0, training_data=f) # Set up the OpenMDAO model model = om.Group() model.add_subsystem('comp', interp, promotes=["*"]) prob = om.Problem(model) prob.setup() # set inputs prob.set_val('p1', np.array([55.12, 12.0])) prob.set_val('p2', np.array([-2.14, 3.5])) prob.set_val('p3', np.array([0.323, 0.5])) prob.run_model() computed = prob['f'] actual = np.array([6.73306472, 5.2118645]) assert_almost_equal(computed, actual) @unittest.skipIf(not scipy_gte_019, "only run if scipy>=0.19.") def test_training_derivatives(self): import numpy as np import openmdao.api as om # create input param training data, of sizes 25, 5, and 10 points resp. 
p1 = np.linspace(0, 100, 25) p2 = np.linspace(-10, 10, 5) p3 = np.linspace(0, 1, 10) # can use meshgrid to create a 3D array of test data P1, P2, P3 = np.meshgrid(p1, p2, p3, indexing='ij') f = np.sqrt(P1) + P2 * P3 # verify the shape matches the order and size of the input params print(f.shape) # Create regular grid interpolator instance interp = om.MetaModelStructuredComp(method='scipy_cubic', training_data_gradients=True) interp.add_input('p1', 0.5, p1) interp.add_input('p2', 0.0, p2) interp.add_input('p3', 3.14, p3) interp.add_output('f', 0.0, f) # Set up the OpenMDAO model model = om.Group() model.add_subsystem('comp', interp, promotes=["*"]) prob = om.Problem(model) prob.setup() # set inputs prob.set_val('p1', 55.12) prob.set_val('p2', -2.14) prob.set_val('p3', 0.323) prob.run_model() computed = prob.get_val('f') actual = 6.73306472 assert_almost_equal(computed, actual) # we can verify all gradients by checking against finite-difference prob.check_partials(compact_print=True) def test_error_messages_scalar_only(self): prob = om.Problem() model = prob.model comp = om.MetaModelStructuredComp(training_data_gradients=True, method='slinear', vec_size=3) with self.assertRaises(ValueError) as cm: comp.add_input('x1', np.array([1.0, 2.0])) msg = "MetaModelStructuredComp: Input x1 must either be scalar, or of length equal to vec_size." self.assertEqual(str(cm.exception), msg) with self.assertRaises(ValueError) as cm: comp.add_input('x1', np.zeros((3, 3))) self.assertEqual(str(cm.exception), msg) with self.assertRaises(ValueError) as cm: comp.add_output('x1', np.array([1.0, 2.0])) msg = "MetaModelStructuredComp: Output x1 must either be scalar, or of length equal to vec_size." self.assertEqual(str(cm.exception), msg) with self.assertRaises(ValueError) as cm: comp.add_output('x1', np.zeros((3, 3))) self.assertEqual(str(cm.exception), msg) if __name__ == "__main__": unittest.main()
40.718373
140
0.583719
[ "Apache-2.0" ]
JustinSGray/OpenMDAO
openmdao/components/tests/test_meta_model_structured_comp.py
54,074
Python
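A minimal usage sketch of the om.MetaModelStructuredComp pattern exercised by the tests in openmdao/components/tests/test_meta_model_structured_comp.py, assuming OpenMDAO is installed; the 1-D training grid below is illustrative rather than taken from the tests.

import numpy as np
import openmdao.api as om

# Illustrative 1-D training grid (not taken from the test file).
x_train = np.linspace(0.0, 1.0, 11)
f_train = x_train ** 2

# Structured metamodel with linear interpolation, as in the tests above.
interp = om.MetaModelStructuredComp(method='slinear', extrapolate=True)
interp.add_input('x', 0.0, training_data=x_train)
interp.add_output('f', 0.0, training_data=f_train)

model = om.Group()
model.add_subsystem('comp', interp, promotes=['*'])

prob = om.Problem(model)
prob.setup()
prob.set_val('x', 0.37)
prob.run_model()
print(prob.get_val('f'))  # piecewise-linear estimate of 0.37**2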
"""Collection of Jax network layers, wrapped to fit Ivy syntax and signature. """ # global import jax.numpy as jnp import jax.lax as jlax # local from ivy.functional.backends.jax import JaxArray def conv1d( x: JaxArray, filters: JaxArray, strides: int, padding: str, data_format: str = "NWC", dilations: int = 1, ) -> JaxArray: strides = (strides,) if isinstance(strides, int) else strides dilations = (dilations,) if isinstance(dilations, int) else dilations return jlax.conv_general_dilated( x, filters, strides, padding, None, dilations, (data_format, "WIO", data_format) ) def conv1d_transpose(*_): raise Exception("Convolutions not yet implemented for jax library") def conv2d(x, filters, strides, padding, data_format="NHWC", dilations=1): strides = [strides] * 2 if isinstance(strides, int) else strides dilations = [dilations] * 2 if isinstance(dilations, int) else dilations return jlax.conv_general_dilated( x, filters, strides, padding, None, dilations, (data_format, "HWIO", data_format), ) def depthwise_conv2d(*_): raise Exception("Convolutions not yet implemented for jax library") def conv2d_transpose(*_): raise Exception("Convolutions not yet implemented for jax library") def conv3d(*_): raise Exception("Convolutions not yet implemented for jax library") def conv3d_transpose(*_): raise Exception("Convolutions not yet implemented for jax library")
25.416667
88
0.691803
[ "Apache-2.0" ]
thatguuyG/ivy
ivy/functional/backends/jax/layers.py
1,525
Python
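A small standalone sketch of the conv1d wrapper defined in ivy/functional/backends/jax/layers.py, assuming only JAX is installed; the helper restates the wrapper inline so the snippet runs without the ivy package, and the shapes follow the ("NWC", "WIO", "NWC") dimension numbers passed to jlax.conv_general_dilated.

import jax.numpy as jnp
import jax.lax as jlax


def conv1d_nwc(x, filters, strides=1, padding="SAME", dilations=1):
    # Same call the wrapper above makes, for the default "NWC" layout.
    strides = (strides,) if isinstance(strides, int) else strides
    dilations = (dilations,) if isinstance(dilations, int) else dilations
    return jlax.conv_general_dilated(
        x, filters, strides, padding, None, dilations, ("NWC", "WIO", "NWC")
    )


x = jnp.ones((2, 16, 3))       # (batch, width, in_channels)
filters = jnp.ones((5, 3, 8))  # (filter_width, in_channels, out_channels)
y = conv1d_nwc(x, filters)
print(y.shape)                 # (2, 16, 8) with "SAME" padding and stride 1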
import numpy import torch import pytorch_pfn_extras as ppe from torch.utils.data import Dataset class TabularDataset(Dataset): """An abstract class that represents tabular dataset. This class represents a tabular dataset. In a tabular dataset, all examples have the same number of elements. For example, all examples of the dataset below have three elements (:obj:`a[i]`, :obj:`b[i]`, and :obj:`c[i]`). .. csv-table:: :header: , a, b, c 0, :obj:`a[0]`, :obj:`b[0]`, :obj:`c[0]` 1, :obj:`a[1]`, :obj:`b[1]`, :obj:`c[1]` 2, :obj:`a[2]`, :obj:`b[2]`, :obj:`c[2]` 3, :obj:`a[3]`, :obj:`b[3]`, :obj:`c[3]` Since an example can be represented by both tuple and dict ( :obj:`(a[i], b[i], c[i])` and :obj:`{'a': a[i], 'b': b[i], 'c': c[i]}`), this class uses :attr:`mode` to indicate which representation will be used. If there is only one column, an example also can be represented by a value (:obj:`a[i]`). In this case, :attr:`mode` is :obj:`None`. An inheritance should implement :meth:`__len__`, :attr:`keys`, :attr:`mode` and :meth:`get_examples`. >>> import numpy as np >>> >>> from pytorch_pfn_extras import dataset >>> >>> class MyDataset(dataset.TabularDataset): ... ... def __len__(self): ... return 4 ... ... @property ... def keys(self): ... return ('a', 'b', 'c') ... ... @property ... def mode(self): ... return tuple ... ... def get_examples(self, indices, key_indices): ... data = np.arange(12).reshape((4, 3)) ... if indices is not None: ... data = data[indices] ... if key_indices is not None: ... data = data[:, list(key_indices)] ... return tuple(data.transpose()) ... >>> dataset = MyDataset() >>> len(dataset) 4 >>> dataset.keys ('a', 'b', 'c') >>> dataset.astuple()[0] (0, 1, 2) >>> sorted(dataset.asdict()[0].items()) [('a', 0), ('b', 1), ('c', 2)] >>> >>> view = dataset.slice[[3, 2], ('c', 0)] >>> len(view) 2 >>> view.keys ('c', 'a') >>> view.astuple()[1] (8, 6) >>> sorted(view.asdict()[1].items()) [('a', 6), ('c', 8)] """ def __len__(self): raise NotImplementedError @property def keys(self): """Names of columns. A tuple of strings that indicate the names of columns. """ raise NotImplementedError @property def mode(self): """Mode of representation. This indicates the type of value returned by :meth:`fetch` and :meth:`__getitem__`. :class:`tuple`, :class:`dict`, and :obj:`None` are supported. """ raise NotImplementedError def get_examples(self, indices, key_indices): """Return a part of data. Args: indices (list of ints or slice): Indices of requested rows. If this argument is :obj:`None`, it indicates all rows. key_indices (tuple of ints): Indices of requested columns. If this argument is :obj:`None`, it indicates all columns. Returns: tuple of lists/arrays """ raise NotImplementedError @property def slice(self): """Get a slice of dataset. Args: indices (list/array of ints/bools or slice): Requested rows. keys (tuple of ints/strs or int or str): Requested columns. Returns: A view of specified range. """ return ppe.dataset.tabular._slice._SliceHelper(self) def fetch(self): """Fetch data. This method fetches all data of the dataset/view. Note that this method returns a column-major data (i.e. :obj:`([a[0], ..., a[3]], ..., [c[0], ... c[3]])`, :obj:`{'a': [a[0], ..., a[3]], ..., 'c': [c[0], ..., c[3]]}`, or :obj:`[a[0], ..., a[3]]`). Returns: If :attr:`mode` is :class:`tuple`, this method returns a tuple of lists/arrays. If :attr:`mode` is :class:`dict`, this method returns a dict of lists/arrays. 
""" examples = self.get_examples(None, None) if self.mode is tuple: return examples elif self.mode is dict: return dict(zip(self.keys, examples)) elif self.mode is None: return examples[0] def convert(self, data): """Convert fetched data. This method takes data fetched by :meth:`fetch` and pre-process them before passing them to models. The default behaviour is converting each column into an ndarray. This behaviour can be overridden by :meth:`with_converter`. If the dataset is constructed by :meth:`concat` or :meth:`join`, the converter of the first dataset is used. Args: data (tuple or dict): Data from :meth:`fetch`. Returns: A tuple or dict. Each value is an ndarray. """ if isinstance(data, tuple): return tuple(_as_array(d) for d in data) elif isinstance(data, dict): return {k: _as_array(v) for k, v in data.items()} else: return _as_array(data) def astuple(self): """Return a view with tuple mode. Returns: A view whose :attr:`mode` is :class:`tuple`. """ return ppe.dataset.tabular._asmode._Astuple(self) def asdict(self): """Return a view with dict mode. Returns: A view whose :attr:`mode` is :class:`dict`. """ return ppe.dataset.tabular._asmode._Asdict(self) def concat(self, *datasets): """Stack datasets along rows. Args: datasets (iterable of :class:`TabularDataset`): Datasets to be concatenated. All datasets must have the same :attr:`keys`. Returns: A concatenated dataset. """ return ppe.dataset.tabular._concat._Concat( self, *datasets) def join(self, *datasets): """Stack datasets along columns. Args: datasets (iterable of :class:`TabularDataset`): Datasets to be concatenated. All datasets must have the same length Returns: A joined dataset. """ return ppe.dataset.tabular._join._Join(self, *datasets) def transform(self, keys, transform): """Apply a transform to each example. The transformations are a list where each element is a tuple that holds the transformation signature and a callable that is the transformation itself. The transformation signature is a tuple of 2 elements with the first one being the keys of the dataset that are taken as inputs. And the last one the outputs it produces for the transformation `keys` argument. When multiple transformations are specified, the outputs must be disjoint or `ValueError` will be risen. Args: keys (tuple of strs): The keys of transformed examples. transform (list of tuples): A list where each element specifies a transformation with a tuple with the transformation signature and a callable that takes an example and returns transformed example. :attr:`mode` of transformed dataset is determined by the transformed examples. Returns: A transfromed dataset. """ return ppe.dataset.tabular._transform._Transform( self, keys, transform) def transform_batch(self, keys, transform_batch): """Apply a transform to examples. The transformations are a list where each element is a tuple that holds the transformation signature and a callable that is the transformation itself. The transformation signature is a tuple of 2 elements with the first one being the keys of the dataset that are taken as inputs. And the last one the outputs it produces for the transformation `keys` argument. When multiple transformations are specified, the outputs must be disjoint or `ValueError` will be risen. Args: keys (tuple of strs): The keys of transformed examples. 
transform_batch (list of tuples): A list where each element specifies a transformation with a tuple with the transformation signature and a callable that takes a batch of examples and returns a batch of transformed examples. :attr:`mode` of transformed dataset is determined by the transformed examples. Returns: A transfromed dataset. """ return ppe.dataset.tabular._transform._TransformBatch( self, keys, transform_batch) def with_converter(self, converter): """Override the behaviour of :meth:`convert`. This method overrides :meth:`convert`. Args: converter (callable): A new converter. Returns: A dataset with the new converter. """ return ppe.dataset.tabular._with_converter._WithConverter( self, converter) def get_example(self, i): example = self.get_examples([i], None) example = tuple(col[0] for col in example) if self.mode is tuple: return example elif self.mode is dict: return dict(zip(self.keys, example)) elif self.mode is None: return example[0] def __iter__(self): return (self.get_example(i) for i in range(len(self))) def __getitem__(self, index): """Returns an example or a sequence of examples. It implements the standard Python indexing and one-dimensional integer array indexing. It uses the :meth:`get_example` method by default, but it may be overridden by the implementation to, for example, improve the slicing performance. Args: index (int, slice, list or numpy.ndarray): An index of an example or indexes of examples. Returns: If index is int, returns an example created by `get_example`. If index is either slice or one-dimensional list or numpy.ndarray, returns a list of examples created by `get_example`. """ if isinstance(index, slice): current, stop, step = index.indices(len(self)) return [self.get_example(i) for i in range(current, stop, step)] elif isinstance(index, list) or isinstance(index, numpy.ndarray): return [self.get_example(i) for i in index] else: return self.get_example(index) def _as_array(data): if isinstance(data, (numpy.ndarray, torch.Tensor)): return data else: return numpy.array(data)
33.38024
79
0.577092
[ "MIT" ]
HiroakiMikami/pytorch-pfn-extras
pytorch_pfn_extras/dataset/tabular/tabular_dataset.py
11,149
Python
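A short sketch of TabularDataset.fetch() and convert(), assuming pytorch-pfn-extras is installed; the MyDataset subclass mirrors the example already given in the class docstring of pytorch_pfn_extras/dataset/tabular/tabular_dataset.py.

import numpy as np

from pytorch_pfn_extras import dataset


class MyDataset(dataset.TabularDataset):
    def __len__(self):
        return 4

    @property
    def keys(self):
        return ('a', 'b', 'c')

    @property
    def mode(self):
        return tuple

    def get_examples(self, indices, key_indices):
        data = np.arange(12).reshape((4, 3))
        if indices is not None:
            data = data[indices]
        if key_indices is not None:
            data = data[:, list(key_indices)]
        return tuple(data.transpose())


ds = MyDataset()
columns = ds.fetch()              # column-major tuple: ([0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8, 11])
arrays = ds.convert(columns)      # each column converted to an ndarray
print([a.shape for a in arrays])  # [(4,), (4,), (4,)]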
from collections import defaultdict
from operator import attrgetter
from typing import Union

from hypergraph.network import HyperGraph, StateNode, Node, BipartiteNetwork, BipartiteStateNetwork
from hypergraph.transition import gamma, d, pi


def create_network(hypergraph: HyperGraph, non_backtracking: bool) -> Union[BipartiteNetwork, BipartiteStateNetwork]:
    nodes, edges, weights = hypergraph

    print("[bipartite] creating bipartite...")

    gamma_ = gamma(weights)
    d_ = d(edges)
    pi_ = pi(edges, weights)

    # Feature (hyperedge) nodes get ids above the largest physical node id.
    bipartite_start_id = max(map(attrgetter("id"), nodes)) + 1

    features = [Node(bipartite_start_id + i, f"Hyperedge {edge.id}")
                for i, edge in enumerate(edges)]

    edge_to_feature_id = {edge.id: bipartite_start_id + i
                          for i, edge in enumerate(edges)}

    links = defaultdict(float)

    if non_backtracking:
        # Assign consecutive state ids on first access.
        get_state_id = defaultdict(lambda: len(get_state_id) + 1)

        states = [StateNode(get_state_id[node.id], node.id) for node in sorted(nodes)]

        for edge in edges:
            feature_id = edge_to_feature_id[edge.id]

            state_ids = (get_state_id[node.id] for node in edge.nodes)
            feature_states = [StateNode(get_state_id[feature_id, state_id], feature_id)
                              for state_id in state_ids]
            states.extend(feature_states)

            for node in edge.nodes:
                P_ue = edge.omega / d_(node)
                P_ev = gamma_(edge, node)

                if P_ue * P_ev < 1e-10:
                    continue

                state_id = get_state_id[node.id]
                target_feature_state_id = get_state_id[feature_id, state_id]

                # node state -> hyperedge state, weighted by pi(u) * omega_e / d(u)
                links[state_id, target_feature_state_id] = pi_(node) * P_ue

                # hyperedge memory states -> node state, except the memory state
                # that encodes arriving from this node (non-backtracking)
                for source_feature_state_id, node_id in feature_states:
                    if source_feature_state_id != target_feature_state_id:
                        links[source_feature_state_id, state_id] = P_ev

        links = [(source, target, weight)
                 for (source, target), weight in sorted(links.items())]

        return BipartiteStateNetwork(nodes, links, states, features)

    else:
        for edge in edges:
            for node in edge.nodes:
                P_ue = edge.omega / d_(node)
                P_ev = gamma_(edge, node)

                if P_ue * P_ev < 1e-10:
                    continue

                feature_id = edge_to_feature_id[edge.id]

                links[node.id, feature_id] = pi_(node) * P_ue
                links[feature_id, node.id] = P_ev

        links = [(source, target, weight)
                 for (source, target), weight in sorted(links.items())]

        return BipartiteNetwork(nodes, links, features)
33.609756
117
0.607402
[ "MIT" ]
antoneri/mapping-hypergraphs
hypergraph/representation/bipartite.py
2,756
Python
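The non-backtracking branch of create_network in hypergraph/representation/bipartite.py assigns consecutive state ids through a defaultdict whose factory reads the current dict length; a tiny pure-Python illustration of that idiom:

from collections import defaultdict

# Each new key receives the next integer id, starting at 1, on first lookup.
get_state_id = defaultdict(lambda: len(get_state_id) + 1)

print(get_state_id['u'])       # 1
print(get_state_id['v'])       # 2
print(get_state_id['u'])       # still 1: the id was stored on first access
print(get_state_id[('e', 1)])  # 3: composite keys such as (feature_id, state_id) work too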
def invert_binary_tree(node):
    if node:
        node.left, node.right = invert_binary_tree(node.right), invert_binary_tree(node.left)
    return node


class BinaryTreeNode(object):
    def __init__(self, value, left=None, right=None):
        self.value = value
        # Store the optional children passed to the constructor.
        self.left = left
        self.right = right


btn_root = BinaryTreeNode(10)
btn_1 = BinaryTreeNode(8)
btn_2 = BinaryTreeNode(9)
btn_root.left = btn_1
btn_root.right = btn_2
print(btn_root.left.value)
print(btn_root.right.value)
btn_root = invert_binary_tree(btn_root)
print(btn_root.left.value)
print(btn_root.right.value)
26.043478
93
0.72621
[ "MIT" ]
YazzyYaz/codinginterviews
practice_problems/trees_graphs/invert_binary_tree.py
599
Python
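The recursive invert_binary_tree above uses one call frame per tree level; for very deep trees an iterative variant with an explicit stack avoids Python's recursion limit. A possible sketch (the _Node class here is a stand-in for BinaryTreeNode):

class _Node:
    # Stand-in for BinaryTreeNode above.
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right


def invert_binary_tree_iterative(root):
    # Swap children level by level with an explicit stack instead of recursion.
    stack = [root]
    while stack:
        node = stack.pop()
        if node is None:
            continue
        node.left, node.right = node.right, node.left
        stack.append(node.left)
        stack.append(node.right)
    return root


root = _Node(10, _Node(8), _Node(9))
invert_binary_tree_iterative(root)
print(root.left.value, root.right.value)  # 9 8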
import network
import torch

if __name__ == '__main__':
    net = network.modeling.__dict__['deeplabv3plus_resnet50']()
    print(net)

    input = torch.FloatTensor(2, 3, 512, 512)
    output = net(input)
    print(output.shape)
22.6
63
0.69469
[ "MIT" ]
WuShaogui/DeepLabV3Plus-Pytorch
test.py
226
Python
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests for Serial Console."""

import socket

import mock
import six.moves

from nova.console import serial
from nova import exception
from nova import test


class SerialTestCase(test.NoDBTestCase):
    def setUp(self):
        super(SerialTestCase, self).setUp()
        serial.ALLOCATED_PORTS = set()

    def test_get_port_range(self):
        start, stop = serial._get_port_range()
        self.assertEqual(10000, start)
        self.assertEqual(20000, stop)

    def test_get_port_range_customized(self):
        self.flags(port_range='30000:40000', group='serial_console')
        start, stop = serial._get_port_range()
        self.assertEqual(30000, start)
        self.assertEqual(40000, stop)

    def test_get_port_range_bad_range(self):
        self.flags(port_range='40000:30000', group='serial_console')
        start, stop = serial._get_port_range()
        self.assertEqual(10000, start)
        self.assertEqual(20000, stop)

    def test_get_port_range_not_numeric(self):
        self.flags(port_range='xxx:yyy', group='serial_console')
        start, stop = serial._get_port_range()
        self.assertEqual(10000, start)
        self.assertEqual(20000, stop)

    def test_get_port_range_invalid_syntax(self):
        self.flags(port_range='10:20:30', group='serial_console')
        start, stop = serial._get_port_range()
        self.assertEqual(10000, start)
        self.assertEqual(20000, stop)

    @mock.patch('socket.socket')
    def test_verify_port(self, fake_socket):
        s = mock.MagicMock()
        fake_socket.return_value = s

        serial._verify_port('127.0.0.1', 10)

        s.bind.assert_called_once_with(('127.0.0.1', 10))

    @mock.patch('socket.socket')
    def test_verify_port_in_use(self, fake_socket):
        s = mock.MagicMock()
        s.bind.side_effect = socket.error()
        fake_socket.return_value = s

        self.assertRaises(
            exception.SocketPortInUseException,
            serial._verify_port, '127.0.0.1', 10)

        s.bind.assert_called_once_with(('127.0.0.1', 10))

    @mock.patch('nova.console.serial._verify_port', lambda x, y: None)
    def test_acquire_port(self):
        start, stop = 15, 20
        self.flags(
            port_range='%d:%d' % (start, stop),
            group='serial_console')

        for port in six.moves.range(start, stop):
            self.assertEqual(port, serial.acquire_port('127.0.0.1'))

        for port in six.moves.range(start, stop):
            self.assertEqual(port, serial.acquire_port('127.0.0.2'))

        self.assertEqual(10, len(serial.ALLOCATED_PORTS))

    @mock.patch('nova.console.serial._verify_port')
    def test_acquire_port_in_use(self, fake_verify_port):
        def port_10000_already_used(host, port):
            if port == 10000 and host == '127.0.0.1':
                raise exception.SocketPortInUseException(
                    port=port, host=host, error="already in use")
        fake_verify_port.side_effect = port_10000_already_used

        self.assertEqual(10001, serial.acquire_port('127.0.0.1'))
        self.assertEqual(10000, serial.acquire_port('127.0.0.2'))

        self.assertNotIn(('127.0.0.1', 10000), serial.ALLOCATED_PORTS)
        self.assertIn(('127.0.0.1', 10001), serial.ALLOCATED_PORTS)
        self.assertIn(('127.0.0.2', 10000), serial.ALLOCATED_PORTS)

    @mock.patch('nova.console.serial._verify_port')
    def test_acquire_port_not_ble_to_bind_at_any_port(self, fake_verify_port):
        start, stop = 15, 20
        self.flags(
            port_range='%d:%d' % (start, stop),
            group='serial_console')

        fake_verify_port.side_effect = (
            exception.SocketPortRangeExhaustedException(host='127.0.0.1'))

        self.assertRaises(
            exception.SocketPortRangeExhaustedException,
            serial.acquire_port, '127.0.0.1')

    def test_release_port(self):
        serial.ALLOCATED_PORTS.add(('127.0.0.1', 100))
        serial.ALLOCATED_PORTS.add(('127.0.0.2', 100))
        self.assertEqual(2, len(serial.ALLOCATED_PORTS))
        serial.release_port('127.0.0.1', 100)
        self.assertEqual(1, len(serial.ALLOCATED_PORTS))
        serial.release_port('127.0.0.2', 100)
        self.assertEqual(0, len(serial.ALLOCATED_PORTS))
35.449275
78
0.656378
[ "Apache-2.0" ]
ChinaMassClouds/nova
nova/tests/unit/console/test_serial.py
4,892
Python
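The nova serial-console tests above revolve around acquiring the first bindable port in a configured range and tracking it in ALLOCATED_PORTS. A generic stdlib-only sketch of that idea, not nova's implementation; the function name and error handling here are illustrative:

import socket


def acquire_first_free_port(host, start, stop, allocated):
    # Try each port in [start, stop), skip ones already handed out, and
    # return the first that can actually be bound on this host.
    for port in range(start, stop):
        if (host, port) in allocated:
            continue
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.bind((host, port))
        except OSError:
            continue
        finally:
            sock.close()
        allocated.add((host, port))
        return port
    raise RuntimeError("no free port for %s in [%d, %d)" % (host, start, stop))


allocated_ports = set()
print(acquire_first_free_port('127.0.0.1', 15000, 15010, allocated_ports))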
# Copyright 2018-2021 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import signal import sys from typing import Any, Dict import click import tornado.ioloop from streamlit.git_util import GitRepo, MIN_GIT_VERSION from streamlit import version from streamlit import config from streamlit import net_util from streamlit import url_util from streamlit import env_util from streamlit import secrets from streamlit import util from streamlit.config import CONFIG_FILENAMES from streamlit.logger import get_logger from streamlit.report import Report from streamlit.secrets import SECRETS_FILE_LOC from streamlit.server.server import Server, server_address_is_unix_socket from streamlit.watcher.file_watcher import watch_file from streamlit.watcher.file_watcher import report_watchdog_availability LOGGER = get_logger(__name__) # Wait for 1 second before opening a browser. This gives old tabs a chance to # reconnect. # This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS. BROWSER_WAIT_TIMEOUT_SEC = 1 NEW_VERSION_TEXT = """ %(new_version)s See what's new at https://discuss.streamlit.io/c/announcements Enter the following command to upgrade: %(prompt)s %(command)s """ % { "new_version": click.style( "A new version of Streamlit is available.", fg="blue", bold=True ), "prompt": click.style("$", fg="blue"), "command": click.style("pip install streamlit --upgrade", bold=True), } def _set_up_signal_handler(): LOGGER.debug("Setting up signal handler") def signal_handler(signal_number, stack_frame): # The server will shut down its threads and stop the ioloop Server.get_current().stop() signal.signal(signal.SIGTERM, signal_handler) signal.signal(signal.SIGINT, signal_handler) if sys.platform == "win32": signal.signal(signal.SIGBREAK, signal_handler) else: signal.signal(signal.SIGQUIT, signal_handler) def _fix_sys_path(script_path): """Add the script's folder to the sys path. Python normally does this automatically, but since we exec the script ourselves we need to do it instead. """ sys.path.insert(0, os.path.dirname(script_path)) def _fix_matplotlib_crash(): """Set Matplotlib backend to avoid a crash. The default Matplotlib backend crashes Python on OSX when run on a thread that's not the main thread, so here we set a safer backend as a fix. Users can always disable this behavior by setting the config runner.fixMatplotlib = false. This fix is OS-independent. We didn't see a good reason to make this Mac-only. Consistency within Streamlit seemed more important. """ if config.get_option("runner.fixMatplotlib"): try: # TODO: a better option may be to set # os.environ["MPLBACKEND"] = "Agg". We'd need to do this towards # the top of __init__.py, before importing anything that imports # pandas (which imports matplotlib). Alternately, we could set # this environment variable in a new entrypoint defined in # setup.py. Both of these introduce additional trickiness: they # need to run without consulting streamlit.config.get_option, # because this would import streamlit, and therefore matplotlib. 
import matplotlib matplotlib.use("Agg") except ImportError: pass def _fix_tornado_crash(): """Set default asyncio policy to be compatible with Tornado 6. Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows. So here we pick the older SelectorEventLoopPolicy when the OS is Windows if the known-incompatible default policy is in use. This has to happen as early as possible to make it a low priority and overrideable See: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 """ if env_util.IS_WINDOWS and sys.version_info >= (3, 8): import asyncio try: from asyncio import ( # type: ignore[attr-defined] WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy, ) except ImportError: pass # Not affected else: if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: # WindowsProactorEventLoopPolicy is not compatible with # Tornado 6 fallback to the pre-3.8 default of Selector asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) def _fix_sys_argv(script_path, args): """sys.argv needs to exclude streamlit arguments and parameters and be set to what a user's script may expect. """ import sys sys.argv = [script_path] + list(args) def _on_server_start(server): _maybe_print_old_git_warning(server.script_path) _print_url(server.is_running_hello) report_watchdog_availability() _print_new_version_message() # Load secrets.toml if it exists. If the file doesn't exist, this # function will return without raising an exception. We catch any parse # errors and display them here. try: secrets.load_if_toml_exists() except BaseException as e: LOGGER.error(f"Failed to load {SECRETS_FILE_LOC}", exc_info=e) def maybe_open_browser(): if config.get_option("server.headless"): # Don't open browser when in headless mode. return if server.browser_is_connected: # Don't auto-open browser if there's already a browser connected. # This can happen if there's an old tab repeatedly trying to # connect, and it happens to success before we launch the browser. return if config.is_manually_set("browser.serverAddress"): addr = config.get_option("browser.serverAddress") elif config.is_manually_set("server.address"): if server_address_is_unix_socket(): # Don't open browser when server address is an unix socket return addr = config.get_option("server.address") else: addr = "localhost" util.open_browser(Report.get_url(addr)) # Schedule the browser to open using the IO Loop on the main thread, but # only if no other browser connects within 1s. ioloop = tornado.ioloop.IOLoop.current() ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser) def _fix_pydeck_mapbox_api_warning(): """Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception""" os.environ["MAPBOX_API_KEY"] = config.get_option("mapbox.token") def _print_new_version_message(): if version.should_show_new_version_notice(): click.secho(NEW_VERSION_TEXT) def _print_url(is_running_hello): if is_running_hello: title_message = "Welcome to Streamlit. Check out our demo in your browser." else: title_message = "You can now view your Streamlit app in your browser." 
named_urls = [] if config.is_manually_set("browser.serverAddress"): named_urls = [ ("URL", Report.get_url(config.get_option("browser.serverAddress"))) ] elif ( config.is_manually_set("server.address") and not server_address_is_unix_socket() ): named_urls = [ ("URL", Report.get_url(config.get_option("server.address"))), ] elif config.get_option("server.headless"): internal_ip = net_util.get_internal_ip() if internal_ip: named_urls.append(("Network URL", Report.get_url(internal_ip))) external_ip = net_util.get_external_ip() if external_ip: named_urls.append(("External URL", Report.get_url(external_ip))) else: named_urls = [ ("Local URL", Report.get_url("localhost")), ] internal_ip = net_util.get_internal_ip() if internal_ip: named_urls.append(("Network URL", Report.get_url(internal_ip))) click.secho("") click.secho(" %s" % title_message, fg="blue", bold=True) click.secho("") for url_name, url in named_urls: url_util.print_url(url_name, url) click.secho("") if is_running_hello: click.secho(" Ready to create your own Python apps super quickly?") click.secho(" Head over to ", nl=False) click.secho("https://docs.streamlit.io", bold=True) click.secho("") click.secho(" May you create awesome apps!") click.secho("") click.secho("") def _maybe_print_old_git_warning(script_path: str) -> None: """If our script is running in a Git repo, and we're running a very old Git version, print a warning that Git integration will be unavailable. """ repo = GitRepo(script_path) if ( not repo.is_valid() and repo.git_version is not None and repo.git_version < MIN_GIT_VERSION ): git_version_string = ".".join(str(val) for val in repo.git_version) min_version_string = ".".join(str(val) for val in MIN_GIT_VERSION) click.secho("") click.secho(" Git integration is disabled.", fg="yellow", bold=True) click.secho("") click.secho( f" Streamlit requires Git {min_version_string} or later, " f"but you have {git_version_string}.", fg="yellow", ) click.secho( " Git is used by Streamlit Sharing (https://streamlit.io/sharing).", fg="yellow", ) click.secho(" To enable this feature, please update Git.", fg="yellow") def load_config_options(flag_options: Dict[str, Any]): """Load config options from config.toml files, then overlay the ones set by flag_options. The "streamlit run" command supports passing Streamlit's config options as flags. This function reads through the config options set via flag, massages them, and passes them to get_config_options() so that they overwrite config option defaults and those loaded from config.toml files. Parameters ---------- flag_options : Dict[str, Any] A dict of config options where the keys are the CLI flag version of the config option names. """ options_from_flags = { name.replace("_", "."): val for name, val in flag_options.items() if val is not None } # Force a reparse of config files (if they exist). The result is cached # for future calls. config.get_config_options(force_reparse=True, options_from_flags=options_from_flags) def _install_config_watchers(flag_options: Dict[str, Any]): def on_config_changed(_path): load_config_options(flag_options) for filename in CONFIG_FILENAMES: if os.path.exists(filename): watch_file(filename, on_config_changed) def run(script_path, command_line, args, flag_options): """Run a script in a separate thread and start a server for the app. This starts a blocking ioloop. 
Parameters ---------- script_path : str command_line : str args : [str] flag_options : Dict[str, Any] """ _fix_sys_path(script_path) _fix_matplotlib_crash() _fix_tornado_crash() _fix_sys_argv(script_path, args) _fix_pydeck_mapbox_api_warning() _install_config_watchers(flag_options) # Install a signal handler that will shut down the ioloop # and close all our threads _set_up_signal_handler() ioloop = tornado.ioloop.IOLoop.current() # Create and start the server. server = Server(ioloop, script_path, command_line) server.start(_on_server_start) # (Must come after start(), because this starts a new thread and start() # may call sys.exit() which doesn't kill other threads. server.add_preheated_report_session() # Start the ioloop. This function will not return until the # server is shut down. ioloop.start()
34.469945
105
0.684052
[ "MIT" ]
Deepanjalkumar/Attacksurfacemanagement
notebook/lib/python3.9/site-packages/streamlit/bootstrap.py
12,616
Python
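A minimal standalone sketch of the event-loop-policy fix that _fix_tornado_crash performs above; the wrapper name is illustrative, and the Windows-only asyncio attributes are only touched inside the platform check, so the snippet runs safely on any platform.

import asyncio
import sys

def ensure_selector_event_loop_policy():
    # Mirrors _fix_tornado_crash(): on Windows with Python 3.8+, the default
    # Proactor event loop lacks add_reader()/add_writer(), which Tornado 6
    # relies on, so fall back to the Selector-based policy.
    if sys.platform == "win32" and sys.version_info >= (3, 8):
        if type(asyncio.get_event_loop_policy()) is asyncio.WindowsProactorEventLoopPolicy:
            asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

ensure_selector_event_loop_policy()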
{ 'repo_type' : 'archive', 'download_locations' : [ #UPDATECHECKS: http://fftw.org/download.html #{ "url" : "http://fftw.org/fftw-3.3.9.tar.gz", "hashes" : [ { "type" : "sha256", "sum" : "bf2c7ce40b04ae811af714deb512510cc2c17b9ab9d6ddcf49fe4487eea7af3d" }, ], }, #{ "url" : "https://fossies.org/linux/misc/fftw-3.3.9.tar.gz", "hashes" : [ { "type" : "sha256", "sum" : "bf2c7ce40b04ae811af714deb512510cc2c17b9ab9d6ddcf49fe4487eea7af3d" }, ], }, { "url" : "http://fftw.org/fftw-3.3.10.tar.gz", "hashes" : [ { "type" : "sha256", "sum" : "56c932549852cddcfafdab3820b0200c7742675be92179e59e6215b340e26467" }, ], }, { "url" : "https://fossies.org/linux/misc/fftw-3.3.10.tar.gz", "hashes" : [ { "type" : "sha256", "sum" : "56c932549852cddcfafdab3820b0200c7742675be92179e59e6215b340e26467" }, ], }, ], 'rename_folder' : 'fftw3_dll_double', 'configure_options': '--host={target_host} --prefix={output_prefix}/fftw3_dll --exec-prefix={output_prefix}/fftw3_dll ' '--enable-shared --disable-static ' '--disable-silent-rules --disable-doc ' '--disable-alloca --with-our-malloc --with-windows-f77-mangling ' '--enable-threads --with-combined-threads ' '--disable-float --disable-long-double -disable-quad-precision ' # 2019.12.13 the default is "DOUBLE" '--enable-sse2 --enable-avx --enable-avx2 --disable-altivec --disable-vsx --disable-neon ' # 2019.12.13 removed --enable-sse as SSE only builds with "FLOAT/SINGLE" , 'regex_replace': { 'post_patch': [ { 0: r'fftw\${{PREC_SUFFIX}}\.pc', 1: r'fftw3${{PREC_SUFFIX}}.pc', 'in_file': './CMakeLists.txt' }, ], }, 'run_post_install' : ( 'ls -alR {output_prefix}/fftw3_dll/bin', ), 'update_check' : { 'url' : 'ftp://ftp.fftw.org/pub/fftw/', 'type' : 'ftpindex', 'regex' : r'fftw-(?P<version_num>[\d.]+)\.tar\.gz' }, '_info' : { 'version' : '3.3.10', 'fancy_name' : 'fftw3_dll_double' }, }
61.272727
188
0.605836
[ "MPL-2.0", "MPL-2.0-no-copyleft-exception" ]
hydra3333/h3333_python_cross_compile_script_v100
packages/dependencies/fftw3_dll_double.py
2,022
Python
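A quick self-contained check of the update_check regex in the descriptor above, showing how the version number would be pulled out of a file name from the FTP index (the sample file name is just an example).

import re

pattern = re.compile(r'fftw-(?P<version_num>[\d.]+)\.tar\.gz')
match = pattern.search('fftw-3.3.10.tar.gz')
if match:
    print(match.group('version_num'))  # -> 3.3.10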
from django.shortcuts import render, get_object_or_404 from .models import PostReview from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger def single_review(request, year, month, day, review): review = get_object_or_404(PostReview, slug=review, status='published', publish__year=year, publish__month=month, publish__day=day) return render(request, 'reviews/review/single.html', {'review': review}) def review_list(request): list_object = PostReview.published.all() paginator = Paginator(list_object, 1) page = request.GET.get('page') try: reviews = paginator.page(page) except PageNotAnInteger: reviews = paginator.page(1) except EmptyPage: reviews = paginator.page(paginator.num_pages) return render(request, 'reviews/review/list.html', {'page': page, 'reviews': reviews})
35.758621
76
0.608486
[ "MIT" ]
Karol-Zielke/book_post-review
main/reviews/views.py
1,037
Python
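The views above expect a date and a slug in the URL; this is a hypothetical urls.py wiring for them (the route names and app_name are assumptions, not taken from the repository).

from django.urls import path

from . import views

app_name = 'reviews'

urlpatterns = [
    path('', views.review_list, name='review_list'),
    path('<int:year>/<int:month>/<int:day>/<slug:review>/',
         views.single_review, name='single_review'),
]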
import json
import time
import datetime
import requests

# Stamps of "<config id><date>" combinations that have already been played back
# (seeded with one example stamp).
playbackStampRecords = ["b500c6b0-633b-11ec-85c5-cba80427674d2021-10-10"]


def getConfigurations():
    url = "https://data.mongodb-api.com/app/data-mtybs/endpoint/data/beta/action/find"

    payload = json.dumps({
        "collection": "configurations",
        "database": "themorningprojectdb",
        "dataSource": "Cluster0"
    })
    headers = {
        'Content-Type': 'application/json',
        'Access-Control-Request-Headers': '*',
        'api-key': 'kvErM5pzFQaISsF733UpenYeDTT7bWrJ85mAxhz956wb91U5igFxsJoDEDpyW6NJ'
    }

    response = requests.request("POST", url, headers=headers, data=payload)
    jsonResponse = response.json()
    return jsonResponse['documents']


def isNotificationScheduled():
    configList = getConfigurations()
    currentDate = datetime.datetime.now().date()
    currentTime = datetime.datetime.now().strftime("%H:%M")
    for config in configList:
        if config["id"] + str(currentDate) not in playbackStampRecords \
                and config['settings'][0]['starttime'] <= currentTime <= config['settings'][0]['endtime']:
            return True
    return False


def getUserConfiguration():
    configList = getConfigurations()
    currentDate = datetime.datetime.now().date()
    currentTime = datetime.datetime.now().strftime("%H:%M")
    for config in configList:
        if config["id"] + str(currentDate) not in playbackStampRecords \
                and config['settings'][0]['starttime'] <= currentTime <= config['settings'][0]['endtime']:
            email = config['settings'][0]['amemail']
            password = config['settings'][0]['ampassword']
            name = config['settings'][0]['name']
            playbackInformation = config['settings'][0]['info']
            if '00:00' <= currentTime < '12:00':
                dayTimeDescriptor = 'morning'
            elif '12:00' <= currentTime <= '16:00':
                dayTimeDescriptor = 'afternoon'
            else:
                dayTimeDescriptor = 'night'
            playbackStampRecords.append(config['id'] + str(currentDate))
            return [email, password, dayTimeDescriptor, name, playbackInformation]
    return None
39.678571
175
0.651215
[ "MIT" ]
briankinsella26/thegoodmorningproject
pi_scripts/user_details.py
2,222
Python
from django.apps import AppConfig class VideoBackConfig(AppConfig): name = 'video_background' verbose_name = "Video Backgrounds"
19.857143
38
0.76259
[ "BSD-3-Clause" ]
LegionMarket/django-cms-base
video_background/apps.py
139
Python
from django.shortcuts import render

# Create your views here.
from django.http import HttpResponse
# Category is assumed to live in this app's models module (standard Rango layout).
from rango.models import Category


def index(request):
    category_list = Category.objects.order_by('-name')[:5]
    # category_list = Category.objects().order_by('-name')
    context = {'categories': category_list}  # Here go the variables for the template
    return render(request, 'index.html', context)
29.307692
91
0.732283
[ "Apache-2.0" ]
pmmre/SSBW2
rango/views.py
382
Python
import numpy as np from LoopStructural.utils import getLogger logger = getLogger(__name__) def gradients(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1, epsscale=0.5): """ Calculate the partial derivatives of a function at a set of values. The derivatives are calculated using the central difference, using an iterative method to check that the values converge as step size decreases. Parameters ---------- vals: array_like A set of values, that are passed to a function, at which to calculate the gradient of that function func: A function that takes in an array of values. releps: float, array_like, 1e-3 The initial relative step size for calculating the derivative. abseps: float, array_like, None The initial absolute step size for calculating the derivative. This overrides `releps` if set. `releps` is set then that is used. mineps: float, 1e-9 The minimum relative step size at which to stop iterations if no convergence is achieved. epsscale: float, 0.5 The factor by which releps if scaled in each iteration. Returns ------- grads: array_like An array of gradients for each non-fixed value. """ grads = np.zeros(len(vals)) # maximum number of times the gradient can change sign flipflopmax = 10. # set steps if abseps is None: if isinstance(releps, float): eps = np.abs(vals) * releps eps[eps == 0.] = releps # if any values are zero set eps to releps teps = releps * np.ones(len(vals)) elif isinstance(releps, (list, np.ndarray)): if len(releps) != len(vals): raise ValueError("Problem with input relative step sizes") eps = np.multiply(np.abs(vals), releps) eps[eps == 0.] = np.array(releps)[eps == 0.] teps = releps else: raise RuntimeError("Relative step sizes are not a recognised type!") else: if isinstance(abseps, float): eps = abseps * np.ones(len(vals)) elif isinstance(abseps, (list, np.ndarray)): if len(abseps) != len(vals): raise ValueError("Problem with input absolute step sizes") eps = np.array(abseps) else: raise RuntimeError("Absolute step sizes are not a recognised type!") teps = eps # for each value in vals calculate the gradient count = 0 for i in range(len(vals)): # initial parameter diffs leps = eps[i] cureps = teps[i] flipflop = 0 # get central finite difference fvals = np.copy(vals) bvals = np.copy(vals) # central difference fvals[i] += 0.5 * leps # change forwards distance to half eps bvals[i] -= 0.5 * leps # change backwards distance to half eps cdiff = (func(fvals) - func(bvals)) / leps while 1: fvals[i] -= 0.5 * leps # remove old step bvals[i] += 0.5 * leps # change the difference by a factor of two cureps *= epsscale if cureps < mineps or flipflop > flipflopmax: # if no convergence set flat derivative (TODO: check if there is a better thing to do instead) logger.warn("Derivative calculation did not converge: setting flat derivative.") grads[count] = 0. break leps *= epsscale # central difference fvals[i] += 0.5 * leps # change forwards distance to half eps bvals[i] -= 0.5 * leps # change backwards distance to half eps cdiffnew = (func(fvals) - func(bvals)) / leps if cdiffnew == cdiff: grads[count] = cdiff break # check whether previous diff and current diff are the same within reltol rat = (cdiff / cdiffnew) if np.isfinite(rat) and rat > 0.: # gradient has not changed sign if np.abs(1. - rat) < reltol: grads[count] = cdiffnew break else: cdiff = cdiffnew continue else: cdiff = cdiffnew flipflop += 1 continue count += 1 return grads
35.456
110
0.569946
[ "MIT" ]
Loop3D/LoopStructural
LoopStructural/probability/_gradient_calculator.py
4,432
Python
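A small usage sketch for the gradients function above, assuming the module is importable under the path shown (the location may differ between LoopStructural versions); the test function and input values are made up.

import numpy as np

from LoopStructural.probability._gradient_calculator import gradients


def f(v):
    # f(x, y) = x**2 + 3*y, so the exact gradient is [2*x, 3]
    return v[0] ** 2 + 3 * v[1]


vals = np.array([2.0, -1.0])
grads = gradients(vals, f)
print(grads)  # expected to be close to [4.0, 3.0]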
''' @author: xiayuanhuang ''' import csv import matchECtoDemog import decisionTreeV7 import family_treeV4 import inference import combine_new_ped def combine(addressFile, nameFile, demoFile, accountFile, outputFile, patientFile, ecFile, familyTreeOutput): reader_add = csv.reader(open(addressFile, 'r'), delimiter = ',') h_add = next(reader_add) exp_header_add = ['study_id', 'street_1', 'street_2', 'city', 'state', 'zip', 'from_year', 'thru_year'] if not h_add == exp_header_add: raise Exception("Address file (%s) doesn't have the header expected: %s" % (addressFile, exp_header_add)) reader_name = csv.reader(open(nameFile, 'r'), delimiter = ',') h_name = next(reader_name) exp_header_name = ['study_id', 'last_name_id', 'first_name_id', 'middle_name_id', 'from_year', 'thru_year'] if not h_name == exp_header_name: raise Exception("Name file (%s) doesn't have the header expected: %s" % (nameFile, exp_header_name)) reader_demo = csv.reader(open(demoFile, 'r'), delimiter = ',') h_demo = next(reader_demo) exp_header_demo = ['study_id', 'GENDER_CODE', 'birth_year', 'deceased_year', 'PHONE_NUM_id', 'from_year', 'thru_year'] if not h_demo == exp_header_demo: raise Exception("Demographic data file (%s) doesn't have the header expected: %s" % (demoFile, exp_header_demo)) reader_acc = csv.reader(open(accountFile, 'r'), delimiter = ',') h_acc = next(reader_acc) exp_header_acc = ['study_id', 'ACCT_NUM_id', 'from_year', 'thru_year'] if not h_acc == exp_header_acc: raise Exception("Account file (%s) doesn't have the header expected: %s" % (accountFile, exp_header_acc)) reader_p = csv.reader(open(patientFile, 'r'), delimiter = ',') h_p = next(reader_p) exp_header_p = ['PatientID', 'FirstName', 'LastName', 'Sex', 'PhoneNumber', 'Zipcode', 'birth_year', 'deceased_year'] if not h_p == exp_header_p: raise Exception("Patient data file (%s) doesn't have the header expected: %s" % (patientFile, exp_header_p)) reader_ec = csv.reader(open(ecFile, 'r'), delimiter = ',') h_ec = next(reader_ec) exp_header_ec = ['PatientID', 'EC_FirstName', 'EC_LastName', 'EC_PhoneNumber', 'EC_Zipcode', 'EC_Relationship'] if not h_ec == exp_header_ec: raise Exception("Emergency contact data file (%s) doesn't have the header expected: %s" % (ecFile, exp_header_ec)) args = input("Enter one PED file if any:") if args != '': ped = args.strip().split(' ')[0] reader_ped = csv.reader(open(ped, 'r'), delimiter = ',') h_ped = next(reader_ped) exp_header_ped = ['familyID', 'family_member', 'study_ID', 'StudyID_MATERNAL', 'StudyID_PATERNAL', 'Sex'] if not h_ped == exp_header_ped: raise Exception("PED data file (%s) doesn't have the header expected: %s" % (ped, exp_header_ped)) ### run combined algorithm ### riftehr mt = matchECtoDemog.matches(patientFile, ecFile) mt.assignFamily('riftehr_pedigree.csv') ### fppa newDT = decisionTreeV7.DT(addressFile, nameFile, demoFile, accountFile) newDT.predict() newDT.writeToFile(outputFile) newFamilyTree = family_treeV4.familyTree(newDT) newFamilyTree.filter(outputFile) newFamilyTree.buildTree() #newFamilyTree.connected('fppa_pedigree.csv') new_infer = inference.matches(mt.qc_matches, mt.sex, newFamilyTree.edges, newFamilyTree.gender, familyTreeOutput) #new_infer.assignFamilies(familyTreeOutput) comb = combine_new_ped.matches(ped, new_infer.ec, new_infer.sex, new_infer.fppa_pair, new_infer.p_c_gender, new_infer.famOut) else: ### run combined algorithm ### riftehr mt = matchECtoDemog.matches(patientFile, ecFile) mt.assignFamily('riftehr_pedigree.csv') ### fppa newDT = decisionTreeV7.DT(addressFile, nameFile, 
demoFile, accountFile) newDT.predict() newDT.writeToFile(outputFile) newFamilyTree = family_treeV4.familyTree(newDT) newFamilyTree.filter(outputFile) newFamilyTree.buildTree() #newFamilyTree.connected('fppa_pedigree.csv') new_infer = inference.matches(mt.qc_matches, mt.sex, newFamilyTree.edges, newFamilyTree.gender, familyTreeOutput) new_infer.assignFamilies() ''' ### run combined algorithm ### riftehr mt = matchECtoDemog.matches(patientFile, ecFile, 'riftehr_pedigree.csv') ### fppa newDT = decisionTreeV7.DT(addressFile, nameFile, demoFile, accountFile) newDT.predict() newDT.writeToFile(outputFile) newFamilyTree = family_treeV4.familyTree(newDT) newFamilyTree.filter(outputFile) newFamilyTree.buildTree() #newFamilyTree.connected('fppa_pedigree.csv') new_infer = inference.matches(mt.qc_matches, mt.sex, newFamilyTree.edges, newFamilyTree.gender, familyTreeOutput) '''
38.488372
133
0.678751
[ "MIT" ]
xiayuan-huang/E-pedigrees
Source/combine.py
4,965
Python
#!flask/bin/python from migrate.versioning import api from config2 import SQLALCHEMY_DATABASE_URI from config2 import SQLALCHEMY_MIGRATE_REPO api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) print('Current database version: ' + str(v))
45.285714
68
0.85489
[ "BSD-3-Clause" ]
djgidwani/remotetest
db_upgrage2.py
317
Python
import acequia as aq
from acequia import KnmiStations

if 1: # test KnmiStations.prc_stns()

    knmi = KnmiStations()

    print("Retrieving list of all available precipitation stations.")
    #filepath = r'..\02_data\knmi_locations\stn-prc-available.csv'
    filepath = 'stn-prc-available.csv'
    dfprc = knmi.prc_stns(filepath)
    print(f'{len(dfprc)} precipitation stations available on KNMI website.')
    print()

if 1: # test KnmiStations.wtr_stns()

    knmi = KnmiStations()

    print("Retrieving list of all available weather stations.")
    #filepath = r'..\02_data\knmi_locations\stn-wht-available.csv'
    filepath = 'stn-wht-available.csv'
    dfwt = knmi.wtr_stns(filepath)
    print(f'{len(dfwt)} weather stations available on KNMI website.')
    print()
29.074074
76
0.704459
[ "MIT" ]
acequia-package/Acequia
old_tests/test_read_knmistations.py
785
Python
from kivy.core.window import Window from kivy.app import App from kivy.uix.button import Button from kivy.uix.textinput import TextInput from kivy.uix.label import Label from kivy.uix.boxlayout import BoxLayout from kivy.uix.screenmanager import ScreenManager, Screen from ins import * from ruffier import* from kivy.core.window import Window from seconds import Seconds age = None name = None p1,p2,p3 = None , None , None ruf_index = 0 window_color = (0, 0.20, 0.20, 1) Window.clearcolor = window_color btn_color = (0, 0.6, 0.9, 1) Window.size = (300, 600) def get_num(num): try: num = int(num) if num > 7 : return num else : return False except: return False class MainScr(Screen): def __init__(self,**kwargs): super().__init__(**kwargs) # имя экрана должно передаваться конструктору класса Screen txt = Label(text=txt_ins) self.user_name = TextInput(multiline=False) self.user_age = TextInput(multiline=False) txt_name = Label(text = "Name:") txt_age = Label(text = "Age") self.btn = Button(text = "Next") self.btn.background_color = btn_color self.btn.on_press = self.next self.on_enter = self.clean_text_input main_bl = BoxLayout(orientation="vertical") bl1 = BoxLayout(orientation="vertical" , size_hint=(1,.70)) bl2 = BoxLayout(size_hint=(1,.10)) bl3 = BoxLayout(size_hint=(1,.10)) bl4 = BoxLayout(size_hint=(1,.10)) bl1.add_widget(txt) bl2.add_widget(txt_name) bl2.add_widget(self.user_name) bl3.add_widget(txt_age) bl3.add_widget(self.user_age) bl4.add_widget(self.btn) main_bl.add_widget(bl1) main_bl.add_widget(bl2) main_bl.add_widget(bl3) main_bl.add_widget(bl4) self.add_widget(main_bl) def next(self): global name global age name = self.user_name.text age = get_num(self.user_age.text) if age == False : self.user_age.text = "" else : self.manager.transition.direction = 'left' self.manager.current = '2' def clean_text_input(self): self.user_name.text = "" self.user_age.text = "" Window.clearcolor = window_color class Scr2(Screen): def __init__(self,**kwargs): super().__init__(**kwargs) # имя экрана должно передаваться конструктору класса Screen txt = Label(text=ins_test1) self.pulse = TextInput(multiline=False) txt_pulse = Label(text = "Pulse") self.btn = Button(text = "Start") self.btn.on_press = self.next self.lbl_sec = Seconds(15) self.on_enter = self.clean_text_input main_bl = BoxLayout(orientation="vertical") bl1 = BoxLayout(orientation="vertical" , size_hint=(1,.56)) bl2 = BoxLayout(size_hint=(1,.08)) bl3 = BoxLayout(size_hint=(1,.08)) bl4 = BoxLayout(size_hint=(1,.08)) self.btn.background_color = btn_color bl1.add_widget(txt) bl2.add_widget(txt_pulse) bl2.add_widget(self.pulse) bl3.add_widget(self.btn) bl4.add_widget(self.lbl_sec) main_bl.add_widget(bl1) main_bl.add_widget(bl4) main_bl.add_widget(bl2) main_bl.add_widget(bl3) self.add_widget(main_bl) def next(self): global p1 if self.btn.text == "Start" : self.lbl_sec.start() self.btn.text = "Next" else : p1 = get_num(self.pulse.text) if p1 == False: self.pulse.text="" else: self.manager.transition.direction = 'left' self.manager.current = '3' def clean_text_input(self): self.pulse.text = "" self.btn.text = "Start" Window.clearcolor = window_color class Scr3(Screen): def __init__(self,**kwargs): super().__init__(**kwargs) # имя экрана должно передаваться конструктору класса Screen txt = Label(text=ins_test2) self.btn = Button(text = "Start") self.btn.on_press = self.next self.btn.background_color = btn_color self.lbl_sec = Seconds(15) self.on_enter = self.clean_text_input main_bl = BoxLayout(orientation="vertical") bl1 = 
BoxLayout(orientation="vertical" , size_hint=(1,.60)) bl2 = BoxLayout(size_hint=(1,.10)) bl4 = BoxLayout(size_hint=(1,.08)) bl1.add_widget(txt) bl2.add_widget(self.btn) bl4.add_widget(self.lbl_sec) main_bl.add_widget(bl1) main_bl.add_widget(bl4) main_bl.add_widget(bl2) self.add_widget(main_bl) def next(self): if self.btn.text == "Start" : self.lbl_sec.start() self.btn.text = "Next" else : self.manager.transition.direction = 'left' self.manager.current = '4' def clean_text_input(self): self.btn.text = "Start" Window.clearcolor = window_color class Scr4(Screen): def __init__(self,**kwargs): super().__init__(**kwargs) # имя экрана должно передаваться конструктору класса Screen txt = Label(text=txt_test3) self.pulse2 = TextInput(multiline=False) self.pulse3 = TextInput(multiline=False) txt_name = Label(text = "Pulse_1") txt_age = Label(text = "Pulse_2") self.btn = Button(text = "Next") self.btn.on_press = self.next self.btn.background_color = btn_color self.on_enter = self.clean_text_input main_bl = BoxLayout(orientation="vertical") bl1 = BoxLayout(orientation="vertical" , size_hint=(1,.70)) bl2 = BoxLayout(size_hint=(1,.10)) bl3 = BoxLayout(size_hint=(1,.10)) bl4 = BoxLayout(size_hint=(1,.10)) bl1.add_widget(txt) bl2.add_widget(txt_name) bl2.add_widget(self.pulse2) bl3.add_widget(txt_age) bl3.add_widget(self.pulse3) bl4.add_widget(self.btn) main_bl.add_widget(bl1) main_bl.add_widget(bl2) main_bl.add_widget(bl3) main_bl.add_widget(bl4) self.add_widget(main_bl) def next(self): global p2 global p3 if self.btn.text == "Start" : self.lbl_sec.start() self.btn.text = "Next" else : p2 = get_num(self.pulse2.text) p3 = get_num(self.pulse3.text) self.manager.transition.direction = 'left' self.manager.current = '5' def clean_text_input(self): self.pulse2.text = "" self.pulse3.text = "" Window.clearcolor = window_color class Scr5(Screen): def __init__(self,**kwargs): super().__init__(**kwargs) # имя экрана должно передаваться конструктору класса Screen self.txt = Label(text = "") self.btn = Button(text = "rebut") self.btn.on_press = self.back self.btn.background_color = btn_color main_bl = BoxLayout(orientation="vertical") bl1 = BoxLayout(orientation="vertical" , size_hint=(1,.90)) bl2 = BoxLayout(size_hint=(1,.10)) bl1.add_widget(self.txt) bl2.add_widget(self.btn) main_bl.add_widget(bl1) main_bl.add_widget(bl2) self.add_widget(main_bl) self.on_enter = self.index_ruffier def index_ruffier(self): ir = (4*(p1+p2+p3)-200)/10 self.txt.text = txt_ending +str(ir) def back(self): self.manager.transition.direction = 'right' self.manager.current = 'main' global p1 global p2 global p3 def clean_text_input(self): Window.clearcolor = window_color class MyApp(App): def build(self): sm = ScreenManager() sm.add_widget(MainScr(name='main')) sm.add_widget(Scr2(name='2')) sm.add_widget(Scr3(name="3")) sm.add_widget(Scr4(name='4')) sm.add_widget(Scr5(name='5')) return sm app = MyApp() app.run()
30.970909
95
0.573676
[ "CC0-1.0" ]
RetiredTea/kettle
main.py
8,742
Python
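A worked example of the Ruffier index formula that Scr5.index_ruffier computes above; the pulse values are invented 15-second counts, purely for illustration.

p1, p2, p3 = 20, 30, 25  # 15-second pulse counts entered on the three screens
ruffier_index = (4 * (p1 + p2 + p3) - 200) / 10
print(ruffier_index)  # -> 10.0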
from rest_framework import serializers


class HelloSerializer(serializers.Serializer):
    """Serializes a name field for testing our APIView."""

    name = serializers.CharField(max_length=10)
39
58
0.784615
[ "MIT" ]
roith44/JustDijangoApi
profile_api/serializers.py
195
Python
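A hypothetical view that would exercise HelloSerializer above; the view class, its placement next to serializers.py, and the response wording are assumptions, not part of the file shown.

from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView

from . import serializers


class HelloApiView(APIView):
    """Accepts a POST with a name and echoes a greeting."""

    serializer_class = serializers.HelloSerializer

    def post(self, request):
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            return Response({'message': f'Hello {name}'})
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)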
#!/usr/bin/env python3

import iterm2
# To install, update, or remove packages from PyPI, use Scripts > Manage > Manage Dependencies...
import subprocess


async def main(connection):
    component = iterm2.StatusBarComponent(
        short_description='k8s current context',
        detailed_description='Display k8s current context',
        knobs=[],
        exemplar='cluster-1',
        update_cadence=3,
        identifier='com.github.bassaer.iterm2-k8s-context'
    )

    @iterm2.StatusBarRPC
    async def coro(knobs):
        result = subprocess.run(
            ['kubectl', 'config', 'current-context'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding='utf-8')
        tmpl = '⎈ {}'
        if result.returncode != 0:
            return tmpl.format('Error')
        return tmpl.format(result.stdout.strip())

    await component.async_register(connection, coro)

# This instructs the script to run the "main" coroutine and to keep running even after it returns.
iterm2.run_forever(main)
33.242424
98
0.618049
[ "MIT" ]
bassaer/iterm2-k8s-context
k8s-context.py
1,099
Python
# celery settings broker_url = 'amqp://guest:guest@rabbitmq:5672/' result_backend = 'rpc://' accept_content = ['json'] task_serializer = 'json' task_soft_time_limit = 60 * 3 # 3 minute timeout result_serializer = 'json' timezone = 'UTC' enable_utc = True
21.5
49
0.724806
[ "Apache-2.0" ]
phacic/photos_album
config/celeryconfig.py
258
Python
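A minimal sketch of loading the settings module above into a Celery app; the app name and the example task are placeholders.

from celery import Celery

app = Celery('photos_album')
# The dotted path matches config/celeryconfig.py shown above.
app.config_from_object('config.celeryconfig')


@app.task
def ping():
    return 'pong'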
# Copyright (C) 2019-2021 HERE Europe B.V. # SPDX-License-Identifier: Apache-2.0 """This module defines all the configs which will be required as inputs to autosuggest API.""" from .base_config import Bunch class SearchCircle: """A class to define ``SearchCircle`` Results will be returned if they are located within the specified circular area defined by its center and radius(in meters). """ def __init__(self, lat: float, lng: float, radius: int): self.lat = lat self.lng = lng self.radius = radius class PoliticalView(Bunch): """A Class to define constant values for political view ``RUS``: expressing the Russian view on Crimea ``SRB``: expressing the Serbian view on Kosovo, Vukovar and Sarengrad Islands ``MAR``: expressing the Moroccan view on Western Sahara """ #: Use this config for political_view of Autosuggest API. #: Example: for ``RUS`` political_view use ``POLITICAL_VIEW.RUS``. POLITICAL_VIEW = PoliticalView( **{ "RUS": "RUS", "SRB": "SRB", "MAR": "MAR", } ) class Show(Bunch): """A Class to define constant values for showing additional fields to be rendered in the response. ``phonemes``: Renders phonemes for address and place names into the results. ``tz``: BETA: Renders result items with additional time zone information. Please note that this may impact latency significantly. """ #: Use this config for show of Autosuggest API. #: Example: for ``RUS`` show use ``SHOW.phonemes``. SHOW = Show(**{"phonemes": "phonemes", "tz": "tz"})
25.746032
94
0.663379
[ "Apache-2.0" ]
heremaps/here-location-services-python
here_location_services/config/autosuggest_config.py
1,622
Python
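A small usage sketch of the objects defined above, assuming the module is importable under the path shown; the coordinates are arbitrary.

from here_location_services.config.autosuggest_config import (
    POLITICAL_VIEW,
    SHOW,
    SearchCircle,
)

search_circle = SearchCircle(lat=52.53, lng=13.38, radius=1000)
print(search_circle.lat, search_circle.lng, search_circle.radius)

# Per the docstrings above, the constants are read as attributes.
print(POLITICAL_VIEW.RUS)  # -> 'RUS'
print(SHOW.phonemes)       # -> 'phonemes'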
import random import pdb from torchvision.transforms import functional as F from utils.box_list import BoxList def resize(img_list, box_list=None, min_size=None, max_size=None): assert type(min_size) in (int, tuple), f'The type of min_size_train shoule be int or tuple, got {type(min_size)}.' if isinstance(min_size, tuple): min_size = random.randint(min_size[0], min_size[1]) assert img_list.img.size == img_list.ori_size, 'img size error when resizing.' w, h = img_list.ori_size short_side, long_side = min(w, h), max(w, h) if min_size / short_side * long_side > max_size: scale = max_size / long_side else: scale = min_size / short_side new_h, new_w = int(scale * h), int(scale * w) assert (min(new_h, new_w)) <= min_size and (max(new_h, new_w) <= max_size), 'Scale error when resizing.' resized_img = F.resize(img_list.img, (new_h, new_w)) img_list.img = resized_img img_list.resized_size = (new_w, new_h) if box_list is None: return img_list else: box_list.resize(new_size=(new_w, new_h)) return img_list, box_list def random_flip(img_list, box_list, h_prob=0.5, v_prob=None): if h_prob and random.random() < h_prob: new_img = F.hflip(img_list.img) img_list.img = new_img assert img_list.resized_size == box_list.img_size, 'img size != box size when flipping.' box_list.box_flip(method='h_flip') if v_prob and random.random() < v_prob: raise NotImplementedError('Vertical flip has not been implemented.') return img_list, box_list def to_tensor(img_list): new_img = F.to_tensor(img_list.img) img_list.img = new_img return img_list def normalize(img_list, mean=(102.9801, 115.9465, 122.7717), std=(1., 1., 1.)): new_img = img_list.img[[2, 1, 0]] * 255 # to BGR, 255 new_img = F.normalize(new_img, mean=mean, std=std) img_list.img = new_img return img_list def train_aug(img_list, box_list, cfg): img_list, box_list = resize(img_list, box_list, min_size=cfg.min_size_train, max_size=cfg.max_size_train) img_list, box_list = random_flip(img_list, box_list, h_prob=0.5) img_list = to_tensor(img_list) img_list = normalize(img_list) return img_list, box_list def val_aug(img_list, box_list, cfg): img_list = resize(img_list, box_list=None, min_size=cfg.min_size_test, max_size=cfg.max_size_test) img_list = to_tensor(img_list) img_list = normalize(img_list) return img_list, None
34
118
0.694754
[ "MIT" ]
feiyuhuahuo/PAA_minimal
data/transforms.py
2,516
Python
LAB_SOURCE_FILE = "lab05.py" """ Lab 05: Trees and Proj2 Prep """ def couple(lst1, lst2): """Return a list that contains lists with i-th elements of two sequences coupled together. >>> lst1 = [1, 2, 3] >>> lst2 = [4, 5, 6] >>> couple(lst1, lst2) [[1, 4], [2, 5], [3, 6]] >>> lst3 = ['c', 6] >>> lst4 = ['s', '1'] >>> couple(lst3, lst4) [['c', 's'], [6, '1']] """ assert len(lst1) == len(lst2) a = [] for i in range(len(lst1)): a.append([lst1[i], lst2[i]]) return a from math import sqrt def distance(city1, city2): """ >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 """ x1, y1 = get_lat(city1), get_lon(city1) x2, y2 = get_lat(city2), get_lon(city2) return sqrt((x1 - x2)**2 + (y1 - y2)**2) def closer_city(lat, lon, city1, city2): """ Returns the name of either city1 or city2, whichever is closest to coordinate (lat, lon). >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' """ tmp = make_city('tmp', lat, lon) dis1 = distance(city1, tmp) dis2 = distance(city2, tmp) if dis1 > dis2: return get_name(city2) else: return get_name(city1) def check_abstraction(): """ There's nothing for you to do for this function, it's just here for the extra doctest >>> change_abstraction(True) >>> city1 = make_city('city1', 0, 1) >>> city2 = make_city('city2', 0, 2) >>> distance(city1, city2) 1.0 >>> city3 = make_city('city3', 6.5, 12) >>> city4 = make_city('city4', 2.5, 15) >>> distance(city3, city4) 5.0 >>> berkeley = make_city('Berkeley', 37.87, 112.26) >>> stanford = make_city('Stanford', 34.05, 118.25) >>> closer_city(38.33, 121.44, berkeley, stanford) 'Stanford' >>> bucharest = make_city('Bucharest', 44.43, 26.10) >>> vienna = make_city('Vienna', 48.20, 16.37) >>> closer_city(41.29, 174.78, bucharest, vienna) 'Bucharest' >>> change_abstraction(False) """ # Treat all the following code as being behind an abstraction layer, you shouldn't need to look at it! def make_city(name, lat, lon): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' >>> get_lat(city) 0 >>> get_lon(city) 1 """ if change_abstraction.changed: return {"name" : name, "lat" : lat, "lon" : lon} else: return [name, lat, lon] def get_name(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_name(city) 'Berkeley' """ if change_abstraction.changed: return city["name"] else: return city[0] def get_lat(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lat(city) 0 """ if change_abstraction.changed: return city["lat"] else: return city[1] def get_lon(city): """ >>> city = make_city('Berkeley', 0, 1) >>> get_lon(city) 1 """ if change_abstraction.changed: return city["lon"] else: return city[2] def change_abstraction(change): change_abstraction.changed = change change_abstraction.changed = False def nut_finder(t): """Returns True if t contains a node with the value 'nut' and False otherwise. 
>>> scrat = tree('nut') >>> nut_finder(scrat) True >>> sproul = tree('roots', [tree('branch1', [tree('leaf'), tree('nut')]), tree('branch2')]) >>> nut_finder(sproul) True >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> nut_finder(numbers) False >>> t = tree(1, [tree('nut',[tree('not nut')])]) >>> nut_finder(t) True """ if label(t) == 'nut': return True for node in branches(t): if nut_finder(node): return True return False def sprout_leaves(t, values): """Sprout new leaves containing the data in values at each leaf in the original tree t and return the resulting tree. >>> t1 = tree(1, [tree(2), tree(3)]) >>> print_tree(t1) 1 2 3 >>> new1 = sprout_leaves(t1, [4, 5]) >>> print_tree(new1) 1 2 4 5 3 4 5 >>> t2 = tree(1, [tree(2, [tree(3)])]) >>> print_tree(t2) 1 2 3 >>> new2 = sprout_leaves(t2, [6, 1, 2]) >>> print_tree(new2) 1 2 3 6 1 2 """ if is_leaf(t): return tree(label(t),[tree(v) for v in values]) return tree(label(t),[sprout_leaves(b, values) for b in branches(t)]) # Tree ADT def tree(label, branches=[]): """Construct a tree with the given label value and a list of branches.""" for branch in branches: assert is_tree(branch), 'branches must be trees' return [label] + list(branches) def label(tree): """Return the label value of a tree.""" return tree[0] def branches(tree): """Return the list of branches of the given tree.""" return tree[1:] def is_tree(tree): """Returns True if the given tree is a tree, and False otherwise.""" if type(tree) != list or len(tree) < 1: return False for branch in branches(tree): if not is_tree(branch): return False return True def is_leaf(tree): """Returns True if the given tree's list of branches is empty, and False otherwise. """ return not branches(tree) def print_tree(t, indent=0): """Print a representation of this tree in which each node is indented by two spaces times its depth from the root. >>> print_tree(tree(1)) 1 >>> print_tree(tree(1, [tree(2)])) 1 2 >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])]) >>> print_tree(numbers) 1 2 3 4 5 6 7 """ print(' ' * indent + str(label(t))) for b in branches(t): print_tree(b, indent + 1) def copy_tree(t): """Returns a copy of t. Only for testing purposes. >>> t = tree(5) >>> copy = copy_tree(t) >>> t = tree(6) >>> print_tree(copy) 5 """ return tree(label(t), [copy_tree(b) for b in branches(t)]) def add_chars(w1, w2): """ Return a string containing the characters you need to add to w1 to get w2. You may assume that w1 is a subsequence of w2. >>> add_chars("owl", "howl") 'h' >>> add_chars("want", "wanton") 'on' >>> add_chars("rat", "radiate") 'diae' >>> add_chars("a", "prepare") 'prepre' >>> add_chars("resin", "recursion") 'curo' >>> add_chars("fin", "effusion") 'efuso' >>> add_chars("coy", "cacophony") 'acphon' >>> from construct_check import check >>> # ban iteration and sets >>> check(LAB_SOURCE_FILE, 'add_chars', ... ['For', 'While', 'Set', 'SetComp']) # Must use recursion True """ "*** YOUR CODE HERE ***" def add_trees(t1, t2): """ >>> numbers = tree(1, ... [tree(2, ... [tree(3), ... tree(4)]), ... tree(5, ... [tree(6, ... [tree(7)]), ... 
tree(8)])]) >>> print_tree(add_trees(numbers, numbers)) 2 4 6 8 10 12 14 16 >>> print_tree(add_trees(tree(2), tree(3, [tree(4), tree(5)]))) 5 4 5 >>> print_tree(add_trees(tree(2, [tree(3)]), tree(2, [tree(3), tree(4)]))) 4 6 4 >>> print_tree(add_trees(tree(2, [tree(3, [tree(4), tree(5)])]), \ tree(2, [tree(3, [tree(4)]), tree(5)]))) 4 6 8 5 5 """ "*** YOUR CODE HERE ***" # Shakespeare and Dictionaries def build_successors_table(tokens): """Return a dictionary: keys are words; values are lists of successors. >>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.'] >>> table = build_successors_table(text) >>> sorted(table) [',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to'] >>> table['to'] ['investigate', 'eat'] >>> table['pie'] ['.'] >>> table['.'] ['We'] """ table = {} prev = '.' for word in tokens: if prev not in table: "*** YOUR CODE HERE ***" "*** YOUR CODE HERE ***" prev = word return table def construct_sent(word, table): """Prints a random sentence starting with word, sampling from table. >>> table = {'Wow': ['!'], 'Sentences': ['are'], 'are': ['cool'], 'cool': ['.']} >>> construct_sent('Wow', table) 'Wow!' >>> construct_sent('Sentences', table) 'Sentences are cool.' """ import random result = '' while word not in ['.', '!', '?']: "*** YOUR CODE HERE ***" return result.strip() + word def shakespeare_tokens(path='shakespeare.txt', url='http://composingprograms.com/shakespeare.txt'): """Return the words of Shakespeare's plays as a list.""" import os from urllib.request import urlopen if os.path.exists(path): return open('shakespeare.txt', encoding='ascii').read().split() else: shakespeare = urlopen(url) return shakespeare.read().decode(encoding='ascii').split() # Uncomment the following two lines # tokens = shakespeare_tokens() # table = build_successors_table(tokens) def random_sent(): import random return construct_sent(random.choice(table['.']), table)
25.746803
111
0.540081
[ "MIT" ]
weijiew/cs61a
lab/lab05/lab05.py
10,067
Python
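The lab file above leaves several functions as "*** YOUR CODE HERE ***" stubs; below is one possible set of implementations consistent with the doctests, assuming the tree ADT helpers (tree, label, branches, copy_tree) defined in the file are in scope. It is a sketch, not the official solution.

import random


def add_chars(w1, w2):
    # Recursively collect the characters of w2 that w1 does not account for.
    if not w1:
        return w2
    if w1[0] == w2[0]:
        return add_chars(w1[1:], w2[1:])
    return w2[0] + add_chars(w1, w2[1:])


def add_trees(t1, t2):
    # Add labels position-wise; branches without a counterpart are copied over.
    b1, b2 = branches(t1), branches(t2)
    new_branches = []
    for i in range(max(len(b1), len(b2))):
        if i < len(b1) and i < len(b2):
            new_branches.append(add_trees(b1[i], b2[i]))
        elif i < len(b1):
            new_branches.append(copy_tree(b1[i]))
        else:
            new_branches.append(copy_tree(b2[i]))
    return tree(label(t1) + label(t2), new_branches)


def build_successors_table(tokens):
    # Map each word to the list of words that follow it.
    table = {}
    prev = '.'
    for word in tokens:
        if prev not in table:
            table[prev] = []
        table[prev].append(word)
        prev = word
    return table


def construct_sent(word, table):
    # Walk the table from word until sentence-ending punctuation is reached.
    result = ''
    while word not in ['.', '!', '?']:
        result += word + ' '
        word = random.choice(table[word])
    return result.strip() + word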
import itertools import re import detectEnglish import freqAnalysis vigenereCipher = __import__('vigenereCipher') LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # SILENT_MODE = False # If set to True, program doesn't print anything. SILENT_MODE = True NUM_MOST_FREQ_LETTERS = 4 # Attempt this many letters per subkey. MAX_KEY_LENGTH = 16 # Will not attempt keys longer than this. NONLETTERS_PATTERN = re.compile('[^A-Z]') def find_repeat_sequences_spacings(message): # Goes through the message and finds any 3 to 5 letter sequences # that are repeated. Returns a dict with the keys of the sequence and # values of a list of spacings (num of letters between the repeats). # Use a regular expression to remove non-letters from the message: message = NONLETTERS_PATTERN.sub('', message.upper()) # Compile a list of seqLen-letter sequences found in the message: seq_spacings = {} # Keys are sequences, values are lists of int spacings. for seqLen in range(3, 6): for seqStart in range(len(message) - seqLen): # Determine what the sequence is, and store it in seq: seq = message[seqStart:seqStart + seqLen] # Look for this sequence in the rest of the message: for i in range(seqStart + seqLen, len(message) - seqLen): if message[i:i + seqLen] == seq: # Found a repeated sequence. if seq not in seq_spacings: seq_spacings[seq] = [] # Initialize a blank list. # Append the spacing distance between the repeated # sequence and the original sequence: seq_spacings[seq].append(i - seqStart) return seq_spacings def get_useful_factors(num): # Returns a list of useful factors of num. By "useful" we mean factors # less than MAX_KEY_LENGTH + 1 and not 1. For example, # getUsefulFactors(144) returns [2, 3, 4, 6, 8, 9, 12, 16] if num < 2: return [] # Numbers less than 2 have no useful factors. factors = [] # The list of factors found. # When finding factors, you only need to check the integers up to # MAX_KEY_LENGTH. for i in range(2, MAX_KEY_LENGTH + 1): # Don't test 1: it's not useful. if num % i == 0: factors.append(i) other_factor = int(num / i) if other_factor < MAX_KEY_LENGTH + 1 and other_factor != 1: factors.append(other_factor) return list(set(factors)) # Remove duplicate factors. def get_item_at_index_one(items): return items[1] def get_most_common_factors(seq_factors): # First, get a count of how many times a factor occurs in seqFactors: factor_counts = {} # Key is a factor, value is how often it occurs. # seqFactors keys are sequences, values are lists of factors of the # spacings. seqFactors has a value like: {'GFD': [2, 3, 4, 6, 9, 12, # 18, 23, 36, 46, 69, 92, 138, 207], 'ALW': [2, 3, 4, 6, ...], ...} for seq in seq_factors: factor_list = seq_factors[seq] for factor in factor_list: if factor not in factor_counts: factor_counts[factor] = 0 factor_counts[factor] += 1 # Second, put the factor and its count into a tuple, and make a list # of these tuples so we can sort them: factors_by_count = [] for factor in factor_counts: # Exclude factors larger than MAX_KEY_LENGTH: if factor <= MAX_KEY_LENGTH: # factors_by_count is a list of tuples: (factor, factorCount) # factors_by_count has a value like: [(3, 497), (2, 487), ...] factors_by_count.append((factor, factor_counts[factor])) # Sort the list by the factor count: factors_by_count.sort(key=get_item_at_index_one, reverse=True) return factors_by_count def kasiski_examination(ciphertext): # Find out the sequences of 3 to 5 letters that occur multiple times # in the ciphertext. repeated_seq_spacings has a value like: # {'EXG': [192], 'NAF': [339, 972, 633], ... 
} repeated_seq_spacings = find_repeat_sequences_spacings(ciphertext) # (See getMostCommonFactors() for a description of seq_factors.) seq_factors = {} for seq in repeated_seq_spacings: seq_factors[seq] = [] for spacing in repeated_seq_spacings[seq]: seq_factors[seq].extend(get_useful_factors(spacing)) # (See getMostCommonFactors() for a description of factors_by_count.) factors_by_count = get_most_common_factors(seq_factors) # Now we extract the factor counts from factors_by_count and # put them in all_likely_key_lengths so that they are easier to # use later: all_likely_key_lengths = [] for twoIntTuple in factors_by_count: all_likely_key_lengths.append(twoIntTuple[0]) return all_likely_key_lengths def get_nth_subkeys_letters(nth, key_length, message): # Returns every nth letter for each keyLength set of letters in text. # E.g. getNthSubkeysLetters(1, 3, 'ABCABCABC') returns 'AAA' # getNthSubkeysLetters(2, 3, 'ABCABCABC') returns 'BBB' # getNthSubkeysLetters(3, 3, 'ABCABCABC') returns 'CCC' # getNthSubkeysLetters(1, 5, 'ABCDEFGHI') returns 'AF' # Use a regular expression to remove non-letters from the message: message = NONLETTERS_PATTERN.sub('', message) i = nth - 1 letters = [] while i < len(message): letters.append(message[i]) i += key_length return ''.join(letters) def attempt_hack_with_key_length(ciphertext, most_likely_key_length): # Determine the most likely letters for each letter in the key: ciphertext_up = ciphertext.upper() # all_freq_scores is a list of mostLikelyKeyLength number of lists. # These inner lists are the freq_scores lists. all_freq_scores = [] for nth in range(1, most_likely_key_length + 1): nth_letters = get_nth_subkeys_letters(nth, most_likely_key_length, ciphertext_up) # freq_scores is a list of tuples like: # [(<letter>, <Eng. Freq. match score>), ... ] # List is sorted by match score. Higher score means better match. # See the englishFreqMatchScore() comments in freqAnalysis.py. freq_scores = [] for possible_key in LETTERS: crypt = vigenereCipher.Vigenere(possible_key) decrypted_text = crypt.decrypt(nth_letters) key_and_freq_match_tuple = (possible_key, freqAnalysis.englishFreqMatchScore(decrypted_text)) freq_scores.append(key_and_freq_match_tuple) # Sort by match score: freq_scores.sort(key=get_item_at_index_one, reverse=True) all_freq_scores.append(freq_scores[:NUM_MOST_FREQ_LETTERS]) if not SILENT_MODE: for i in range(len(all_freq_scores)): # Use i + 1 so the first letter is not called the "0th" letter: print('Possible letters for letter %s of the key: ' % (i + 1) + ' ') for freqScore in all_freq_scores[i]: print('%s ' % freqScore[0] + ' ') print('') # Try every combination of the most likely letters for each position # in the key: for indexes in itertools.product(range(NUM_MOST_FREQ_LETTERS), repeat=most_likely_key_length): # Create a possible key from the letters in all_freq_scores: possible_key = '' for i in range(most_likely_key_length): possible_key += all_freq_scores[i][indexes[i]][0] if not SILENT_MODE: print(f'Attempting with key: {possible_key}') crypt = vigenereCipher.Vigenere(possible_key) decrypted_text = crypt.decrypt(ciphertext_up) if detectEnglish.isEnglish(decrypted_text): # Set the hacked ciphertext to the original casing: orig_case = [] for i in range(len(ciphertext)): if ciphertext[i].isupper(): orig_case.append(decrypted_text[i].upper()) else: orig_case.append(decrypted_text[i].lower()) decrypted_text = ''.join(orig_case) print("\nPossible decrypted message:") print(f" Key {possible_key}({len(possible_key)}). 
part of decrypted message: {decrypted_text[:200]}") response = input("\nEnter D if done, or any other key to continue the attack: ") if response.strip().upper().startswith('D'): return decrypted_text # No English-looking decryption found, so return None: return None def hack_vigenere(ciphertext): # First, we need to do Kasiski Examination to figure out what the # length of the ciphertext's encryption key is: all_likely_key_lengths = kasiski_examination(ciphertext) if not SILENT_MODE: key_length_str = '' for keyLength in all_likely_key_lengths: key_length_str += f'{keyLength} ' print('\nKasiski Examination results say the most likely key lengths are: ' + key_length_str + '\n') hacked_message = None for keyLength in all_likely_key_lengths: if not SILENT_MODE: print('Attempting decrypt with key length %s (%s possible keys)...' % ( keyLength, NUM_MOST_FREQ_LETTERS ** keyLength)) hacked_message = attempt_hack_with_key_length(ciphertext, keyLength) if hacked_message is not None: break # If none of the key lengths we found using Kasiski Examination # worked, start brute-forcing through key lengths: if hacked_message is None: if not SILENT_MODE: print('Unable to hack message with likely key length(s). Brute forcing key length...') for keyLength in range(1, MAX_KEY_LENGTH + 1): # Don't re-check key lengths already tried from Kasiski: if keyLength not in all_likely_key_lengths: if not SILENT_MODE: print( f'Attempting decrypt with key length {keyLength} ({NUM_MOST_FREQ_LETTERS ** keyLength} possible keys)...') hacked_message = attempt_hack_with_key_length(ciphertext, keyLength) if hacked_message is not None: break return hacked_message def main(): # Instead of typing this ciphertext out, you can copy & paste it # from https://www.nostarch.com/crackingcodes/. ciphertext = """Adiz Avtzqeci Tmzubb wsa m Pmilqev halpqavtakuoi, lgouqdaf, kdmktsvmztsl, izr xoexghzr kkusitaaf. Vz wsa twbhdg ubalmmzhdad qz hce vmhsgohuqbo ox kaakulmd gxiwvos, krgdurdny i rcmmstugvtawz ca tzm ocicwxfg jf "stscmilpy" oid "uwydptsbuci" wabt hce Lcdwig eiovdnw. Bgfdny qe kddwtk qjnkqpsmev ba pz tzm roohwz at xoexghzr kkusicw izr vrlqrwxist uboedtuuznum. Pimifo Icmlv Emf DI, Lcdwig owdyzd xwd hce Ywhsmnemzh Xovm mby Cqxtsm Supacg (GUKE) oo Bdmfqclwg Bomk, Tzuhvif'a ocyetzqofifo ositjm. Rcm a lqys ce oie vzav wr Vpt 8, lpq gzclqab mekxabnittq tjr Ymdavn fihog cjgbhvnstkgds. Zm psqikmp o iuejqf jf lmoviiicqg aoj jdsvkavs Uzreiz qdpzmdg, dnutgrdny bts helpar jf lpq pjmtm, mb zlwkffjmwktoiiuix avczqzs ohsb ocplv nuby swbfwigk naf ohw Mzwbms umqcifm. Mtoej bts raj pq kjrcmp oo tzm Zooigvmz Khqauqvl Dincmalwdm, rhwzq vz cjmmhzd gvq ca tzm rwmsl lqgdgfa rcm a kbafzd-hzaumae kaakulmd, hce SKQ. Wi 1948 Tmzubb jgqzsy Msf Zsrmsv'e Qjmhcfwig Dincmalwdm vt Eizqcekbqf Pnadqfnilg, ivzrw pq onsaafsy if bts yenmxckmwvf ca tzm Yoiczmehzr uwydptwze oid tmoohe avfsmekbqr dn eifvzmsbuqvl tqazjgq. Pq kmolm m dvpwz ab ohw ktshiuix pvsaa at hojxtcbefmewn, afl bfzdakfsy okkuzgalqzu xhwuuqvl jmmqoigve gpcz ie hce Tmxcpsgd-Lvvbgbubnkq zqoxtawz, kciup isme xqdgo otaqfqev qz hce 1960k. Bgfdny'a tchokmjivlabk fzsmtfsy if i ofdmavmz krgaqqptawz wi 1952, wzmz vjmgaqlpad iohn wwzq goidt uzgeyix wi tzm Gbdtwl Wwigvwy. Vz aukqdoev bdsvtemzh rilp rshadm tcmmgvqg (xhwuuqvl uiehmalqab) vs sv mzoejvmhdvw ba dmikwz. Hpravs rdev qz 1954, xpsl whsm tow iszkk jqtjrw pug 42id tqdhcdsg, rfjm ugmbddw xawnofqzu. Vn avcizsl lqhzreqzsy tzif vds vmmhc wsa eidcalq; vds ewfvzr svp gjmw wfvzrk jqzdenmp vds vmmhc wsa mqxivmzhvl. 
Gv 10 Esktwunsm 2009, fgtxcrifo mb Dnlmdbzt uiydviyv, Nfdtaat Dmiem Ywiikbqf Bojlab Wrgez avdw iz cafakuog pmjxwx ahwxcby gv nscadn at ohw Jdwoikp scqejvysit xwd "hce sxboglavs kvy zm ion tjmmhzd." Sa at Haq 2012 i bfdvsbq azmtmd'g widt ion bwnafz tzm Tcpsw wr Zjrva ivdcz eaigd yzmbo Tmzubb a kbmhptgzk dvrvwz wa efiohzd.""" # ciphertext = input('\nEnter ciphertext: ') decoded_message = hack_vigenere(ciphertext) if decoded_message is None: print("\nFailed to break encryption.\n") else: print("Decoded Message:") print("--------------------") print(decoded_message) if __name__ == '__main__': main()
48.218868
2,051
0.679136
[ "MIT" ]
asiman161/university
crypto/vigenere/vigenere_breaker.py
12,778
Python
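A short illustration of the Kasiski step implemented above: the spacings between repeated sequences are factored, and the factors shared by most spacings suggest likely key lengths. The spacing values here are invented, and the snippet assumes get_useful_factors from the module above is in scope.

from collections import Counter

spacings = [16, 24, 32]  # made-up distances between repeated trigrams
factor_counts = Counter()
for spacing in spacings:
    factor_counts.update(get_useful_factors(spacing))
print(factor_counts.most_common(3))  # 2, 4 and 8 divide every spacing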
# Pyrogram - Telegram MTProto API Client Library for Python # Copyright (C) 2017-2020 Dan <https://github.com/delivrance> # # This file is part of Pyrogram. # # Pyrogram is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pyrogram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Pyrogram. If not, see <http://www.gnu.org/licenses/>. from io import BytesIO from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector from pyrogram.raw.core import TLObject from pyrogram import raw from typing import List, Union, Any # # # # # # # # # # # # # # # # # # # # # # # # # !!! WARNING !!! # # This is a generated file! # # All changes made in this file will be lost! # # # # # # # # # # # # # # # # # # # # # # # # # class GetDialogUnreadMarks(TLObject): # type: ignore """Telegram API method. Details: - Layer: ``117`` - ID: ``0x22e24e22`` **No parameters required.** Returns: List of :obj:`DialogPeer <pyrogram.raw.base.DialogPeer>` """ __slots__: List[str] = [] ID = 0x22e24e22 QUALNAME = "functions.messages.GetDialogUnreadMarks" def __init__(self) -> None: pass @staticmethod def read(data: BytesIO, *args: Any) -> "GetDialogUnreadMarks": # No flags return GetDialogUnreadMarks() def write(self) -> bytes: data = BytesIO() data.write(Int(self.ID, False)) # No flags return data.getvalue()
30.149254
103
0.631188
[ "Apache-2.0" ]
Georgiy123456/heroku-userbot
venv/Lib/site-packages/pyrogram/raw/functions/messages/get_dialog_unread_marks.py
2,020
Python
# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for concrete model entities.""" from typing import Dict, Optional, List from model.entity_field import EntityField from model.entity_type import EntityType class Entity(object): """Class to represent entities within the concrete model. Attributes: name: Human readable name of an entity. guid: UUID4 value for an entity. cloud_device_id: IoT application device id. type_name: Instance of EntityType class. fields: Mapping of standard field names to EntityField instances. is_reporting: if an entity maps 1:1 to a reporting device, it is a reporting entity. metadata: Contextual metadata coming from a physical device. i.e. { location: '/Sif-Solo/Site 1 - Sif/Charleston Road North/B13 - 1875 Charleston/Roof', control_programs: ['Trane AC-1', '1875 Charleston'], device_id: 'DEV:2809009' } """ def __init__(self, name: str, cloud_device_id: Optional[str], type_name: EntityType, fields: List[EntityField], is_reporting: bool, guid: Optional[str] = None, metadata: Optional[Dict[str, str]] = None): """Init. Args: name: the entity's name. cloud_device_id: Device id iot core or any iot application. type_name: DBO entity type stored in EntityType instance. fields: List of standard field names. is_reporting: if an entity maps 1:1 to a reporting device, it is a reporting entity. guid: [Optional] Universally Unique identification code for an entity. metadata: Contextual metadata about an entity. """ self.name = name self._guid = guid self.cloud_device_id = cloud_device_id self.type_name = type_name self._fields = fields self.is_reporting = is_reporting self.metadata = metadata @classmethod def FromDict(cls, entity_dict: Dict[str, object]): """class method to create an instance of Entity from mapping of entity attributes to values. Args: entity_dict: dictionary mapping field attributes to values from a loadsheet or building configuration. Returns: An instance of Entity class. """ @property def fields(self) -> Dict[str, EntityField]: """Returns a mapping of standard field names to EntityField instances associated with self.""" return self._fields @fields.setter def fields(self, new_fields: Dict[str, EntityField]) -> None: """Validates that each value of new_fields is an instance of EntityField class and sets. Arguments: new_fields: A mapping of standard field names to EntityField instances. """ @property def guid(self) -> str: """Returns the GUID associated with self.""" return self._guid @guid.setter def guid(self, guid: Optional[str] = None) -> None: """If guid argument is none, generate a new guid for set or just set if none. Args: guid: [Optional] A UUID string. """
33.157407
98
0.685283
[ "Apache-2.0" ]
ghairapetian/digitalbuildings
tools/concrete_model/model/entity.py
3,581
Python
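The fields and guid setters above are docstring-only stubs; this is one possible way to complete them, written as standalone sketches rather than the project's actual implementation (FromDict is left out because the expected dictionary layout is not shown).

import uuid

from model.entity_field import EntityField


def guid_setter_sketch(entity, guid=None):
    # Mirrors the guid.setter contract: generate a UUID4 string when none is given.
    entity._guid = guid if guid is not None else str(uuid.uuid4())


def fields_setter_sketch(entity, new_fields):
    # Mirrors the fields.setter contract: validate before assigning.
    for standard_field_name, field in new_fields.items():
        if not isinstance(field, EntityField):
            raise TypeError(
                f'{standard_field_name} must map to an EntityField instance')
    entity._fields = new_fields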
#!/usr/bin/env python #encoding: utf8 import unittest, rostest import rosnode, rospy import time from pimouse_ros.msg import MotorFreqs from geometry_msgs.msg import Twist from std_srvs.srv import Trigger, TriggerResponse from pimouse_ros.srv import TimedMotion class MotorTest(unittest.TestCase): def setUp(self): rospy.wait_for_service('/motor_on') rospy.wait_for_service('/motor_off') rospy.wait_for_service('/timed_motion') on = rospy.ServiceProxy('/motor_on', Trigger) ret = on() def file_check(self,dev,value,message): with open("/dev/" + dev,"r") as f: self.assertEqual(f.readline(),str(value)+"\n",message) def test_node_exist(self): nodes = rosnode.get_node_names() self.assertIn('/motors', nodes, "node does not exist") def test_put_freq(self): pub = rospy.Publisher('/motor_raw', MotorFreqs) m = MotorFreqs() m.left_hz = 123 m.right_hz = 456 for i in range(10): pub.publish(m) time.sleep(0.1) self.file_check("rtmotor_raw_l0",m.left_hz,"wrong left value from motor_raw") self.file_check("rtmotor_raw_r0",m.right_hz,"wrong right value from motor_raw") def test_put_cmd_vel(self): pub = rospy.Publisher('/cmd_vel', Twist) m = Twist() m.linear.x = 0.1414 m.angular.z = 1.57 for i in range(10): pub.publish(m) time.sleep(0.1) self.file_check("rtmotor_raw_l0",200,"wrong left value from cmd_vel") self.file_check("rtmotor_raw_r0",600,"wrong right value from cmd_vel") time.sleep(1.1) self.file_check("rtmotor_raw_r0",0,"don't stop after 1[s]") self.file_check("rtmotor_raw_l0",0,"don't stop after 1[s]") def test_on_off(self): off = rospy.ServiceProxy('/motor_off', Trigger) ret = off() self.assertEqual(ret.success, True, "motor off does not succeeded") self.assertEqual(ret.message, "OFF", "motor off wrong message") with open("/dev/rtmotoren0","r") as f: data = f.readline() self.assertEqual(data,"0\n","wrong value in rtmotor0 at motor off") on = rospy.ServiceProxy('motor_on', Trigger) ret = on() self.assertEqual(ret.success, True, "motor on does not succeeded") self.assertEqual(ret.message, "ON", "motor on wrong message") with open("/dev/rtmotoren0","r") as f: data = f.readline() self.assertEqual(data,"1\n","wrong value in rtmotor0 at motor on") def test_put_value_timed(self): tm = rospy.ServiceProxy('/timed_motion', TimedMotion) tm(-321, 654, 1500) with open("/dev/rtmotor0", "r") as f: data = f.readline() self.assertEqual(data, "-321 654 1500\n", "value does not written to rtmotor0") if __name__ == '__main__': rospy.init_node('travis_test_motors') rostest.rosrun('pimouse_ros','travis_test_motors', MotorTest)
36.853659
91
0.630046
[ "BSD-3-Clause" ]
ei0410/pimouse_ros
test/travis_test_motors.py
3,022
Python
# Python GLFW hello world example based on C++ guide at # http://www.glfw.org/docs/latest/quick.html import sys import glfw import numpy from OpenGL import GL from OpenGL.GL.shaders import compileShader, compileProgram from OpenGL.arrays import vbo from openvr.glframework.glmatrix import rotate_z, ortho, pack def main(): # Initialize GLFW OpenGL API glfw.set_error_callback(error_callback) if not glfw.init(): raise Exception("GLFW Initialization error") # Use modern OpenGL version 4.5 core glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4) glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 5) glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE) # Create OpenGL window and context window = glfw.create_window(640, 480, "Triangle Viewer", None, None) if not window: glfw.terminate() raise RuntimeError("Failed to create glfw window") glfw.make_context_current(window) glfw.swap_interval(1) # Create vertex array object, apparently required for modern OpenGL vao = GL.glGenVertexArrays(1) GL.glBindVertexArray(vao) # Create triangle geometry: corner 2D location and colors vertices = vbo.VBO(numpy.array([ [-0.6, -0.4, 1.0, 0.0, 0.0], # x, y, r, g, b [0.6, -0.4, 0.0, 1.0, 0.0], [0.0, 0.6, 0.0, 0.0, 1.0], ], dtype='float32')) vertices.bind() # hard-code shader parameter location indices mvp_location = 0 vpos_location = 0 vcol_location = 1 GL.glEnableVertexAttribArray(vpos_location) fsize = vertices.dtype.itemsize # 4 bytes per float32 GL.glVertexAttribPointer(vpos_location, 2, GL.GL_FLOAT, False, fsize * 5, vertices + fsize * 0) GL.glEnableVertexAttribArray(vcol_location) GL.glVertexAttribPointer(vcol_location, 3, GL.GL_FLOAT, False, fsize * 5, vertices + fsize * 2) # Create GLSL shader program vertex_shader = compileShader( """#version 450 core #line 55 layout(location = %d) uniform mat4 MVP = mat4(1); layout(location = %d) in vec2 vPos; layout(location = %d) in vec3 vCol; out vec3 color; void main() { gl_Position = MVP * vec4(vPos, 0.0, 1.0); color = vCol; } """ % (mvp_location, vpos_location, vcol_location), GL.GL_VERTEX_SHADER) fragment_shader = compileShader( """#version 450 core #line 73 in vec3 color; out vec4 fragColor; void main() { fragColor = vec4(color, 1); } """, GL.GL_FRAGMENT_SHADER) program = compileProgram(vertex_shader, fragment_shader) # Repeatedly draw until some event stops the program while not glfw.window_should_close(window): glfw.make_context_current(window) width, height = glfw.get_framebuffer_size(window) GL.glViewport(0, 0, width, height) GL.glClear(GL.GL_COLOR_BUFFER_BIT) m = rotate_z(glfw.get_time()) # modelview matrix, m ratio = width / float(height) # projection matrix, p p = ortho(-ratio, ratio, -1.0, 1.0, 1.0, -1.0) mvp = m * p GL.glBindVertexArray(vao) GL.glUseProgram(program) GL.glUniformMatrix4fv(mvp_location, 1, False, pack(mvp)) GL.glDrawArrays(GL.GL_TRIANGLES, 0, 3) glfw.swap_buffers(window) glfw.poll_events() # Clean up and exit glfw.make_context_current(window) glfw.destroy_window(window) glfw.terminate() sys.exit(0) def error_callback(description): raise RuntimeError(description) if __name__ == "__main__": main()
33.426087
73
0.616285
[ "MIT" ]
cmbruns/vr_samples
src/python/vrprim/mesh/glfw_triangle.py
3,844
Python
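The triangle example above leans on rotate_z, ortho and pack from openvr.glframework.glmatrix. Below is a rough numpy-only sketch of what such helpers usually compute; the transposed (row-vector) layout and sign conventions are assumptions, not the library's documented behaviour, and with plain numpy arrays the combined matrix would be formed with m @ p rather than the m * p overload used by the library's matrix class:

import numpy as np


def rotate_z(angle):
    # Rotation about Z in a row-vector convention (assumed), so that
    # flatten() + glUniformMatrix4fv(..., transpose=False) works as above.
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, s, 0, 0],
                     [-s, c, 0, 0],
                     [0, 0, 1, 0],
                     [0, 0, 0, 1]], dtype=np.float32)


def ortho(left, right, bottom, top, near, far):
    # Standard orthographic projection, stored transposed for the same
    # row-vector convention (assumed).
    return np.array([
        [2.0 / (right - left), 0, 0, 0],
        [0, 2.0 / (top - bottom), 0, 0],
        [0, 0, -2.0 / (far - near), 0],
        [-(right + left) / (right - left),
         -(top + bottom) / (top - bottom),
         -(far + near) / (far - near), 1],
    ], dtype=np.float32)


def pack(m):
    # Flatten to a contiguous float32 buffer for glUniformMatrix4fv
    return np.asarray(m, dtype=np.float32).flatten()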
"""general utility functions for HTML Map templates""" def safe_quotes(text, escape_single_quotes=False): """htmlify string""" if isinstance(text, str): safe_text = text.replace('"', "&quot;") if escape_single_quotes: safe_text = safe_text.replace("'", "&#92;'") return safe_text.replace('True', 'true') return text def quote_filter(value): return safe_quotes(value.unescape()) def iframe_size_filter(value): if isinstance(value, str): return value return '%spx;' % value def clear_none_filter(value): return dict(filter(lambda item: item[1] is not None, value.items()))
24.148148
72
0.653374
[ "BSD-3-Clause" ]
CartoDB/cartoframes
cartoframes/viz/html/utils.py
652
Python
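A short sketch of how filters like these are typically registered on a Jinja2 environment; the filter names and the template string below are illustrative only, not taken from the cartoframes templates:

from jinja2 import Environment
from markupsafe import Markup

env = Environment()
env.filters['quot'] = quote_filter            # hypothetical filter names
env.filters['iframe_size'] = iframe_size_filter
env.filters['clear_none'] = clear_none_filter

template = env.from_string('<iframe style="height: {{ height | iframe_size }}"></iframe>')
print(template.render(height=400))            # height: 400px;

print(safe_quotes('class="map"'))             # class=&quot;map&quot;
print(quote_filter(Markup('&quot;x&quot;')))  # &quot;x&quot;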
import unittest from quarkchain.cluster.tests.test_utils import ( create_transfer_transaction, ClusterContext, ) from quarkchain.core import ( Address, Branch, Identity, TokenBalanceMap, XshardTxCursorInfo, ) from quarkchain.evm import opcodes from quarkchain.utils import call_async, assert_true_with_timeout from quarkchain.cluster.p2p_commands import ( CommandOp, GetRootBlockHeaderListWithSkipRequest, GetMinorBlockHeaderListWithSkipRequest, Direction, ) def _tip_gen(shard_state): coinbase_amount = ( shard_state.env.quark_chain_config.shards[ shard_state.full_shard_id ].COINBASE_AMOUNT // 2 ) b = shard_state.get_tip().create_block_to_append() evm_state = shard_state.run_block(b) coinbase_amount_map = TokenBalanceMap(evm_state.block_fee_tokens) coinbase_amount_map.add( {shard_state.env.quark_chain_config.genesis_token: coinbase_amount} ) b.finalize(evm_state=evm_state, coinbase_amount_map=coinbase_amount_map) return b class TestCluster(unittest.TestCase): def test_single_cluster(self): id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(1, acc1) as clusters: self.assertEqual(len(clusters), 1) def test_three_clusters(self): with ClusterContext(3) as clusters: self.assertEqual(len(clusters), 3) def test_create_shard_at_different_height(self): acc1 = Address.create_random_account(0) id1 = 0 << 16 | 1 | 0 id2 = 1 << 16 | 1 | 0 genesis_root_heights = {id1: 1, id2: 2} with ClusterContext( 1, acc1, chain_size=2, shard_size=1, genesis_root_heights=genesis_root_heights, ) as clusters: master = clusters[0].master self.assertIsNone(clusters[0].get_shard(id1)) self.assertIsNone(clusters[0].get_shard(id2)) # Add root block with height 1, which will automatically create genesis block for shard 0 root0 = call_async(master.get_next_block_to_mine(acc1, branch_value=None)) self.assertEqual(root0.header.height, 1) self.assertEqual(len(root0.minor_block_header_list), 0) self.assertEqual( root0.header.coinbase_amount_map.balance_map[ master.env.quark_chain_config.genesis_token ], master.env.quark_chain_config.ROOT.COINBASE_AMOUNT, ) call_async(master.add_root_block(root0)) # shard 0 created at root height 1 self.assertIsNotNone(clusters[0].get_shard(id1)) self.assertIsNone(clusters[0].get_shard(id2)) # shard 0 block should have correct root block and cursor info shard_state = clusters[0].get_shard(id1).state self.assertEqual( shard_state.header_tip.hash_prev_root_block, root0.header.get_hash() ) self.assertEqual( shard_state.get_tip().meta.xshard_tx_cursor_info, XshardTxCursorInfo(1, 0, 0), ) self.assertEqual( shard_state.get_token_balance( acc1.recipient, shard_state.env.quark_chain_config.genesis_token ), 1000000, # from create_test_clusters in genesis alloc ) # Add root block with height 2, which will automatically create genesis block for shard 1 root1 = call_async(master.get_next_block_to_mine(acc1, branch_value=None)) self.assertEqual(len(root1.minor_block_header_list), 1) self.assertEqual( root1.header.coinbase_amount_map.balance_map[ master.env.quark_chain_config.genesis_token ], master.env.quark_chain_config.ROOT.COINBASE_AMOUNT + root1.minor_block_header_list[0].coinbase_amount_map.balance_map[ master.env.quark_chain_config.genesis_token ], ) self.assertEqual(root1.minor_block_header_list[0], shard_state.header_tip) call_async(master.add_root_block(root1)) self.assertIsNotNone(clusters[0].get_shard(id1)) # shard 1 created at root height 2 self.assertIsNotNone(clusters[0].get_shard(id2)) # X-shard from root should be deposited to the 
shard mblock = shard_state.create_block_to_mine() self.assertEqual( mblock.meta.xshard_tx_cursor_info, XshardTxCursorInfo(root1.header.height + 1, 0, 0), ) call_async(clusters[0].get_shard(id1).add_block(mblock)) self.assertEqual( shard_state.get_token_balance( acc1.recipient, shard_state.env.quark_chain_config.genesis_token ), root1.header.coinbase_amount_map.balance_map[ shard_state.env.quark_chain_config.genesis_token ] + root0.header.coinbase_amount_map.balance_map[ shard_state.env.quark_chain_config.genesis_token ] + 1000000, # from create_test_clusters in genesis alloc ) self.assertEqual( mblock.header.coinbase_amount_map.balance_map[ shard_state.env.quark_chain_config.genesis_token ], shard_state.shard_config.COINBASE_AMOUNT // 2, ) # Add root block with height 3, which will include # - the genesis block for shard 1; and # - the added block for shard 0. root2 = call_async(master.get_next_block_to_mine(acc1, branch_value=None)) self.assertEqual(len(root2.minor_block_header_list), 2) def test_get_primary_account_data(self): id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) acc2 = Address.create_random_account(full_shard_key=1) with ClusterContext(1, acc1) as clusters: master = clusters[0].master slaves = clusters[0].slave_list self.assertEqual( call_async(master.get_primary_account_data(acc1)).transaction_count, 0 ) tx = create_transfer_transaction( shard_state=clusters[0].get_shard_state(0b10), key=id1.get_key(), from_address=acc1, to_address=acc1, value=12345, ) self.assertTrue(slaves[0].add_tx(tx)) root = call_async( master.get_next_block_to_mine(address=acc1, branch_value=None) ) call_async(master.add_root_block(root)) block1 = call_async( master.get_next_block_to_mine(address=acc1, branch_value=0b10) ) self.assertTrue( call_async( master.add_raw_minor_block(block1.header.branch, block1.serialize()) ) ) self.assertEqual( call_async(master.get_primary_account_data(acc1)).transaction_count, 1 ) self.assertEqual( call_async(master.get_primary_account_data(acc2)).transaction_count, 0 ) def test_add_transaction(self): id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) acc2 = Address.create_from_identity(id1, full_shard_key=1) with ClusterContext(2, acc1) as clusters: master = clusters[0].master root = call_async(master.get_next_block_to_mine(acc1, branch_value=None)) call_async(master.add_root_block(root)) tx1 = create_transfer_transaction( shard_state=clusters[0].get_shard_state(0b10), key=id1.get_key(), from_address=acc1, to_address=acc1, value=12345, ) self.assertTrue(call_async(master.add_transaction(tx1))) self.assertEqual(len(clusters[0].get_shard_state(0b10).tx_queue), 1) tx2 = create_transfer_transaction( shard_state=clusters[0].get_shard_state(0b11), key=id1.get_key(), from_address=acc2, to_address=acc1, value=12345, gas=30000, ) self.assertTrue(call_async(master.add_transaction(tx2))) self.assertEqual(len(clusters[0].get_shard_state(0b11).tx_queue), 1) # check the tx is received by the other cluster tx_queue = clusters[1].get_shard_state(0b10).tx_queue assert_true_with_timeout(lambda: len(tx_queue) == 1) self.assertEqual(tx_queue.pop_transaction(), tx1.tx.to_evm_tx()) tx_queue = clusters[1].get_shard_state(0b11).tx_queue assert_true_with_timeout(lambda: len(tx_queue) == 1) self.assertEqual(tx_queue.pop_transaction(), tx2.tx.to_evm_tx()) def test_add_minor_block_request_list(self): id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) 
with ClusterContext(2, acc1) as clusters: shard_state = clusters[0].get_shard_state(0b10) b1 = _tip_gen(shard_state) add_result = call_async( clusters[0].master.add_raw_minor_block(b1.header.branch, b1.serialize()) ) self.assertTrue(add_result) # Make sure the xshard list is not broadcasted to the other shard self.assertFalse( clusters[0] .get_shard_state(0b11) .contain_remote_minor_block_hash(b1.header.get_hash()) ) self.assertTrue( clusters[0].master.root_state.db.contain_minor_block_by_hash( b1.header.get_hash() ) ) # Make sure another cluster received the new block assert_true_with_timeout( lambda: clusters[0] .get_shard_state(0b10) .contain_block_by_hash(b1.header.get_hash()) ) assert_true_with_timeout( lambda: clusters[1].master.root_state.db.contain_minor_block_by_hash( b1.header.get_hash() ) ) def test_add_root_block_request_list(self): id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1) as clusters: # shutdown cluster connection clusters[1].peer.close() # add blocks in cluster 0 block_header_list = [clusters[0].get_shard_state(2 | 0).header_tip] shard_state0 = clusters[0].get_shard_state(0b10) for i in range(7): b1 = _tip_gen(shard_state0) add_result = call_async( clusters[0].master.add_raw_minor_block( b1.header.branch, b1.serialize() ) ) self.assertTrue(add_result) block_header_list.append(b1.header) block_header_list.append(clusters[0].get_shard_state(2 | 1).header_tip) shard_state0 = clusters[0].get_shard_state(0b11) b2 = _tip_gen(shard_state0) add_result = call_async( clusters[0].master.add_raw_minor_block(b2.header.branch, b2.serialize()) ) self.assertTrue(add_result) block_header_list.append(b2.header) # add 1 block in cluster 1 shard_state1 = clusters[1].get_shard_state(0b11) b3 = _tip_gen(shard_state1) add_result = call_async( clusters[1].master.add_raw_minor_block(b3.header.branch, b3.serialize()) ) self.assertTrue(add_result) self.assertEqual(clusters[1].get_shard_state(0b11).header_tip, b3.header) # reestablish cluster connection call_async( clusters[1].network.connect( "127.0.0.1", clusters[0].master.env.cluster_config.SIMPLE_NETWORK.BOOTSTRAP_PORT, ) ) root_block1 = clusters[0].master.root_state.create_block_to_mine( block_header_list, acc1 ) call_async(clusters[0].master.add_root_block(root_block1)) # Make sure the root block tip of local cluster is changed self.assertEqual(clusters[0].master.root_state.tip, root_block1.header) # Make sure the root block tip of cluster 1 is changed assert_true_with_timeout( lambda: clusters[1].master.root_state.tip == root_block1.header, 2 ) # Minor block is downloaded self.assertEqual(b1.header.height, 7) assert_true_with_timeout( lambda: clusters[1].get_shard_state(0b10).header_tip == b1.header ) # The tip is overwritten due to root chain first consensus assert_true_with_timeout( lambda: clusters[1].get_shard_state(0b11).header_tip == b2.header ) def test_shard_synchronizer_with_fork(self): id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1) as clusters: # shutdown cluster connection clusters[1].peer.close() block_list = [] # cluster 0 has 13 blocks added shard_state0 = clusters[0].get_shard_state(0b10) for i in range(13): block = _tip_gen(shard_state0) add_result = call_async( clusters[0].master.add_raw_minor_block( block.header.branch, block.serialize() ) ) self.assertTrue(add_result) block_list.append(block) self.assertEqual(clusters[0].get_shard_state(0b10).header_tip.height, 
13) # cluster 1 has 12 blocks added shard_state0 = clusters[1].get_shard_state(0b10) for i in range(12): block = _tip_gen(shard_state0) add_result = call_async( clusters[1].master.add_raw_minor_block( block.header.branch, block.serialize() ) ) self.assertTrue(add_result) self.assertEqual(clusters[1].get_shard_state(0b10).header_tip.height, 12) # reestablish cluster connection call_async( clusters[1].network.connect( "127.0.0.1", clusters[0].master.env.cluster_config.SIMPLE_NETWORK.BOOTSTRAP_PORT, ) ) # a new block from cluster 0 will trigger sync in cluster 1 shard_state0 = clusters[0].get_shard_state(0b10) block = _tip_gen(shard_state0) add_result = call_async( clusters[0].master.add_raw_minor_block( block.header.branch, block.serialize() ) ) self.assertTrue(add_result) block_list.append(block) # expect cluster 1 has all the blocks from cluter 0 and # has the same tip as cluster 0 for block in block_list: assert_true_with_timeout( lambda: clusters[1] .slave_list[0] .shards[Branch(0b10)] .state.contain_block_by_hash(block.header.get_hash()) ) assert_true_with_timeout( lambda: clusters[ 1 ].master.root_state.db.contain_minor_block_by_hash( block.header.get_hash() ) ) self.assertEqual( clusters[1].get_shard_state(0b10).header_tip, clusters[0].get_shard_state(0b10).header_tip, ) def test_shard_genesis_fork_fork(self): """ Test shard forks at genesis blocks due to root chain fork at GENESIS.ROOT_HEIGHT""" acc1 = Address.create_random_account(0) acc2 = Address.create_random_account(1) genesis_root_heights = {2: 0, 3: 1} with ClusterContext( 2, acc1, chain_size=1, shard_size=2, genesis_root_heights=genesis_root_heights, ) as clusters: # shutdown cluster connection clusters[1].peer.close() master0 = clusters[0].master root0 = call_async(master0.get_next_block_to_mine(acc1, branch_value=None)) call_async(master0.add_root_block(root0)) genesis0 = ( clusters[0].get_shard_state(2 | 1).db.get_minor_block_by_height(0) ) self.assertEqual( genesis0.header.hash_prev_root_block, root0.header.get_hash() ) master1 = clusters[1].master root1 = call_async(master1.get_next_block_to_mine(acc2, branch_value=None)) self.assertNotEqual(root0.header.get_hash(), root1.header.get_hash()) call_async(master1.add_root_block(root1)) genesis1 = ( clusters[1].get_shard_state(2 | 1).db.get_minor_block_by_height(0) ) self.assertEqual( genesis1.header.hash_prev_root_block, root1.header.get_hash() ) self.assertNotEqual(genesis0.header.get_hash(), genesis1.header.get_hash()) # let's make cluster1's root chain longer than cluster0's root2 = call_async(master1.get_next_block_to_mine(acc2, branch_value=None)) call_async(master1.add_root_block(root2)) self.assertEqual(master1.root_state.tip.height, 2) # reestablish cluster connection call_async( clusters[1].network.connect( "127.0.0.1", clusters[0].master.env.cluster_config.SIMPLE_NETWORK.BOOTSTRAP_PORT, ) ) # Expect cluster0's genesis change to genesis1 assert_true_with_timeout( lambda: clusters[0] .get_shard_state(2 | 1) .db.get_minor_block_by_height(0) .header.get_hash() == genesis1.header.get_hash() ) self.assertTrue(clusters[0].get_shard_state(2 | 1).root_tip == root2.header) def test_broadcast_cross_shard_transactions(self): """ Test the cross shard transactions are broadcasted to the destination shards """ id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) acc3 = Address.create_random_account(full_shard_key=1) with ClusterContext(1, acc1) as clusters: master = clusters[0].master slaves = clusters[0].slave_list genesis_token = ( 
clusters[0].get_shard_state(2 | 0).env.quark_chain_config.genesis_token ) # Add a root block first so that later minor blocks referring to this root # can be broadcasted to other shards root_block = call_async( master.get_next_block_to_mine( Address.create_empty_account(), branch_value=None ) ) call_async(master.add_root_block(root_block)) tx1 = create_transfer_transaction( shard_state=clusters[0].get_shard_state(2 | 0), key=id1.get_key(), from_address=acc1, to_address=acc3, value=54321, gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST, ) self.assertTrue(slaves[0].add_tx(tx1)) b1 = clusters[0].get_shard_state(2 | 0).create_block_to_mine(address=acc1) b2 = clusters[0].get_shard_state(2 | 0).create_block_to_mine(address=acc1) b2.header.create_time += 1 self.assertNotEqual(b1.header.get_hash(), b2.header.get_hash()) call_async(clusters[0].get_shard(2 | 0).add_block(b1)) # expect shard 1 got the CrossShardTransactionList of b1 xshard_tx_list = ( clusters[0] .get_shard_state(2 | 1) .db.get_minor_block_xshard_tx_list(b1.header.get_hash()) ) self.assertEqual(len(xshard_tx_list.tx_list), 1) self.assertEqual(xshard_tx_list.tx_list[0].tx_hash, tx1.get_hash()) self.assertEqual(xshard_tx_list.tx_list[0].from_address, acc1) self.assertEqual(xshard_tx_list.tx_list[0].to_address, acc3) self.assertEqual(xshard_tx_list.tx_list[0].value, 54321) call_async(clusters[0].get_shard(2 | 0).add_block(b2)) # b2 doesn't update tip self.assertEqual(clusters[0].get_shard_state(2 | 0).header_tip, b1.header) # expect shard 1 got the CrossShardTransactionList of b2 xshard_tx_list = ( clusters[0] .get_shard_state(2 | 1) .db.get_minor_block_xshard_tx_list(b2.header.get_hash()) ) self.assertEqual(len(xshard_tx_list.tx_list), 1) self.assertEqual(xshard_tx_list.tx_list[0].tx_hash, tx1.get_hash()) self.assertEqual(xshard_tx_list.tx_list[0].from_address, acc1) self.assertEqual(xshard_tx_list.tx_list[0].to_address, acc3) self.assertEqual(xshard_tx_list.tx_list[0].value, 54321) b3 = ( clusters[0] .get_shard_state(2 | 1) .create_block_to_mine(address=acc1.address_in_shard(1)) ) call_async(master.add_raw_minor_block(b3.header.branch, b3.serialize())) root_block = call_async( master.get_next_block_to_mine(address=acc1, branch_value=None) ) call_async(master.add_root_block(root_block)) # b4 should include the withdraw of tx1 b4 = ( clusters[0] .get_shard_state(2 | 1) .create_block_to_mine(address=acc1.address_in_shard(1)) ) # adding b1, b2, b3 again shouldn't affect b4 to be added later self.assertTrue( call_async(master.add_raw_minor_block(b1.header.branch, b1.serialize())) ) self.assertTrue( call_async(master.add_raw_minor_block(b2.header.branch, b2.serialize())) ) self.assertTrue( call_async(master.add_raw_minor_block(b3.header.branch, b3.serialize())) ) self.assertTrue( call_async(master.add_raw_minor_block(b4.header.branch, b4.serialize())) ) self.assertEqual( call_async( master.get_primary_account_data(acc3) ).token_balances.balance_map, {genesis_token: 54321}, ) def test_broadcast_cross_shard_transactions_to_neighbor_only(self): """ Test the broadcast is only done to the neighbors """ id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) # create 64 shards so that the neighbor rule can kick in # explicitly set num_slaves to 4 so that it does not spin up 64 slaves with ClusterContext(1, acc1, shard_size=64, num_slaves=4) as clusters: master = clusters[0].master # Add a root block first so that later minor blocks referring to this root # can be broadcasted to other shards root_block = 
call_async( master.get_next_block_to_mine( Address.create_empty_account(), branch_value=None ) ) call_async(master.add_root_block(root_block)) b1 = clusters[0].get_shard_state(64).create_block_to_mine(address=acc1) self.assertTrue( call_async(master.add_raw_minor_block(b1.header.branch, b1.serialize())) ) neighbor_shards = [2 ** i for i in range(6)] for shard_id in range(64): xshard_tx_list = ( clusters[0] .get_shard_state(64 | shard_id) .db.get_minor_block_xshard_tx_list(b1.header.get_hash()) ) # Only neighbor should have it if shard_id in neighbor_shards: self.assertIsNotNone(xshard_tx_list) else: self.assertIsNone(xshard_tx_list) def test_get_work_from_slave(self): genesis = Address.create_empty_account(full_shard_key=0) with ClusterContext(1, genesis, remote_mining=True) as clusters: slaves = clusters[0].slave_list # no posw state = clusters[0].get_shard_state(2 | 0) branch = state.create_block_to_mine().header.branch work = call_async(slaves[0].get_work(branch)) self.assertEqual(work.difficulty, 10) # enable posw, with total stakes cover all the window state.shard_config.POSW_CONFIG.ENABLED = True state.shard_config.POSW_CONFIG.TOTAL_STAKE_PER_BLOCK = 500000 work = call_async(slaves[0].get_work(branch)) self.assertEqual(work.difficulty, 0) def test_handle_get_minor_block_list_request_with_total_diff(self): id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1) as clusters: cluster0_root_state = clusters[0].master.root_state cluster1_root_state = clusters[1].master.root_state coinbase = cluster1_root_state._calculate_root_block_coinbase([], 0) # Cluster 0 generates a root block of height 1 with 1e6 difficulty rb0 = cluster0_root_state.get_tip_block() rb1 = rb0.create_block_to_append(difficulty=int(1e6)).finalize(coinbase) # Establish cluster connection call_async( clusters[1].network.connect( "127.0.0.1", clusters[0].master.env.cluster_config.SIMPLE_NETWORK.BOOTSTRAP_PORT, ) ) # Cluster 0 broadcasts the root block to cluster 1 call_async(clusters[0].master.add_root_block(rb1)) self.assertEqual(cluster0_root_state.tip.get_hash(), rb1.header.get_hash()) # Make sure the root block tip of cluster 1 is changed assert_true_with_timeout(lambda: cluster1_root_state.tip == rb1.header, 2) # Cluster 1 generates a minor block and broadcasts to cluster 0 shard_state = clusters[1].get_shard_state(0b10) b1 = _tip_gen(shard_state) add_result = call_async( clusters[1].master.add_raw_minor_block(b1.header.branch, b1.serialize()) ) self.assertTrue(add_result) # Make sure another cluster received the new minor block assert_true_with_timeout( lambda: clusters[1] .get_shard_state(0b10) .contain_block_by_hash(b1.header.get_hash()) ) assert_true_with_timeout( lambda: clusters[0].master.root_state.db.contain_minor_block_by_hash( b1.header.get_hash() ) ) # Cluster 1 generates a new root block with higher total difficulty rb2 = rb0.create_block_to_append(difficulty=int(3e6)).finalize(coinbase) call_async(clusters[1].master.add_root_block(rb2)) self.assertEqual(cluster1_root_state.tip.get_hash(), rb2.header.get_hash()) # Generate a minor block b2 b2 = _tip_gen(shard_state) add_result = call_async( clusters[1].master.add_raw_minor_block(b2.header.branch, b2.serialize()) ) self.assertTrue(add_result) # Make sure another cluster received the new minor block assert_true_with_timeout( lambda: clusters[1] .get_shard_state(0b10) .contain_block_by_hash(b2.header.get_hash()) ) assert_true_with_timeout( lambda: 
clusters[0].master.root_state.db.contain_minor_block_by_hash( b2.header.get_hash() ) ) def test_new_block_header_pool(self): id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(1, acc1) as clusters: shard_state = clusters[0].get_shard_state(0b10) b1 = _tip_gen(shard_state) add_result = call_async( clusters[0].master.add_raw_minor_block(b1.header.branch, b1.serialize()) ) self.assertTrue(add_result) # Update config to force checking diff clusters[ 0 ].master.env.quark_chain_config.SKIP_MINOR_DIFFICULTY_CHECK = False b2 = b1.create_block_to_append(difficulty=12345) shard = clusters[0].slave_list[0].shards[b2.header.branch] with self.assertRaises(ValueError): call_async(shard.handle_new_block(b2)) # Also the block should not exist in new block pool self.assertTrue( b2.header.get_hash() not in shard.state.new_block_header_pool ) def test_get_root_block_headers_with_skip(self): """ Test the broadcast is only done to the neighbors """ id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1) as clusters: master = clusters[0].master # Add a root block first so that later minor blocks referring to this root # can be broadcasted to other shards root_block_header_list = [master.root_state.tip] for i in range(10): root_block = call_async( master.get_next_block_to_mine( Address.create_empty_account(), branch_value=None ) ) call_async(master.add_root_block(root_block)) root_block_header_list.append(root_block.header) self.assertEqual(root_block_header_list[-1].height, 10) assert_true_with_timeout( lambda: clusters[1].master.root_state.tip.height == 10 ) peer = clusters[1].peer # Test Case 1 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_height( height=1, skip=1, limit=3, direction=Direction.TIP ), ) ) self.assertEqual(len(resp.block_header_list), 3) self.assertEqual(resp.block_header_list[0], root_block_header_list[1]) self.assertEqual(resp.block_header_list[1], root_block_header_list[3]) self.assertEqual(resp.block_header_list[2], root_block_header_list[5]) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_hash( hash=root_block_header_list[1].get_hash(), skip=1, limit=3, direction=Direction.TIP, ), ) ) self.assertEqual(len(resp.block_header_list), 3) self.assertEqual(resp.block_header_list[0], root_block_header_list[1]) self.assertEqual(resp.block_header_list[1], root_block_header_list[3]) self.assertEqual(resp.block_header_list[2], root_block_header_list[5]) # Test Case 2 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_height( height=2, skip=2, limit=4, direction=Direction.TIP ), ) ) self.assertEqual(len(resp.block_header_list), 3) self.assertEqual(resp.block_header_list[0], root_block_header_list[2]) self.assertEqual(resp.block_header_list[1], root_block_header_list[5]) self.assertEqual(resp.block_header_list[2], root_block_header_list[8]) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_hash( 
hash=root_block_header_list[2].get_hash(), skip=2, limit=4, direction=Direction.TIP, ), ) ) self.assertEqual(len(resp.block_header_list), 3) self.assertEqual(resp.block_header_list[0], root_block_header_list[2]) self.assertEqual(resp.block_header_list[1], root_block_header_list[5]) self.assertEqual(resp.block_header_list[2], root_block_header_list[8]) # Test Case 3 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_height( height=6, skip=0, limit=100, direction=Direction.TIP ), ) ) self.assertEqual(len(resp.block_header_list), 5) self.assertEqual(resp.block_header_list[0], root_block_header_list[6]) self.assertEqual(resp.block_header_list[1], root_block_header_list[7]) self.assertEqual(resp.block_header_list[2], root_block_header_list[8]) self.assertEqual(resp.block_header_list[3], root_block_header_list[9]) self.assertEqual(resp.block_header_list[4], root_block_header_list[10]) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_hash( hash=root_block_header_list[6].get_hash(), skip=0, limit=100, direction=Direction.TIP, ), ) ) self.assertEqual(len(resp.block_header_list), 5) self.assertEqual(resp.block_header_list[0], root_block_header_list[6]) self.assertEqual(resp.block_header_list[1], root_block_header_list[7]) self.assertEqual(resp.block_header_list[2], root_block_header_list[8]) self.assertEqual(resp.block_header_list[3], root_block_header_list[9]) self.assertEqual(resp.block_header_list[4], root_block_header_list[10]) # Test Case 4 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_height( height=2, skip=2, limit=4, direction=Direction.GENESIS ), ) ) self.assertEqual(len(resp.block_header_list), 1) self.assertEqual(resp.block_header_list[0], root_block_header_list[2]) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_hash( hash=root_block_header_list[2].get_hash(), skip=2, limit=4, direction=Direction.GENESIS, ), ) ) self.assertEqual(len(resp.block_header_list), 1) self.assertEqual(resp.block_header_list[0], root_block_header_list[2]) # Test Case 5 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_height( height=11, skip=2, limit=4, direction=Direction.GENESIS ), ) ) self.assertEqual(len(resp.block_header_list), 0) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_hash( hash=bytes(32), skip=2, limit=4, direction=Direction.GENESIS ), ) ) self.assertEqual(len(resp.block_header_list), 0) # Test Case 6 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_height( height=8, skip=1, limit=5, direction=Direction.GENESIS ), ) ) self.assertEqual(len(resp.block_header_list), 5) 
self.assertEqual(resp.block_header_list[0], root_block_header_list[8]) self.assertEqual(resp.block_header_list[1], root_block_header_list[6]) self.assertEqual(resp.block_header_list[2], root_block_header_list[4]) self.assertEqual(resp.block_header_list[3], root_block_header_list[2]) self.assertEqual(resp.block_header_list[4], root_block_header_list[0]) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_ROOT_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetRootBlockHeaderListWithSkipRequest.create_for_hash( hash=root_block_header_list[8].get_hash(), skip=1, limit=5, direction=Direction.GENESIS, ), ) ) self.assertEqual(len(resp.block_header_list), 5) self.assertEqual(resp.block_header_list[0], root_block_header_list[8]) self.assertEqual(resp.block_header_list[1], root_block_header_list[6]) self.assertEqual(resp.block_header_list[2], root_block_header_list[4]) self.assertEqual(resp.block_header_list[3], root_block_header_list[2]) self.assertEqual(resp.block_header_list[4], root_block_header_list[0]) def test_get_root_block_header_sync_from_genesis(self): """ Test the broadcast is only done to the neighbors """ id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1, connect=False) as clusters: master = clusters[0].master root_block_header_list = [master.root_state.tip] for i in range(10): root_block = call_async( master.get_next_block_to_mine( Address.create_empty_account(), branch_value=None ) ) call_async(master.add_root_block(root_block)) root_block_header_list.append(root_block.header) # Connect and the synchronizer should automically download call_async(clusters[1].network.connect( "127.0.0.1", clusters[0].network.env.cluster_config.P2P_PORT) ) assert_true_with_timeout(lambda: clusters[1].master.root_state.tip == root_block_header_list[-1]) self.assertEqual(clusters[1].master.synchronizer.stats.blocks_downloaded, len(root_block_header_list) - 1) def test_get_root_block_header_sync_from_height_3(self): """ Test the broadcast is only done to the neighbors """ id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1, connect=False) as clusters: master0 = clusters[0].master root_block_list = [] for i in range(10): root_block = call_async( master0.get_next_block_to_mine( Address.create_empty_account(), branch_value=None ) ) call_async(master0.add_root_block(root_block)) root_block_list.append(root_block) # Add 3 blocks to another cluster master1 = clusters[1].master for i in range(3): call_async(master1.add_root_block(root_block_list[i])) assert_true_with_timeout(lambda: master1.root_state.tip == root_block_list[2].header) # Connect and the synchronizer should automically download call_async(clusters[1].network.connect( "127.0.0.1", clusters[0].network.env.cluster_config.P2P_PORT) ) assert_true_with_timeout(lambda: master1.root_state.tip == root_block_list[-1].header) self.assertEqual(master1.synchronizer.stats.blocks_downloaded, len(root_block_list) - 3) self.assertEqual(master1.synchronizer.stats.ancestor_lookup_requests, 1) def test_get_root_block_header_sync_with_fork(self): """ Test the broadcast is only done to the neighbors """ id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1, connect=False) as clusters: master0 = clusters[0].master root_block_list = [] for i in range(10): root_block = call_async( master0.get_next_block_to_mine( 
Address.create_empty_account(), branch_value=None ) ) call_async(master0.add_root_block(root_block)) root_block_list.append(root_block) # Add 2+3 blocks to another cluster: 2 are the same as cluster 0, and 3 are the fork master1 = clusters[1].master for i in range(2): call_async(master1.add_root_block(root_block_list[i])) for i in range(3): root_block = call_async( master1.get_next_block_to_mine( acc1, branch_value=None ) ) call_async(master1.add_root_block(root_block)) # Connect and the synchronizer should automically download call_async(clusters[1].network.connect( "127.0.0.1", clusters[0].network.env.cluster_config.P2P_PORT) ) assert_true_with_timeout(lambda: master1.root_state.tip == root_block_list[-1].header) self.assertEqual(master1.synchronizer.stats.blocks_downloaded, len(root_block_list) - 2) self.assertEqual(master1.synchronizer.stats.ancestor_lookup_requests, 1) def test_get_root_block_header_sync_with_staleness(self): """ Test the broadcast is only done to the neighbors """ id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1, connect=False) as clusters: master0 = clusters[0].master root_block_list = [] for i in range(10): root_block = call_async( master0.get_next_block_to_mine( Address.create_empty_account(), branch_value=None ) ) call_async(master0.add_root_block(root_block)) root_block_list.append(root_block) assert_true_with_timeout(lambda: master0.root_state.tip == root_block_list[-1].header) # Add 3 blocks to another cluster master1 = clusters[1].master for i in range(8): root_block = call_async( master1.get_next_block_to_mine( acc1, branch_value=None ) ) call_async(master1.add_root_block(root_block)) master1.env.quark_chain_config.ROOT.MAX_STALE_ROOT_BLOCK_HEIGHT_DIFF = 5 assert_true_with_timeout(lambda: master1.root_state.tip == root_block.header) # Connect and the synchronizer should automically download call_async(clusters[1].network.connect( "127.0.0.1", clusters[0].network.env.cluster_config.P2P_PORT) ) assert_true_with_timeout(lambda: master1.synchronizer.stats.ancestor_not_found_count == 1) self.assertEqual(master1.synchronizer.stats.blocks_downloaded, 0) self.assertEqual(master1.synchronizer.stats.ancestor_lookup_requests, 1) def test_get_root_block_header_sync_with_multiple_lookup(self): """ Test the broadcast is only done to the neighbors """ id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1, connect=False) as clusters: master0 = clusters[0].master root_block_list = [] for i in range(12): root_block = call_async( master0.get_next_block_to_mine( Address.create_empty_account(), branch_value=None ) ) call_async(master0.add_root_block(root_block)) root_block_list.append(root_block) assert_true_with_timeout(lambda: master0.root_state.tip == root_block_list[-1].header) # Add 4+4 blocks to another cluster master1 = clusters[1].master for i in range(4): call_async(master1.add_root_block(root_block_list[i])) for i in range(4): root_block = call_async( master1.get_next_block_to_mine( acc1, branch_value=None ) ) call_async(master1.add_root_block(root_block)) master1.synchronizer.root_block_header_list_limit = 4 # Connect and the synchronizer should automically download call_async(clusters[1].network.connect( "127.0.0.1", clusters[0].network.env.cluster_config.P2P_PORT) ) assert_true_with_timeout(lambda: master1.root_state.tip == root_block_list[-1].header) self.assertEqual(master1.synchronizer.stats.blocks_downloaded, 8) 
self.assertEqual(master1.synchronizer.stats.headers_downloaded, 5 + 8) self.assertEqual(master1.synchronizer.stats.ancestor_lookup_requests, 2) def test_get_root_block_header_sync_with_start_equal_end(self): id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1, connect=False) as clusters: master0 = clusters[0].master root_block_list = [] for i in range(5): root_block = call_async( master0.get_next_block_to_mine( Address.create_empty_account(), branch_value=None ) ) call_async(master0.add_root_block(root_block)) root_block_list.append(root_block) assert_true_with_timeout(lambda: master0.root_state.tip == root_block_list[-1].header) # Add 3+1 blocks to another cluster master1 = clusters[1].master for i in range(3): call_async(master1.add_root_block(root_block_list[i])) for i in range(1): root_block = call_async( master1.get_next_block_to_mine( acc1, branch_value=None ) ) call_async(master1.add_root_block(root_block)) master1.synchronizer.root_block_header_list_limit = 3 # Connect and the synchronizer should automically download call_async(clusters[1].network.connect( "127.0.0.1", clusters[0].network.env.cluster_config.P2P_PORT) ) assert_true_with_timeout(lambda: master1.root_state.tip == root_block_list[-1].header) self.assertEqual(master1.synchronizer.stats.blocks_downloaded, 2) self.assertEqual(master1.synchronizer.stats.headers_downloaded, 6) self.assertEqual(master1.synchronizer.stats.ancestor_lookup_requests, 2) def test_get_root_block_header_sync_with_best_ancestor(self): id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1, connect=False) as clusters: master0 = clusters[0].master root_block_list = [] for i in range(5): root_block = call_async( master0.get_next_block_to_mine( Address.create_empty_account(), branch_value=None ) ) call_async(master0.add_root_block(root_block)) root_block_list.append(root_block) assert_true_with_timeout(lambda: master0.root_state.tip == root_block_list[-1].header) # Add 2+2 blocks to another cluster master1 = clusters[1].master for i in range(2): call_async(master1.add_root_block(root_block_list[i])) for i in range(2): root_block = call_async( master1.get_next_block_to_mine( acc1, branch_value=None ) ) call_async(master1.add_root_block(root_block)) master1.synchronizer.root_block_header_list_limit = 3 # Lookup will be [0, 2, 4], and then [3], where 3 cannot be found and thus 2 is the best. 
# Connect and the synchronizer should automically download call_async(clusters[1].network.connect( "127.0.0.1", clusters[0].network.env.cluster_config.P2P_PORT) ) assert_true_with_timeout(lambda: master1.root_state.tip == root_block_list[-1].header) self.assertEqual(master1.synchronizer.stats.blocks_downloaded, 3) self.assertEqual(master1.synchronizer.stats.headers_downloaded, 4 + 3) self.assertEqual(master1.synchronizer.stats.ancestor_lookup_requests, 2) def test_get_minor_block_headers_with_skip(self): """ Test the broadcast is only done to the neighbors """ id1 = Identity.create_random_identity() acc1 = Address.create_from_identity(id1, full_shard_key=0) with ClusterContext(2, acc1) as clusters: master = clusters[0].master shard = next(iter(clusters[0].slave_list[0].shards.values())) # Add a root block first so that later minor blocks referring to this root # can be broadcasted to other shards minor_block_header_list = [shard.state.header_tip] branch = shard.state.header_tip.branch for i in range(10): b = shard.state.create_block_to_mine() call_async(master.add_raw_minor_block(b.header.branch, b.serialize())) minor_block_header_list.append(b.header) self.assertEqual(minor_block_header_list[-1].height, 10) peer = next(iter(clusters[1].slave_list[0].shards[branch].peers.values())) # Test Case 1 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_height( height=1, branch=branch, skip=1, limit=3, direction=Direction.TIP ), ) ) self.assertEqual(len(resp.block_header_list), 3) self.assertEqual(resp.block_header_list[0], minor_block_header_list[1]) self.assertEqual(resp.block_header_list[1], minor_block_header_list[3]) self.assertEqual(resp.block_header_list[2], minor_block_header_list[5]) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_hash( hash=minor_block_header_list[1].get_hash(), branch=branch, skip=1, limit=3, direction=Direction.TIP, ), ) ) self.assertEqual(len(resp.block_header_list), 3) self.assertEqual(resp.block_header_list[0], minor_block_header_list[1]) self.assertEqual(resp.block_header_list[1], minor_block_header_list[3]) self.assertEqual(resp.block_header_list[2], minor_block_header_list[5]) # Test Case 2 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_height( height=2, branch=branch, skip=2, limit=4, direction=Direction.TIP ), ) ) self.assertEqual(len(resp.block_header_list), 3) self.assertEqual(resp.block_header_list[0], minor_block_header_list[2]) self.assertEqual(resp.block_header_list[1], minor_block_header_list[5]) self.assertEqual(resp.block_header_list[2], minor_block_header_list[8]) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_hash( hash=minor_block_header_list[2].get_hash(), branch=branch, skip=2, limit=4, direction=Direction.TIP, ), ) ) self.assertEqual(len(resp.block_header_list), 3) self.assertEqual(resp.block_header_list[0], minor_block_header_list[2]) self.assertEqual(resp.block_header_list[1], minor_block_header_list[5]) self.assertEqual(resp.block_header_list[2], 
minor_block_header_list[8]) # Test Case 3 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_height( height=6, branch=branch, skip=0, limit=100, direction=Direction.TIP ), ) ) self.assertEqual(len(resp.block_header_list), 5) self.assertEqual(resp.block_header_list[0], minor_block_header_list[6]) self.assertEqual(resp.block_header_list[1], minor_block_header_list[7]) self.assertEqual(resp.block_header_list[2], minor_block_header_list[8]) self.assertEqual(resp.block_header_list[3], minor_block_header_list[9]) self.assertEqual(resp.block_header_list[4], minor_block_header_list[10]) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_hash( hash=minor_block_header_list[6].get_hash(), branch=branch, skip=0, limit=100, direction=Direction.TIP, ), ) ) self.assertEqual(len(resp.block_header_list), 5) self.assertEqual(resp.block_header_list[0], minor_block_header_list[6]) self.assertEqual(resp.block_header_list[1], minor_block_header_list[7]) self.assertEqual(resp.block_header_list[2], minor_block_header_list[8]) self.assertEqual(resp.block_header_list[3], minor_block_header_list[9]) self.assertEqual(resp.block_header_list[4], minor_block_header_list[10]) # Test Case 4 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_height( height=2, branch=branch, skip=2, limit=4, direction=Direction.GENESIS ), ) ) self.assertEqual(len(resp.block_header_list), 1) self.assertEqual(resp.block_header_list[0], minor_block_header_list[2]) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_hash( hash=minor_block_header_list[2].get_hash(), branch=branch, skip=2, limit=4, direction=Direction.GENESIS, ), ) ) self.assertEqual(len(resp.block_header_list), 1) self.assertEqual(resp.block_header_list[0], minor_block_header_list[2]) # Test Case 5 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_height( height=11, branch=branch, skip=2, limit=4, direction=Direction.GENESIS ), ) ) self.assertEqual(len(resp.block_header_list), 0) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_hash( hash=bytes(32), branch=branch, skip=2, limit=4, direction=Direction.GENESIS ), ) ) self.assertEqual(len(resp.block_header_list), 0) # Test Case 6 ################################################### op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_height( height=8, branch=branch, skip=1, limit=5, direction=Direction.GENESIS ), ) ) self.assertEqual(len(resp.block_header_list), 5) self.assertEqual(resp.block_header_list[0], minor_block_header_list[8]) self.assertEqual(resp.block_header_list[1], minor_block_header_list[6]) self.assertEqual(resp.block_header_list[2], 
minor_block_header_list[4]) self.assertEqual(resp.block_header_list[3], minor_block_header_list[2]) self.assertEqual(resp.block_header_list[4], minor_block_header_list[0]) op, resp, rpc_id = call_async( peer.write_rpc_request( op=CommandOp.GET_MINOR_BLOCK_HEADER_LIST_WITH_SKIP_REQUEST, cmd=GetMinorBlockHeaderListWithSkipRequest.create_for_hash( hash=minor_block_header_list[8].get_hash(), branch=branch, skip=1, limit=5, direction=Direction.GENESIS, ), ) ) self.assertEqual(len(resp.block_header_list), 5) self.assertEqual(resp.block_header_list[0], minor_block_header_list[8]) self.assertEqual(resp.block_header_list[1], minor_block_header_list[6]) self.assertEqual(resp.block_header_list[2], minor_block_header_list[4]) self.assertEqual(resp.block_header_list[3], minor_block_header_list[2]) self.assertEqual(resp.block_header_list[4], minor_block_header_list[0])
45.332616
118
0.58573
[ "MIT" ]
Belgarion/pyquarkchain_cuda
quarkchain/cluster/tests/test_cluster.py
63,239
Python
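The *_WITH_SKIP assertions in the cluster tests above follow a simple height arithmetic. A standalone sketch (not QuarkChain code) that reproduces the expected heights makes the test cases easier to read: step by skip+1 from the starting height, toward the tip or toward genesis, returning at most limit headers and stopping at the chain boundaries.

def expected_heights(start, skip, limit, toward_tip, tip_height):
    """Heights a *_WITH_SKIP request should return, as implied by the tests above."""
    step = (skip + 1) if toward_tip else -(skip + 1)
    heights = []
    h = start
    while 0 <= h <= tip_height and len(heights) < limit:
        heights.append(h)
        h += step
    return heights


# Matches the root-block test cases above (tip height 10):
assert expected_heights(1, 1, 3, True, 10) == [1, 3, 5]
assert expected_heights(2, 2, 4, True, 10) == [2, 5, 8]
assert expected_heights(6, 0, 100, True, 10) == [6, 7, 8, 9, 10]
assert expected_heights(2, 2, 4, False, 10) == [2]
assert expected_heights(11, 2, 4, False, 10) == []
assert expected_heights(8, 1, 5, False, 10) == [8, 6, 4, 2, 0]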
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import numpy as np  # type: ignore

import onnx
from ..base import Base
from . import expect
from ..utils import all_numeric_dtypes


class Max(Base):

    @staticmethod
    def export():  # type: () -> None
        data_0 = np.array([3, 2, 1]).astype(np.float32)
        data_1 = np.array([1, 4, 4]).astype(np.float32)
        data_2 = np.array([2, 5, 3]).astype(np.float32)
        result = np.array([3, 5, 4]).astype(np.float32)
        node = onnx.helper.make_node(
            'Max',
            inputs=['data_0', 'data_1', 'data_2'],
            outputs=['result'],
        )
        expect(node, inputs=[data_0, data_1, data_2], outputs=[result],
               name='test_max_example')

        node = onnx.helper.make_node(
            'Max',
            inputs=['data_0'],
            outputs=['result'],
        )
        expect(node, inputs=[data_0], outputs=[data_0],
               name='test_max_one_input')

        result = np.maximum(data_0, data_1)
        node = onnx.helper.make_node(
            'Max',
            inputs=['data_0', 'data_1'],
            outputs=['result'],
        )
        expect(node, inputs=[data_0, data_1], outputs=[result],
               name='test_max_two_inputs')

    @staticmethod
    def export_max_all_numeric_types():  # type: () -> None
        for op_dtype in all_numeric_dtypes:
            data_0 = np.array([3, 2, 1]).astype(op_dtype)
            data_1 = np.array([1, 4, 4]).astype(op_dtype)
            result = np.array([3, 4, 4]).astype(op_dtype)
            node = onnx.helper.make_node(
                'Max',
                inputs=['data_0', 'data_1'],
                outputs=['result'],
            )
            expect(node, inputs=[data_0, data_1], outputs=[result],
                   name='test_max_{0}'.format(np.dtype(op_dtype).name))
32.7
71
0.558104
[ "MIT" ]
15737939656/onnx
onnx/backend/test/case/node/max.py
1,962
Python
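The backend test above only builds bare Max nodes and delegates execution to expect. A minimal standalone sketch of wrapping the same op into a complete model and validating it with the ONNX checker (an illustration, not part of the test harness) could look like this:

import numpy as np
import onnx
from onnx import helper, TensorProto

node = helper.make_node('Max', inputs=['a', 'b'], outputs=['c'])
graph = helper.make_graph(
    [node],
    'max_example',
    inputs=[helper.make_tensor_value_info('a', TensorProto.FLOAT, [3]),
            helper.make_tensor_value_info('b', TensorProto.FLOAT, [3])],
    outputs=[helper.make_tensor_value_info('c', TensorProto.FLOAT, [3])],
)
model = helper.make_model(graph)
onnx.checker.check_model(model)

# Reference result a runtime is expected to reproduce for these inputs
print(np.maximum([3, 2, 1], [1, 4, 4]))  # [3 4 4]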
import theano.tensor as T import theano import numpy as np from scipy.spatial.distance import pdist, squareform, cdist import random import time ''' Sample code to reproduce our results for the Bayesian neural network example. Our settings are almost the same as Hernandez-Lobato and Adams (ICML15) https://jmhldotorg.files.wordpress.com/2015/05/pbp-icml2015.pdf Our implementation is also based on their Python code. p(y | W, X, \gamma) = \prod_i^N N(y_i | f(x_i; W), \gamma^{-1}) p(W | \lambda) = \prod_i N(w_i | 0, \lambda^{-1}) p(\gamma) = Gamma(\gamma | a0, b0) p(\lambda) = Gamma(\lambda | a0, b0) The posterior distribution is as follows: p(W, \gamma, \lambda) = p(y | W, X, \gamma) p(W | \lambda) p(\gamma) p(\lambda) To avoid negative values of \gamma and \lambda, we update loggamma and loglambda instead. Copyright (c) 2016, Qiang Liu & Dilin Wang All rights reserved. ''' class svgd_bayesnn: ''' We define a one-hidden-layer-neural-network specifically. We leave extension of deep neural network as our future work. Input -- X_train: training dataset, features -- y_train: training labels -- batch_size: sub-sampling batch size -- max_iter: maximum iterations for the training procedure -- M: number of particles are used to fit the posterior distribution -- n_hidden: number of hidden units -- a0, b0: hyper-parameters of Gamma distribution -- master_stepsize, auto_corr: parameters of adgrad ''' def __init__(self, X_train, y_train, X_test, y_text, batch_size = 100, max_iter = 1000, M = 20, n_hidden = 50, a0 = 1, b0 = 0.1, master_stepsize = 1e-3, auto_corr = 0.9, h=-1, alpha = 0.9, method = 'none',m=5, cf = False, uStat = True, regCoeff = 0.1, adver = False, adverMaxIter = 5, maxTime = 20, numTimeSteps = 20): self.n_hidden = n_hidden self.d = X_train.shape[1] # number of data, dimension self.M = M num_vars = self.d * n_hidden + n_hidden * 2 + 3 # w1: d*n_hidden; b1: n_hidden; w2 = n_hidden; b2 = 1; 2 variances self.theta = np.zeros([self.M, num_vars]) # particles, will be initialized later ''' We keep the last 10% (maximum 500) of training data points for model developing ''' size_dev = min(int(np.round(0.1 * X_train.shape[0])), 500) X_dev, y_dev = X_train[-size_dev:], y_train[-size_dev:] X_train, y_train = X_train[:-size_dev], y_train[:-size_dev] ''' The data sets are normalized so that the input features and the targets have zero mean and unit variance ''' self.std_X_train = np.std(X_train, 0) self.std_X_train[ self.std_X_train == 0 ] = 1 self.mean_X_train = np.mean(X_train, 0) self.mean_y_train = np.mean(y_train) self.std_y_train = np.std(y_train) ''' Theano symbolic variables Define the neural network here ''' X = T.matrix('X') # Feature matrix y = T.vector('y') # labels w_1 = T.matrix('w_1') # weights between input layer and hidden layer b_1 = T.vector('b_1') # bias vector of hidden layer w_2 = T.vector('w_2') # weights between hidden layer and output layer b_2 = T.scalar('b_2') # bias of output N = T.scalar('N') # number of observations log_gamma = T.scalar('log_gamma') # variances related parameters log_lambda = T.scalar('log_lambda') ### prediction = T.dot(T.nnet.relu(T.dot(X, w_1)+b_1), w_2) + b_2 ''' define the log posterior distribution ''' log_lik_data = -0.5 * X.shape[0] * (T.log(2*np.pi) - log_gamma) - (T.exp(log_gamma)/2) * T.sum(T.power(prediction - y, 2)) log_prior_data = (a0 - 1) * log_gamma - b0 * T.exp(log_gamma) + log_gamma log_prior_w = -0.5 * (num_vars-2) * (T.log(2*np.pi)-log_lambda) - (T.exp(log_lambda)/2)*((w_1**2).sum() + (w_2**2).sum() + (b_1**2).sum() + b_2**2) \ + 
(a0-1) * log_lambda - b0 * T.exp(log_lambda) + log_lambda # sub-sampling mini-batches of data, where (X, y) is the batch data, and N is the number of whole observations log_posterior = (log_lik_data * N / X.shape[0] + log_prior_data + log_prior_w) dw_1, db_1, dw_2, db_2, d_log_gamma, d_log_lambda = T.grad(log_posterior, [w_1, b_1, w_2, b_2, log_gamma, log_lambda]) # automatic gradient logp_gradient = theano.function( inputs = [X, y, w_1, b_1, w_2, b_2, log_gamma, log_lambda, N], outputs = [dw_1, db_1, dw_2, db_2, d_log_gamma, d_log_lambda] ) # prediction function self.nn_predict = theano.function(inputs = [X, w_1, b_1, w_2, b_2], outputs = prediction) ''' Training with SVGD ''' # normalization X_train, y_train = self.normalization(X_train, y_train) N0 = X_train.shape[0] # number of observations ''' initializing all particles ''' for i in range(self.M): w1, b1, w2, b2, loggamma, loglambda = self.init_weights(a0, b0) # use better initialization for gamma ridx = np.random.choice(range(X_train.shape[0]), \ np.min([X_train.shape[0], 1000]), replace = False) y_hat = self.nn_predict(X_train[ridx,:], w1, b1, w2, b2) loggamma = -np.log(np.mean(np.power(y_hat - y_train[ridx], 2))) self.theta[i,:] = self.pack_weights(w1, b1, w2, b2, loggamma, loglambda) grad_theta = np.zeros([self.M, num_vars]) # gradient # adagrad with momentum fudge_factor = 1e-6 historical_grad = 0 self.y_historical_grad = 0 self.h_historical_grad = 0 self.rmse_overTime = np.zeros(numTimeSteps) # RMSE self.llh_overTime = np.zeros(numTimeSteps) # LLH self.iter_overTime = np.zeros(numTimeSteps) # LLH timeStepUnit = maxTime / numTimeSteps # Time to check every iteration timeInd = 0; start_time = time.time() for iter in range(max_iter): if method == 'subparticles': self.Sqy = np.zeros([m, num_vars]) # Sqy elif method == 'inducedPoints' or method == 'none': self.Sqx = np.zeros([self.M, num_vars]) # Sqx h = -1; # sub-sampling batch = [ i % N0 for i in range(iter * batch_size, (iter + 1) * batch_size) ] if method == 'none' or method =='inducedPoints': for i in range(self.M): w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i,:]) dw1, db1, dw2, db2, dloggamma, dloglambda = logp_gradient(X_train[batch,:], y_train[batch], w1, b1, w2, b2, loggamma, loglambda, N0) self.Sqx[i,:] = self.pack_weights(dw1, db1, dw2, db2, dloggamma, dloglambda) if method == 'none': grad_theta = self.svgd_kernel(h=h) elif method == 'inducedPoints': self.yInd = np.random.choice(self.theta.shape[0], m, replace=False) self.y = self.theta[self.yInd] grad_theta = self.svgd_kernel_inducedPoints(h=h, uStat = uStat, regCoeff = regCoeff, adver=adver, adverMaxIter = adverMaxIter) elif method == 'subparticles': self.yInd = np.random.choice(self.theta.shape[0], m, replace=False) self.y = self.theta[self.yInd] for i in range(m): w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.y[i,:]) dw1, db1, dw2, db2, dloggamma, dloglambda = logp_gradient(X_train[batch,:], y_train[batch], w1, b1, w2, b2, loggamma, loglambda, N0) self.Sqy[i,:] = self.pack_weights(dw1, db1, dw2, db2, dloggamma, dloglambda) grad_theta = self.svgd_kernel_subset(h=-1, cf=cf) [adj_grad, historical_grad] = self.get_adamUpdate(iter, grad_theta, historical_grad,master_stepsize, alpha, fudge_factor) self.theta = self.theta + adj_grad; elapsed_time = time.time() - start_time if elapsed_time > timeStepUnit: self.thetaCopy = np.copy(self.theta) # Evaluate and save ''' Model selection by using a development set ''' X_dev = self.normalization(X_dev) for i in range(self.M): w1, b1, w2, b2, 
loggamma, loglambda = self.unpack_weights(self.thetaCopy[i, :]) pred_y_dev = self.nn_predict(X_dev, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train # likelihood def f_log_lik(loggamma): return np.sum( np.log(np.sqrt(np.exp(loggamma)) /np.sqrt(2*np.pi) * np.exp( -1 * (np.power(pred_y_dev - y_dev, 2) / 2) * np.exp(loggamma) )) ) # The higher probability is better lik1 = f_log_lik(loggamma) # one heuristic setting loggamma = -np.log(np.mean(np.power(pred_y_dev - y_dev, 2))) lik2 = f_log_lik(loggamma) if lik2 > lik1: self.thetaCopy[i,-2] = loggamma # update loggamma svgd_rmse, svgd_ll = self.evaluation(X_test, y_test) self.rmse_overTime[timeInd] = svgd_rmse self.llh_overTime[timeInd] = svgd_ll self.iter_overTime[timeInd] = iter start_time = time.time() timeInd = timeInd + 1 # Break after maxTime if timeInd >= numTimeSteps: print('Reached ', iter, 'iterations\n') break def normalization(self, X, y = None): X = (X - np.full(X.shape, self.mean_X_train)) / \ np.full(X.shape, self.std_X_train) if y is not None: y = (y - self.mean_y_train) / self.std_y_train return (X, y) else: return X ''' Initialize all particles ''' def init_weights(self, a0, b0): w1 = 1.0 / np.sqrt(self.d + 1) * np.random.randn(self.d, self.n_hidden) b1 = np.zeros((self.n_hidden,)) w2 = 1.0 / np.sqrt(self.n_hidden + 1) * np.random.randn(self.n_hidden) b2 = 0. loggamma = np.log(np.random.gamma(a0, b0)) loglambda = np.log(np.random.gamma(a0, b0)) return (w1, b1, w2, b2, loggamma, loglambda) ''' Returns control functional weights ''' def getWeights(self, KpMat): condNumber = self.getConditionNumber(KpMat) z = KpMat.shape[0] # Get weights KPrime = KpMat + condNumber * z * np.identity(z) num = np.matmul(np.ones(z),np.linalg.inv(KPrime)) denom = 1 + np.matmul(num,np.ones(z)) weights = num / denom weights = weights / sum(weights) return (weights) ''' Given a kernel matrix K, let lambda be smallest power of 10 such that kernel matrix K0 + lamba*I has condition number lower than 10^10 Note we use 2-norm for computing condition number ''' def getConditionNumber(self, K): condNumber = 10e-10 condA = 10e11 matSize = K.shape[0] while condA > 10e10: condNumber = condNumber * 10 A = K + condNumber * np.identity(matSize) condA = np.linalg.norm(A, ord=2) * np.linalg.norm(np.linalg.inv(A), ord=2) return (condNumber) ''' Calculate kernel matrix and its gradient: K, \nabla_x k ''' def svgd_kernel(self, h = -1): n,d = self.theta.shape sq_dist = pdist(self.theta) pairwise_dists = squareform(sq_dist)**2 if h < 0: # if h < 0, using median trick h = np.median(pairwise_dists) h = np.sqrt(0.5 * h / np.log(n+1)) # compute the rbf kernel Kxy = np.exp( -pairwise_dists / h**2 / 2) dxkxy = -np.matmul(Kxy, self.theta) sumkxy = np.sum(Kxy, axis=1) for i in range(d): dxkxy[:, i] = dxkxy[:,i] + np.multiply(self.theta[:,i],sumkxy) dxkxy = dxkxy / (h**2) grad_theta = (np.matmul(Kxy, self.Sqx) + dxkxy) / n return grad_theta ''' Compute gradient update for theta using svgd random subset (with optional control functional) ''' def svgd_kernel_subset(self, h=-1, cf = False): n,d = self.theta.shape m = self.y.shape[0] pairwise_dists = cdist(self.theta, self.y)**2 if h < 0: # if h < 0, using median trick h = np.median(pairwise_dists) h = np.sqrt(0.5 * h / np.log(n+1)) # compute the rbf kernel Kxy = np.exp( -pairwise_dists / h**2 / 2) if cf == True : # Using control functional sqxdy_part = np.array([np.sum(np.multiply(self.Sqy,self.y),axis=1),]*m).T sqxdy = -(np.matmul(self.Sqy,self.y.T)- sqxdy_part)/ h**2 dxsqy = sqxdy.T dxdy = -pairwise_dists[self.yInd]/h**4 
+d/h**2 KxySub = Kxy[self.yInd] KpMat = (np.matmul(self.Sqy, self.Sqy.T) + sqxdy + dxsqy + dxdy) KpMat = np.multiply(KpMat, KxySub) weights = self.getWeights(KpMat) Kxy = np.multiply(Kxy, np.matlib.repmat(weights, n, 1)) dxkxy = -np.matmul(Kxy, self.y) sumkxy = np.sum(Kxy, axis=1) for i in range(d): dxkxy[:, i] = dxkxy[:,i] + np.multiply(self.theta[:,i],sumkxy) dxkxy = dxkxy / (h**2) grad_theta = (np.matmul(Kxy, self.Sqy) + dxkxy) if cf == False: grad_theta = grad_theta / m return grad_theta ''' Perform a step of adam update ''' def get_adamUpdate(self, iterInd, ori_grad, hist_grad, stepsize = 1e-3, alpha = 0.9, fudge_factor = 1e-6): if iterInd == 0: hist_grad = hist_grad + ori_grad ** 2 else: hist_grad = alpha * hist_grad + (1 - alpha) * (ori_grad ** 2) adj_grad = np.divide(ori_grad, fudge_factor+np.sqrt(hist_grad)) return (stepsize * adj_grad, hist_grad) ''' Compute gradient update for y ''' def svgd_kernel_grady(self, h=-1, uStat=True, regCoeff=0.1): m = self.y.shape[0] xAdverSubsetInd = np.random.choice(self.theta.shape[0], m, replace=False) self.thetaSubset = self.theta[xAdverSubsetInd,:] self.SqxSubset = self.Sqx[xAdverSubsetInd,:] #self.thetaSubset = np.copy(self.theta) #self.SqxSubset = np.copy(self.Sqx) n,d = self.thetaSubset.shape pairwise_dists = cdist(self.thetaSubset, self.y)**2 if h < 0: # if h < 0, using median trick h = np.median(pairwise_dists) h = np.sqrt(0.5 * h / np.log(n+1)) # compute the rbf kernel Kxy = np.exp( -pairwise_dists / h**2 / 2) yGrad = np.zeros((m,d)); # Compute gradient for yInd in range(m): Kxy_cur = Kxy[:,yInd]; xmy = (self.thetaSubset - np.tile(self.y[yInd,:],[n,1]))/h**2 Sqxxmy = self.SqxSubset - xmy; back = np.tile(np.array([Kxy_cur]).T,(1,d)) * Sqxxmy inner = np.tile(np.array([np.sum(np.matmul(back, back.T),axis=1)]).T,[1,d]) yGrad[yInd,:] = np.sum(xmy * inner,axis=0) + np.sum(back,axis=0) * np.sum(Kxy_cur)/h**2 # For U-statistic if uStat: front_u = np.tile(np.array([(Kxy_cur**2) * np.sum(Sqxxmy **2,axis=1)]).T,[1,d]) * xmy; back_u = np.tile(np.array([Kxy_cur**2 / h**2]).T,[1,d]) * Sqxxmy yGrad[yInd,:] = yGrad[yInd,:] - np.sum(front_u + back_u,axis=0) if uStat: yGrad = yGrad * 2 / (n*(n-1)*m); else: yGrad = yGrad * 2 / (n**2 * m); if regCoeff > 0 : H_y = cdist(self.y, self.y)**2 Kxy_y = np.exp( -H_y / h**2 / 2) sumKxy_y = np.sum(Kxy_y,axis=1) yReg = (self.y * np.tile(np.array([sumKxy_y]).T,[1,d]) - np.matmul(Kxy_y,self.y))/(h**2 * m) yGrad = yGrad + regCoeff * yReg return (yGrad) ''' Compute gradient update for h ''' def svgd_kernel_gradh(self, h=-1, uStat=True): n,d = self.thetaSubset.shape m = self.y.shape[0] H = cdist(self.thetaSubset, self.y)**2 if h < 0: # if h < 0, using median trick h = np.median(H) h = np.sqrt(0.5 * h / np.log(n+1)) # compute the rbf kernel Kxy = np.exp( -H / h**2 / 2) hGrad = 0; # For each induced point for yInd in range(m): Kxy_cur = Kxy[:,yInd] H_cur = H[:,yInd] xmy = (self.thetaSubset - np.tile(self.y[yInd,:],[n,1]))/h**2 Sqxxmy = self.SqxSubset - xmy part2 = np.tile(np.array([Kxy_cur]).T,[1,d]) * Sqxxmy part1_1 = np.tile(np.array([H_cur/h**3]).T,[1,d]) * part2 part1_2 = np.tile(np.array([Kxy_cur]).T,[1,d]) * (2*xmy / h**3) part = np.matmul(part1_1 + part1_2, part2.T) hGrad = hGrad + np.sum(np.sum(part,axis=1)) if uStat: front_u = (Kxy_cur**2) * (H_cur/h**3) * np.sum(Sqxxmy**2, axis=1) back_u = np.sum((2*xmy/h**3) * Sqxxmy,axis=1) hGrad = hGrad - np.sum(Kxy_cur**2 * (front_u + back_u),axis=0) if uStat: hGrad = hGrad * 2 / (n*(n-1)*m); else: hGrad = hGrad * 2 / (n**2 * m); return (hGrad) ''' Induced Points Method ''' def 
svgd_kernel_inducedPoints(self, h=-1, uStat=True, regCoeff=0.1, adver = False, adverMaxIter = 5, stepsize = 1e-3, alpha = 0.9): n,d = self.theta.shape m = self.y.shape[0] # If we want to perform EM if adver == True: # Perform update emMaxIter number of times fudge_factor = 1e-6 for adverIter in range(0,adverMaxIter): grad_y = self.svgd_kernel_grady(h=h,uStat=uStat, regCoeff=regCoeff) [update_y,hist_grad] = self.get_adamUpdate(adverIter, grad_y, self.y_historical_grad,stepsize = stepsize, alpha = alpha) self.y = self.y + update_y self.y_historical_grad = hist_grad grad_h = self.svgd_kernel_gradh(h=h,uStat=uStat) [update_h, hist_grad] = self.get_adamUpdate(adverIter, grad_h, self.h_historical_grad,stepsize = stepsize, alpha = alpha) h = h + update_h self.h_historical_grad = hist_grad pairwise_dists = cdist(self.theta, self.y)**2 # compute the rbf kernel Kxy = np.exp( -pairwise_dists / h**2 / 2) innerTerm_1 = np.matmul(Kxy.T, (self.Sqx - self.theta/ h**2)) sumkxy = np.sum(Kxy, axis=0) innerTerm_2 = np.multiply(np.tile(np.array([sumkxy]).T,(1,d)), self.y/h**2) innerTerm = (innerTerm_1 + innerTerm_2)/n gradTheta = np.matmul(Kxy, innerTerm)/m return (gradTheta) ''' Pack all parameters in our model ''' def pack_weights(self, w1, b1, w2, b2, loggamma, loglambda): params = np.concatenate([w1.flatten(), b1, w2, [b2], [loggamma],[loglambda]]) return params ''' Unpack all parameters in our model ''' def unpack_weights(self, z): w = z w1 = np.reshape(w[:self.d*self.n_hidden], [self.d, self.n_hidden]) b1 = w[self.d*self.n_hidden:(self.d+1)*self.n_hidden] w = w[(self.d+1)*self.n_hidden:] w2, b2 = w[:self.n_hidden], w[-3] # the last two parameters are log variance loggamma, loglambda= w[-2], w[-1] return (w1, b1, w2, b2, loggamma, loglambda) ''' Evaluating testing rmse and log-likelihood, which is the same as in PBP Input: -- X_test: unnormalized testing feature set -- y_test: unnormalized testing labels ''' def evaluation(self, X_test, y_test): # normalization X_test = self.normalization(X_test) # average over the output pred_y_test = np.zeros([self.M, len(y_test)]) prob = np.zeros([self.M, len(y_test)]) ''' Since we have M particles, we use a Bayesian view to calculate rmse and log-likelihood ''' for i in range(self.M): w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.thetaCopy[i, :]) pred_y_test[i, :] = self.nn_predict(X_test, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train prob[i, :] = np.sqrt(np.exp(loggamma)) /np.sqrt(2*np.pi) * np.exp( -1 * (np.power(pred_y_test[i, :] - y_test, 2) / 2) * np.exp(loggamma) ) pred = np.mean(pred_y_test, axis=0) # evaluation svgd_rmse = np.sqrt(np.mean((pred - y_test)**2)) svgd_ll = np.mean(np.log(np.mean(prob, axis = 0))) return (svgd_rmse, svgd_ll) ''' Returns the result of the iterations ''' def getResults(self): return (self.rmse_overTime, self.llh_overTime, self.iter_overTime) if __name__ == '__main__': print ('Theano', theano.version.version) #our implementation is based on theano 0.8.2 np.random.seed(1) ''' load data file ''' for dataInd in range(0,1): if dataInd == 0: data = np.loadtxt('../data/boston_housing') datasetName = 'Boston Housing' elif dataInd == 1: data = np.loadtxt(open("../data/Concrete.csv", "rb"), delimiter=",", skiprows=1) # Concrete datasetName = 'Concrete' elif dataInd == 2: data = np.loadtxt(open("../data/Energy.csv", "rb"), delimiter=",", skiprows=1) # Energy datasetName = 'Energy' elif dataInd == 3: data = np.loadtxt(open("../data/kin8nm.csv", "rb"), delimiter=",", skiprows=0) # Kin8nm Dataset datasetName = 'Kin8nm' 
print('-------------------',datasetName,'-------------------') if dataInd == 2: X_input = data[ :, range(data.shape[ 1 ] - 2) ] y_input = data[ :, data.shape[ 1 ] - 2 ] else: # Please make sure that the last column is the label and the other columns are features X_input = data[ :, range(data.shape[ 1 ] - 1) ] y_input = data[ :, data.shape[ 1 ] - 1 ] ''' build the training and testing data set''' train_ratio = 0.9 # We create the train and test sets with 90% and 10% of the data permutation = np.arange(X_input.shape[0]) random.shuffle(permutation) size_train = int(np.round(X_input.shape[ 0 ] * train_ratio)) index_train = permutation[ 0 : size_train] index_test = permutation[ size_train : ] X_train, y_train = X_input[ index_train, : ], y_input[ index_train ] X_test, y_test = X_input[ index_test, : ], y_input[ index_test ] #names = ['Base','Subset','Subset-CF','Induced Points']; names = ['Base','Subset','Subset-CF','Induced Points','Adversarial Induced Points']; #names = ['Base','Induced Points','Adversarial Induced Points']; numIter = 10 maxTime = 100 numTimeSteps = 20 modelNum = len(names); svgd_rmse_final = np.zeros((modelNum, numTimeSteps)) svgd_ll_final = np.zeros((modelNum, numTimeSteps)) svgd_iter_final = np.zeros((modelNum, numTimeSteps)) ''' Training Bayesian neural network with SVGD ''' #batch_size, n_hidden, max_iter, numParticles = 100, 50, 2000, 30 # max_iter is a trade-off between running time and performance batch_size, n_hidden, max_iter, numParticles = 100, 50, 100000, 20 # max_iter is a trade-off between running time and performance max_iterRS = 100000 max_iterRSCF = 100000 max_iterIP = 100000 max_iterAIP = 100000 m, adverMaxIter = 10,1 max_iters = [max_iter, max_iterRS, max_iterRSCF, max_iterIP]; np.set_printoptions(precision=4) for modelInd in range(0,5): for t in range(0,numIter): np.random.seed(t) print(names[modelInd], ': Iteration ', t+1, '/', numIter) start = time.time() if modelInd == 0 :# base svgd = svgd_bayesnn(X_train, y_train, X_test, y_test, numTimeSteps = numTimeSteps, maxTime = maxTime, batch_size = batch_size, n_hidden = n_hidden, M=numParticles, max_iter = max_iter, method = 'none') elif modelInd == 1 : # Subset svgd = svgd_bayesnn(X_train, y_train, X_test, y_test, numTimeSteps = numTimeSteps, maxTime = maxTime, batch_size = batch_size, n_hidden = n_hidden, M=numParticles, max_iter = max_iterRS, method = 'subparticles',m=m,cf=False) elif modelInd == 2 : # Subset (CF) svgd = svgd_bayesnn(X_train, y_train, X_test, y_test, numTimeSteps = numTimeSteps, maxTime = maxTime, batch_size = batch_size, n_hidden = n_hidden, M=numParticles, max_iter = max_iterRSCF, method = 'subparticles',m=m,cf=True) elif modelInd == 3 : # Induced Points svgd = svgd_bayesnn(X_train, y_train, X_test, y_test, numTimeSteps = numTimeSteps, maxTime = maxTime, batch_size = batch_size, n_hidden = n_hidden, M=numParticles, max_iter = max_iterIP, method = 'inducedPoints',m=m, uStat = True, adver=False) elif modelInd == 4 : # Induced Points (Adver) svgd = svgd_bayesnn(X_train, y_train, X_test, y_test, numTimeSteps = numTimeSteps, maxTime = maxTime, batch_size = batch_size, n_hidden = n_hidden, M=numParticles, max_iter = max_iterAIP, method = 'inducedPoints',m=m, uStat = True, adver=True, adverMaxIter = adverMaxIter) [rmseResult, llResult, iterResult] = svgd.getResults() svgd_rmse_final[modelInd,:] = svgd_rmse_final[modelInd,:] + rmseResult / numIter svgd_ll_final[modelInd,:] = svgd_ll_final[modelInd,:] + llResult / numIter svgd_iter_final[modelInd,:] = svgd_iter_final[modelInd,:] + 
np.round(iterResult / numIter) np.save('./subset_1adver_rmseResult_'+datasetName,svgd_rmse_final) np.save('./subset_1adver_llResult_'+datasetName,svgd_ll_final) np.save('./subset_1adver_iterResult_'+datasetName,svgd_iter_final) #print('--------------------------------------------------------------------------------') #print('Dataset : ', datasetName) #print('[Options] : M=',numParticles, ', m=',m, ', max_iter=', max_iter, ', n_hidden=',n_hidden, ', batch_size=',batch_size) #print('--------------------------------------------------------------------------------') #for modelInd in range(0,modelNum): # print (names[modelInd],' [Average of', numIter, 'runs] : ', max_iters[modelInd], ' iterations') # print ('[rmse] Mean : ', "%.4f" % np.mean(svgd_rmse_final[modelInd,]), ' st.dev : ', "%.4f" % np.std(svgd_rmse_final[modelInd,]) ) # print ('[llik] Mean : ', "%.4f" % np.mean(svgd_ll_final[modelInd,]), ' st.dev : ', "%.4f" % np.std(svgd_ll_final[modelInd,]) ) # print ('[time] Mean : ', "%.2f" % np.mean(svgd_time_final[modelInd,]), ' st.dev : ', "%.2f" % np.std(svgd_time_final[modelInd,]), '\n')
avg_line_length: 42.714724
max_line_length: 188
alphanum_fraction: 0.5614
licenses: [ "MIT" ]
repository_name: MinHyung-Kang/Thesis
path: python/bayesian_nn_subset.py
size: 27,850
lang: Python
''' This file contains test cases for tflearn '''

import tensorflow as tf
import zqtflearn

import unittest

class TestInputs(unittest.TestCase):
    ''' This class contains test cases for several input types '''

    INPUT_DATA_1 = [ [ 1 ], [ 2 ], [ 3 ], [ 4 ], [ 5 ] ]
    INPUT_DATA_2 = [ [ 6 ], [ 7 ], [ 8 ], [ 9 ], [ 10 ] ]
    TARGET = [ [ 14 ], [ 18 ], [ 22 ], [ 26 ], [ 30 ] ]  # (input1 + input2) * 2

    def test_list_inputs(self):
        """Test input a list """
        with tf.Graph().as_default():
            model, inputs, target = self.build_simple_model()
            model.fit([ inpData for _, _, inpData in inputs ], target, batch_size = 1)

    def test_dict_inputs(self):
        """Test input a dict with layer name """
        with tf.Graph().as_default():
            model, inputs, target = self.build_simple_model()
            model.fit({ name: inpData for name, _, inpData in inputs }, target, batch_size = 1)

    def test_dict_withtensor_inputs(self):
        """Test input a dict with placeholder """
        with tf.Graph().as_default():
            model, inputs, target = self.build_simple_model()
            model.fit({ placeholder: inpData for _, placeholder, inpData in inputs }, target, batch_size = 1)

    def build_simple_model(self):
        """Build a simple model for test
        Returns:
            DNN, [ (input layer name, input placeholder, input data) ], Target data
        """
        inputPlaceholder1, inputPlaceholder2 = \
            tf.placeholder(tf.float32, (1, 1), name = "input1"), tf.placeholder(tf.float32, (1, 1), name = "input2")
        input1 = zqtflearn.input_data(placeholder = inputPlaceholder1)
        input2 = zqtflearn.input_data(placeholder = inputPlaceholder2)
        network = zqtflearn.merge([input1, input2], "sum")
        network = zqtflearn.reshape(network, (1, 1))
        network = zqtflearn.fully_connected(network, 1)
        network = zqtflearn.regression(network)
        return (
            zqtflearn.DNN(network),
            [ ("input1:0", inputPlaceholder1, self.INPUT_DATA_1), ("input2:0", inputPlaceholder2, self.INPUT_DATA_2) ],
            self.TARGET,
        )

if __name__ == "__main__":
    unittest.main()
avg_line_length: 38.186441
max_line_length: 119
alphanum_fraction: 0.594319
licenses: [ "MIT" ]
repository_name: ZhengDeQuan/AAA
path: zqtflearn2/tests/test_inputs.py
size: 2,253
lang: Python
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import mindspore as ms import mindspore.nn as nn from mindspore import Tensor, context from mindspore.common.parameter import Parameter from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits from mindspore.nn.optim.momentum import Momentum from mindspore.ops import operations as P from mindspore.train import Model from mindspore.context import ParallelMode from tests.dataset_mock import MindData class Dataset(MindData): def __init__(self, predict, label, length=3): super(Dataset, self).__init__(size=length) self.predict = predict self.label = label self.index = 0 self.length = length def __iter__(self): return self def __next__(self): if self.index >= self.length: raise StopIteration self.index += 1 return self.predict, self.label def reset(self): self.index = 0 class TransposeNet(nn.Cell): def __init__(self, strategy1, strategy2): super(TransposeNet, self).__init__() self.matmul = P.MatMul().set_strategy(((8, 1), (1, 1))) self.matmul_weight = Parameter(Tensor(np.ones([128, 256]), dtype=ms.float32), name="weight") self.transpose1 = P.Transpose().set_strategy(strategy1) self.transpose2 = P.Transpose().set_strategy(strategy2) def construct(self, x): x = self.matmul(x, self.matmul_weight) x = self.transpose1(x, (1, 0)) x = self.transpose2(x, (1, 0)) return x def transpose_net(strategy1, strategy2): return TransposeNet(strategy1=strategy1, strategy2=strategy2) def transpose_common(strategy1, strategy2): learning_rate = 0.1 momentum = 0.9 epoch_size = 2 context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=8, parameter_broadcast=False) predict = Tensor(np.ones([32, 128]), dtype=ms.float32) label = Tensor(np.ones([32]), dtype=ms.int32) dataset = Dataset(predict, label, 2) net = transpose_net(strategy1, strategy2) loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') loss.softmax_cross_entropy.set_strategy(((8, 1), (8, 1))) opt = Momentum(net.trainable_params(), learning_rate, momentum) context.set_context(mode=context.GRAPH_MODE) model = Model(net, loss, opt) model.train(epoch_size, dataset, dataset_sink_mode=False) def test_transpose1(): strategy1 = ((1, 8),) strategy2 = ((1, 8),) transpose_common(strategy1, strategy2) def test_transpose2(): strategy1 = ((1, 4),) strategy2 = ((1, 8),) transpose_common(strategy1, strategy2) if __name__ == '__main__': test_transpose1() test_transpose2()
avg_line_length: 31.485981
max_line_length: 100
alphanum_fraction: 0.697536
licenses: [ "Apache-2.0" ]
repository_name: Rossil2012/mindspore
path: tests/ut/python/parallel/test_transpose.py
size: 3,369
lang: Python
# Quiet TensorFlow.
import os

import numpy as np
# from transformers import AutoTokenizer, TFAutoModelForSequenceClassification, pipeline
from transformers import BertTokenizer, BertForSequenceClassification

import textattack
from textattack import Attacker
from textattack.attack_recipes.my_attack.my_textfooler import MyTextFoolerJin2019
from textattack.datasets import HuggingFaceDataset
from textattack.models.wrappers import ModelWrapper, huggingface_model_wrapper
from textattack.models.wrappers import HuggingFaceModelWrapper

def load_dataset_sst(path = '/mnt/cloud/bairu/repos/text_pgd_attack/sst-2/'):
    def process_file(file):
        # sentence_list = []
        # label_list = []
        data_list = []
        with open(path + file,'r',encoding = 'utf-8') as f:
            for line in f:
                sen, label = line.split("\t",1)
                data_item = [sen, int(label)]
                data_list.append(data_item)
        return data_list
    train_dataset = process_file("train.tsv")
    valid_dataset = process_file("valid.tsv")
    test_dataset = process_file("test.tsv")
    return test_dataset

directory = '/mnt/cloud/bairu/repos/std_text_pgd_attack/checkpoints/bert-base-uncased-sst'
model = BertForSequenceClassification.from_pretrained(directory)
tokenizer = BertTokenizer.from_pretrained('/mnt/cloud/bairu/repos/text_pgd_attack/checkpoints/bert-base-uncased-sst')
wrapper_model = huggingface_model_wrapper.HuggingFaceModelWrapper(model, tokenizer)
recipe = MyTextFoolerJin2019.build(wrapper_model)

# dataset = HuggingFaceDataset("allocine", split="test")
dataset = load_dataset_sst()
dataset = textattack.datasets.Dataset(dataset)

# attack_args = textattack.AttackArgs(num_examples = -1, log_to_txt = './log/textfooler_sst_bertbase_query2000.txt', query_budget = 2000)
attack_args = textattack.AttackArgs(num_examples = -1, log_to_txt = './log/ddd.txt', query_budget = 2000)
# attack_args = textattack.AttackArgs(num_examples = 10, log_to_txt = './log/ddd.txt', query_budget = 500)

attacker = Attacker(recipe, dataset, attack_args)
results = attacker.attack_dataset()
avg_line_length: 45.021277
max_line_length: 137
alphanum_fraction: 0.76087
licenses: [ "MIT" ]
repository_name: hbr690188270/SelfTextAttack
path: experiments/sst/attack_bert_textfooler_sst.py
size: 2,116
lang: Python
from utilities import * from model import get_model from data import get_data, get_loaders from augmentations import get_augs from test_epoch import test_epoch import gc import neptune from accelerate import Accelerator, DistributedType import pandas as pd import numpy as np def run_inference(df, df_old, df_test, CFG, run = None): ''' Run inference loop ''' # tests assert isinstance(CFG, dict), 'CFG has to be a dict with parameters' assert isinstance(df, pd.DataFrame), 'df has to be a pandas dataframe' # placeholders oof = None sub = None # inference for fold in range(CFG['num_folds']): # initialize accelerator accelerator = Accelerator(device_placement = True, fp16 = CFG['use_fp16'], split_batches = False) if CFG['device'] == 'GPU': accelerator.state.device = torch.device('cuda:{}'.format(CFG['device_index'])) # feedback accelerator.print('-' * 55) accelerator.print('FOLD {:d}/{:d}'.format(fold + 1, CFG['num_folds'])) accelerator.print('-' * 55) # get data df_trn, df_val = get_data(df = df, df_old = df_old, fold = fold, CFG = CFG, accelerator = accelerator, silent = True) # get test loader _, val_loader = get_loaders(df_train = df_trn, df_valid = df_val, CFG = CFG, accelerator = accelerator, labeled = False, silent = True) _, test_loader = get_loaders(df_train = df_trn, df_valid = df_test, CFG = CFG, accelerator = accelerator, labeled = False, silent = True) # prepare model model = get_model(CFG = CFG, pretrained = CFG['out_path'] + 'weights_fold{}.pth'.format(int(fold))) # handle device placement model, val_loader, test_loader = accelerator.prepare(model, val_loader, test_loader) # inference for validation data if CFG['predict_oof']: # produce OOF preds val_preds = test_epoch(loader = val_loader, model = model, CFG = CFG, accelerator = accelerator, num_tta = CFG['num_tta']) # store OOF preds val_preds_df = pd.DataFrame(val_preds, columns = ['pred']) val_preds_df = pd.concat([df_val, val_preds_df], axis = 1) oof = pd.concat([oof, val_preds_df], axis = 0).reset_index(drop = True) # inference for test data if CFG['predict_test']: # produce test preds test_preds = test_epoch(loader = test_loader, model = model, CFG = CFG, accelerator = accelerator, num_tta = CFG['num_tta']) # store test preds test_preds_df = pd.DataFrame(test_preds, columns = ['pred_fold{}'.format(int(fold))]) sub = pd.concat([sub, test_preds_df], axis = 1) # clear memory del model, val_loader, test_loader del accelerator gc.collect() # export OOF preds if CFG['predict_oof']: oof.to_csv(CFG['out_path'] + 'oof.csv', index = False) if CFG['tracking']: run['oof'].upload(CFG['out_path'] + 'oof.csv') # export test preds if CFG['predict_test']: sub = pd.concat([df_test['Id'], sub], axis = 1) sub.to_csv(CFG['out_path'] + 'submission.csv', index = False) if CFG['tracking']: run['submission'].upload(CFG['out_path'] + 'submission.csv')
avg_line_length: 38.154472
max_line_length: 97
alphanum_fraction: 0.442574
licenses: [ "MIT" ]
repository_name: kozodoi/Pet_Pawpularity
path: code/run_inference.py
size: 4,693
lang: Python
# -*- coding: utf-8 -*-
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from settings import Microsoft

'''
See the Microsoft DirectLine api documentation for how to get a user access token.
https://docs.botframework.com/en-us/restapi/directline/
'''

chatbot = ChatBot(
    'MicrosoftBot',
    directline_host=Microsoft['directline_host'],
    direct_line_token_or_secret=Microsoft['direct_line_token_or_secret'],
    conversation_id=Microsoft['conversation_id'],
    input_adapter='chatterbot.input.Microsoft',
    output_adapter='chatterbot.output.Microsoft'
)

trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train('chatterbot.corpus.english')

# The following loop will execute each time the user enters input
while True:
    try:
        response = chatbot.get_response(None)

    # Press ctrl-c or ctrl-d on the keyboard to exit
    except (KeyboardInterrupt, EOFError, SystemExit):
        break
avg_line_length: 29.59375
max_line_length: 82
alphanum_fraction: 0.761352
licenses: [ "BSD-3-Clause" ]
repository_name: Hacker-VP/ChatterBot
path: examples/microsoft_bot.py
size: 947
lang: Python
import picamera
from time import sleep

camera = picamera.PiCamera()
camera.capture('image.jpg')

#camera.resolution = (640, 480) #max resolution is 2592 x 1944 for stills,
# 1920 x 1080 (<15fps) for video
#camera.framerate = 45
camera.vflip = True

camera.start_recording('video.h264')
camera.image_effect = 'negative'
for j in range (10):
    for i in range(200):
        camera.annotate_text = "everyday we stray further from God's light"
        camera.annotate_text_size = abs(i-100)+6
        camera.brightness = abs(i-100)
        sleep(0.001)
camera.stop_recording()
avg_line_length: 29.142857
max_line_length: 78
alphanum_fraction: 0.663399
licenses: [ "Apache-2.0" ]
repository_name: harrychowjackson/automated-radishes
path: src/test.py
size: 612
lang: Python
# coding: utf-8 """ Adobe Experience Manager (AEM) API Swagger AEM is an OpenAPI specification for Adobe Experience Manager (AEM) API OpenAPI spec version: 2.2.0 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class CrxApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def get_crxde_status(self, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_crxde_status(async=True) >>> result = thread.get() :param async bool :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_crxde_status_with_http_info(**kwargs) else: (data) = self.get_crxde_status_with_http_info(**kwargs) return data def get_crxde_status_with_http_info(self, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_crxde_status_with_http_info(async=True) >>> result = thread.get() :param async bool :return: str If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_crxde_status" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['plain/text']) # Authentication setting auth_settings = ['aemAuth'] return self.api_client.call_api('/crx/server/crx.default/jcr:root/.1.json', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='str', auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_install_status(self, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_install_status(async=True) >>> result = thread.get() :param async bool :return: InstallStatus If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_install_status_with_http_info(**kwargs) else: (data) = self.get_install_status_with_http_info(**kwargs) return data def get_install_status_with_http_info(self, **kwargs): """ This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_install_status_with_http_info(async=True) >>> result = thread.get() :param async bool :return: InstallStatus If the method is called asynchronously, returns the request thread. """ all_params = [] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_install_status" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = ['aemAuth'] return self.api_client.call_api('/crx/packmgr/installstatus.jsp', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='InstallStatus', auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def post_package_service(self, cmd, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.post_package_service(cmd, async=True) >>> result = thread.get() :param async bool :param str cmd: (required) :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.post_package_service_with_http_info(cmd, **kwargs) else: (data) = self.post_package_service_with_http_info(cmd, **kwargs) return data def post_package_service_with_http_info(self, cmd, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.post_package_service_with_http_info(cmd, async=True) >>> result = thread.get() :param async bool :param str cmd: (required) :return: str If the method is called asynchronously, returns the request thread. 
""" all_params = ['cmd'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method post_package_service" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'cmd' is set if ('cmd' not in params) or (params['cmd'] is None): raise ValueError("Missing the required parameter `cmd` when calling `post_package_service`") collection_formats = {} path_params = {} query_params = [] if 'cmd' in params: query_params.append(('cmd', params['cmd'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['text/xml']) # Authentication setting auth_settings = ['aemAuth'] return self.api_client.call_api('/crx/packmgr/service.jsp', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='str', auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def post_package_service_json(self, path, cmd, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.post_package_service_json(path, cmd, async=True) >>> result = thread.get() :param async bool :param str path: (required) :param str cmd: (required) :param str group_name: :param str package_name: :param str package_version: :param str charset_: :param bool force: :param bool recursive: :param file package: :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.post_package_service_json_with_http_info(path, cmd, **kwargs) else: (data) = self.post_package_service_json_with_http_info(path, cmd, **kwargs) return data def post_package_service_json_with_http_info(self, path, cmd, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.post_package_service_json_with_http_info(path, cmd, async=True) >>> result = thread.get() :param async bool :param str path: (required) :param str cmd: (required) :param str group_name: :param str package_name: :param str package_version: :param str charset_: :param bool force: :param bool recursive: :param file package: :return: str If the method is called asynchronously, returns the request thread. 
""" all_params = ['path', 'cmd', 'group_name', 'package_name', 'package_version', 'charset_', 'force', 'recursive', 'package'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method post_package_service_json" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'path' is set if ('path' not in params) or (params['path'] is None): raise ValueError("Missing the required parameter `path` when calling `post_package_service_json`") # verify the required parameter 'cmd' is set if ('cmd' not in params) or (params['cmd'] is None): raise ValueError("Missing the required parameter `cmd` when calling `post_package_service_json`") collection_formats = {} path_params = {} if 'path' in params: path_params['path'] = params['path'] query_params = [] if 'cmd' in params: query_params.append(('cmd', params['cmd'])) if 'group_name' in params: query_params.append(('groupName', params['group_name'])) if 'package_name' in params: query_params.append(('packageName', params['package_name'])) if 'package_version' in params: query_params.append(('packageVersion', params['package_version'])) if 'charset_' in params: query_params.append(('_charset_', params['charset_'])) if 'force' in params: query_params.append(('force', params['force'])) if 'recursive' in params: query_params.append(('recursive', params['recursive'])) header_params = {} form_params = [] local_var_files = {} if 'package' in params: local_var_files['package'] = params['package'] body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['multipart/form-data']) # Authentication setting auth_settings = ['aemAuth'] return self.api_client.call_api('/crx/packmgr/service/.json/{path}', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='str', auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def post_package_update(self, group_name, package_name, version, path, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.post_package_update(group_name, package_name, version, path, async=True) >>> result = thread.get() :param async bool :param str group_name: (required) :param str package_name: (required) :param str version: (required) :param str path: (required) :param str filter: :param str charset_: :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.post_package_update_with_http_info(group_name, package_name, version, path, **kwargs) else: (data) = self.post_package_update_with_http_info(group_name, package_name, version, path, **kwargs) return data def post_package_update_with_http_info(self, group_name, package_name, version, path, **kwargs): """ This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.post_package_update_with_http_info(group_name, package_name, version, path, async=True) >>> result = thread.get() :param async bool :param str group_name: (required) :param str package_name: (required) :param str version: (required) :param str path: (required) :param str filter: :param str charset_: :return: str If the method is called asynchronously, returns the request thread. """ all_params = ['group_name', 'package_name', 'version', 'path', 'filter', 'charset_'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method post_package_update" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'group_name' is set if ('group_name' not in params) or (params['group_name'] is None): raise ValueError("Missing the required parameter `group_name` when calling `post_package_update`") # verify the required parameter 'package_name' is set if ('package_name' not in params) or (params['package_name'] is None): raise ValueError("Missing the required parameter `package_name` when calling `post_package_update`") # verify the required parameter 'version' is set if ('version' not in params) or (params['version'] is None): raise ValueError("Missing the required parameter `version` when calling `post_package_update`") # verify the required parameter 'path' is set if ('path' not in params) or (params['path'] is None): raise ValueError("Missing the required parameter `path` when calling `post_package_update`") collection_formats = {} path_params = {} query_params = [] if 'group_name' in params: query_params.append(('groupName', params['group_name'])) if 'package_name' in params: query_params.append(('packageName', params['package_name'])) if 'version' in params: query_params.append(('version', params['version'])) if 'path' in params: query_params.append(('path', params['path'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'charset_' in params: query_params.append(('_charset_', params['charset_'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = ['aemAuth'] return self.api_client.call_api('/crx/packmgr/update.jsp', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='str', auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def post_set_password(self, old, plain, verify, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.post_set_password(old, plain, verify, async=True) >>> result = thread.get() :param async bool :param str old: (required) :param str plain: (required) :param str verify: (required) :return: str If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.post_set_password_with_http_info(old, plain, verify, **kwargs) else: (data) = self.post_set_password_with_http_info(old, plain, verify, **kwargs) return data def post_set_password_with_http_info(self, old, plain, verify, **kwargs): """ This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.post_set_password_with_http_info(old, plain, verify, async=True) >>> result = thread.get() :param async bool :param str old: (required) :param str plain: (required) :param str verify: (required) :return: str If the method is called asynchronously, returns the request thread. """ all_params = ['old', 'plain', 'verify'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method post_set_password" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'old' is set if ('old' not in params) or (params['old'] is None): raise ValueError("Missing the required parameter `old` when calling `post_set_password`") # verify the required parameter 'plain' is set if ('plain' not in params) or (params['plain'] is None): raise ValueError("Missing the required parameter `plain` when calling `post_set_password`") # verify the required parameter 'verify' is set if ('verify' not in params) or (params['verify'] is None): raise ValueError("Missing the required parameter `verify` when calling `post_set_password`") collection_formats = {} path_params = {} query_params = [] if 'old' in params: query_params.append(('old', params['old'])) if 'plain' in params: query_params.append(('plain', params['plain'])) if 'verify' in params: query_params.append(('verify', params['verify'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['text/plain']) # Authentication setting auth_settings = ['aemAuth'] return self.api_client.call_api('/crx/explorer/ui/setpassword.jsp', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='str', auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
avg_line_length: 40.217729
max_line_length: 130
alphanum_fraction: 0.549033
licenses: [ "Apache-2.0" ]
repository_name: hoomaan-kh/swagger-aem
path: clients/python/generated/swaggeraem/apis/crx_api.py
size: 25,860
lang: Python
import json import re from django.http import JsonResponse from django.db.models import Q from django.urls import reverse from django.views.decorators.csrf import csrf_exempt from apps.vit.models import Vit from apps.notification.utilities import notify from apps.vit.utilities import find_mention, find_plustag from .forms import VitForm from apps.develop.backends import KeyBackend def like(request): user = KeyBackend().authenticate(request) if request.user.is_authenticated: try: vit = Vit.objects.get(id=request.GET.get('vit_pk')) if vit.likes.filter(id=request.user.id).exists(): vit.likes.remove(request.user) vit.like_count -= 1 vit.save() return JsonResponse({'status': 'success', 'likes': vit.likes.count(), 'liked': False}, status=200) else: vit.likes.add(request.user) vit.like_count += 1 vit.save() if vit.user != request.user: notify(message=f"{request.user.username.title()} liked your Vit - '{vit.body}'", notification_type="like", to_user=vit.user, by_user=request.user, link=reverse('vit_detail', kwargs={'pk': vit.id})) return JsonResponse({'status': 'success', 'likes': vit.likes.count(), 'liked': True}, status=200) except Vit.DoesNotExist: return JsonResponse({'error': 'Vit not found'}, status=404) else: return JsonResponse({'error': 'You must be logged in'}, status=401) def get_vits(request): user = KeyBackend().authenticate(request) if request.user.is_authenticated: vits = Vit.objects.filter(Q(user=request.user) | Q( user__profile__in=request.user.profile.follows.all()) | Q(user__profile__in=request.user.profile.followed_by.all())).order_by('-date') return JsonResponse({'vits': [vit.to_json() for vit in vits]}) else: return JsonResponse({'error': 'You must be logged in'}, status=401) def get_vit(request, id): user = KeyBackend().authenticate(request) if request.user.is_authenticated: try: vit = Vit.objects.get(id=id) return JsonResponse({'vit': vit.to_json()}, status=200) except: return JsonResponse({'error': 'Vit not found'}, status=404) else: return JsonResponse({'error': 'You must be logged in'}, status=401) @csrf_exempt def add_vit(request): """ Add a new vit with API, currently image and video are not supported """ user = KeyBackend().authenticate(request) if request.method == "POST": if request.user.is_authenticated: form = VitForm(request.POST) if form.is_valid(): vit = form.save(commit=False) vit.user = request.user vit.save() return JsonResponse({'status': 'success', 'vit': vit.to_json()}, status=201) else: return JsonResponse({'error': 'No vit body provided'}, status=400) else: return JsonResponse({'error': 'You must be logged in'}, status=401) else: return JsonResponse({'error': 'Invalid request'}, status=400) @csrf_exempt def edit_vit(request): """ Edit a vit with API """ user = KeyBackend().authenticate(request) if request.method == "POST": if request.user.is_authenticated: try: vit = Vit.objects.get(id=request.POST.get('vit_pk')) if vit.user == request.user: form = VitForm(request.POST, instance=vit) if form.is_valid(): vit = form.save(commit=False) vit.save() return JsonResponse({'status': 'success', 'vit': vit.to_json()}, status=201) else: return JsonResponse({'error': 'No vit body provided'}, status=400) else: return JsonResponse({'error': 'You do not have permission to edit this vit'}, status=403) except Vit.DoesNotExist: return JsonResponse({'error': 'Vit not found'}, status=404) else: return JsonResponse({'error': 'You must be logged in'}, status=401) else: return JsonResponse({'error': 'Invalid request'}, status=400) @csrf_exempt def delete_vit(request): """ 
Delete a vit with API """ user = KeyBackend().authenticate(request) if request.method == "POST": if request.user.is_authenticated: try: vit = Vit.objects.get(id=request.POST.get('vit_pk')) if vit.user == request.user: vit.delete() return JsonResponse({'status': 'success'}, status=200) else: return JsonResponse({'error': 'You do not have permission to delete this vit'}, status=403) except Vit.DoesNotExist: return JsonResponse({'error': 'Vit not found'}, status=404) else: return JsonResponse({'error': 'You must be logged in'}, status=401) else: return JsonResponse({'error': 'Invalid request'}, status=400)
avg_line_length: 39.111111
max_line_length: 146
alphanum_fraction: 0.586174
licenses: [ "BSD-3-Clause" ]
repository_name: Visualway/Vitary
path: apps/vit/api.py
size: 5,280
lang: Python
import math import torch import torch.nn as nn import torch.nn.functional as F #-------------------------------------------------# # MISH激活函数 #-------------------------------------------------# class Mish(nn.Module): def __init__(self): super(Mish, self).__init__() def forward(self, x): return x * torch.tanh(F.softplus(x)) #---------------------------------------------------# # 卷积块 -> 卷积 + 标准化 + 激活函数 # Conv2d + BatchNormalization + Mish #---------------------------------------------------# class CBM(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1): super(CBM, self).__init__() #pad = kernel_size//2,表示1x1卷积不补零,3x3卷积补一圈0,这样输出图片的尺寸不会改变 self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, kernel_size//2, bias=False) self.bn = nn.BatchNorm2d(out_channels) self.activation = Mish() def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.activation(x) return x #---------------------------------------------------# # CSPdarknet的结构块的组成部分 # 内部堆叠的残差块 #---------------------------------------------------# class Resblock(nn.Module): def __init__(self, channels, hidden_channels=None): super(Resblock, self).__init__() if hidden_channels is None: hidden_channels = channels self.block = nn.Sequential( CBM(channels, hidden_channels, 1), CBM(hidden_channels, channels, 3) ) def forward(self, x): return x + self.block(x) #--------------------------------------------------------------------# # CSPdarknet的结构块 # 首先利用ZeroPadding2D和一个步长为2x2的卷积块进行高和宽的压缩 # 然后建立一个大的残差边shortconv、这个大残差边绕过了很多的残差结构 # 主干部分会对num_blocks进行循环,循环内部是残差结构。 # 对于整个CSPdarknet的结构块,就是一个大残差块+内部多个小残差块 #--------------------------------------------------------------------# class Resblock_body(nn.Module): def __init__(self, in_channels, out_channels, num_blocks, first): super(Resblock_body, self).__init__() #----------------------------------------------------------------# # 利用一个步长为2x2的卷积块进行高和宽的压缩 #----------------------------------------------------------------# self.downsample_conv = CBM(in_channels, out_channels, 3, stride=2) if first: #--------------------------------------------------------------------------# # 然后建立一个大的残差边self.split_conv0、这个大残差边绕过了很多的残差结构 #--------------------------------------------------------------------------# self.split_conv0 = CBM(out_channels, out_channels, 1) #----------------------------------------------------------------# # 主干部分会对num_blocks进行循环,循环内部是残差结构。 #----------------------------------------------------------------# self.split_conv1 = CBM(out_channels, out_channels, 1) self.blocks_conv = nn.Sequential( Resblock(channels=out_channels, hidden_channels=out_channels//2), CBM(out_channels, out_channels, 1) ) self.concat_conv = CBM(out_channels*2, out_channels, 1) else: #--------------------------------------------------------------------------# # 然后建立一个大的残差边self.split_conv0、这个大残差边绕过了很多的残差结构 #--------------------------------------------------------------------------# self.split_conv0 = CBM(out_channels, out_channels//2, 1) #----------------------------------------------------------------# # 主干部分会对num_blocks进行循环,循环内部是残差结构。 #----------------------------------------------------------------# self.split_conv1 = CBM(out_channels, out_channels//2, 1) self.blocks_conv = nn.Sequential( *[Resblock(out_channels//2) for _ in range(num_blocks)], CBM(out_channels//2, out_channels//2, 1) ) self.concat_conv = CBM(out_channels, out_channels, 1) def forward(self, x): x = self.downsample_conv(x) x0 = self.split_conv0(x) x1 = self.split_conv1(x) x1 = self.blocks_conv(x1) 
#------------------------------------# # 将大残差边再堆叠回来 #------------------------------------# x = torch.cat([x1, x0], dim=1) #------------------------------------# # 最后对通道数进行整合 #------------------------------------# x = self.concat_conv(x) return x #---------------------------------------------------# # CSPdarknet53 的主体部分 # 输入为一张416x416x3的图片 # 输出为三个有效特征层 #---------------------------------------------------# class CSPDarkNet(nn.Module): def __init__(self, layers): super(CSPDarkNet, self).__init__() self.inplanes = 32 # 416,416,3 -> 416,416,32 self.conv1 = CBM(3, self.inplanes, kernel_size=3, stride=1) self.feature_channels = [64, 128, 256, 512, 1024] self.stages = nn.ModuleList([ # 416,416,32 -> 208,208,64 Resblock_body(self.inplanes, self.feature_channels[0], layers[0], first=True), # 208,208,64 -> 104,104,128 Resblock_body(self.feature_channels[0], self.feature_channels[1], layers[1], first=False), # 104,104,128 -> 52,52,256 Resblock_body(self.feature_channels[1], self.feature_channels[2], layers[2], first=False), # 52,52,256 -> 26,26,512 Resblock_body(self.feature_channels[2], self.feature_channels[3], layers[3], first=False), # 26,26,512 -> 13,13,1024 Resblock_body(self.feature_channels[3], self.feature_channels[4], layers[4], first=False) ]) self.num_features = 1 for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, x): x = self.conv1(x) x = self.stages[0](x) x = self.stages[1](x) out3 = self.stages[2](x) out4 = self.stages[3](out3) out5 = self.stages[4](out4) return out3, out4, out5 def darknet53(pretrained, **kwargs): model = CSPDarkNet([1, 2, 8, 8, 4]) if pretrained: if isinstance(pretrained, str): model.load_state_dict(torch.load(pretrained)) else: raise Exception("darknet request a pretrained path. got [{}]".format(pretrained)) return model
avg_line_length: 37.410112
max_line_length: 105
alphanum_fraction: 0.463583
licenses: [ "MIT" ]
repository_name: Arcofcosmos/MyYolov4_Pytorch
path: .history/nets/CSPdarknet_20210816140029.py
size: 7,299
lang: Python
# -*- coding: utf-8 -*-

# Scrapy settings for mySpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'mySpider'

SPIDER_MODULES = ['mySpider.spiders']
NEWSPIDER_MODULE = 'mySpider.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'mySpider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'mySpider.middlewares.MyspiderSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'mySpider.middlewares.MyspiderDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'mySpider.pipelines.MyspiderPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
avg_line_length: 33.945055
max_line_length: 102
alphanum_fraction: 0.775332
licenses: [ "MIT" ]
repository_name: zxallen/spider
path: mySpider/mySpider/settings.py
size: 3,089
lang: Python
# Generated by Django 3.1.2 on 2020-12-07 05:13

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('zooniverse', '0006_auto_20201207_0713'),
    ]

    operations = [
        migrations.CreateModel(
            name='FinalAnnotation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.CharField(blank=True, max_length=255)),
                ('answer', models.TextField(blank=True)),
                ('retirement', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='zooniverse.retirement')),
            ],
        ),
    ]
avg_line_length: 29.884615
max_line_length: 123
alphanum_fraction: 0.615187
licenses: [ "MIT" ]
repository_name: karilint/cradle_of_mankind
path: quality_control/migrations/0001_initial.py
size: 777
lang: Python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Code from: https://github.com/michailbrynard/ethereum-bip44-python This submodule provides the PublicKey, PrivateKey, and Signature classes. It also provides HDPublicKey and HDPrivateKey classes for working with HD wallets.""" import os import math import codecs import random import base58 import base64 import binascii import hashlib import hmac from mnemonic.mnemonic import Mnemonic from pywallet.crypto.ecdsa_base import Point from pywallet.crypto.ecdsa import ECPointAffine from pywallet.crypto.ecdsa import secp256k1 bitcoin_curve = secp256k1() from Crypto.Hash import keccak sha3_256 = lambda x: keccak.new(digest_bits=256, data=x) def sha3(seed): return sha3_256(seed).digest() def get_bytes(s): """Returns the byte representation of a hex- or byte-string.""" if isinstance(s, bytes): b = s elif isinstance(s, str): b = bytes.fromhex(s) else: raise TypeError("s must be either 'bytes' or 'str'!") return b class PrivateKeyBase(object): """ Base class for both PrivateKey and HDPrivateKey. As this class is a base class it should not be used directly. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ raise NotImplementedError def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ raise NotImplementedError def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ raise NotImplementedError def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. 
Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ raise NotImplementedError def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ raise NotImplementedError def to_hex(self): """ Generates a hex encoding of the serialized key. Returns: str: A hex encoded string representing the key. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError class PublicKeyBase(object): """ Base class for both PublicKey and HDPublicKey. As this class is a base class it should not be used directly. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. Args: key_bytes (bytes or str): A byte stream. Returns: PublicKey: A PublicKey object. """ raise NotImplementedError @staticmethod def from_private_key(private_key): """ Generates a public key object from a PrivateKey object. Args: private_key (PrivateKey): The private key object from which to derive this object. Returns: PublicKey: A PublicKey object. """ return private_key.public_key def __init__(self): pass def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ raise NotImplementedError def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ raise NotImplementedError def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ raise NotImplementedError def to_hex(self): """ Hex representation of the serialized byte stream. Returns: h (str): A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def __bytes__(self): raise NotImplementedError def __int__(self): raise NotImplementedError @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ raise NotImplementedError class PrivateKey(PrivateKeyBase): """ Encapsulation of a Bitcoin ECDSA private key. 
This class provides capability to generate private keys, obtain the corresponding public key, sign messages and serialize/deserialize into a variety of formats. Args: k (int): The private key. Returns: PrivateKey: The object representing the private key. """ TESTNET_VERSION = 0xEF MAINNET_VERSION = 0x80 @staticmethod def from_bytes(b): """ Generates PrivateKey from the underlying bytes. Args: b (bytes): A byte stream containing a 256-bit (32-byte) integer. Returns: tuple(PrivateKey, bytes): A PrivateKey object and the remainder of the bytes. """ if len(b) < 32: raise ValueError('b must contain at least 32 bytes') return PrivateKey(int.from_bytes(b[:32], 'big')) @staticmethod def from_hex(h): """ Generates PrivateKey from a hex-encoded string. Args: h (str): A hex-encoded string containing a 256-bit (32-byte) integer. Returns: PrivateKey: A PrivateKey object. """ return PrivateKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_int(i): """ Initializes a private key from an integer. Args: i (int): Integer that is the private key. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(i) @staticmethod def from_b58check(private_key): """ Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object """ b58dec = base58.b58decode_check(private_key) version = b58dec[0] assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION] return PrivateKey(int.from_bytes(b58dec[1:], 'big')) @staticmethod def from_random(): """ Initializes a private key from a random integer. Returns: PrivateKey: The object representing the private key. """ return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n)) def __init__(self, k): self.key = k self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: PublicKey: The PublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = PublicKey.from_point( bitcoin_curve.public_key(self.key)) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using this private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ if isinstance(message, str): msg = bytes(message, 'ascii') elif isinstance(message, bytes): msg = message else: raise TypeError("message must be either str or bytes!") sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash) # Take care of large s: # Bitcoin deals with large s, by subtracting # s from the curve order. See: # https://bitcointalk.org/index.php?topic=285142.30;wap2 if sig_pt.y >= (bitcoin_curve.n // 2): sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y) rec_id ^= 0x1 return (sig_pt, rec_id) def sign(self, message, do_hash=True): """ Signs message using this private key. Note: This differs from `raw_sign()` since it returns a Signature object. Args: message (bytes or str): The message to be signed. 
If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ # Some BTC things want to have the recovery id to extract the public # key, so we should figure that out. sig_pt, rec_id = self.raw_sign(message, do_hash) return Signature(sig_pt.x, sig_pt.y, rec_id) def sign_bitcoin(self, message, compressed=False): """ Signs a message using this private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. """ if isinstance(message, str): msg_in = bytes(message, 'ascii') elif isinstance(message, bytes): msg_in = message else: raise TypeError("message must be either str or bytes!") msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in msg_hash = hashlib.sha256(msg).digest() sig = self.sign(msg_hash) comp_adder = 4 if compressed else 0 magic = 27 + sig.recovery_id + comp_adder return base64.b64encode(bytes([magic]) + bytes(sig)) def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this private key. Returns: str: A Base58Check encoded string representing the key. """ version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION return base58.b58encode_check(bytes([version]) + bytes(self)) def __bytes__(self): return self.key.to_bytes(32, 'big') def __int__(self): return self.key class PublicKey(PublicKeyBase): """ Encapsulation of a Bitcoin ECDSA public key. This class provides a high-level API to using an ECDSA public key, specifically for Bitcoin (secp256k1) purposes. Args: x (int): The x component of the public key point. y (int): The y component of the public key point. Returns: PublicKey: The object representing the public key. """ TESTNET_VERSION = 0x6F MAINNET_VERSION = 0x00 @staticmethod def from_point(p): """ Generates a public key object from any object containing x, y coordinates. Args: p (Point): An object containing a two-dimensional, affine representation of a point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. """ return PublicKey(p.x, p.y) @staticmethod def from_int(i): """ Generates a public key object from an integer. Note: This assumes that the upper 32 bytes of the integer are the x component of the public key point and the lower 32 bytes are the y component. Args: i (Bignum): A 512-bit integer representing the public key point on the secp256k1 curve. Returns: PublicKey: A PublicKey object. 
""" point = ECPointAffine.from_int(bitcoin_curve, i) return PublicKey.from_point(point) @staticmethod def from_base64(b64str, testnet=False): """ Generates a public key object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. testnet (bool) (Optional): If True, changes the version that is prepended to the key. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(key_bytes): """ Generates a public key object from a byte (or hex) string. The byte stream must be of the SEC variety (http://www.secg.org/): beginning with a single byte telling what key representation follows. A full, uncompressed key is represented by: 0x04 followed by 64 bytes containing the x and y components of the point. For compressed keys with an even y component, 0x02 is followed by 32 bytes containing the x component. For compressed keys with an odd y component, 0x03 is followed by 32 bytes containing the x component. Args: key_bytes (bytes or str): A byte stream that conforms to the above. Returns: PublicKey: A PublicKey object. """ b = get_bytes(key_bytes) key_bytes_len = len(b) key_type = b[0] if key_type == 0x04: # Uncompressed if key_bytes_len != 65: raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.") x = int.from_bytes(b[1:33], 'big') y = int.from_bytes(b[33:65], 'big') elif key_type == 0x02 or key_type == 0x03: if key_bytes_len != 33: raise ValueError("key_bytes must be exactly 33 bytes long when compressed.") x = int.from_bytes(b[1:33], 'big') ys = bitcoin_curve.y_from_x(x) # Pick the one that corresponds to key_type last_bit = key_type - 0x2 for y in ys: if y & 0x1 == last_bit: break else: return None return PublicKey(x, y) @staticmethod def from_hex(h): """ Generates a public key object from a hex-encoded string. See from_bytes() for requirements of the hex string. Args: h (str): A hex-encoded string. Returns: PublicKey: A PublicKey object. """ return PublicKey.from_bytes(h) @staticmethod def from_signature(message, signature): """ Attempts to create PublicKey object by deriving it from the message and signature. Args: message (bytes): The message to be verified. signature (Signature): The signature for message. The recovery_id must not be None! Returns: PublicKey: A PublicKey object derived from the signature, it it exists. None otherwise. """ if signature.recovery_id is None: raise ValueError("The signature must have a recovery_id.") msg = get_bytes(message) pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id) for k, recid in pub_keys: if signature.recovery_id is not None and recid == signature.recovery_id: return PublicKey(k.x, k.y) return None @staticmethod def verify_bitcoin(message, signature, address): """ Verifies a message signed using PrivateKey.sign_bitcoin() or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.) Args: message(bytes): The message that the signature corresponds to. signature (bytes or str): A Base64 encoded signature address (str): Base58Check encoded address. Returns: bool: True if the signature verified properly, False otherwise. 
""" magic_sig = base64.b64decode(signature) magic = magic_sig[0] sig = Signature.from_bytes(magic_sig[1:]) sig.recovery_id = (magic - 27) & 0x3 compressed = ((magic - 27) & 0x4) != 0 # Build the message that was signed msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message msg_hash = hashlib.sha256(msg).digest() derived_public_key = PublicKey.from_signature(msg_hash, sig) if derived_public_key is None: raise ValueError("Could not recover public key from the provided signature.") n = base58.b58decode_check(address) version = n[0] h160 = n[1:] hash160 = derived_public_key.hash160(compressed) if hash160 != h160: return False return derived_public_key.verify(msg_hash, sig) def __init__(self, x, y): p = ECPointAffine(bitcoin_curve, x, y) if not bitcoin_curve.is_on_curve(p): raise ValueError("The provided (x, y) are not on the secp256k1 curve.") self.point = p # RIPEMD-160 of SHA-256 r = hashlib.new('ripemd160') r.update(hashlib.sha256(bytes(self)).digest()) self.ripe = r.digest() r = hashlib.new('ripemd160') r.update(hashlib.sha256(self.compressed_bytes).digest()) self.ripe_compressed = r.digest() self.keccak = sha3(bytes(self)[1:]) def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the public key. Args: compressed (bool): Whether or not the compressed key should be used. Returns: bytes: RIPEMD-160 byte string. """ return self.ripe_compressed if compressed else self.ripe def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii') # Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet # version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION]) # return base58.b58encode_check(version + self.hash160(compressed)) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ msg = get_bytes(message) return bitcoin_curve.verify(msg, signature, self.point, do_hash) def to_base64(self): """ Hex representation of the serialized byte stream. Returns: b (str): A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __int__(self): mask = 2 ** 256 - 1 return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask) def __bytes__(self): return bytes(self.point) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self.point.compressed_bytes class Signature(object): """ Encapsulation of a ECDSA signature for Bitcoin purposes. Args: r (Bignum): r component of the signature. s (Bignum): s component of the signature. 
recovery_id (int) (Optional): Must be between 0 and 3 specifying which of the public keys generated by the algorithm specified in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key Recovery Operation) is the correct one for this signature. Returns: sig (Signature): A Signature object. """ @staticmethod def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s) @staticmethod def from_base64(b64str): """ Generates a signature object from a Base64 encoded string. Args: b64str (str): A Base64-encoded string. Returns: Signature: A Signature object. """ return Signature.from_bytes(base64.b64decode(b64str)) @staticmethod def from_bytes(b): """ Extracts the r and s components from a byte string. Args: b (bytes): A 64-byte long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. Raises: ValueError: If signature is incorrect length """ if len(b) != 64: raise ValueError("from_bytes: Signature length != 64.") r = int.from_bytes(b[0:32], 'big') s = int.from_bytes(b[32:64], 'big') return Signature(r, s) @staticmethod def from_hex(h): """ Extracts the r and s components from a hex-encoded string. Args: h (str): A 64-byte (128 character) long string. The first 32 bytes are extracted as the r component and the second 32 bytes are extracted as the s component. Returns: Signature: A Signature object. 
""" return Signature.from_bytes(bytes.fromhex(h)) def __init__(self, r, s, recovery_id=None): self.r = r self.s = s self.recovery_id = recovery_id @property def x(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.r @property def y(self): """ Convenience property for any method that requires this object to provide a Point interface. """ return self.s def _canonicalize(self): rv = [] for x in [self.r, self.s]: # Compute minimum bytes to represent integer bl = math.ceil(x.bit_length() / 8) # Make sure it's at least one byte in length if bl == 0: bl += 1 x_bytes = x.to_bytes(bl, 'big') # make sure there's no way it could be interpreted # as a negative integer if x_bytes[0] & 0x80: x_bytes = bytes([0]) + x_bytes rv.append(x_bytes) return rv def to_der(self): """ Encodes this signature using DER Returns: bytes: The DER encoding of (self.r, self.s). """ # Output should be: # 0x30 <length> 0x02 <length r> r 0x02 <length s> s r, s = self._canonicalize() total_length = 6 + len(r) + len(s) der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s return der def to_hex(self): """ Hex representation of the serialized byte stream. Returns: str: A hex-encoded string. """ return codecs.encode(bytes(self), 'hex_codec').decode('ascii') def to_base64(self): """ Hex representation of the serialized byte stream. Returns: str: A Base64-encoded string. """ return base64.b64encode(bytes(self)) def __bytes__(self): nbytes = math.ceil(bitcoin_curve.nlen / 8) return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big') class HDKey(object): """ Base class for HDPrivateKey and HDPublicKey. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ @staticmethod def from_b58check(key): """ Decodes a Base58Check encoded key. The encoding must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: key (str): A Base58Check encoded key. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(base58.b58decode_check(key)) @staticmethod def from_bytes(b): """ Generates either a HDPrivateKey or HDPublicKey from the underlying bytes. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: b (bytes): A byte stream conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. 
""" if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return rv @staticmethod def from_hex(h): """ Generates either a HDPrivateKey or HDPublicKey from the underlying hex-encoded string. The serialization must conform to the description in: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format Args: h (str): A hex-encoded string conforming to the above. Returns: HDPrivateKey or HDPublicKey: Either an HD private or public key object, depending on what was serialized. """ return HDKey.from_bytes(bytes.fromhex(h)) @staticmethod def from_path(root_key, path): p = HDKey.parse_path(path) if p[0] == "m": if root_key.master: p = p[1:] else: raise ValueError("root_key must be a master key if 'm' is the first element of the path.") keys = [root_key] for i in p: if isinstance(i, str): hardened = i[-1] == "'" index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0) else: index = i k = keys[-1] klass = k.__class__ keys.append(klass.from_parent(k, index)) return keys @staticmethod def parse_path(path): if isinstance(path, str): # Remove trailing "/" p = path.rstrip("/").split("/") elif isinstance(path, bytes): p = path.decode('utf-8').rstrip("/").split("/") else: p = list(path) return p @staticmethod def path_from_indices(l): p = [] for n in l: if n == "m": p.append(n) else: if n & 0x80000000: _n = n & 0x7fffffff p.append(str(_n) + "'") else: p.append(str(n)) return "/".join(p) def __init__(self, key, chain_code, index, depth, parent_fingerprint): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") if not isinstance(chain_code, bytes): raise TypeError("chain_code must be bytes") self._key = key self.chain_code = chain_code self.depth = depth self.index = index self.parent_fingerprint = get_bytes(parent_fingerprint) @property def master(self): """ Whether or not this is a master node. Returns: bool: True if this is a master node, False otherwise. """ return self.depth == 0 @property def hardened(self): """ Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000. Returns: bool: True if this is hardened, False otherwise. """ # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000 @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: A 20-byte RIPEMD-160 hash. 
""" raise NotImplementedError @property def fingerprint(self): """ Returns the key's fingerprint, which is the first 4 bytes of its identifier. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers Returns: bytes: The first 4 bytes of the RIPEMD-160 hash. """ return self.identifier[:4] def to_b58check(self, testnet=False): """ Generates a Base58Check encoding of this key. Args: testnet (bool): True if the key is to be used with testnet, False otherwise. Returns: str: A Base58Check encoded string representing the key. """ b = self.testnet_bytes if testnet else bytes(self) return base58.b58encode_check(b) def _serialize(self, testnet=False): version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key) return (version.to_bytes(length=4, byteorder='big') + bytes([self.depth]) + self.parent_fingerprint + self.index.to_bytes(length=4, byteorder='big') + self.chain_code + key_bytes) def __bytes__(self): return self._serialize() @property def testnet_bytes(self): """ Serialization of the key for testnet. Returns: bytes: A 78-byte serialization of the key, specifically for testnet (i.e. the first 2 bytes will be 0x0435). """ return self._serialize(True) class HDPrivateKey(HDKey, PrivateKeyBase): """ Implements an HD Private Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the 3 static functions (HDPrivateKey.master_key_from_entropy, HDPrivateKey.master_key_from_seed and HDPrivateKey.from_parent) will be used rather than directly constructing an object. Args: key (PrivateKey or PublicKey): The underlying simple private or public key that is used to sign/verify. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDKey: An HDKey object. """ MAINNET_VERSION = 0x0488ADE4 TESTNET_VERSION = 0x04358394 @staticmethod def master_key_from_mnemonic(mnemonic, passphrase=''): """ Generates a master key from a mnemonic. Args: mnemonic (str): The mnemonic sentence representing the seed from which to generate the master key. passphrase (str): Password if one was used. Returns: HDPrivateKey: the master private key. """ return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(mnemonic, passphrase)) @staticmethod def master_key_from_entropy(passphrase='', strength=128): """ Generates a master key from system entropy. Args: strength (int): Amount of entropy desired. This should be a multiple of 32 between 128 and 256. passphrase (str): An optional passphrase for the generated mnemonic string. Returns: HDPrivateKey, str: a tuple consisting of the master private key and a mnemonic string from which the seed can be recovered. 
""" if strength % 32 != 0: raise ValueError("strength must be a multiple of 32") if strength < 128 or strength > 256: raise ValueError("strength should be >= 128 and <= 256") entropy = os.urandom(strength // 8) m = Mnemonic(language='english') n = m.to_mnemonic(entropy) return HDPrivateKey.master_key_from_seed( Mnemonic.to_seed(n, passphrase)), n @staticmethod def master_key_from_seed(seed): """ Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key. """ S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0) @staticmethod def from_parent(parent_key, i): """ Derives a child private key from a parent private key. It is not possible to derive a child private key from a public parent key. Args: parent_private_key (HDPrivateKey): """ if not isinstance(parent_key, HDPrivateKey): raise TypeError("parent_key must be an HDPrivateKey object.") hmac_key = parent_key.chain_code if i & 0x80000000: hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big') else: hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big') I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n if child_key == 0: # Incredibly unlucky choice return None child_depth = parent_key.depth + 1 return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) def __init__(self, key, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): if index < 0 or index > 0xffffffff: raise ValueError("index is out of range: 0 <= index <= 2**32 - 1") private_key = PrivateKey(key) HDKey.__init__(self, private_key, chain_code, index, depth, parent_fingerprint) self._public_key = None @property def public_key(self): """ Returns the public key associated with this private key. Returns: HDPublicKey: The HDPublicKey object that corresponds to this private key. """ if self._public_key is None: self._public_key = HDPublicKey(x=self._key.public_key.point.x, y=self._key.public_key.point.y, chain_code=self.chain_code, index=self.index, depth=self.depth, parent_fingerprint=self.parent_fingerprint) return self._public_key def raw_sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Args: message (bytes): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: ECPointAffine: a raw point (r = pt.x, s = pt.y) which is the signature. """ return self._key.raw_sign(message, do_hash) def sign(self, message, do_hash=True): """ Signs message using the underlying non-extended private key. Note: This differs from `raw_sign()` since it returns a Signature object. 
Args: message (bytes or str): The message to be signed. If a string is provided it is assumed the encoding is 'ascii' and converted to bytes. If this is not the case, it is up to the caller to convert the string to bytes appropriately and pass in the bytes. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: Signature: The signature corresponding to message. """ return self._key.sign(message, do_hash) def sign_bitcoin(self, message, compressed=False): """ Signs a message using the underlying non-extended private key such that it is compatible with bitcoind, bx, and other Bitcoin clients/nodes/utilities. Note: 0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is prepended to the message before signing. Args: message (bytes or str): Message to be signed. compressed (bool): True if the corresponding public key will be used in compressed format. False if the uncompressed version is used. Returns: bytes: A Base64-encoded byte string of the signed message. The first byte of the encoded message contains information about how to recover the public key. In bitcoind parlance, this is the magic number containing the recovery ID and whether or not the key was compressed or not. (This function always processes full, uncompressed public-keys, so the magic number will always be either 27 or 28). """ return self._key.sign_bitcoin(message, compressed) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the corresponding public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.public_key.hash160() def __int__(self): return int(self.key) class HDPublicKey(HDKey, PublicKeyBase): """ Implements an HD Public Key according to BIP-0032: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki For the vast majority of use cases, the static function HDPublicKey.from_parent() will be used rather than directly constructing an object. Args: x (int): x component of the point representing the public key. y (int): y component of the point representing the public key. chain_code (bytes): The chain code associated with the HD key. depth (int): How many levels below the master node this key is. By definition, depth = 0 for the master node. index (int): A value between 0 and 0xffffffff indicating the child number. Values >= 0x80000000 are considered hardened children. parent_fingerprint (bytes): The fingerprint of the parent node. This is 0x00000000 for the master node. Returns: HDPublicKey: An HDPublicKey object. 
""" MAINNET_VERSION = 0x0488B21E TESTNET_VERSION = 0x043587CF @staticmethod def from_parent(parent_key, i): """ """ if isinstance(parent_key, HDPrivateKey): # Get child private key return HDPrivateKey.from_parent(parent_key, i).public_key elif isinstance(parent_key, HDPublicKey): if i & 0x80000000: raise ValueError("Can't generate a hardened child key from a parent public key.") else: I = hmac.new(parent_key.chain_code, parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'), hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il >= bitcoin_curve.n: return None temp_priv_key = PrivateKey(parse_Il) Ki = temp_priv_key.public_key.point + parent_key._key.point if Ki.infinity: return None child_depth = parent_key.depth + 1 return HDPublicKey(x=Ki.x, y=Ki.y, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint) else: raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object") def __init__(self, x, y, chain_code, index, depth, parent_fingerprint=b'\x00\x00\x00\x00'): key = PublicKey(x, y) HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint) PublicKeyBase.__init__(self) @property def identifier(self): """ Returns the identifier for the key. A key's identifier and fingerprint are defined as: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers In this case, it will return the RIPEMD-160 hash of the non-extended public key. Returns: bytes: A 20-byte RIPEMD-160 hash. """ return self.hash160() def hash160(self, compressed=True): """ Return the RIPEMD-160 hash of the SHA-256 hash of the non-extended public key. Note: This always returns the hash of the compressed version of the public key. Returns: bytes: RIPEMD-160 byte string. """ return self._key.hash160(True) def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ return self._key.address(True, testnet) def verify(self, message, signature, do_hash=True): """ Verifies that message was appropriately signed. Args: message (bytes): The message to be verified. signature (Signature): A signature object. do_hash (bool): True if the message should be hashed prior to signing, False if not. This should always be left as True except in special situations which require doing the hash outside (e.g. handling Bitcoin bugs). Returns: verified (bool): True if the signature is verified, False otherwise. """ return self._key.verify(message, signature, do_hash) @property def compressed_bytes(self): """ Byte string corresponding to a compressed representation of this public key. Returns: b (bytes): A 33-byte long byte string. """ return self._key.compressed_bytes
33.901889
111
0.590135
[ "MIT" ]
ukor/pywallet
pywallet/utils/ethereum.py
55,633
Python
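A brief, hedged usage sketch of the key classes defined in the file above. The import path simply mirrors the repository path given in the metadata (pywallet/utils/ethereum.py) and the BIP-44 derivation path is an illustrative choice; neither is prescribed by the file itself.

# Sketch only: assumes the module and its crypto/mnemonic dependencies are
# installed and importable under the repository layout shown above.
from pywallet.utils.ethereum import HDPrivateKey, HDKey

# Generate a master key together with the mnemonic that can recover it
# (HDPrivateKey.master_key_from_mnemonic(seed_phrase) would rebuild it).
master_key, seed_phrase = HDPrivateKey.master_key_from_entropy()

# Walk a BIP-44 style path; from_path returns every node along the way.
keys = HDKey.from_path(master_key, "m/44'/60'/0'/0/0")
leaf = keys[-1]

# address() on the public key returns the 0x-prefixed, keccak-derived address.
print(leaf.public_key.address())

# Sign and verify an arbitrary message with the underlying ECDSA key.
signature = leaf.sign(b"hello")
print(leaf.public_key.verify(b"hello", signature))  # expected: True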
# -*- coding: utf-8 -*- from tkinter import ( Frame, LabelFrame, Text, Scrollbar, Button, Label, RIDGE, W, E, N, S, FLAT, CENTER, SUNKEN, END, INSERT, ) from tkinter.simpledialog import askinteger, askstring from tkinter.messagebox import askquestion, showinfo, YES, WARNING from pdp8 import pdp8 class Emulatore(object): """ Interfaccia grafica per l'emulatore del pdp8 """ def __init__(self, master, codice, calcolatore, emulatore): """ Inizializza i frame per l'interfaccia dell'emulatore """ self.CD = calcolatore self.codice = codice self.delay = 100 self.master = Frame(master) self.root = emulatore # Memoria Ram self.ram = LabelFrame( self.master, text="Memoria RAM", relief=RIDGE, borderwidth=5, labelanchor="n", pady=5, ) self.ram.rowconfigure(0, weight=1) self.ram.columnconfigure(0, weight=1) self.ram.grid(row=0, column=0, rowspan=3, columnspan=5, sticky=W + E + N + S) # Controlli self.controlli = Frame(self.master, padx=10, pady=10) self.controlli.grid(row=0, column=5, rowspan=1) # Status CD self.registri = LabelFrame( self.master, text="REGISTRI", relief=RIDGE, borderwidth=5, labelanchor="n", padx=25, pady=10, ) self.registri.grid(row=0, column=6, rowspan=1, sticky=W + E + N + S) self.unita = LabelFrame( self.master, text="UC", relief=RIDGE, borderwidth=5, labelanchor="n", padx=10, pady=10, ) self.unita.grid(row=2, column=6, rowspan=1, sticky=N) # Var self.variabili = Frame(self.master) self.variabili.grid(row=2, column=5) self.nstep = LabelFrame( self.variabili, text="Num. Step", relief=RIDGE, borderwidth=5, labelanchor="n", ) self.nstep.grid(row=0, column=5, sticky=W + E) self.delays = LabelFrame( self.variabili, text="Delay", relief=RIDGE, borderwidth=5, labelanchor="n" ) self.delays.grid(row=1, column=5, sticky=W + E) self.tempo = LabelFrame( self.variabili, text="Tempo", relief=RIDGE, borderwidth=5, labelanchor="n" ) self.tempo.grid(row=1, column=6, sticky=W + E) # Unita' di controllo self.unitas = LabelFrame(self.unita, text="S", labelanchor="s", padx=10) self.unitas.grid(row=0, column=0, sticky=N) self.unitaf = LabelFrame(self.unita, text="F", labelanchor="s", padx=10) self.unitaf.grid(row=0, column=1, sticky=N) self.unitar = LabelFrame(self.unita, text="R", labelanchor="s", padx=10) self.unitar.grid(row=0, column=2, sticky=N) self.unitaint = LabelFrame(self.unita, text="Int.", labelanchor="s", padx=10) self.unitaint.grid(row=0, column=3, sticky=N) # Registri self.programc = LabelFrame( self.registri, text="PC", relief=FLAT, labelanchor="e", padx=5 ) self.programc.grid(row=0, column=0, sticky=W + E) self.mar = LabelFrame( self.registri, text="MAR", relief=FLAT, labelanchor="e", padx=5 ) self.mar.grid(row=1, column=0, sticky=W + E) self.mbr = LabelFrame( self.registri, text="MBR", relief=FLAT, labelanchor="e", padx=5 ) self.mbr.grid(row=2, column=0, sticky=W + E) self.lopr = LabelFrame( self.registri, text="OPR", relief=FLAT, labelanchor="e", padx=5 ) self.lopr.grid(row=3, column=0, sticky=W + E) self.vari = LabelFrame( self.registri, text="I", relief=FLAT, labelanchor="e", padx=5 ) self.vari.grid(row=4, column=0, sticky=W + E) self.vare = LabelFrame( self.registri, text="E", relief=FLAT, labelanchor="e", padx=5 ) self.vare.grid(row=5, column=0, sticky=W + E) self.lac = LabelFrame( self.registri, text="AC", relief=FLAT, labelanchor="e", padx=5 ) self.lac.grid(row=6, column=0, sticky=W + E) self.lacint = LabelFrame( self.registri, text="INT AC", relief=FLAT, labelanchor="e", padx=5 ) self.lacint.grid(row=7, column=0, sticky=W + E) self.lachex = LabelFrame( self.registri, text="HEX 
AC", relief=FLAT, labelanchor="e", padx=5 ) self.lachex.grid(row=8, column=0, sticky=W + E) # Microistruzioni self.micro = LabelFrame( self.master, text="Microistruzioni eseguite", relief=RIDGE, borderwidth=5, labelanchor="n", pady=5, ) self.micro.rowconfigure(0, weight=1) self.micro.columnconfigure(0, weight=1) self.micro.grid(row=3, column=4, rowspan=5, columnspan=5, sticky=W + E + N + S) # Inout self.inout = LabelFrame( self.master, text="Input & Output", relief=RIDGE, borderwidth=5, labelanchor="n", pady=5, ) self.inout.rowconfigure(0, weight=1) self.inout.columnconfigure(0, weight=1) self.inout.grid(row=3, column=0, columnspan=4, sticky=W + E + N + S) self.create_widgets() def create_widgets(self): """ Crea il layout del programma, finestra dell'emulatore """ # Memoria RAM self.Visualizza = Text(self.ram, width=80) self.Visualizzascrollbar = Scrollbar(self.ram) self.Visualizzascrollbar.config(command=self.Visualizza.yview) self.Visualizza.config(yscrollcommand=self.Visualizzascrollbar.set) self.Visualizzascrollbar.grid(row=0, column=1, sticky=N + S) self.Visualizza.grid(row=0, column=0, sticky=W) # INOUT self.Visualizzainout = Text( self.inout, width=62, height=7, fg="green", bg="black" ) self.Visualizzascrollbar_inout = Scrollbar(self.inout) self.Visualizzascrollbar_inout.config(command=self.Visualizzainout.yview) self.Visualizzainout.config(yscrollcommand=self.Visualizzascrollbar_inout.set) self.Visualizzascrollbar_inout.grid(row=0, column=1, sticky=N + S) self.Visualizzainout.grid(row=0, column=0, sticky=W) # Mircroistruzioni self.Visualizzamicro = Text(self.micro, width=55, height=7) self.Visualizzascrollbar_m = Scrollbar(self.micro) self.Visualizzascrollbar_m.config(command=self.Visualizzamicro.yview) self.Visualizzamicro.config(yscrollcommand=self.Visualizzascrollbar_m.set) self.Visualizzascrollbar_m.grid(row=0, column=1, sticky=N + S) self.Visualizzamicro.grid(row=0, column=0, sticky=W) # Pulsanti self.butload = Button( self.controlli, text="LOAD", anchor=CENTER, width=15, command=self.loading, bg="SkyBlue", ) self.butload.grid(row=0, column=0) self.butstep = Button( self.controlli, text="Step", anchor=CENTER, width=15, command=self.step, bg="linen", ) self.butstep.grid(row=1, column=0) self.butminstep = Button( self.controlli, text="miniStep", anchor=CENTER, width=15, command=self.mini_step, bg="linen", ) self.butminstep.grid(row=2, column=0) self.butstep = Button( self.controlli, text="microStep", anchor=CENTER, width=15, command=self.micro_step, bg="linen", ) self.butstep.grid(row=3, column=0) self.butsetstep = Button( self.controlli, text="Set n Step", anchor=CENTER, width=15, command=self.setnstep, bg="linen", ) self.butsetstep.grid(row=4, column=0) self.butsetdelay = Button( self.controlli, text="Set Delay", anchor=CENTER, width=15, command=self.setdelay, bg="linen", ) self.butsetdelay.grid(row=5, column=0) self.butstart = Button( self.controlli, text="START", anchor=CENTER, width=15, command=self.start, bg="DarkOliveGreen3", ) self.butstart.grid(row=6, column=0) self.butreset = Button( self.controlli, text="RESET", anchor=CENTER, width=15, command=self.resetCD, bg="Orange3", ) self.butreset.grid(row=7, column=0) self.butstop = Button( self.controlli, text="STOP", anchor=CENTER, width=15, command=self.stop, bg="IndianRed", ) self.butstop.grid(row=8, column=0) self.butbreak = Button( self.controlli, text="BREAK", anchor=CENTER, width=15, command=self.breakpoint, bg="Magenta2", ) self.butbreak.grid(row=9, column=0) self.butcontinue = Button( self.controlli, text="CONTINUA", 
anchor=CENTER, width=15, command=self.continua, bg="Magenta2", ) self.butcontinue.grid(row=10, column=0) self.butesegui = Button( self.controlli, text="ESEGUI", anchor=CENTER, width=15, command=self.esegui, bg="Yellow", ) self.butesegui.grid(row=11, column=0) # Labels self.labelprogramc = Label( self.programc, text="00000000000", relief=SUNKEN, bg="red" ) self.labelprogramc.grid() self.labelmar = Label(self.mar, text="00000000000", relief=SUNKEN, bg="yellow") self.labelmar.grid() self.labelmbr = Label(self.mbr, text="000000000000000", relief=SUNKEN) self.labelmbr.grid() self.labelvari = Label(self.vari, text="0", relief=SUNKEN) self.labelvari.grid() self.labelopr = Label(self.lopr, text="000", relief=SUNKEN) self.labelopr.grid() self.labelucs = Label(self.unitas, text="0") self.labelucs.grid() self.labelucf = Label(self.unitaf, text="0") self.labelucf.grid() self.labelucr = Label(self.unitar, text="0") self.labelucr.grid() self.labelucint = Label(self.unitaint, text="0") self.labelucint.grid() self.labelnstep = Label(self.nstep, text="1") self.labelnstep.grid() self.labeldelay = Label(self.delays, text=str(self.delay)) self.labeldelay.grid() self.labeltempo = Label(self.tempo, text=str(self.CD.tempo)) self.labeltempo.grid() self.labelac = Label(self.lac, text="000000000000000", relief=SUNKEN) self.labelac.grid() self.labelacint = Label(self.lacint, text="000000000000000", relief=SUNKEN) self.labelacint.grid() self.labelachex = Label(self.lachex, text="000000000000000", relief=SUNKEN) self.labelachex.grid() self.labelvare = Label(self.vare, text="0", relief=SUNKEN) self.labelvare.grid() def continua(self): """ Continua l'esecuzione dopo un break """ self.CD.S = True self.esegui() def micro_step(self): """ Esegue il metodo step del calcolatore didattico ed aggiorna """ if self.CD.S: self.CD.step(self.root, self.codice) if self.CD.tempo == 0 and not self.CD.F and not self.CD.R: self.CD.previstr = self.CD.nextistr self.aggiornaall() def step(self): """ Esegue il metodo step del calcolatore didattico ed aggiorna """ var = True if self.CD.S and self.CD.nstep > 0: while var and self.CD.S: self.CD.step(self.root, self.codice) if not self.CD.F and not self.CD.R and self.CD.tempo == 0: self.CD.nstep -= 1 self.aggiornaall() self.CD.previstr = self.CD.nextistr var = False if self.CD.nstep > 0: self.butstep.after(self.delay, self.step) else: self.CD.setnstep(1) else: self.CD.setnstep(1) self.aggiornaall() def esegui(self): """ Esegue il programma fino all'arresto della macchina tramite l'istruzione HLT """ while self.CD.S: self.CD.step(self.root, self.codice) if not self.CD.F and not self.CD.R and self.CD.tempo == 0: self.aggiornaall() self.CD.previstr = self.CD.nextistr break if self.CD.S: self.butesegui.after(self.delay, self.esegui) else: self.CD.setnstep(1) self.aggiornaall() def mini_step(self): """ Esegue un singolo ciclo della macchina """ if self.CD.S: for x in range(0, 4): self.CD.step(self.root, self.codice) self.CD.nstep = 1 self.aggiornaall() if self.CD.F is False and self.CD.R is False: self.CD.previstr = self.CD.nextistr def cerca_istr_prev(self): """ Evidenzia di VERDE l'ultima istruzione eseguita """ if self.CD.PC == "000000000000": return try: if self.CD.previstr == "" and int(self.CD.PC, 2) == self.CD.START: return else: pospc = str(3.0 + self.CD.previstr) self.Visualizza.tag_add( "PISTR", str(pospc[:-1] + "16"), str(pospc[:-1] + "end") ) self.Visualizza.tag_config("PISTR", background="green") self.Visualizza.see(pospc) except TypeError: pass # Errore che si ottiene durante il reset 
del CD # NOTA : METODO NON NECESSARIO NEL PROGRAMMA FINALE # def cerca_istr_corr(self): # """ # Evidenzia di verde l'istruzione che si dovrà eseguire # """ # if self.CD.PC == '000000000000': # return # try: # if int(self.CD.PC,2) == self.CD.START: # Inizio esecuzione del programma # Il PC e l'istruzione da eseguire sono allo stesso 'livello' # pos = str(3.0) # self.Visualizza.tag_add("ISTR", str(pos[0]+'.16'), str(pos[:-1]+'end')) # self.Visualizza.tag_config("ISTR", background = "green") # else: # pospc = str(3.0 + self.CD.nextistr) # self.Visualizza.tag_add("ISTR", str(pospc[:-1]+'16'), str(pospc[:-1]+'end')) # self.Visualizza.tag_config("ISTR", background = "green") # self.Visualizza.see(pospc) # except TypeError: # pass ## Errore che si ottiene durante il reset del CD def cerca_MAR(self): """ Evidenzia di giallo l'indirizzo puntato dal MAR """ try: pos = 3.0 stringa = self.Visualizza.get(str(pos), "end") while ( stringa[:12] != self.CD.MAR and int(pos) < len(self.CD.RAM) + 3 and len(self.CD.RAM) > 0 ): pos += 1 stringa = self.Visualizza.get(str(pos), "end") if int(pos) >= len(self.CD.RAM) + 3: return self.Visualizza.tag_add("MAR", pos, str(float(pos) + 0.12)) self.Visualizza.tag_config("MAR", background="yellow") except TypeError: pass # Errore che si ottiene durante il reset del CD def cerca_PC(self): """ Evidenzia di rosso l'indirizzo puntato da PC """ try: pos = 3.0 stringa = self.Visualizza.get(str(pos), "end") while ( stringa[:12] != self.CD.PC and int(pos) < len(self.CD.RAM) + 3 and len(self.CD.RAM) > 0 ): pos += 1 stringa = self.Visualizza.get(str(pos), "end") if int(pos) >= len(self.CD.RAM) + 3: return self.Visualizza.tag_add("PC", pos, str(float(pos) + 0.12)) self.Visualizza.tag_config("PC", background="red") except TypeError: pass # Errore che si ottiene durante il reset del CD def aggiornaout(self): """ Aggiorna micro e input/output """ self.aggiornamicro() self.aggiornainout() def aggiornamicro(self): """ Aggiorna le microistruzioni eseguite """ self.Visualizzamicro.delete(1.0, END) stringa = self.CD.microistruzioni self.Visualizzamicro.insert(INSERT, stringa) self.Visualizzamicro.see(END) def aggiornainout(self): """ Aggiorna gli input ed output di sistema """ self.Visualizzainout.delete(1.0, END) stringa = self.CD.inout self.Visualizzainout.insert(INSERT, stringa) self.Visualizzainout.see(END) def aggiornaram(self): """ Aggiorna lo stato della RAM """ self.Visualizza.delete(1.0, END) stringa = self.CD.statusRAM() self.Visualizza.insert(INSERT, stringa) self.cerca_MAR() self.cerca_PC() self.cerca_istr_prev() # self.cerca_istr_corr() #Non più necessaria nella versione finale def aggiornareg(self): """ Aggiorna lo stato dei Registri """ self.labelprogramc.config(text=self.CD.PC) self.labelmar.config(text=self.CD.MAR) self.labelmbr.config(text=self.CD.MBR) self.labelac.config(text=self.CD.AC) self.labelacint.config(text=str(self.CD.range(int(self.CD.AC, 2)))) self.labelachex.config(text=str((hex(int(self.CD.AC, 2))[2:].upper())).zfill(4)) self.labelvare.config(text=self.CD.E) self.labelvari.config(text=self.CD.I) self.labelopr.config(text=self.CD.OPR) def aggiornauc(self): """ Aggiorna lo stato dell'unita' di controllo """ if self.CD.S and not self.CD.breaks: self.labelucs.config(text=self.CD.S, bg="green") self.unitas.config(bg="green") elif not self.CD.S and self.CD.breaks: self.labelucs.config(text=self.CD.S, bg="Magenta2") self.unitas.config(bg="Magenta2") else: self.labelucs.config(text=self.CD.S, bg="red") self.unitas.config(bg="red") 
self.labelucf.config(text=self.CD.F) self.labelucr.config(text=self.CD.R) self.labelucint.config(text=self.CD.Interrupt) self.labeltempo.config(text=self.CD.tempo) def aggiornaall(self): """ Aggiorna tutto """ self.aggiornaram() self.aggiornareg() self.aggiornauc() self.aggiornamicro() self.aggiornaout() self.labelnstep.config(text=self.CD.nstep) def loading(self): """ Carica il contenuto del codice assembly decodificandolo in binario nella RAM """ contenuto = self.codice.Inserisci.get(1.0, END) if len(contenuto) > 1: self.resetCD() if self.CD.carica(contenuto, self) is not None: self.CD.S = 0 self.aggiornaall() def resetCD(self): """ Resetta il calcolatore didattico """ self.CD = pdp8() self.aggiornaall() def start(self): """ Mette la variabile Start (S) ad 1, cioe' True """ self.CD.S = True if self.CD.breaks == True: self.CD.breaks = False self.aggiornauc() def stop(self): """ Mette la variabile Start (S) ad 0, cioe' False """ self.CD.S = False self.aggiornauc() def setnstep(self): """ Setta, in base al valore passato, il numero di cicli da eseguire """ temp = askinteger( "Num Step", "Numero di step da eseguire", initialvalue=1, minvalue=1, parent=self.root, ) if temp is None: self.CD.setnstep(1) else: self.CD.setnstep(temp) self.labelnstep.config(text=self.CD.nstep) def setdelay(self): """ Setta, in base al valore passato, il ritardo di esecuzione. Il valore è espresso in millisecondi, di default = 1000 """ temp = askinteger( "Set Delay", "Ritardo in millisecondi", initialvalue=100, minvalue=1, parent=self.root, ) if temp is not None: self.delay = temp self.labeldelay.config(text=self.delay) def breakpoint(self): """ Setta o elimina i breakpoint dal programma caricato in memoria """ temp = askstring("Cella di memoria", "Indirizzo esadecimale", parent=self.root) if temp is not None: temp = self.CD.binario(int(temp, 16)).zfill(12) self.CD.breakpoint(temp) self.aggiornaram() def exit(self): """ Esce dal programma """ if askquestion("Exit", "Sicuro di voler uscire?", parent=self.master) == YES: self.codice.master.quit() self.codice.master.destroy() else: showinfo( "Suggerimento", """Forse e' meglio fare una pausa!""", icon=WARNING, parent=self.master, )
34.039882
95
0.522413
[ "MIT" ]
MircoT/py-pdp8-tk
Emulatore.py
23,048
Python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
from pathlib import Path
from typing import Any

import nevergrad as ng
from hydra.core.override_parser.overrides_parser import OverridesParser
from hydra.core.plugins import Plugins
from hydra.plugins.sweeper import Sweeper
from hydra.test_utils.test_utils import (
    TSweepRunner,
    chdir_plugin_root,
    run_process,
    run_python_script,
)
from omegaconf import DictConfig, OmegaConf
from pytest import mark

from hydra_plugins.hydra_nevergrad_sweeper import _impl
from hydra_plugins.hydra_nevergrad_sweeper.nevergrad_sweeper import NevergradSweeper

chdir_plugin_root()


def test_discovery() -> None:
    assert NevergradSweeper.__name__ in [
        x.__name__ for x in Plugins.instance().discover(Sweeper)
    ]


def assert_ng_param_equals(expected: Any, actual: Any) -> None:
    assert type(expected) == type(actual)
    if isinstance(actual, ng.p.Choice) or isinstance(actual, ng.p.TransitionChoice):
        assert sorted(expected.choices.value) == sorted(actual.choices.value)
    elif isinstance(actual, ng.p.Log) or isinstance(actual, ng.p.Scalar):
        assert expected.bounds == actual.bounds
        assert expected.integer == actual.integer
    else:
        assert False, f"Unexpected type: {type(actual)}"


def get_scalar_with_integer_bounds(lower: int, upper: int, type: Any) -> ng.p.Scalar:
    scalar = type(lower=lower, upper=upper)
    scalar.set_integer_casting()
    assert isinstance(scalar, ng.p.Scalar)
    return scalar


@mark.parametrize(
    "input, expected",
    [
        ([1, 2, 3], ng.p.Choice([1, 2, 3])),
        (["1", "2", "3"], ng.p.Choice(["1", "2", "3"])),
        ({"lower": 1, "upper": 12, "log": True}, ng.p.Log(lower=1, upper=12)),
        ({"lower": 1, "upper": 12}, ng.p.Scalar(lower=1, upper=12)),
        (
            {"lower": 1, "upper": 12, "integer": True},
            get_scalar_with_integer_bounds(1, 12, ng.p.Scalar),
        ),
        (
            {"lower": 1, "upper": 12, "log": True, "integer": True},
            get_scalar_with_integer_bounds(1, 12, ng.p.Log),
        ),
    ],
)
def test_create_nevergrad_parameter_from_config(
    input: Any,
    expected: Any,
) -> None:
    actual = _impl.create_nevergrad_param_from_config(input)
    assert_ng_param_equals(expected, actual)


@mark.parametrize(
    "input, expected",
    [
        ("key=choice(1,2)", ng.p.Choice([1, 2])),
        ("key=choice('hello','world')", ng.p.Choice(["hello", "world"])),
        ("key=tag(ordered, choice(1,2,3))", ng.p.TransitionChoice([1, 2, 3])),
        (
            "key=tag(ordered, choice('hello','world', 'nevergrad'))",
            ng.p.TransitionChoice(["hello", "world", "nevergrad"]),
        ),
        ("key=range(1,3)", ng.p.Choice([1, 2])),
        ("key=shuffle(range(1,3))", ng.p.Choice([1, 2])),
        ("key=range(1,5)", ng.p.Choice([1, 2, 3, 4])),
        ("key=float(range(1,5))", ng.p.Choice([1.0, 2.0, 3.0, 4.0])),
        (
            "key=int(interval(1,12))",
            get_scalar_with_integer_bounds(lower=1, upper=12, type=ng.p.Scalar),
        ),
        ("key=tag(log, interval(1,12))", ng.p.Log(lower=1, upper=12)),
        (
            "key=tag(log, int(interval(1,12)))",
            get_scalar_with_integer_bounds(lower=1, upper=12, type=ng.p.Log),
        ),
    ],
)
def test_create_nevergrad_parameter_from_override(
    input: Any,
    expected: Any,
) -> None:
    parser = OverridesParser.create()
    parsed = parser.parse_overrides([input])[0]
    param = _impl.create_nevergrad_parameter_from_override(parsed)
    assert_ng_param_equals(param, expected)


def test_launched_jobs(hydra_sweep_runner: TSweepRunner) -> None:
    budget = 8
    sweep = hydra_sweep_runner(
        calling_file=None,
        calling_module="hydra.test_utils.a_module",
        config_path="configs",
        config_name="compose.yaml",
        task_function=None,
        overrides=[
            "hydra/sweeper=nevergrad",
            "hydra/launcher=basic",
            f"hydra.sweeper.optim.budget={budget}",  # small budget to test fast
            "hydra.sweeper.optim.num_workers=3",
            "foo=1,2",
            "bar=4:8",
        ],
    )
    with sweep:
        assert sweep.returns is None


@mark.parametrize("with_commandline", (True, False))
def test_nevergrad_example(with_commandline: bool, tmpdir: Path) -> None:
    budget = 32 if with_commandline else 1  # make a full test only once (faster)
    cmd = [
        "example/my_app.py",
        "-m",
        "hydra.sweep.dir=" + str(tmpdir),
        f"hydra.sweeper.optim.budget={budget}",  # small budget to test fast
        f"hydra.sweeper.optim.num_workers={min(8, budget)}",
        "hydra.sweeper.optim.seed=12",  # avoid random failures
    ]
    if with_commandline:
        cmd += [
            "db=mnist,cifar",
            "batch_size=4,8,12,16",
            "lr=tag(log, interval(0.001, 1.0))",
            "dropout=interval(0,1)",
        ]
    run_python_script(cmd)
    returns = OmegaConf.load(f"{tmpdir}/optimization_results.yaml")
    assert isinstance(returns, DictConfig)
    assert returns.name == "nevergrad"
    assert len(returns) == 3
    best_parameters = returns.best_evaluated_params
    assert not best_parameters.dropout.is_integer()
    if budget > 1:
        assert best_parameters.batch_size == 4  # this argument should be easy to find
    # check that all job folders are created
    last_job = max(int(fp.name) for fp in Path(tmpdir).iterdir() if fp.name.isdigit())
    assert last_job == budget - 1


@mark.parametrize("max_failure_rate", (0.5, 1.0))
def test_failure_rate(max_failure_rate: float, tmpdir: Path) -> None:
    cmd = [
        sys.executable,
        "example/my_app.py",
        "-m",
        f"hydra.sweep.dir={tmpdir}",
        "hydra.sweeper.optim.budget=2",  # small budget to test fast
        "hydra.sweeper.optim.num_workers=2",
        f"hydra.sweeper.optim.max_failure_rate={max_failure_rate}",
        "error=true",
    ]
    out, err = run_process(cmd, print_error=False, raise_exception=False)
    assert "Returning infinity for failed experiment" in out
    error_string = "RuntimeError: cfg.error is True"
    if max_failure_rate < 1.0:
        assert error_string in err
    else:
        assert error_string not in err
35.005525
86
0.631629
[ "MIT" ]
beerzyp/hydra
plugins/hydra_nevergrad_sweeper/tests/test_nevergrad_sweeper_plugin.py
6,336
Python
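A short, hedged aside on the test file above: it checks that Hydra override strings and config dicts are converted into the matching nevergrad parametrization objects. The sketch below builds those same objects directly with nevergrad's public ng.p API; it assumes only that nevergrad is installed and does not touch the Hydra plugin internals.

import nevergrad as ng

# Categorical choice, the expected result for `key=choice(1,2,3)`.
choice = ng.p.Choice([1, 2, 3])

# Ordered (transition) choice, the expected result for `key=tag(ordered, choice(...))`.
ordered = ng.p.TransitionChoice(["hello", "world", "nevergrad"])

# Bounded scalar cast to integers, the expected result for `key=int(interval(1,12))`.
scalar = ng.p.Scalar(lower=1, upper=12)
scalar.set_integer_casting()

# Log-distributed bounded value, the expected result for `key=tag(log, interval(1,12))`.
log_scalar = ng.p.Log(lower=1, upper=12)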
#!/usr/bin/env python3


def sum_recursin(numList):
    if len(numList) == 1:
        return numList[0]
    else:
        return numList[0] + sum_recursin(numList[1:])


if __name__ == "__main__":
    print(sum_recursin(list(range(1, 101))))
21.818182
53
0.629167
[ "BSD-2-Clause" ]
zzz0072/Python_Exercises
07_RSI/ch03/sum.py
240
Python
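A brief note on the recursive sum above: CPython's default recursion limit is 1000 frames, so sum_recursin would raise RecursionError for inputs much longer than that; for range(1, 101) the answer is simply 100 * 101 / 2 = 5050. A minimal iterative sketch with the same result:

def sum_iterative(num_list):
    # Same computation as sum_recursin, but without growing the call stack.
    total = 0
    for n in num_list:
        total += n
    return total


if __name__ == "__main__":
    assert sum_iterative(list(range(1, 101))) == 5050
    print(sum_iterative(list(range(1, 101))))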
#!/content/Python/bin/python3.6
import os
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

from compiler_args import nvcc_args, cxx_args

setup(
    name='interpolation_cuda',
    ext_modules=[
        CUDAExtension('interpolation_cuda', [
            'interpolation_cuda.cc',
            'interpolation_cuda_kernel.cu'
        ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args})
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
25.285714
67
0.696798
[ "MIT" ]
iBobbyTS/Colab-DAIN
my_package/Interpolation/setup.py
531
Python
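A hedged usage note on the setup script above: an extension declared as CUDAExtension('interpolation_cuda', ...) becomes importable under that same name once built (for example via `python setup.py build_ext --inplace`). PyTorch also provides a JIT route that skips setup.py; the sketch below reuses the source file names from the script and omits the repo's compiler_args flags, which are not reproduced here.

import torch
from torch.utils.cpp_extension import load

# JIT-compile the same sources; requires a CUDA toolchain compatible with
# the installed torch build.
interpolation_cuda = load(
    name="interpolation_cuda",
    sources=["interpolation_cuda.cc", "interpolation_cuda_kernel.cu"],
)

# The concrete kernels exported by interpolation_cuda.cc are not shown in
# this dump, so nothing is called here beyond checking CUDA availability.
assert torch.cuda.is_available()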
# Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Allows us to create and absorb changes (aka Deltas) to elements.""" import functools import json import random import textwrap import numbers import re from datetime import datetime from datetime import date from datetime import time from streamlit import caching from streamlit import config from streamlit import cursor from streamlit import type_util from streamlit.ReportThread import get_report_ctx from streamlit.errors import DuplicateWidgetID from streamlit.errors import StreamlitAPIException from streamlit.errors import NoSessionContext from streamlit.file_util import get_encoded_file_data from streamlit.js_number import JSNumber from streamlit.js_number import JSNumberBoundsException from streamlit.proto import Alert_pb2 from streamlit.proto import Balloons_pb2 from streamlit.proto import BlockPath_pb2 from streamlit.proto import ForwardMsg_pb2 from streamlit.proto.NumberInput_pb2 import NumberInput from streamlit.proto.TextInput_pb2 import TextInput from streamlit.logger import get_logger from streamlit.type_util import is_type LOGGER = get_logger(__name__) # Save the type built-in for when we override the name "type". _type = type MAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB # List of Streamlit commands that perform a Pandas "melt" operation on # input dataframes. DELTAS_TYPES_THAT_MELT_DATAFRAMES = ("line_chart", "area_chart", "bar_chart") def _wraps_with_cleaned_sig(wrapped, num_args_to_remove): """Simplify the function signature by removing arguments from it. Removes the first N arguments from function signature (where N is num_args_to_remove). This is useful since function signatures are visible in our user-facing docs, and many methods in DeltaGenerator have arguments that users have no access to. Note that "self" is ignored by default. So to remove both "self" and the next argument you'd pass num_args_to_remove=1. """ # By passing (None, ...), we're removing (arg1, ...) from *args args_to_remove = (None,) * num_args_to_remove fake_wrapped = functools.partial(wrapped, *args_to_remove) fake_wrapped.__doc__ = wrapped.__doc__ fake_wrapped.__name__ = wrapped.__name__ # type: ignore[attr-defined] fake_wrapped.__module__ = wrapped.__module__ return functools.wraps(fake_wrapped) def _with_element(method): """Wrap function and pass a NewElement proto to be filled. This is a function decorator. Converts a method of the with arguments (self, element, ...) into a method with arguments (self, ...). Thus, the instantiation of the element proto object and creation of the element are handled automatically. Parameters ---------- method : callable A DeltaGenerator method with arguments (self, element, ...) Returns ------- callable A new DeltaGenerator method with arguments (self, ...) """ @_wraps_with_cleaned_sig(method, 1) # Remove self and element from sig. 
def wrapped_method(dg, *args, **kwargs): # Warn if we're called from within an @st.cache function caching.maybe_show_cached_st_function_warning(dg, method.__name__) delta_type = method.__name__ last_index = None if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES and len(args) > 0: data = args[0] if type_util.is_dataframe_compatible(data): data = type_util.convert_anything_to_df(data) if data.index.size > 0: last_index = data.index[-1] else: last_index = None def marshall_element(element): return method(dg, element, *args, **kwargs) return dg._enqueue_new_element_delta(marshall_element, delta_type, last_index) return wrapped_method def _build_duplicate_widget_message(widget_type, user_key=None): if user_key is not None: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with `key='{user_key}'`. To fix this, please make sure that the `key` argument is unique for each `st.{widget_type}` you create. """ ) else: message = textwrap.dedent( """ There are multiple identical `st.{widget_type}` widgets with the same generated key. (When a widget is created, it's assigned an internal key based on its structure. Multiple widgets with an identical structure will result in the same internal key, which causes this error.) To fix this, please pass a unique `key` argument to `st.{widget_type}`. """ ) return message.strip("\n").format(widget_type=widget_type, user_key=user_key) def _set_widget_id(widget_type, element, user_key=None): """Set the widget id. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified key to use for the widget ID. If this is None, we'll generate an ID by hashing the element. """ element_hash = hash(element.SerializeToString()) if user_key is not None: widget_id = "%s-%s" % (user_key, element_hash) else: widget_id = "%s" % element_hash ctx = get_report_ctx() if ctx is not None: added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( _build_duplicate_widget_message(widget_type, user_key) ) el = getattr(element, widget_type) el.id = widget_id def _get_widget_ui_value(widget_type, element, user_key=None): """Get the widget ui_value from the report context. NOTE: This function should be called after the proto has been filled. Parameters ---------- widget_type : str The type of the widget as stored in proto. element : proto The proto of the element user_key : str Optional user-specified string to use as the widget ID. If this is None, we'll generate an ID by hashing the element. Returns ------- ui_value : any The value of the widget set by the client or the default value passed. If the report context doesn't exist, None will be returned. """ _set_widget_id(widget_type, element, user_key) el = getattr(element, widget_type) ctx = get_report_ctx() ui_value = ctx.widgets.get_widget_value(el.id) if ctx else None return ui_value def _get_pandas_index_attr(data, attr): return getattr(data.index, attr, None) class NoValue(object): """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() call to return None. This is needed because `_enqueue_new_element_delta` replaces `None` with a `DeltaGenerator` (for use in non-widget elements). """ pass class DeltaGenerator(object): """Creator of Delta protobuf messages. Parameters ---------- container: BlockPath_pb2.BlockPath or None The root container for this DeltaGenerator. 
If None, this is a null DeltaGenerator which doesn't print to the app at all (useful for testing). cursor: cursor.AbstractCursor or None """ # The pydoc below is for user consumption, so it doesn't talk about # DeltaGenerator constructor parameters (which users should never use). For # those, see above. def __init__(self, container=BlockPath_pb2.BlockPath.MAIN, cursor=None): """Inserts or updates elements in Streamlit apps. As a user, you should never initialize this object by hand. Instead, DeltaGenerator objects are initialized for you in two places: 1) When you call `dg = st.foo()` for some method "foo", sometimes `dg` is a DeltaGenerator object. You can call methods on the `dg` object to update the element `foo` that appears in the Streamlit app. 2) This is an internal detail, but `st.sidebar` itself is a DeltaGenerator. That's why you can call `st.sidebar.foo()` to place an element `foo` inside the sidebar. """ self._container = container # This is either: # - None: if this is the running DeltaGenerator for a top-level # container. # - RunningCursor: if this is the running DeltaGenerator for a # non-top-level container (created with dg._block()) # - LockedCursor: if this is a locked DeltaGenerator returned by some # other DeltaGenerator method. E.g. the dg returned in dg = # st.text("foo"). # # You should never use this! Instead use self._cursor, which is a # computed property that fetches the right cursor. # self._provided_cursor = cursor def __getattr__(self, name): import streamlit as st streamlit_methods = [ method_name for method_name in dir(st) if callable(getattr(st, method_name)) ] def wrapper(*args, **kwargs): if name in streamlit_methods: if self._container == BlockPath_pb2.BlockPath.SIDEBAR: message = ( "Method `%(name)s()` does not exist for " "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name} ) else: message = ( "Method `%(name)s()` does not exist for " "`DeltaGenerator` objects. Did you mean " "`st.%(name)s()`?" % {"name": name} ) else: message = "`%(name)s()` is not a valid Streamlit command." % { "name": name } raise StreamlitAPIException(message) return wrapper @property def _cursor(self): if self._provided_cursor is None: return cursor.get_container_cursor(self._container) else: return self._provided_cursor def _get_coordinates(self): """Returns the element's 4-component location as string like "M.(1,2).3". This function uniquely identifies the element's position in the front-end, which allows (among other potential uses) the MediaFileManager to maintain session-specific maps of MediaFile objects placed with their "coordinates". This way, users can (say) use st.image with a stream of different images, and Streamlit will expire the older images and replace them in place. """ container = self._container # Proto index of container (e.g. MAIN=1) if self._cursor: path = ( self._cursor.path ) # [uint, uint] - "breadcrumbs" w/ ancestor positions index = self._cursor.index # index - element's own position else: # Case in which we have started up in headless mode. path = "(,)" index = "" return "{}.{}.{}".format(container, path, index) def _enqueue_new_element_delta( self, marshall_element, delta_type, last_index=None, element_width=None, element_height=None, ): """Create NewElement delta, fill it, and enqueue it. Parameters ---------- marshall_element : callable Function which sets the fields for a NewElement protobuf. 
element_width : int or None Desired width for the element element_height : int or None Desired height for the element Returns ------- DeltaGenerator A DeltaGenerator that can be used to modify the newly-created element. """ rv = None # Always call marshall_element() so users can run their script without # Streamlit. msg = ForwardMsg_pb2.ForwardMsg() rv = marshall_element(msg.delta.new_element) msg_was_enqueued = False # Only enqueue message if there's a container. if self._container and self._cursor: msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index if element_width is not None: msg.metadata.element_dimension_spec.width = element_width if element_height is not None: msg.metadata.element_dimension_spec.height = element_height _enqueue_message(msg) msg_was_enqueued = True if msg_was_enqueued: # Get a DeltaGenerator that is locked to the current element # position. output_dg = DeltaGenerator( container=self._container, cursor=self._cursor.get_locked_cursor( delta_type=delta_type, last_index=last_index ), ) else: # If the message was not enqueued, just return self since it's a # no-op from the point of view of the app. output_dg = self return _value_or_dg(rv, output_dg) def _block(self): if self._container is None or self._cursor is None: return self msg = ForwardMsg_pb2.ForwardMsg() msg.delta.new_block = True msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index # Normally we'd return a new DeltaGenerator that uses the locked cursor # below. But in this case we want to return a DeltaGenerator that uses # a brand new cursor for this new block we're creating. block_cursor = cursor.RunningCursor( path=self._cursor.path + (self._cursor.index,) ) block_dg = DeltaGenerator(container=self._container, cursor=block_cursor) # Must be called to increment this cursor's index. self._cursor.get_locked_cursor(None) _enqueue_message(msg) return block_dg @_with_element def balloons(self, element): """Draw celebratory balloons. Example ------- >>> st.balloons() ...then watch your app and get ready for a celebration! """ element.balloons.type = Balloons_pb2.Balloons.DEFAULT element.balloons.execution_id = random.randrange(0xFFFFFFFF) @_with_element def text(self, element, body): """Write fixed-width and preformatted text. Parameters ---------- body : str The string to display. Example ------- >>> st.text('This is some text.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PYxU1kee5ubuhGR11NsnT1 height: 50px """ element.text.body = _clean_text(body) @_with_element def markdown(self, element, body, unsafe_allow_html=False): """Display string formatted as Markdown. Parameters ---------- body : str The string to display as Github-flavored Markdown. Syntax information can be found at: https://github.github.com/gfm. This also supports: * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. For a list of all supported codes, see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. * LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$" must be on their own lines). Supported LaTeX functions are listed at https://katex.org/docs/supported.html. unsafe_allow_html : bool By default, any HTML tags found in the body will be escaped and therefore treated as pure text. This behavior may be turned off by setting this argument to True. That said, we *strongly advise against it*. 
It is hard to write secure HTML, so by using this argument you may be compromising your users' security. For more information, see: https://github.com/streamlit/streamlit/issues/152 *Also note that `unsafe_allow_html` is a temporary measure and may be removed from Streamlit at any time.* If you decide to turn on HTML anyway, we ask you to please tell us your exact use case here: https://discuss.streamlit.io/t/96 This will help us come up with safe APIs that allow you to do what you want. Example ------- >>> st.markdown('Streamlit is **_really_ cool**.') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS height: 50px """ element.markdown.body = _clean_text(body) element.markdown.allow_html = unsafe_allow_html @_with_element def latex(self, element, body): # This docstring needs to be "raw" because of the backslashes in the # example below. r"""Display mathematical expressions formatted as LaTeX. Supported LaTeX functions are listed at https://katex.org/docs/supported.html. Parameters ---------- body : str or SymPy expression The string or SymPy expression to display as LaTeX. If str, it's a good idea to use raw Python strings since LaTeX uses backslashes a lot. Example ------- >>> st.latex(r''' ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = ... \sum_{k=0}^{n-1} ar^k = ... a \left(\frac{1-r^{n}}{1-r}\right) ... ''') .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 height: 75px """ if type_util.is_sympy_expession(body): import sympy body = sympy.latex(body) element.markdown.body = "$$\n%s\n$$" % _clean_text(body) @_with_element def code(self, element, body, language="python"): """Display a code block with optional syntax highlighting. (This is a convenience wrapper around `st.markdown()`) Parameters ---------- body : str The string to display as code. language : str The language that the code is written in, for syntax highlighting. If omitted, the code will be unstyled. Example ------- >>> code = '''def hello(): ... print("Hello, Streamlit!")''' >>> st.code(code, language='python') .. output:: https://share.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 height: 100px """ markdown = "```%(language)s\n%(body)s\n```" % { "language": language or "", "body": body, } element.markdown.body = _clean_text(markdown) @_with_element def json(self, element, body): """Display object or string as a pretty-printed JSON string. Parameters ---------- body : Object or str The object to print as JSON. All referenced objects should be serializable to JSON as well. If object is a string, we assume it contains serialized JSON. Example ------- >>> st.json({ ... 'foo': 'bar', ... 'baz': 'boz', ... 'stuff': [ ... 'stuff 1', ... 'stuff 2', ... 'stuff 3', ... 'stuff 5', ... ], ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=CTFkMQd89hw3yZbZ4AUymS height: 280px """ import streamlit as st if not isinstance(body, str): try: body = json.dumps(body, default=lambda o: str(type(o))) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=lambda o: str(type(o))) element.json.body = body @_with_element def title(self, element, body): """Display text in title formatting. Each document should have a single `st.title()`, although this is not enforced. Parameters ---------- body : str The text to display. Example ------- >>> st.title('This is a title') .. 
output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj height: 100px """ element.markdown.body = "# %s" % _clean_text(body) @_with_element def header(self, element, body): """Display text in header formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.header('This is a header') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj height: 100px """ element.markdown.body = "## %s" % _clean_text(body) @_with_element def subheader(self, element, body): """Display text in subheader formatting. Parameters ---------- body : str The text to display. Example ------- >>> st.subheader('This is a subheader') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ height: 100px """ element.markdown.body = "### %s" % _clean_text(body) @_with_element def error(self, element, body): """Display error message. Parameters ---------- body : str The error text to display. Example ------- >>> st.error('This is an error') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.ERROR @_with_element def warning(self, element, body): """Display warning message. Parameters ---------- body : str The warning text to display. Example ------- >>> st.warning('This is a warning') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.WARNING @_with_element def info(self, element, body): """Display an informational message. Parameters ---------- body : str The info text to display. Example ------- >>> st.info('This is a purely informational message') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.INFO @_with_element def success(self, element, body): """Display a success message. Parameters ---------- body : str The success text to display. Example ------- >>> st.success('This is a success message!') """ element.alert.body = _clean_text(body) element.alert.format = Alert_pb2.Alert.SUCCESS @_with_element def help(self, element, obj): """Display object's doc string, nicely formatted. Displays the doc string for this object. Parameters ---------- obj : Object The object whose docstring should be displayed. Example ------- Don't remember how to initialize a dataframe? Try this: >>> st.help(pandas.DataFrame) Want to quickly check what datatype is output by a certain function? Try: >>> x = my_poorly_documented_function() >>> st.help(x) """ import streamlit.elements.doc_string as doc_string doc_string.marshall(element, obj) @_with_element def exception(self, element, exception): """Display an exception. Parameters ---------- exception : Exception The exception to display. Example ------- >>> e = RuntimeError('This is an exception of type RuntimeError') >>> st.exception(e) """ import streamlit.elements.exception_proto as exception_proto exception_proto.marshall(element.exception, exception) def dataframe(self, data=None, width=None, height=None): """Display a dataframe as an interactive table. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to display. If 'data' is a pandas.Styler, it will be used to style its underyling DataFrame. Streamlit supports custom cell values and colors. (It does not support some of the more exotic pandas styling features, like bar charts, hovering, and captions.) Styler support is experimental! width : int or None Desired width of the UI element expressed in pixels. If None, a default width based on the page width is used. 
height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. Examples -------- >>> df = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df) # Same as st.write(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=165mJbzWdAC8Duf8a4tjyQ height: 330px >>> st.dataframe(df, 200, 100) You can also pass a Pandas Styler object to change the style of the rendered DataFrame: >>> df = pd.DataFrame( ... np.random.randn(10, 20), ... columns=('col %d' % i for i in range(20))) ... >>> st.dataframe(df.style.highlight_max(axis=0)) .. output:: https://share.streamlit.io/0.29.0-dV1Y/index.html?id=Hb6UymSNuZDzojUNybzPby height: 285px """ import streamlit.elements.data_frame_proto as data_frame_proto def set_data_frame(delta): data_frame_proto.marshall_data_frame(data, delta.data_frame) return self._enqueue_new_element_delta( set_data_frame, "dataframe", element_width=width, element_height=height ) @_with_element def line_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a line chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict or None Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart width in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.line_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=BdxXG3MmrVBfJyqS2R2ki8 height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("line", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def area_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a area chart. This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart width in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(20, 3), ... columns=['a', 'b', 'c']) ... >>> st.area_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=Pp65STuFj65cJRDfhGh4Jt height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("area", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def bar_chart( self, element, data=None, width=0, height=0, use_container_width=True ): """Display a bar chart. 
This is just syntax-sugar around st.altair_chart. The main difference is this command uses the data's own column and indices to figure out the chart's spec. As a result this is easier to use for many "just plot this" scenarios, while being less customizable. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, or dict Data to be plotted. width : int The chart width in pixels. If 0, selects the width automatically. height : int The chart width in pixels. If 0, selects the height automatically. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the width argument. Example ------- >>> chart_data = pd.DataFrame( ... np.random.randn(50, 3), ... columns=["a", "b", "c"]) ... >>> st.bar_chart(chart_data) .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=5U5bjR2b3jFwnJdDfSvuRk height: 220px """ import streamlit.elements.altair as altair chart = altair.generate_chart("bar", data, width, height) altair.marshall(element.vega_lite_chart, chart, use_container_width) @_with_element def vega_lite_chart( self, element, data=None, spec=None, width=0, use_container_width=False, **kwargs, ): """Display a chart using the Vega-Lite library. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Either the data to be plotted or a Vega-Lite spec containing the data (which more closely follows the Vega-Lite API). spec : dict or None The Vega-Lite spec for the chart. If the spec was already passed in the previous argument, this must be set to None. See https://vega.github.io/vega-lite/docs/ for more info. width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Vega-Lite spec. Please refer to the Vega-Lite documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Vega-Lite's native `width` value. **kwargs : any Same as spec, but as keywords. Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) >>> >>> st.vega_lite_chart(df, { ... 'mark': {'type': 'circle', 'tooltip': True}, ... 'encoding': { ... 'x': {'field': 'a', 'type': 'quantitative'}, ... 'y': {'field': 'b', 'type': 'quantitative'}, ... 'size': {'field': 'c', 'type': 'quantitative'}, ... 'color': {'field': 'c', 'type': 'quantitative'}, ... }, ... }) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Vega-Lite usage without Streamlit can be found at https://vega.github.io/vega-lite/examples/. Most of those can be easily translated to the syntax shown above. """ import streamlit.elements.vega_lite as vega_lite if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Vega-Lite's native `width` argument as described at https://vega.github.io/vega-lite/docs/size.html" ) vega_lite.marshall( element.vega_lite_chart, data, spec, use_container_width=use_container_width, **kwargs, ) @_with_element def altair_chart(self, element, altair_chart, width=0, use_container_width=False): """Display a chart using the Altair library. Parameters ---------- altair_chart : altair.vegalite.v2.api.Chart The Altair chart object to display. width : number Deprecated. If != 0 (default), will show an alert. 
From now on you should set the width directly in the Altair spec. Please refer to the Altair documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Altair's native `width` value. Example ------- >>> import pandas as pd >>> import numpy as np >>> import altair as alt >>> >>> df = pd.DataFrame( ... np.random.randn(200, 3), ... columns=['a', 'b', 'c']) ... >>> c = alt.Chart(df).mark_circle().encode( ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c']) >>> >>> st.altair_chart(c, use_container_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5 height: 200px Examples of Altair charts can be found at https://altair-viz.github.io/gallery/. """ import streamlit.elements.altair as altair if width != 0: import streamlit as st st.warning( "The `width` argument in `st.vega_lite_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use altair's native `width` argument as described at https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html" ) altair.marshall( element.vega_lite_chart, altair_chart, use_container_width=use_container_width, ) @_with_element def graphviz_chart( self, element, figure_or_dot, width=0, height=0, use_container_width=False ): """Display a graph using the dagre-d3 library. Parameters ---------- figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, str The Graphlib graph object or dot string to display width : number Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the Graphviz spec. Please refer to the Graphviz documentation for details. height : number Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the Graphviz spec. Please refer to the Graphviz documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. Example ------- >>> import streamlit as st >>> import graphviz as graphviz >>> >>> # Create a graphlib graph object >>> graph = graphviz.Digraph() >>> graph.edge('run', 'intr') >>> graph.edge('intr', 'runbl') >>> graph.edge('runbl', 'run') >>> graph.edge('run', 'kernel') >>> graph.edge('kernel', 'zombie') >>> graph.edge('kernel', 'sleep') >>> graph.edge('kernel', 'runmem') >>> graph.edge('sleep', 'swap') >>> graph.edge('swap', 'runswap') >>> graph.edge('runswap', 'new') >>> graph.edge('runswap', 'runmem') >>> graph.edge('new', 'runmem') >>> graph.edge('sleep', 'runmem') >>> >>> st.graphviz_chart(graph) Or you can render the chart from the graph using GraphViz's Dot language: >>> st.graphviz_chart(''' digraph { run -> intr intr -> runbl runbl -> run run -> kernel kernel -> zombie kernel -> sleep kernel -> runmem sleep -> swap swap -> runswap runswap -> new runswap -> runmem new -> runmem sleep -> runmem } ''') .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=GBn3GXZie5K1kXuBKe4yQL height: 400px """ import streamlit.elements.graphviz_chart as graphviz_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.graphviz` are deprecated and will be removed on 2020-03-04" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.graphviz` is deprecated and will be removed on 2020-03-04" ) graphviz_chart.marshall( element.graphviz_chart, figure_or_dot, use_container_width ) @_with_element def plotly_chart( self, element, figure_or_data, width=0, height=0, use_container_width=False, sharing="streamlit", **kwargs, ): """Display an interactive Plotly chart. Plotly is a charting library for Python. The arguments to this function closely follow the ones for Plotly's `plot()` function. You can find more about Plotly at https://plot.ly/python. Parameters ---------- figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data, dict/list of plotly.graph_objs.Figure/Data, or matplotlib.figure.Figure See https://plot.ly/python/ for examples of graph descriptions. If a Matplotlib Figure, converts it to a Plotly figure and displays it. width : int Deprecated. If != 0 (default), will show an alert. From now on you should set the width directly in the figure. Please refer to the Plotly documentation for details. height : int Deprecated. If != 0 (default), will show an alert. From now on you should set the height directly in the figure. Please refer to the Plotly documentation for details. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. sharing : {'streamlit', 'private', 'secret', 'public'} Use 'streamlit' to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly's servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account. **kwargs Any argument accepted by Plotly's `plot()` function. To show Plotly charts in Streamlit, just call `st.plotly_chart` wherever you would call Plotly's `py.plot` or `py.iplot`. Example ------- The example below comes straight from the examples at https://plot.ly/python: >>> import streamlit as st >>> import plotly.figure_factory as ff >>> import numpy as np >>> >>> # Add histogram data >>> x1 = np.random.randn(200) - 2 >>> x2 = np.random.randn(200) >>> x3 = np.random.randn(200) + 2 >>> >>> # Group data together >>> hist_data = [x1, x2, x3] >>> >>> group_labels = ['Group 1', 'Group 2', 'Group 3'] >>> >>> # Create distplot with custom bin_size >>> fig = ff.create_distplot( ... hist_data, group_labels, bin_size=[.1, .25, .5]) >>> >>> # Plot! >>> st.plotly_chart(fig, use_container_width=True) .. output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ height: 400px """ # NOTE: "figure_or_data" is the name used in Plotly's .plot() method # for their main parameter. I don't like the name, but it's best to # keep it in sync with what Plotly calls it. 
import streamlit.elements.plotly_chart as plotly_chart if width != 0 and height != 0: import streamlit as st st.warning( "The `width` and `height` arguments in `st.plotly_chart` are deprecated and will be removed on 2020-03-04. To set these values, you should instead use Plotly's native arguments as described at https://plot.ly/python/setting-graph-size/" ) elif width != 0: import streamlit as st st.warning( "The `width` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the width, you should instead use Plotly's native `width` argument as described at https://plot.ly/python/setting-graph-size/" ) elif height != 0: import streamlit as st st.warning( "The `height` argument in `st.plotly_chart` is deprecated and will be removed on 2020-03-04. To set the height, you should instead use Plotly's native `height` argument as described at https://plot.ly/python/setting-graph-size/" ) plotly_chart.marshall( element.plotly_chart, figure_or_data, use_container_width, sharing, **kwargs ) @_with_element def pyplot(self, element, fig=None, clear_figure=None, **kwargs): """Display a matplotlib.pyplot figure. Parameters ---------- fig : Matplotlib Figure The figure to plot. When this argument isn't specified, which is the usual case, this function will render the global plot. clear_figure : bool If True, the figure will be cleared after being rendered. If False, the figure will not be cleared after being rendered. If left unspecified, we pick a default based on the value of `fig`. * If `fig` is set, defaults to `False`. * If `fig` is not set, defaults to `True`. This simulates Jupyter's approach to matplotlib rendering. **kwargs : any Arguments to pass to Matplotlib's savefig function. Example ------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> arr = np.random.normal(1, 1, size=100) >>> plt.hist(arr, bins=20) >>> >>> st.pyplot() .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=PwzFN7oLZsvb6HDdwdjkRB height: 530px Notes ----- Matplotlib support several different types of "backends". If you're getting an error using Matplotlib with Streamlit, try setting your backend to "TkAgg":: echo "backend: TkAgg" >> ~/.matplotlib/matplotlibrc For more information, see https://matplotlib.org/faq/usage_faq.html. """ import streamlit.elements.pyplot as pyplot pyplot.marshall(self._get_coordinates, element, fig, clear_figure, **kwargs) @_with_element def bokeh_chart(self, element, figure, use_container_width=False): """Display an interactive Bokeh chart. Bokeh is a charting library for Python. The arguments to this function closely follow the ones for Bokeh's `show` function. You can find more about Bokeh at https://bokeh.pydata.org. Parameters ---------- figure : bokeh.plotting.figure.Figure A Bokeh figure to plot. use_container_width : bool If True, set the chart width to the column width. This takes precedence over Bokeh's native `width` value. To show Bokeh charts in Streamlit, just call `st.bokeh_chart` wherever you would call Bokeh's `show`. Example ------- >>> import streamlit as st >>> from bokeh.plotting import figure >>> >>> x = [1, 2, 3, 4, 5] >>> y = [6, 7, 2, 4, 5] >>> >>> p = figure( ... title='simple line example', ... x_axis_label='x', ... y_axis_label='y') ... >>> p.line(x, y, legend='Trend', line_width=2) >>> >>> st.bokeh_chart(p, use_container_width=True) .. 
output:: https://share.streamlit.io/0.56.0-xTAd/index.html?id=Fdhg51uMbGMLRRxXV6ubzp height: 600px """ import streamlit.elements.bokeh_chart as bokeh_chart bokeh_chart.marshall(element.bokeh_chart, figure, use_container_width) @_with_element def image( self, element, image, caption=None, width=None, use_column_width=False, clamp=False, channels="RGB", format="JPEG", ): """Display an image or list of images. Parameters ---------- image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str] Monochrome image of shape (w,h) or (w,h,1) OR a color image of shape (w,h,3) OR an RGBA image of shape (w,h,4) OR a URL to fetch the image from OR a list of one of the above, to display multiple images. caption : str or list of str Image caption. If displaying multiple images, caption should be a list of captions (one for each image). width : int or None Image width. None means use the image width. use_column_width : bool If True, set the image width to the column width. This takes precedence over the `width` parameter. clamp : bool Clamp image pixel values to a valid range ([0-255] per channel). This is only meaningful for byte array images; the parameter is ignored for image URLs. If this is not set, and an image has an out-of-range value, an error will be thrown. channels : 'RGB' or 'BGR' If image is an nd.array, this parameter denotes the format used to represent color information. Defaults to 'RGB', meaning `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and `image[:, :, 2]` is blue. For images coming from libraries like OpenCV you should set this to 'BGR', instead. format : 'JPEG' or 'PNG' This parameter specifies the image format to use when transferring the image data. Defaults to 'JPEG'. Example ------- >>> from PIL import Image >>> image = Image.open('sunrise.jpg') >>> >>> st.image(image, caption='Sunrise by the mountains', ... use_column_width=True) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=YCFaqPgmgpEz7jwE4tHAzY height: 630px """ from .elements import image_proto if use_column_width: width = -2 elif width is None: width = -1 elif width <= 0: raise StreamlitAPIException("Image width must be positive.") image_proto.marshall_images( self._get_coordinates(), image, caption, width, element.imgs, clamp, channels, format, ) @_with_element def audio(self, element, data, format="audio/wav", start_time=0): """Display an audio player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw audio data, filename, or a URL pointing to the file to load. Numpy arrays and raw data formats must include all necessary file headers to match specified file format. start_time: int The time from which this element should start playing. format : str The mime type for the audio file. Defaults to 'audio/wav'. See https://tools.ietf.org/html/rfc4281 for more info. Example ------- >>> audio_file = open('myaudio.ogg', 'rb') >>> audio_bytes = audio_file.read() >>> >>> st.audio(audio_bytes, format='audio/ogg') .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Dv3M9sA7Cg8gwusgnVNTHb height: 400px """ from .elements import media_proto media_proto.marshall_audio( self._get_coordinates(), element.audio, data, format, start_time ) @_with_element def video(self, element, data, format="video/mp4", start_time=0): """Display a video player. Parameters ---------- data : str, bytes, BytesIO, numpy.ndarray, or file opened with io.open(). Raw video data, filename, or URL pointing to a video to load. Includes support for YouTube URLs. 
Numpy arrays and raw data formats must include all necessary file headers to match specified file format. format : str The mime type for the video file. Defaults to 'video/mp4'. See https://tools.ietf.org/html/rfc4281 for more info. start_time: int The time from which this element should start playing. Example ------- >>> video_file = open('myvideo.mp4', 'rb') >>> video_bytes = video_file.read() >>> >>> st.video(video_bytes) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=Wba9sZELKfKwXH4nDCCbMv height: 600px .. note:: Some videos may not display if they are encoded using MP4V (which is an export option in OpenCV), as this codec is not widely supported by browsers. Converting your video to H.264 will allow the video to be displayed in Streamlit. See this `StackOverflow post <https://stackoverflow.com/a/49535220/2394542>`_ or this `Streamlit forum post <https://discuss.streamlit.io/t/st-video-doesnt-show-opencv-generated-mp4/3193/2>`_ for more information. """ from .elements import media_proto media_proto.marshall_video( self._get_coordinates(), element.video, data, format, start_time ) @_with_element def button(self, element, label, key=None): """Display a button widget. Parameters ---------- label : str A short label explaining to the user what this button is for. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool If the button was clicked on the last run of the app. Example ------- >>> if st.button('Say hello'): ... st.write('Why hello there') ... else: ... st.write('Goodbye') """ element.button.label = label element.button.default = False ui_value = _get_widget_ui_value("button", element, user_key=key) current_value = ui_value if ui_value is not None else False return current_value @_with_element def checkbox(self, element, label, value=False, key=None): """Display a checkbox widget. Parameters ---------- label : str A short label explaining to the user what this checkbox is for. value : bool Preselect the checkbox when it first renders. This will be cast to bool internally. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- bool Whether or not the checkbox is checked. Example ------- >>> agree = st.checkbox('I agree') >>> >>> if agree: ... st.write('Great!') """ element.checkbox.label = label element.checkbox.default = bool(value) ui_value = _get_widget_ui_value("checkbox", element, user_key=key) current_value = ui_value if ui_value is not None else value return bool(current_value) @_with_element def multiselect( self, element, label, options, default=None, format_func=str, key=None ): """Display a multiselect widget. The multiselect widget starts as empty. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. default: [str] or None List of default values. format_func : function Function to modify the display of selectbox options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the selectbox. 
key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- [str] A list with the selected options Example ------- >>> options = st.multiselect( ... 'What are your favorite colors', ... ['Green', 'Yellow', 'Red', 'Blue'], ... ['Yellow', 'Red']) >>> >>> st.write('You selected:', options) .. note:: User experience can be degraded for large lists of `options` (100+), as this widget is not designed to handle arbitrary text search efficiently. See this `thread <https://discuss.streamlit.io/t/streamlit-loading-column-data-takes-too-much-time/1791>`_ on the Streamlit community forum for more information and `GitHub issue #1059 <https://github.com/streamlit/streamlit/issues/1059>`_ for updates on the issue. """ # Perform validation checks and return indices base on the default values. def _check_and_convert_to_indices(options, default_values): if default_values is None and None not in options: return None if not isinstance(default_values, list): # This if is done before others because calling if not x (done # right below) when x is of type pd.Series() or np.array() throws a # ValueError exception. if is_type(default_values, "numpy.ndarray") or is_type( default_values, "pandas.core.series.Series" ): default_values = list(default_values) elif not default_values: default_values = [default_values] else: default_values = list(default_values) for value in default_values: if value not in options: raise StreamlitAPIException( "Every Multiselect default value must exist in options" ) return [options.index(value) for value in default_values] indices = _check_and_convert_to_indices(options, default) element.multiselect.label = label default_value = [] if indices is None else indices element.multiselect.default[:] = default_value element.multiselect.options[:] = [ str(format_func(option)) for option in options ] ui_value = _get_widget_ui_value("multiselect", element, user_key=key) current_value = ui_value.value if ui_value is not None else default_value return [options[i] for i in current_value] @_with_element def radio(self, element, label, options, index=0, format_func=str, key=None): """Display a radio button widget. Parameters ---------- label : str A short label explaining to the user what this radio group is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the radio options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of radio options. It receives the raw option as an argument and should output the label to be shown for that option. This has no impact on the return value of the radio. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option. Example ------- >>> genre = st.radio( ... "What\'s your favorite movie genre", ... ('Comedy', 'Drama', 'Documentary')) >>> >>> if genre == 'Comedy': ... st.write('You selected comedy.') ... else: ... 
st.write("You didn\'t select comedy.") """ if not isinstance(index, int): raise StreamlitAPIException( "Radio Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Radio index must be between 0 and length of options" ) element.radio.label = label element.radio.default = index element.radio.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("radio", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def selectbox(self, element, label, options, index=0, format_func=str, key=None): """Display a select widget. Parameters ---------- label : str A short label explaining to the user what this select widget is for. options : list, tuple, numpy.ndarray, or pandas.Series Labels for the select options. This will be cast to str internally by default. index : int The index of the preselected option on first render. format_func : function Function to modify the display of the labels. It receives the option as an argument and its output will be cast to str. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- any The selected option Example ------- >>> option = st.selectbox( ... 'How would you like to be contacted?', ... ('Email', 'Home phone', 'Mobile phone')) >>> >>> st.write('You selected:', option) """ if not isinstance(index, int): raise StreamlitAPIException( "Selectbox Value has invalid type: %s" % type(index).__name__ ) if len(options) > 0 and not 0 <= index < len(options): raise StreamlitAPIException( "Selectbox index must be between 0 and length of options" ) element.selectbox.label = label element.selectbox.default = index element.selectbox.options[:] = [str(format_func(option)) for option in options] ui_value = _get_widget_ui_value("selectbox", element, user_key=key) current_value = ui_value if ui_value is not None else index return ( options[current_value] if len(options) > 0 and options[current_value] is not None else NoValue ) @_with_element def slider( self, element, label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, ): """Display a slider widget. This also allows you to render a range slider by passing a two-element tuple or list as the `value`. Parameters ---------- label : str or None A short label explaining to the user what this slider is for. min_value : int/float or None The minimum permitted value. Defaults to 0 if the value is an int, 0.0 otherwise. max_value : int/float or None The maximum permitted value. Defaults 100 if the value is an int, 1.0 otherwise. value : int/float or a tuple/list of int/float or None The value of the slider when it first renders. If a tuple/list of two values is passed here, then a range slider with those lower and upper bounds is rendered. For example, if set to `(1, 10)` the slider will have a selectable range between 1 and 10. Defaults to min_value. step : int/float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. format : str or None A printf-style format string controlling how the interface should display numbers. This does not impact the return value. 
Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int/float or tuple of int/float The current value of the slider widget. The return type will match the data type of the value parameter. Examples -------- >>> age = st.slider('How old are you?', 0, 130, 25) >>> st.write("I'm ", age, 'years old') And here's an example of a range slider: >>> values = st.slider( ... 'Select a range of values', ... 0.0, 100.0, (25.0, 75.0)) >>> st.write('Values:', values) """ # Set value default. if value is None: value = min_value if min_value is not None else 0 # Ensure that the value is either a single value or a range of values. single_value = isinstance(value, (int, float)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "Slider value should either be an int/float or a list/tuple of " "0 to 2 ints/floats" ) # Ensure that the value is either an int/float or a list/tuple of ints/floats. if single_value: int_value = isinstance(value, int) float_value = isinstance(value, float) else: int_value = all(map(lambda v: isinstance(v, int), value)) float_value = all(map(lambda v: isinstance(v, float), value)) if not int_value and not float_value: raise StreamlitAPIException( "Slider tuple/list components must be of the same type." ) # Set corresponding defaults. if min_value is None: min_value = 0 if int_value else 0.0 if max_value is None: max_value = 100 if int_value else 1.0 if step is None: step = 1 if int_value else 0.01 # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all(map(lambda a: isinstance(a, int), args)) float_args = all(map(lambda a: isinstance(a, float), args)) if not int_args and not float_args: raise StreamlitAPIException( "Slider value arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "Both value and arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that min <= value <= max. if single_value: if not min_value <= value <= max_value: raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) elif len(value) == 2: start, end = value if not min_value <= start <= end <= max_value: raise StreamlitAPIException( "The value and/or arguments are out of range." ) else: value = [min_value, max_value] # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. # (We check `min_value` and `max_value` here; `value` and `step` are # already known to be in the [min_value, max_value] range.) 
try: if all_ints: JSNumber.validate_int_bounds(min_value, "`min_value`") JSNumber.validate_int_bounds(max_value, "`max_value`") else: JSNumber.validate_float_bounds(min_value, "`min_value`") JSNumber.validate_float_bounds(max_value, "`max_value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) # Set format default. if format is None: if all_ints: format = "%d" else: format = "%0.2f" # It would be great if we could guess the number of decimal places from # the `step` argument, but this would only be meaningful if step were a # decimal. As a possible improvement we could make this function accept # decimals and/or use some heuristics for floats. element.slider.label = label element.slider.format = format element.slider.default[:] = [value] if single_value else value element.slider.min = min_value element.slider.max = max_value element.slider.step = step ui_value = _get_widget_ui_value("slider", element, user_key=key) # Convert the current value to the appropriate type. current_value = ui_value if ui_value is not None else value # Cast ui_value to the same type as the input arguments if ui_value is not None: current_value = getattr(ui_value, "value") # Convert float array into int array if the rest of the arguments # are ints if all_ints: current_value = list(map(int, current_value)) # If there is only one value in the array destructure it into a # single variable current_value = current_value[0] if single_value else current_value return current_value if single_value else tuple(current_value) @_with_element def file_uploader( self, element, label, type=None, encoding="auto", key=None, ): """Display a file uploader widget. By default, uploaded files are limited to 200MB. You can configure this using the `server.maxUploadSize` config option. Parameters ---------- label : str or None A short label explaining to the user what this file uploader is for. type : str or list of str or None Array of allowed extensions. ['png', 'jpg'] By default, all extensions are allowed. encoding : str or None The encoding to use when opening textual files (i.e. non-binary). For example: 'utf-8'. If set to 'auto', will try to guess the encoding. If None, will assume the file is binary. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- BytesIO or StringIO or or list of BytesIO/StringIO or None If no file has been uploaded, returns None. Otherwise, returns the data for the uploaded file(s): - If the file is in a well-known textual format (or if the encoding parameter is set), the file data is a StringIO. - Otherwise the file data is BytesIO. - If multiple_files is True, a list of file data will be returned. Note that BytesIO/StringIO are "file-like", which means you can pass them anywhere where a file is expected! Examples -------- >>> uploaded_file = st.file_uploader("Choose a CSV file", type="csv") >>> if uploaded_file is not None: ... data = pd.read_csv(uploaded_file) ... st.write(data) """ # Don't release this just yet. 
(When ready to release, turn test back # on at file_uploader_test.py) accept_multiple_files = False if isinstance(type, str): type = [type] element.file_uploader.label = label element.file_uploader.type[:] = type if type is not None else [] element.file_uploader.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) element.file_uploader.multiple_files = accept_multiple_files _set_widget_id("file_uploader", element, user_key=key) files = None ctx = get_report_ctx() if ctx is not None: files = ctx.uploaded_file_mgr.get_files( session_id=ctx.session_id, widget_id=element.file_uploader.id ) if files is None: return NoValue file_datas = [get_encoded_file_data(file.data, encoding) for file in files] return file_datas if accept_multiple_files else file_datas[0] @_with_element def beta_color_picker(self, element, label, value=None, key=None): """Display a color picker widget. Note: This is a beta feature. See https://docs.streamlit.io/en/latest/pre_release_features.html for more information. Parameters ---------- label : str A short label explaining to the user what this input is for. value : str or None The hex value of this widget when it first renders. If None, defaults to black. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The selected color as a hex string. Example ------- >>> color = st.beta_color_picker('Pick A Color', '#00f900') >>> st.write('The current color is', color) """ # set value default if value is None: value = "#000000" # make sure the value is a string if not isinstance(value, str): raise StreamlitAPIException( """ Color Picker Value has invalid type: %s. Expects a hex string like '#00FFAA' or '#000'. """ % type(value).__name__ ) # validate the value and expects a hex string match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value) if not match: raise StreamlitAPIException( """ '%s' is not a valid hex code for colors. Valid ones are like '#00FFAA' or '#000'. """ % value ) element.color_picker.label = label element.color_picker.default = str(value) ui_value = _get_widget_ui_value("color_picker", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_input( self, element, label, value="", max_chars=None, key=None, type="default" ): """Display a single-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. max_chars : int or None Max number of characters allowed in text input. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. type : str The type of the text input. This can be either "default" (for a regular text input), or "password" (for a text input that masks the user's typed value). Defaults to "default". Returns ------- str The current value of the text input widget. 
Example ------- >>> title = st.text_input('Movie title', 'Life of Brian') >>> st.write('The current movie title is', title) """ element.text_input.label = label element.text_input.default = str(value) if max_chars is not None: element.text_input.max_chars = max_chars if type == "default": element.text_input.type = TextInput.DEFAULT elif type == "password": element.text_input.type = TextInput.PASSWORD else: raise StreamlitAPIException( "'%s' is not a valid text_input type. Valid types are 'default' and 'password'." % type ) ui_value = _get_widget_ui_value("text_input", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def text_area( self, element, label, value="", height=None, max_chars=None, key=None ): """Display a multi-line text input widget. Parameters ---------- label : str A short label explaining to the user what this input is for. value : any The text value of this widget when it first renders. This will be cast to str internally. height : int or None Desired height of the UI element expressed in pixels. If None, a default height is used. max_chars : int or None Maximum number of characters allowed in text area. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- str The current value of the text input widget. Example ------- >>> txt = st.text_area('Text to analyze', ''' ... It was the best of times, it was the worst of times, it was ... the age of wisdom, it was the age of foolishness, it was ... the epoch of belief, it was the epoch of incredulity, it ... was the season of Light, it was the season of Darkness, it ... was the spring of hope, it was the winter of despair, (...) ... ''') >>> st.write('Sentiment:', run_sentiment_analysis(txt)) """ element.text_area.label = label element.text_area.default = str(value) if height is not None: element.text_area.height = height if max_chars is not None: element.text_area.max_chars = max_chars ui_value = _get_widget_ui_value("text_area", element, user_key=key) current_value = ui_value if ui_value is not None else value return str(current_value) @_with_element def time_input(self, element, label, value=None, key=None): """Display a time input widget. Parameters ---------- label : str A short label explaining to the user what this time input is for. value : datetime.time/datetime.datetime The value of this widget when it first renders. This will be cast to str internally. Defaults to the current time. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.time The current value of the time input widget. Example ------- >>> t = st.time_input('Set an alarm for', datetime.time(8, 45)) >>> st.write('Alarm is set for', t) """ # Set value default. if value is None: value = datetime.now().time() # Ensure that the value is either datetime/time if not isinstance(value, datetime) and not isinstance(value, time): raise StreamlitAPIException( "The type of the value should be either datetime or time." 
) # Convert datetime to time if isinstance(value, datetime): value = value.time() element.time_input.label = label element.time_input.default = time.strftime(value, "%H:%M") ui_value = _get_widget_ui_value("time_input", element, user_key=key) current_value = ( datetime.strptime(ui_value, "%H:%M").time() if ui_value is not None else value ) return current_value @_with_element def date_input( self, element, label, value=None, min_value=datetime.min, max_value=None, key=None, ): """Display a date input widget. Parameters ---------- label : str A short label explaining to the user what this date input is for. value : datetime.date or datetime.datetime or list/tuple of datetime.date or datetime.datetime or None The value of this widget when it first renders. If a list/tuple with 0 to 2 date/datetime values is provided, the datepicker will allow users to provide a range. Defaults to today as a single-date picker. min_value : datetime.date or datetime.datetime The minimum selectable date. Defaults to datetime.min. max_value : datetime.date or datetime.datetime The maximum selectable date. Defaults to today+10y. key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- datetime.date The current value of the date input widget. Example ------- >>> d = st.date_input( ... "When\'s your birthday", ... datetime.date(2019, 7, 6)) >>> st.write('Your birthday is:', d) """ # Set value default. if value is None: value = datetime.now().date() single_value = isinstance(value, (date, datetime)) range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2) if not single_value and not range_value: raise StreamlitAPIException( "DateInput value should either be an date/datetime or a list/tuple of " "0 - 2 date/datetime values" ) if single_value: value = [value] element.date_input.is_range = range_value value = [v.date() if isinstance(v, datetime) else v for v in value] element.date_input.label = label element.date_input.default[:] = [date.strftime(v, "%Y/%m/%d") for v in value] if isinstance(min_value, datetime): min_value = min_value.date() element.date_input.min = date.strftime(min_value, "%Y/%m/%d") if max_value is None: today = date.today() max_value = date(today.year + 10, today.month, today.day) if isinstance(max_value, datetime): max_value = max_value.date() element.date_input.max = date.strftime(max_value, "%Y/%m/%d") ui_value = _get_widget_ui_value("date_input", element, user_key=key) if ui_value is not None: value = getattr(ui_value, "data") value = [datetime.strptime(v, "%Y/%m/%d").date() for v in value] if single_value: return value[0] else: return tuple(value) @_with_element def number_input( self, element, label, min_value=None, max_value=None, value=NoValue(), step=None, format=None, key=None, ): """Display a numeric input widget. Parameters ---------- label : str or None A short label explaining to the user what this input is for. min_value : int or float or None The minimum permitted value. If None, there will be no minimum. max_value : int or float or None The maximum permitted value. If None, there will be no maximum. value : int or float or None The value of this widget when it first renders. Defaults to min_value, or 0.0 if min_value is None step : int or float or None The stepping interval. Defaults to 1 if the value is an int, 0.01 otherwise. If the value is not specified, the format parameter will be used. 
format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact the return value. Valid formatters: %d %e %f %g %i key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. Multiple widgets of the same type may not share the same key. Returns ------- int or float The current value of the numeric input widget. The return type will match the data type of the value parameter. Example ------- >>> number = st.number_input('Insert a number') >>> st.write('The current number is ', number) """ if isinstance(value, NoValue): if min_value: value = min_value else: value = 0.0 # We set a float as default int_value = isinstance(value, numbers.Integral) float_value = isinstance(value, float) if value is None: raise StreamlitAPIException( "Default value for number_input should be an int or a float." ) else: if format is None: format = "%d" if int_value else "%0.2f" if format in ["%d", "%u", "%i"] and float_value: # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( "Warning: NumberInput value below is float, but format {} displays as integer.".format( format ) ) if step is None: step = 1 if int_value else 0.01 try: float(format % 2) except (TypeError, ValueError): raise StreamlitAPIException( "Format string for st.number_input contains invalid characters: %s" % format ) # Ensure that all arguments are of the same type. args = [min_value, max_value, step] int_args = all( map( lambda a: ( isinstance(a, numbers.Integral) or isinstance(a, type(None)) ), args, ) ) float_args = all( map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) ) if not int_args and not float_args: raise StreamlitAPIException( "All arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, } ) # Ensure that the value matches arguments' types. all_ints = int_value and int_args all_floats = float_value and float_args if not all_ints and not all_floats: raise StreamlitAPIException( "All numerical arguments must be of the same type." "\n`value` has %(value_type)s type." "\n`min_value` has %(min_type)s type." "\n`max_value` has %(max_type)s type." "\n`step` has %(step_type)s type." % { "value_type": type(value).__name__, "min_type": type(min_value).__name__, "max_type": type(max_value).__name__, "step_type": type(step).__name__, } ) if (min_value and min_value > value) or (max_value and max_value < value): raise StreamlitAPIException( "The default `value` of %(value)s " "must lie between the `min_value` of %(min)s " "and the `max_value` of %(max)s, inclusively." % {"value": value, "min": min_value, "max": max_value} ) # Bounds checks. JSNumber produces human-readable exceptions that # we simply re-package as StreamlitAPIExceptions. 
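        # (Unlike st.slider, `value` and `step` are also validated below, since
        # min_value/max_value may be None here and so do not already bound them.)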
try: if all_ints: if min_value is not None: JSNumber.validate_int_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_int_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_int_bounds(step, "`step`") JSNumber.validate_int_bounds(value, "`value`") else: if min_value is not None: JSNumber.validate_float_bounds(min_value, "`min_value`") if max_value is not None: JSNumber.validate_float_bounds(max_value, "`max_value`") if step is not None: JSNumber.validate_float_bounds(step, "`step`") JSNumber.validate_float_bounds(value, "`value`") except JSNumberBoundsException as e: raise StreamlitAPIException(str(e)) number_input = element.number_input number_input.data_type = NumberInput.INT if all_ints else NumberInput.FLOAT number_input.label = label number_input.default = value if min_value is not None: number_input.min = min_value number_input.has_min = True if max_value is not None: number_input.max = max_value number_input.has_max = True if step is not None: number_input.step = step if format is not None: number_input.format = format ui_value = _get_widget_ui_value("number_input", element, user_key=key) return ui_value if ui_value is not None else value @_with_element def progress(self, element, value): """Display a progress bar. Parameters ---------- value : int or float 0 <= value <= 100 for int 0.0 <= value <= 1.0 for float Example ------- Here is an example of a progress bar increasing over time: >>> import time >>> >>> my_bar = st.progress(0) >>> >>> for percent_complete in range(100): ... time.sleep(0.1) ... my_bar.progress(percent_complete + 1) """ # TODO: standardize numerical type checking across st.* functions. if isinstance(value, float): if 0.0 <= value <= 1.0: element.progress.value = int(value * 100) else: raise StreamlitAPIException( "Progress Value has invalid value [0.0, 1.0]: %f" % value ) elif isinstance(value, int): if 0 <= value <= 100: element.progress.value = value else: raise StreamlitAPIException( "Progress Value has invalid value [0, 100]: %d" % value ) else: raise StreamlitAPIException( "Progress Value has invalid type: %s" % type(value).__name__ ) @_with_element def empty(self, element): """Add a placeholder to the app. The placeholder can be filled any time by calling methods on the return value. Example ------- >>> my_placeholder = st.empty() >>> >>> # Now replace the placeholder with some text: >>> my_placeholder.text("Hello world!") >>> >>> # And replace the text with an image: >>> my_placeholder.image(my_image_bytes) """ # The protobuf needs something to be set element.empty.unused = True @_with_element def map(self, element, data=None, zoom=None, use_container_width=True): """Display a map with points on it. This is a wrapper around st.pydeck_chart to quickly create scatterplot charts on top of a map, with auto-centering and auto-zoom. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The data to be plotted. Must have columns called 'lat', 'lon', 'latitude', or 'longitude'. 
zoom : int Zoom level as specified in https://wiki.openstreetmap.org/wiki/Zoom_levels Example ------- >>> import pandas as pd >>> import numpy as np >>> >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.map(df) .. output:: https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH height: 600px """ import streamlit.elements.map as streamlit_map element.deck_gl_json_chart.json = streamlit_map.to_deckgl_json(data, zoom) element.deck_gl_json_chart.use_container_width = use_container_width @_with_element def deck_gl_chart(self, element, spec=None, use_container_width=False, **kwargs): """Draw a map chart using the Deck.GL library. This API closely follows Deck.GL's JavaScript API (https://deck.gl/#/documentation), with a few small adaptations and some syntax sugar. When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec : dict Keys in this dict can be: - Anything accepted by Deck.GL's top level element, such as "viewport", "height", "width". - "layers": a list of dicts containing information to build a new Deck.GL layer in the map. Each layer accepts the following keys: - "data" : DataFrame The data for the current layer. - "type" : str One of the Deck.GL layer types that are currently supported by Streamlit: ArcLayer, GridLayer, HexagonLayer, LineLayer, PointCloudLayer, ScatterplotLayer, ScreenGridLayer, TextLayer. - Plus anything accepted by that layer type. The exact keys that are accepted depend on the "type" field, above. For example, for ScatterplotLayer you can set fields like "opacity", "filled", "stroked", and so on. In addition, Deck.GL"s documentation for ScatterplotLayer shows you can use a "getRadius" field to individually set the radius of each circle in the plot. So here you would set "getRadius": "my_column" where "my_column" is the name of the column containing the radius data. For things like "getPosition", which expect an array rather than a scalar value, we provide alternates that make the API simpler to use with dataframes: - Instead of "getPosition" : use "getLatitude" and "getLongitude". - Instead of "getSourcePosition" : use "getLatitude" and "getLongitude". - Instead of "getTargetPosition" : use "getTargetLatitude" and "getTargetLongitude". - Instead of "getColor" : use "getColorR", "getColorG", "getColorB", and (optionally) "getColorA", for red, green, blue and alpha. - Instead of "getSourceColor" : use the same as above. - Instead of "getTargetColor" : use "getTargetColorR", etc. use_container_width : bool If True, set the chart width to the column width. This takes precedence over the figure's native `width` value. **kwargs : any Same as spec, but as keywords. Keys are "unflattened" at the underscore characters. For example, foo_bar_baz=123 becomes foo={'bar': {'bar': 123}}. Example ------- >>> st.deck_gl_chart( ... viewport={ ... 'latitude': 37.76, ... 'longitude': -122.4, ... 'zoom': 11, ... 'pitch': 50, ... }, ... layers=[{ ... 'type': 'HexagonLayer', ... 'data': df, ... 'radius': 200, ... 'elevationScale': 4, ... 'elevationRange': [0, 1000], ... 'pickable': True, ... 'extruded': True, ... 
}, { ... 'type': 'ScatterplotLayer', ... 'data': df, ... }]) ... .. output:: https://share.streamlit.io/0.50.0-td2L/index.html?id=3GfRygWqxuqB5UitZLjz9i height: 530px """ suppress_deprecation_warning = config.get_option( "global.suppressDeprecationWarnings" ) if not suppress_deprecation_warning: import streamlit as st st.warning( """ The `deck_gl_chart` widget is deprecated and will be removed on 2020-05-01. To render a map, you should use `st.pydeck_chart` widget. """ ) import streamlit.elements.deck_gl as deck_gl deck_gl.marshall(element.deck_gl_chart, spec, use_container_width, **kwargs) @_with_element def pydeck_chart(self, element, pydeck_obj=None, use_container_width=False): """Draw a chart using the PyDeck library. This supports 3D maps, point clouds, and more! More info about PyDeck at https://deckgl.readthedocs.io/en/latest/. These docs are also quite useful: - DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs - DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json When using this command, we advise all users to use a personal Mapbox token. This ensures the map tiles used in this chart are more robust. You can do this with the mapbox.token config option. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) See https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more info on how to set config options. Parameters ---------- spec: pydeck.Deck or None Object specifying the PyDeck chart to draw. Example ------- Here's a chart using a HexagonLayer and a ScatterplotLayer on top of the light map style: >>> df = pd.DataFrame( ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], ... columns=['lat', 'lon']) >>> >>> st.pydeck_chart(pdk.Deck( ... map_style='mapbox://styles/mapbox/light-v9', ... initial_view_state=pdk.ViewState( ... latitude=37.76, ... longitude=-122.4, ... zoom=11, ... pitch=50, ... ), ... layers=[ ... pdk.Layer( ... 'HexagonLayer', ... data=df, ... get_position='[lon, lat]', ... radius=200, ... elevation_scale=4, ... elevation_range=[0, 1000], ... pickable=True, ... extruded=True, ... ), ... pdk.Layer( ... 'ScatterplotLayer', ... data=df, ... get_position='[lon, lat]', ... get_color='[200, 30, 0, 160]', ... get_radius=200, ... ), ... ], ... )) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i height: 530px """ import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart deck_gl_json_chart.marshall(element, pydeck_obj, use_container_width) @_with_element def table(self, element, data=None): """Display a static table. This differs from `st.dataframe` in that the table in this case is static: its entire contents are just laid out directly on the page. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None The table data. Example ------- >>> df = pd.DataFrame( ... np.random.randn(10, 5), ... columns=('col %d' % i for i in range(5))) ... >>> st.table(df) .. output:: https://share.streamlit.io/0.25.0-2JkNY/index.html?id=KfZvDMprL4JFKXbpjD3fpq height: 480px """ import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, element.table) def add_rows(self, data=None, **kwargs): """Concatenate a dataframe to the bottom of the current one. Parameters ---------- data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None Table to concat. Optional. 
**kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None The named dataset to concat. Optional. You can only pass in 1 dataset (including the one in the data parameter). Example ------- >>> df1 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table = st.table(df1) >>> >>> df2 = pd.DataFrame( ... np.random.randn(50, 20), ... columns=('col %d' % i for i in range(20))) ... >>> my_table.add_rows(df2) >>> # Now the table shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. You can do the same thing with plots. For example, if you want to add more data to a line chart: >>> # Assuming df1 and df2 from the example above still exist... >>> my_chart = st.line_chart(df1) >>> my_chart.add_rows(df2) >>> # Now the chart shown in the Streamlit app contains the data for >>> # df1 followed by the data for df2. And for plots whose datasets are named, you can pass the data with a keyword argument where the key is the name: >>> my_chart = st.vega_lite_chart({ ... 'mark': 'line', ... 'encoding': {'x': 'a', 'y': 'b'}, ... 'datasets': { ... 'some_fancy_name': df1, # <-- named dataset ... }, ... 'data': {'name': 'some_fancy_name'}, ... }), >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword """ if self._container is None or self._cursor is None: return self if not self._cursor.is_locked: raise StreamlitAPIException("Only existing elements can `add_rows`.") # Accept syntax st.add_rows(df). if data is not None and len(kwargs) == 0: name = "" # Accept syntax st.add_rows(foo=df). elif len(kwargs) == 1: name, data = kwargs.popitem() # Raise error otherwise. else: raise StreamlitAPIException( "Wrong number of arguments to add_rows()." "Command requires exactly one dataset" ) # When doing add_rows on an element that does not already have data # (for example, st.line_chart() without any args), call the original # st.foo() element with new data instead of doing an add_rows(). if ( self._cursor.props["delta_type"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES and self._cursor.props["last_index"] is None ): # IMPORTANT: This assumes delta types and st method names always # match! st_method_name = self._cursor.props["delta_type"] st_method = getattr(self, st_method_name) st_method(data, **kwargs) return data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows( data, self._cursor.props["delta_type"], self._cursor.props["last_index"] ) msg = ForwardMsg_pb2.ForwardMsg() msg.metadata.parent_block.container = self._container msg.metadata.parent_block.path[:] = self._cursor.path msg.metadata.delta_id = self._cursor.index import streamlit.elements.data_frame_proto as data_frame_proto data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data) if name: msg.delta.add_rows.name = name msg.delta.add_rows.has_name = True _enqueue_message(msg) return self def _maybe_melt_data_for_add_rows(data, delta_type, last_index): import pandas as pd import streamlit.elements.data_frame_proto as data_frame_proto # For some delta types we have to reshape the data structure # otherwise the input data and the actual data used # by vega_lite will be different and it will throw an error. 
if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES: if not isinstance(data, pd.DataFrame): data = type_util.convert_anything_to_df(data) if type(data.index) is pd.RangeIndex: old_step = _get_pandas_index_attr(data, "step") # We have to drop the predefined index data = data.reset_index(drop=True) old_stop = _get_pandas_index_attr(data, "stop") if old_step is None or old_stop is None: raise StreamlitAPIException( "'RangeIndex' object has no attribute 'step'" ) start = last_index + old_step stop = last_index + old_step + old_stop data.index = pd.RangeIndex(start=start, stop=stop, step=old_step) last_index = stop - 1 index_name = data.index.name if index_name is None: index_name = "index" data = pd.melt(data.reset_index(), id_vars=[index_name]) return data, last_index def _clean_text(text): return textwrap.dedent(str(text)).strip() def _value_or_dg(value, dg): """Return either value, or None, or dg. This is needed because Widgets have meaningful return values. This is unlike other elements, which always return None. Then we internally replace that None with a DeltaGenerator instance. However, sometimes a widget may want to return None, and in this case it should not be replaced by a DeltaGenerator. So we have a special NoValue object that gets replaced by None. """ if value is NoValue: return None if value is None: return dg return value def _enqueue_message(msg): """Enqueues a ForwardMsg proto to send to the app.""" ctx = get_report_ctx() if ctx is None: raise NoSessionContext() ctx.enqueue(msg)
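

# A small, self-contained sketch (not used by Streamlit itself) of the
# reshaping that _maybe_melt_data_for_add_rows applies for the "melting"
# delta types: a wide frame becomes long-form (index, variable, value) rows.
if __name__ == "__main__":
    import pandas as pd

    wide = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    long_form = pd.melt(wide.reset_index(), id_vars=["index"])
    print(long_form)  # four rows: (0, a, 1), (1, a, 2), (0, b, 3), (1, b, 4)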
35.168652
278
0.576087
[ "Apache-2.0" ]
OakNorthAI/streamlit-base
lib/streamlit/DeltaGenerator.py
112,188
Python
import numpy as np
import matplotlib.pyplot as plt

# Load the saved time stamps and shift them so the series starts at t = 0.
ts = np.load("ts.npy")
ts -= ts[0]
# Load the corresponding sample values.
xs = np.load("xs.npy")

print("ts shape: {0}".format(np.shape(ts)))
print("xs shape: {0}".format(np.shape(xs)))

# Scatter-plot the samples against time.
plt.figure()
plt.scatter(ts, xs)
plt.show()
16.333333
43
0.64898
[ "MIT" ]
ThatSnail/synth_detune
plotter.py
245
Python
from typing import List


class Solution:
    def numSpecialEquivGroups(self, A: List[str]) -> int:
        S = set()
        for s in A:
            S.add(''.join(sorted(s[::2]) + sorted(s[1::2])))
        return len(S)
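

# A quick, self-contained check on a sample input (not part of the LeetCode
# harness). Two words land in the same group when their even-indexed characters
# and their odd-indexed characters sort to the same canonical string.
if __name__ == "__main__":
    words = ["abcd", "cdab", "cbad", "xyzz", "zzxy", "zzyx"]
    print(Solution().numSpecialEquivGroups(words))  # -> 3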
25
55
0.56
[ "MIT" ]
chopchap/leetcode
Algorithms/String/893. Groups of Special-Equivalent Strings.py
175
Python
from collections import OrderedDict import numpy as np from gym.spaces import Box, Dict from multiworld.envs.env_util import get_stat_in_paths, \ create_stats_ordered_dict, get_asset_full_path from multiworld.core.multitask_env import MultitaskEnv from multiworld.envs.mujoco.sawyer_xyz.push.sawyer_push import SawyerPushEnv from pyquaternion import Quaternion def zangle_to_quat(zangle): """ :param zangle in rad :return: quaternion """ #return (Quaternion(axis=[0,1,0], angle=np.pi) * Quaternion(axis=[0, 0, -1], angle= zangle)).elements return (Quaternion(axis=[0,0,1], angle=np.pi) * Quaternion(axis=[-1, 0, 0], angle= zangle)).elements #return (Quaternion(axis=[1,0,0], angle=np.pi) * Quaternion(axis=[0, -1, 0], angle= zangle)).elements #return (Quaternion(axis=[1,0,0], angle=np.pi) * Quaternion(axis=[-1, 0, 0], angle= zangle)).elements #fail #return (Quaternion(axis=[0,0,1], angle=np.pi) * Quaternion(axis=[0, -1, 0], angle= zangle)).elements class SawyerCoffeeEnv( SawyerPushEnv): def __init__( self, tasks = [{'goal': np.array([0, 1.0, 0.05]), 'height': 0.06, 'obj_init_pos':np.array([0, 0.6, 0.04])}] , hand_type = 'weiss_v2', rewMode = 'orig', **kwargs ): self.quick_init(locals()) self.hand_type = hand_type SawyerPushEnv.__init__( self, tasks = tasks, hand_type = hand_type, **kwargs ) #self.hand_init_pos = [-0.00434313 , 0.76608467 , 0.26081535] self.demo = False self.max_path_length = 120 self.camera_name = 'angled_cam' self.info_logKeys = ['placingDist' , 'pickRew' , 'reachRew' , 'placeRew'] self.rewMode = rewMode self.action_space = Box( np.array([-1, -1, -1, -1]), np.array([1, 1, 1, 1]), ) @property def model_name(self): #return get_asset_full_path('sawyer_xyz/sawyer_pickPlace.xml') #self.reset_mocap_quat = zangle_to_quat(np.pi/2) #this is the reset_mocap_quat for wsg grippers #self.reset_mocap_quat = zangle_to_quat(-np.pi/2) init_quat = [1,0,0,1] self.reset_mocap_quat = (Quaternion(axis= [1,0,0] , angle = -np.pi/2)*Quaternion(init_quat)).elements return get_asset_full_path('sawyer_xyz/sawyer_wsg_coffee.xml') def _reset_hand(self): for _ in range(10): self.data.set_mocap_pos('mocap', self.hand_init_pos) self.data.set_mocap_quat('mocap', self.reset_mocap_quat) self.do_simulation([-1,1], self.frame_skip) def step(self, action): #action = [0,0,0,1] if self.demo: if self.curr_path_length <=20: action = [0 , 1, -1, -1] elif self.curr_path_length <=40: action = [0,1,1,1] elif self.curr_path_length <=70: action = [0,0.9,-0.25,1] elif self.curr_path_length<=100: action = [0,-1 ,1 , -1] noise = 5*1e-1*np.random.uniform(-1,1 , size= 3) noise_4d = np.concatenate([noise , [0]]) action = np.array(action) + noise_4d #object position after picking and placing coffe : [-0.00434313 0.76608467 0.26081535] self.set_xyz_action(action[:3]) self.do_simulation([ action[-1], -action[-1]]) self._set_goal_marker(self._state_goal) ob = self._get_obs() reward , reachReward , pickReward , placeReward , placingDist = self.compute_reward(action, ob) self.curr_path_length +=1 if self.curr_path_length == self.max_path_length: done = True else: done = False return ob, reward, done, OrderedDict({ 'epRew' : reward , 'reachRew': reachReward , 'pickRew': pickReward , 'placeRew': placeReward , 'placingDist': placingDist}) def change_task(self, task): task = {'goal': np.array([0, 1.0, 0.05]), 'height': 0.06, 'obj_init_pos':np.array([0, 0.6, 0.04])} self.grasp = False self.pickCompleted = False if len(task['goal']) == 3: self._state_goal = task['goal'] else: self._state_goal = np.concatenate([task['goal'] , [0.02]]) 
self._set_goal_marker(self._state_goal) if len(task['obj_init_pos']) == 3: self.obj_init_pos = task['obj_init_pos'] else: self.obj_init_pos = np.concatenate([task['obj_init_pos'] , [0.02]]) #self.maxPlacingDist = np.linalg.norm(np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._state_goal)) + self.heightTarget def render(self, mode = 'human'): if mode == 'human': im_size = 500 ; norm = 1.0 self.set_goal_visibility(visible = True) elif mode == 'nn': im_size = self.image_dim ; norm = 255.0 elif mode == 'vis_nn': im_size = self.image_dim ; norm = 1.0 else: raise AssertionError('Mode must be human, nn , or vis_nn') if self.camera_name == 'angled_cam': image = self.get_image(width= im_size , height = im_size , camera_name = 'angled_cam').transpose()/norm image = image.reshape((3, im_size, im_size)) image = np.rot90(image, axes = (-2,-1)) final_image = np.transpose(image , [1,2,0]) if 'nn' in mode: final_image = final_image[:48 ,10 : 74,:] # elif 'human' in mode: # final_image = final_image[:285, 60: 440,:] if self.hide_goal: self.set_goal_visibility(visible = False) return final_image def compute_reward(self, actions, obs): if isinstance(obs, dict): obs = obs['state_observation'] objPos = obs[3:6] rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector') fingerCOM = (rightFinger + leftFinger)/2 placingGoal = self._state_goal graspDist = np.linalg.norm(objPos - fingerCOM) placingDist = np.linalg.norm(objPos - placingGoal) def reachReward(): graspRew = -graspDist if np.linalg.norm(objPos[:2] - fingerCOM[:2]) < 0.02 and fingerCOM[2]<0.05: self.grasp = True return graspRew def pickRew(): if self.pickCompleted: return 10 elif self.grasp: if abs(0.07 - objPos[2])<0.1: self.pickCompleted = True return 1/(abs(0.07 - objPos[2])+1e-1) else: return 0 def placeRew(): if self.pickCompleted: return np.exp(-placingDist) else: return 0 reachReward = reachReward() pickReward = pickRew() placeReward = placeRew() reward = reachReward + pickReward + placeReward return [reward , reachReward , pickReward , placeReward, placingDist] def log_diagnostics(self, paths = None, prefix = '', logger = None): from rllab.misc import logger #if type(paths[0]) == dict: # if isinstance(paths[0]['env_infos'][0] , OrderedDict): # #For SAC # for key in self.info_logKeys: # nested_list = [[i[key] for i in paths[j]['env_infos']] for j in range(len(paths))] # logger.record_tabular(prefix + 'max_'+key, np.mean([max(_list) for _list in nested_list]) ) # logger.record_tabular(prefix + 'last_'+key, np.mean([_list[-1] for _list in nested_list]) ) #For TRPO for key in self.info_logKeys: #logger.record_tabular(prefix+ 'sum_'+key, np.mean([sum(path['env_infos'][key]) for path in paths]) ) logger.record_tabular(prefix+'max_'+key, np.mean([max(path['env_infos'][key]) for path in paths]) ) #logger.record_tabular(prefix+'min_'+key, np.mean([min(path['env_infos'][key]) for path in paths]) ) logger.record_tabular(prefix + 'last_'+key, np.mean([path['env_infos'][key][-1] for path in paths]) ) logger.record_tabular(prefix + 'mean_'+key, np.mean([np.mean(path['env_infos'][key]) for path in paths]) )
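

# A small, hedged sanity check for the zangle_to_quat helper defined at the
# top of this file. It only prints the resulting quaternion elements and
# assumes pyquaternion is installed (it is already imported above).
if __name__ == "__main__":
    for zangle in (0.0, np.pi / 2):
        print("zangle=%s -> %s" % (zangle, zangle_to_quat(zangle)))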
32.207965
166
0.668636
[ "MIT" ]
Neo-X/R_multiworld
multiworld/envs/mujoco/sawyer_xyz/pickPlace/sawyer_coffee.py
7,279
Python
from armstrong.core.arm_sections import utils from armstrong.core.arm_sections.models import Section from ._utils import ArmSectionsTestCase, override_settings from .support.models import SimpleCommon def rel_field_names(rels): return [rel.field.name for rel in rels] class get_configured_item_modelTestCase(ArmSectionsTestCase): def test_returns_configured_model(self): m = "%s.FooBar" % self.__class__.__module__ with self.settings(ARMSTRONG_SECTION_ITEM_MODEL=m): module, model = utils.get_module_and_model_names() self.assertEqual(self.__class__.__module__, module) self.assertEqual("FooBar", model) def test_provides_default_value(self): with self.settings(ARMSTRONG_SECTION_ITEM_MODEL=False): module, model = utils.get_module_and_model_names() self.assertEqual("armstrong.apps.content.models", module) self.assertEqual("Content", model) class get_item_model_classTestCase(ArmSectionsTestCase): @override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.SimpleCommon') def test_returns_specified_class(self): self.assertEqual(SimpleCommon, utils.get_item_model_class()) class get_section_relationsTestCase(ArmSectionsTestCase): @override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.SimpleCommon') def test_returns_relation_for_foreign_key_only(self): self.assertEqual( ['primary_section'], rel_field_names(utils.get_section_relations(Section))) @override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.ComplexCommon') def test_returns_relations_for_foreign_key_and_many_to_many(self): self.assertEqual( ['primary_section', 'related_sections'], rel_field_names(utils.get_section_relations(Section))) @override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.MultipleManyToManyModel') def test_returns_relations_for_subclass_with_foreign_key_and_m2m(self): self.assertEqual( ['primary_section', 'related_sections', 'more_sections'], rel_field_names(utils.get_section_relations(Section))) class get_section_many_to_many_relationsTestCase(ArmSectionsTestCase): @override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.SimpleCommon') def test_returns_no_relations_for_foreign_key_only(self): self.assertEqual( [], rel_field_names(utils.get_section_many_to_many_relations(Section))) @override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.ComplexCommon') def test_returns_relation_for_foreign_key_and_many_to_many(self): self.assertEqual( ['related_sections'], rel_field_names(utils.get_section_many_to_many_relations(Section))) @override_settings(ARMSTRONG_SECTION_ITEM_MODEL='tests.support.models.MultipleManyToManyModel') def test_returns_relations_for_subclass_with_foreign_key_and_m2m(self): self.assertEqual( ['related_sections', 'more_sections'], rel_field_names(utils.get_section_many_to_many_relations(Section)))
44.180556
99
0.762339
[ "Apache-2.0" ]
armstrong/armstrong.core.arm_sections
tests/utils.py
3,181
Python
import setuptools from setuptools import setup with open("README.md", "r") as fh: long_description = fh.read() setup( name="ChromedriverInstall", version="0.0.1", long_description=long_description, long_description_content_type="text/markdown", packages=setuptools.find_packages(), entry_points={'console_scripts': ['ChromedriverInstall = ChromedriverInstall.ChromedriverInstall:main']} )
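
# Note: after `pip install .`, setuptools generates a `ChromedriverInstall`
# command on the PATH which simply imports ChromedriverInstall.ChromedriverInstall
# and calls its main() -- i.e. the module path and callable named in the
# console_scripts entry point above.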
22.263158
108
0.742317
[ "MIT" ]
SagaOfAGuy/Chromedriver-Install
setup.py
423
Python
""" Evolutionary optimization of something """ import random import multiprocessing import numpy as np import numpy.random as npr import matplotlib.pylab as plt from tqdm import tqdm from automata import SnowDrift class EvolutionaryOptimizer(object): """ Optimize! """ def __init__(self): """ Set some parameters """ self.mutation_probability = 0.02 def init(self, size): """ Generate initial population """ raise NotImplementedError def get_fitness(self, obj): """ Compute fitness of individual of population """ raise NotImplementedError def mutate(self, obj): """ Mutate single individual """ raise NotImplementedError def crossover(self, mom, dad): """ Generate offspring from parents """ raise NotImplementedError def run(self, size, max_iter=100): """ Let life begin """ population = self.init(size) res = [] for _ in tqdm(range(max_iter)): pop_fitness = [self.get_fitness(o) for o in population] # crossover best individuals and replace worst with child best_indiv = np.argpartition(pop_fitness, -2)[-2:] mom, dad = population[best_indiv] child = self.crossover(mom, dad) worst_indiv = np.argmin(pop_fitness) population[worst_indiv] = child # apply mutations mut = lambda o: \ self.mutate(o) if random.random() < self.mutation_probability \ else o population = np.array([mut(o) for o in population]) res.append( (np.mean(population, axis=0), np.var(population, axis=0))) return res class SnowdriftOptimizer(EvolutionaryOptimizer): """ Optimize snowdrift game by assuming each individual to be the pair of benefit and cost floats """ def init(self, size): pop = [] for _ in range(size): pop.append((random.uniform(0, 1), random.uniform(0, 1))) return np.array(pop) def crossover(self, mom, dad): return np.mean([mom, dad], axis=0) def mutate(self, obj): sigma = 0.05 return (obj[0] * random.gauss(1, sigma), obj[1] * random.gauss(1, sigma)) def get_fitness(self, obj): # setup system lattice = npr.random_integers(0, 1, size=(2, 1)) model = SnowDrift(lattice) # generate dynamics iter_num = 100 benefit, cost = obj res = list(model.iterate(iter_num, benefit=benefit, cost=cost)) # cut off transient ss = res[-int(iter_num/10):] # compute fitness fit = -np.sum(ss) return fit def plot_runs(runs): """ Plot population evolutions """ ts = range(len(runs[0])) cmap = plt.get_cmap('viridis') for i, r in enumerate(runs): mean, var = zip(*r) bm, cm = zip(*mean) bv, cv = zip(*var) color = cmap(float(i)/len(runs)) plt.errorbar(ts, bm, fmt='-', yerr=bv, c=color) plt.errorbar(ts, cm, fmt='--', yerr=cv, c=color) plt.title('population evolution overview') plt.xlabel('time') plt.ylabel('value') plt.ylim((0, 1)) plt.plot(0, 0, '-', c='black', label='benefit value') plt.plot(0, 0, '--', c='black', label='cost value') plt.legend(loc='best') plt.savefig('result.pdf') plt.show() def work(i): """ Handle one optimization case """ opti = SnowdriftOptimizer() return opti.run(20) def main(): """ Setup environment """ core_num = int(multiprocessing.cpu_count() * 4/5) print('Using %d cores' % core_num) with multiprocessing.Pool(core_num) as p: runs = [i for i in p.imap_unordered(work, range(10))] plot_runs(runs) if __name__ == '__main__': main()
25.032051
81
0.580026
[ "MIT" ]
kpj/PySpaMo
evolutionary_optimization.py
3,905
Python
"""Define SetConfig message.""" # System imports # Third-party imports # Local imports from pyof.v0x01.common.header import Type from pyof.v0x01.controller2switch.common import SwitchConfig __all__ = ('SetConfig',) class SetConfig(SwitchConfig): """Set config message.""" def __init__(self, xid=None, flags=None, miss_send_len=None): """Create a SetConfig with the optional parameters below. Args: xid (int): xid to be used on the message header. flags (~pyof.v0x01.controller2switch.common.ConfigFlag): OFPC_* flags. miss_send_len (int): UBInt16 max bytes of new flow that the datapath should send to the controller. """ super().__init__(xid, flags, miss_send_len) self.header.message_type = Type.OFPT_SET_CONFIG
28.896552
71
0.663484
[ "MIT" ]
Niehaus/python-openflow
pyof/v0x01/controller2switch/set_config.py
838
Python
import numpy as np import scipy.interpolate from numpy import polyint, polymul, polyval from scipy.interpolate import BSpline as SciBSpline, PPoly from ..._utils import _domain_range from ._basis import Basis class BSpline(Basis): r"""BSpline basis. BSpline basis elements are defined recursively as: .. math:: B_{i, 1}(x) = 1 \quad \text{if } t_i \le x < t_{i+1}, \quad 0 \text{ otherwise} .. math:: B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x) + \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x) Where k indicates the order of the spline. Implementation details: In order to allow a discontinuous behaviour at the boundaries of the domain it is necessary to placing m knots at the boundaries [RS05]_. This is automatically done so that the user only has to specify a single knot at the boundaries. Attributes: domain_range (tuple): A tuple of length 2 containing the initial and end values of the interval over which the basis can be evaluated. n_basis (int): Number of functions in the basis. order (int): Order of the splines. One greather than their degree. knots (list): List of knots of the spline functions. Examples: Constructs specifying number of basis and order. >>> bss = BSpline(n_basis=8, order=4) If no order is specified defaults to 4 because cubic splines are the most used. So the previous example is the same as: >>> bss = BSpline(n_basis=8) It is also possible to create a BSpline basis specifying the knots. >>> bss = BSpline(knots=[0, 0.2, 0.4, 0.6, 0.8, 1]) Once we create a basis we can evaluate each of its functions at a set of points. >>> bss = BSpline(n_basis=3, order=3) >>> bss([0, 0.5, 1]) array([[[ 1. ], [ 0.25], [ 0. ]], [[ 0. ], [ 0.5 ], [ 0. ]], [[ 0. ], [ 0.25], [ 1. ]]]) And evaluates first derivative >>> deriv = bss.derivative() >>> deriv([0, 0.5, 1]) array([[[-2.], [-1.], [ 0.]], [[ 2.], [ 0.], [-2.]], [[ 0.], [ 1.], [ 2.]]]) References: .. [RS05] Ramsay, J., Silverman, B. W. (2005). *Functional Data Analysis*. Springer. 50-51. """ def __init__(self, domain_range=None, n_basis=None, order=4, knots=None): """Bspline basis constructor. Args: domain_range (tuple, optional): Definition of the interval where the basis defines a space. Defaults to (0,1) if knots are not specified. If knots are specified defaults to the first and last element of the knots. n_basis (int, optional): Number of splines that form the basis. order (int, optional): Order of the splines. One greater that their degree. Defaults to 4 which mean cubic splines. knots (array_like): List of knots of the splines. If domain_range is specified the first and last elements of the knots have to match with it. 
""" if domain_range is not None: domain_range = _domain_range(domain_range) if len(domain_range) != 1: raise ValueError("Domain range should be unidimensional.") domain_range = domain_range[0] # Knots default to equally space points in the domain_range if knots is None: if n_basis is None: raise ValueError("Must provide either a list of knots or the" "number of basis.") else: knots = tuple(knots) knots = sorted(knots) if domain_range is None: domain_range = (knots[0], knots[-1]) else: if domain_range[0] != knots[0] or domain_range[1] != knots[-1]: raise ValueError("The ends of the knots must be the same " "as the domain_range.") # n_basis default to number of knots + order of the splines - 2 if n_basis is None: n_basis = len(knots) + order - 2 if (n_basis - order + 2) < 2: raise ValueError(f"The number of basis ({n_basis}) minus the " f"order of the bspline ({order}) should be " f"greater than 3.") self._order = order self._knots = None if knots is None else tuple(knots) super().__init__(domain_range=domain_range, n_basis=n_basis) # Checks if self.n_basis != self.order + len(self.knots) - 2: raise ValueError(f"The number of basis ({self.n_basis}) has to " f"equal the order ({self.order}) plus the " f"number of knots ({len(self.knots)}) minus 2.") @property def knots(self): if self._knots is None: return tuple(np.linspace(*self.domain_range[0], self.n_basis - self.order + 2)) else: return self._knots @property def order(self): return self._order def _evaluation_knots(self): """ Get the knots adding m knots to the boundary in order to allow a discontinuous behaviour at the boundaries of the domain [RS05]_. References: .. [RS05] Ramsay, J., Silverman, B. W. (2005). *Functional Data Analysis*. Springer. 50-51. """ return np.array((self.knots[0],) * (self.order - 1) + self.knots + (self.knots[-1],) * (self.order - 1)) def _evaluate(self, eval_points): # Input is scalar eval_points = eval_points[..., 0] # Places m knots at the boundaries knots = self._evaluation_knots() # c is used the select which spline the function splev below computes c = np.zeros(len(knots)) # Initialise empty matrix mat = np.empty((self.n_basis, len(eval_points))) # For each basis computes its value for each evaluation point for i in range(self.n_basis): # write a 1 in c in the position of the spline calculated in each # iteration c[i] = 1 # compute the spline mat[i] = scipy.interpolate.splev(eval_points, (knots, c, self.order - 1)) c[i] = 0 return mat def _derivative_basis_and_coefs(self, coefs, order=1): if order >= self.order: return ( BSpline(n_basis=1, domain_range=self.domain_range, order=1), np.zeros((len(coefs), 1))) deriv_splines = [self._to_scipy_BSpline(coefs[i]).derivative(order) for i in range(coefs.shape[0])] deriv_coefs = [BSpline._from_scipy_BSpline(spline)[1] for spline in deriv_splines] deriv_basis = BSpline._from_scipy_BSpline(deriv_splines[0])[0] return deriv_basis, np.array(deriv_coefs)[:, 0:deriv_basis.n_basis] def rescale(self, domain_range=None): r"""Return a copy of the basis with a new domain range, with the corresponding values rescaled to the new bounds. The knots of the BSpline will be rescaled in the new interval. Args: domain_range (tuple, optional): Definition of the interval where the basis defines a space. Defaults uses the same as the original basis. 
""" knots = np.array(self.knots, dtype=np.dtype('float')) if domain_range is not None: # Rescales the knots knots -= knots[0] knots *= ((domain_range[1] - domain_range[0] ) / (self.knots[-1] - self.knots[0])) knots += domain_range[0] # Fix possible round error knots[0] = domain_range[0] knots[-1] = domain_range[1] else: # TODO: Allow multiple dimensions domain_range = self.domain_range[0] return BSpline(domain_range, self.n_basis, self.order, knots) def __repr__(self): """Representation of a BSpline basis.""" return (f"{self.__class__.__name__}(domain_range={self.domain_range}, " f"n_basis={self.n_basis}, order={self.order}, " f"knots={self.knots})") def _gram_matrix(self): # Places m knots at the boundaries knots = self._evaluation_knots() # c is used the select which spline the function # PPoly.from_spline below computes c = np.zeros(len(knots)) # Initialise empty list to store the piecewise polynomials ppoly_lst = [] no_0_intervals = np.where(np.diff(knots) > 0)[0] # For each basis gets its piecewise polynomial representation for i in range(self.n_basis): # Write a 1 in c in the position of the spline # transformed in each iteration c[i] = 1 # Gets the piecewise polynomial representation and gets # only the positions for no zero length intervals # This polynomial are defined relatively to the knots # meaning that the column i corresponds to the ith knot. # Let the ith knot be a # Then f(x) = pp(x - a) pp = PPoly.from_spline((knots, c, self.order - 1)) pp_coefs = pp.c[:, no_0_intervals] # We have the coefficients for each interval in coordinates # (x - a), so we will need to subtract a when computing the # definite integral ppoly_lst.append(pp_coefs) c[i] = 0 # Now for each pair of basis computes the inner product after # applying the linear differential operator matrix = np.zeros((self.n_basis, self.n_basis)) for interval in range(len(no_0_intervals)): for i in range(self.n_basis): poly_i = np.trim_zeros(ppoly_lst[i][:, interval], 'f') # Indefinite integral square = polymul(poly_i, poly_i) integral = polyint(square) # Definite integral matrix[i, i] += np.diff(polyval( integral, self.knots[interval: interval + 2] - self.knots[interval]))[0] # The Gram matrix is banded, so not all intervals are used for j in range(i + 1, min(i + self.order, self.n_basis)): poly_j = np.trim_zeros(ppoly_lst[j][:, interval], 'f') # Indefinite integral integral = polyint(polymul(poly_i, poly_j)) # Definite integral matrix[i, j] += np.diff(polyval( integral, self.knots[interval: interval + 2] - self.knots[interval]) )[0] # The matrix is symmetric matrix[j, i] = matrix[i, j] return matrix def _to_scipy_BSpline(self, coefs): knots = np.concatenate(( np.repeat(self.knots[0], self.order - 1), self.knots, np.repeat(self.knots[-1], self.order - 1))) return SciBSpline(knots, coefs, self.order - 1) @staticmethod def _from_scipy_BSpline(bspline): order = bspline.k knots = bspline.t # Remove additional knots at the borders if order != 0: knots = knots[order: -order] coefs = bspline.c domain_range = [knots[0], knots[-1]] return BSpline(domain_range, order=order + 1, knots=knots), coefs @property def inknots(self): """Return number of basis.""" return self.knots[1:len(self.knots) - 1] def __eq__(self, other): return (super().__eq__(other) and self.order == other.order and self.knots == other.knots) def __hash__(self): return hash((super().__hash__(), self.order, self.knots))
35.566952
79
0.5467
[ "BSD-3-Clause" ]
alejandro-ariza/scikit-fda
skfda/representation/basis/_bspline.py
12,484
Python
import json
import logging
import traceback

from google.appengine.api import taskqueue
from google.appengine.ext import ndb

from helpers.cache_clearer import CacheClearer
from helpers.firebase.firebase_pusher import FirebasePusher
from helpers.notification_helper import NotificationHelper
from helpers.manipulator_base import ManipulatorBase


class MatchManipulator(ManipulatorBase):
    """
    Handle Match database writes.
    """
    @classmethod
    def getCacheKeysAndControllers(cls, affected_refs):
        return CacheClearer.get_match_cache_keys_and_controllers(affected_refs)

    @classmethod
    def postDeleteHook(cls, matches):
        '''
        To run after the match has been deleted.
        '''
        for match in matches:
            try:
                FirebasePusher.delete_match(match)
            except Exception:
                logging.warning("Enqueuing Firebase delete failed!")

    @classmethod
    def postUpdateHook(cls, matches, updated_attr_list, is_new_list):
        '''
        To run after the match has been updated.
        Send push notifications to subscribed users
        only if the match is part of an active event.
        '''
        unplayed_match_events = []
        for (match, updated_attrs, is_new) in zip(matches, updated_attr_list, is_new_list):
            event = match.event.get()
            # Only continue if the event is currently happening
            if event.within_a_day:
                if match.has_been_played:
                    if is_new or 'alliances_json' in updated_attrs:
                        # There is a score update for this match, push a notification
                        logging.info("Sending push notifications for {}".format(match.key_name))
                        try:
                            NotificationHelper.send_match_score_update(match)
                        except Exception, exception:
                            logging.error("Error sending match updates: {}".format(exception))
                            logging.error(traceback.format_exc())
                else:
                    if is_new or (set(['alliances_json', 'time', 'time_string']).symmetric_difference(set(updated_attrs)) != set()):
                        # The match has not been played and we're changing a property that affects the event's schedule
                        # So send a schedule update notification for the parent event
                        if event not in unplayed_match_events:
                            unplayed_match_events.append(event)

        '''
        If we have an unplayed match during an event within a day, send out a schedule update notification
        '''
        for event in unplayed_match_events:
            try:
                logging.info("Sending schedule updates for: {}".format(event.key_name))
                NotificationHelper.send_schedule_update(event)
            except Exception, exception:
                logging.error("Error sending schedule updates for: {}".format(event.key_name))

        '''
        Enqueue firebase push
        '''
        event_keys = set()
        for match in matches:
            event_keys.add(match.event.id())
            try:
                FirebasePusher.update_match(match)
            except Exception:
                logging.warning("Enqueuing Firebase push failed!")

        # Enqueue task to calculate matchstats
        for event_key in event_keys:
            taskqueue.add(
                url='/tasks/math/do/event_matchstats/' + event_key,
                method='GET')

    @classmethod
    def updateMerge(self, new_match, old_match, auto_union=True):
        """
        Given an "old" and a "new" Match object, replace the fields in the
        "old" match that are present in the "new" match, but keep fields from
        the "old" match that are null in the "new" match.
        """
        immutable_attrs = [
            "comp_level",
            "event",
            "set_number",
            "match_number",
        ]  # These build key_name, and cannot be changed without deleting the model.

        attrs = [
            "year",
            "no_auto_update",
            "time",
            "time_string",
        ]

        json_attrs = [
            "alliances_json",
            "score_breakdown_json",
        ]

        list_attrs = [
            "team_key_names"
        ]

        auto_union_attrs = [
            "tba_videos",
            "youtube_videos"
        ]

        old_match._updated_attrs = []

        # if not auto_union, treat auto_union_attrs as list_attrs
        if not auto_union:
            list_attrs += auto_union_attrs
            auto_union_attrs = []

        for attr in attrs:
            if getattr(new_match, attr) is not None:
                if getattr(new_match, attr) != getattr(old_match, attr):
                    setattr(old_match, attr, getattr(new_match, attr))
                    old_match._updated_attrs.append(attr)
                    old_match.dirty = True

        for attr in json_attrs:
            if getattr(new_match, attr) is not None:
                if (getattr(old_match, attr) is None) or (json.loads(getattr(new_match, attr)) != json.loads(getattr(old_match, attr))):
                    setattr(old_match, attr, getattr(new_match, attr))
                    # changing 'attr_json' doesn't clear lazy-loaded '_attr'
                    setattr(old_match, '_{}'.format(attr.replace('_json', '')), None)
                    old_match._updated_attrs.append(attr)
                    old_match.dirty = True

        for attr in list_attrs:
            if len(getattr(new_match, attr)) > 0:
                if set(getattr(new_match, attr)) != set(getattr(old_match, attr)):  # lists are treated as sets
                    setattr(old_match, attr, getattr(new_match, attr))
                    old_match._updated_attrs.append(attr)
                    old_match.dirty = True

        for attr in auto_union_attrs:
            old_set = set(getattr(old_match, attr))
            new_set = set(getattr(new_match, attr))
            unioned = old_set.union(new_set)
            if unioned != old_set:
                setattr(old_match, attr, list(unioned))
                old_match._updated_attrs.append(attr)
                old_match.dirty = True

        return old_match
38.865031
136
0.581058
[ "MIT" ]
bvisness/the-blue-alliance
helpers/match_manipulator.py
6,335
Python
"""Test config validators.""" from datetime import timedelta, datetime, date import enum import os from socket import _GLOBAL_DEFAULT_TIMEOUT from unittest.mock import Mock, patch import pytest import voluptuous as vol import homeassistant.helpers.config_validation as cv def test_boolean(): """Test boolean validation.""" schema = vol.Schema(cv.boolean) for value in ('T', 'negative', 'lock'): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ('true', 'On', '1', 'YES', 'enable', 1, True): assert schema(value) for value in ('false', 'Off', '0', 'NO', 'disable', 0, False): assert not schema(value) def test_latitude(): """Test latitude validation.""" schema = vol.Schema(cv.latitude) for value in ('invalid', None, -91, 91, '-91', '91', '123.01A'): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ('-89', 89, '12.34'): schema(value) def test_longitude(): """Test longitude validation.""" schema = vol.Schema(cv.longitude) for value in ('invalid', None, -181, 181, '-181', '181', '123.01A'): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ('-179', 179, '12.34'): schema(value) def test_port(): """Test TCP/UDP network port.""" schema = vol.Schema(cv.port) for value in ('invalid', None, -1, 0, 80000, '81000'): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ('1000', 21, 24574): schema(value) def test_isfile(): """Validate that the value is an existing file.""" schema = vol.Schema(cv.isfile) fake_file = 'this-file-does-not.exist' assert not os.path.isfile(fake_file) for value in ('invalid', None, -1, 0, 80000, fake_file): with pytest.raises(vol.Invalid): schema(value) # patching methods that allow us to fake a file existing # with write access with patch('os.path.isfile', Mock(return_value=True)), \ patch('os.access', Mock(return_value=True)): schema('test.txt') def test_url(): """Test URL.""" schema = vol.Schema(cv.url) for value in ('invalid', None, 100, 'htp://ha.io', 'http//ha.io', 'http://??,**', 'https://??,**'): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ('http://localhost', 'https://localhost/test/index.html', 'http://home-assistant.io', 'http://home-assistant.io/test/', 'https://community.home-assistant.io/'): assert schema(value) def test_platform_config(): """Test platform config validation.""" options = ( {}, {'hello': 'world'}, ) for value in options: with pytest.raises(vol.MultipleInvalid): cv.PLATFORM_SCHEMA(value) options = ( {'platform': 'mqtt'}, {'platform': 'mqtt', 'beer': 'yes'}, ) for value in options: cv.PLATFORM_SCHEMA(value) def test_ensure_list(): """Test ensure_list.""" schema = vol.Schema(cv.ensure_list) assert [] == schema(None) assert [1] == schema(1) assert [1] == schema([1]) assert ['1'] == schema('1') assert ['1'] == schema(['1']) assert [{'1': '2'}] == schema({'1': '2'}) def test_entity_id(): """Test entity ID validation.""" schema = vol.Schema(cv.entity_id) with pytest.raises(vol.MultipleInvalid): schema('invalid_entity') assert schema('sensor.LIGHT') == 'sensor.light' def test_entity_ids(): """Test entity ID validation.""" schema = vol.Schema(cv.entity_ids) options = ( 'invalid_entity', 'sensor.light,sensor_invalid', ['invalid_entity'], ['sensor.light', 'sensor_invalid'], ['sensor.light,sensor_invalid'], ) for value in options: with pytest.raises(vol.MultipleInvalid): schema(value) options = ( [], ['sensor.light'], 'sensor.light' ) for value in options: schema(value) assert schema('sensor.LIGHT, light.kitchen ') == [ 'sensor.light', 'light.kitchen' ] def 
test_entity_domain(): """Test entity domain validation.""" schema = vol.Schema(cv.entity_domain('sensor')) options = ( 'invalid_entity', 'cover.demo', ) for value in options: with pytest.raises(vol.MultipleInvalid): print(value) schema(value) assert schema('sensor.LIGHT') == 'sensor.light' def test_entities_domain(): """Test entities domain validation.""" schema = vol.Schema(cv.entities_domain('sensor')) options = ( None, '', 'invalid_entity', ['sensor.light', 'cover.demo'], ['sensor.light', 'sensor_invalid'], ) for value in options: with pytest.raises(vol.MultipleInvalid): schema(value) options = ( 'sensor.light', ['SENSOR.light'], ['sensor.light', 'sensor.demo'] ) for value in options: schema(value) assert schema('sensor.LIGHT, sensor.demo ') == [ 'sensor.light', 'sensor.demo' ] assert schema(['sensor.light', 'SENSOR.demo']) == [ 'sensor.light', 'sensor.demo' ] def test_ensure_list_csv(): """Test ensure_list_csv.""" schema = vol.Schema(cv.ensure_list_csv) options = ( None, 12, [], ['string'], 'string1,string2' ) for value in options: schema(value) assert schema('string1, string2 ') == [ 'string1', 'string2' ] def test_event_schema(): """Test event_schema validation.""" options = ( {}, None, { 'event_data': {}, }, { 'event': 'state_changed', 'event_data': 1, }, ) for value in options: with pytest.raises(vol.MultipleInvalid): cv.EVENT_SCHEMA(value) options = ( {'event': 'state_changed'}, {'event': 'state_changed', 'event_data': {'hello': 'world'}}, ) for value in options: cv.EVENT_SCHEMA(value) def test_icon(): """Test icon validation.""" schema = vol.Schema(cv.icon) for value in (False, 'work'): with pytest.raises(vol.MultipleInvalid): schema(value) schema('mdi:work') schema('custom:prefix') def test_time_period(): """Test time_period validation.""" schema = vol.Schema(cv.time_period) options = ( None, '', 'hello:world', '12:', '12:34:56:78', {}, {'wrong_key': -10} ) for value in options: with pytest.raises(vol.MultipleInvalid): schema(value) options = ( '8:20', '23:59', '-8:20', '-23:59:59', '-48:00', {'minutes': 5}, 1, '5' ) for value in options: schema(value) assert timedelta(seconds=180) == schema('180') assert timedelta(hours=23, minutes=59) == schema('23:59') assert -1 * timedelta(hours=1, minutes=15) == schema('-1:15') def test_service(): """Test service validation.""" schema = vol.Schema(cv.service) with pytest.raises(vol.MultipleInvalid): schema('invalid_turn_on') schema('homeassistant.turn_on') def test_service_schema(): """Test service_schema validation.""" options = ( {}, None, { 'service': 'homeassistant.turn_on', 'service_template': 'homeassistant.turn_on' }, { 'data': {'entity_id': 'light.kitchen'}, }, { 'service': 'homeassistant.turn_on', 'data': None }, { 'service': 'homeassistant.turn_on', 'data_template': { 'brightness': '{{ no_end' } }, ) for value in options: with pytest.raises(vol.MultipleInvalid): cv.SERVICE_SCHEMA(value) options = ( {'service': 'homeassistant.turn_on'}, { 'service': 'homeassistant.turn_on', 'entity_id': 'light.kitchen', }, { 'service': 'homeassistant.turn_on', 'entity_id': ['light.kitchen', 'light.ceiling'], }, ) for value in options: cv.SERVICE_SCHEMA(value) def test_slug(): """Test slug validation.""" schema = vol.Schema(cv.slug) for value in (None, 'hello world'): with pytest.raises(vol.MultipleInvalid): schema(value) for value in (12345, 'hello'): schema(value) def test_string(): """Test string validation.""" schema = vol.Schema(cv.string) with pytest.raises(vol.Invalid): schema(None) with pytest.raises(vol.Invalid): schema([]) with 
pytest.raises(vol.Invalid): schema({}) for value in (True, 1, 'hello'): schema(value) def test_temperature_unit(): """Test temperature unit validation.""" schema = vol.Schema(cv.temperature_unit) with pytest.raises(vol.MultipleInvalid): schema('K') schema('C') schema('F') def test_x10_address(): """Test x10 addr validator.""" schema = vol.Schema(cv.x10_address) with pytest.raises(vol.Invalid): schema('Q1') schema('q55') schema('garbage_addr') schema('a1') schema('C11') def test_template(): """Test template validator.""" schema = vol.Schema(cv.template) for value in (None, '{{ partial_print }', '{% if True %}Hello', ['test']): with pytest.raises(vol.Invalid, message='{} not considered invalid'.format(value)): schema(value) options = ( 1, 'Hello', '{{ beer }}', '{% if 1 == 1 %}Hello{% else %}World{% endif %}', ) for value in options: schema(value) def test_template_complex(): """Test template_complex validator.""" schema = vol.Schema(cv.template_complex) for value in (None, '{{ partial_print }', '{% if True %}Hello'): with pytest.raises(vol.MultipleInvalid): schema(value) options = ( 1, 'Hello', '{{ beer }}', '{% if 1 == 1 %}Hello{% else %}World{% endif %}', {'test': 1, 'test2': '{{ beer }}'}, ['{{ beer }}', 1] ) for value in options: schema(value) def test_time_zone(): """Test time zone validation.""" schema = vol.Schema(cv.time_zone) with pytest.raises(vol.MultipleInvalid): schema('America/Do_Not_Exist') schema('America/Los_Angeles') schema('UTC') def test_date(): """Test date validation.""" schema = vol.Schema(cv.date) for value in ['Not a date', '23:42', '2016-11-23T18:59:08']: with pytest.raises(vol.Invalid): schema(value) schema(datetime.now().date()) schema('2016-11-23') def test_time(): """Test date validation.""" schema = vol.Schema(cv.time) for value in ['Not a time', '2016-11-23', '2016-11-23T18:59:08']: with pytest.raises(vol.Invalid): schema(value) schema(datetime.now().time()) schema('23:42:00') schema('23:42') def test_datetime(): """Test date time validation.""" schema = vol.Schema(cv.datetime) for value in [date.today(), 'Wrong DateTime', '2016-11-23']: with pytest.raises(vol.MultipleInvalid): schema(value) schema(datetime.now()) schema('2016-11-23T18:59:08') def test_deprecated(caplog): """Test deprecation log.""" schema = vol.Schema({ 'venus': cv.boolean, 'mars': cv.boolean }) deprecated_schema = vol.All( cv.deprecated('mars'), schema ) deprecated_schema({'venus': True}) # pylint: disable=len-as-condition assert len(caplog.records) == 0 deprecated_schema({'mars': True}) assert len(caplog.records) == 1 assert caplog.records[0].name == __name__ assert ("The 'mars' option (with value 'True') is deprecated, " "please remove it from your configuration.") in caplog.text def test_key_dependency(): """Test key_dependency validator.""" schema = vol.Schema(cv.key_dependency('beer', 'soda')) options = ( {'beer': None} ) for value in options: with pytest.raises(vol.MultipleInvalid): schema(value) options = ( {'beer': None, 'soda': None}, {'soda': None}, {} ) for value in options: schema(value) def test_has_at_least_one_key(): """Test has_at_least_one_key validator.""" schema = vol.Schema(cv.has_at_least_one_key('beer', 'soda')) for value in (None, [], {}, {'wine': None}): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ({'beer': None}, {'soda': None}): schema(value) def test_enum(): """Test enum validator.""" class TestEnum(enum.Enum): """Test enum.""" value1 = "Value 1" value2 = "Value 2" schema = vol.Schema(cv.enum(TestEnum)) with pytest.raises(vol.Invalid): 
schema('value3') def test_socket_timeout(): # pylint: disable=invalid-name """Test socket timeout validator.""" schema = vol.Schema(cv.socket_timeout) with pytest.raises(vol.Invalid): schema(0.0) with pytest.raises(vol.Invalid): schema(-1) assert _GLOBAL_DEFAULT_TIMEOUT == schema(None) assert schema(1) == 1.0 def test_matches_regex(): """Test matches_regex validator.""" schema = vol.Schema(cv.matches_regex('.*uiae.*')) with pytest.raises(vol.Invalid): schema(1.0) with pytest.raises(vol.Invalid): schema(" nrtd ") test_str = "This is a test including uiae." assert(schema(test_str) == test_str) def test_is_regex(): """Test the is_regex validator.""" schema = vol.Schema(cv.is_regex) with pytest.raises(vol.Invalid): schema("(") with pytest.raises(vol.Invalid): schema({"a dict": "is not a regex"}) valid_re = ".*" schema(valid_re) def test_comp_entity_ids(): """Test config validation for component entity IDs.""" schema = vol.Schema(cv.comp_entity_ids) for valid in ('ALL', 'all', 'AlL', 'light.kitchen', ['light.kitchen'], ['light.kitchen', 'light.ceiling'], []): schema(valid) for invalid in (['light.kitchen', 'not-entity-id'], '*', ''): with pytest.raises(vol.Invalid): schema(invalid)
24.199667
79
0.569582
[ "Apache-2.0" ]
AidasK/home-assistant
tests/helpers/test_config_validation.py
14,544
Python
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import os
import json
import urlparse
import json

EXTRA_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__)))
if EXTRA_DIR not in sys.path:
    sys.path.append(EXTRA_DIR)

import dao

try:
    import requests
except ImportError:
    print("requests not found - please run: pip install requests")
    sys.exit()


def get_tags(image_keys):
    mysql = dao.MySQLDAO()
    tag_dao = dao.TagDAO(mysql)
    return tag_dao.get_tags_for_image_keys(image_keys)


def application(environ, start_response):
    # the environment variable CONTENT_LENGTH may be empty or missing
    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except (ValueError):
        request_body_size = 0

    response_body = ""
    request_body = environ['wsgi.input'].read(request_body_size)
    keys = json.loads(request_body)
    tags = get_tags(keys)
    response_body = json.dumps(tags)

    status = '200 OK'
    response_headers = [
        ('Content-Type', 'text/json'),
        ('Content-Length', str(len(response_body)))
    ]
    start_response(status, response_headers)

    return [response_body]


if __name__ == '__main__':
    image_key = 'YqfCPKsmgXh40NPmI_wuEQ'
    print str(get_tags([image_key]))
23.75
69
0.688722
[ "MIT" ]
simonmikkelsen/mapillary-browser
api/getTags.py
1,330
Python
import io import os import ssl import boto3 import gzip import json import time import uuid import unittest import datetime import requests from io import BytesIO from pytz import timezone from botocore.exceptions import ClientError from six.moves.urllib.request import Request, urlopen from localstack import config from localstack.utils import testutil from localstack.constants import TEST_AWS_ACCESS_KEY_ID, TEST_AWS_SECRET_ACCESS_KEY from localstack.utils.aws import aws_stack from localstack.services.s3 import s3_listener from localstack.utils.common import ( short_uid, retry, get_service_protocol, to_bytes, safe_requests, to_str, new_tmp_file, rm_rf, load_file) from localstack.services.awslambda.lambda_utils import LAMBDA_RUNTIME_PYTHON36 TEST_BUCKET_NAME_WITH_POLICY = 'test-bucket-policy-1' TEST_QUEUE_FOR_BUCKET_WITH_NOTIFICATION = 'test_queue_for_bucket_notification_1' TEST_BUCKET_WITH_VERSIONING = 'test-bucket-versioning-1' TEST_BUCKET_NAME_2 = 'test-bucket-2' TEST_KEY_2 = 'test-key-2' TEST_GET_OBJECT_RANGE = 17 THIS_FOLDER = os.path.dirname(os.path.realpath(__file__)) TEST_LAMBDA_PYTHON_ECHO = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_triggered_by_s3.py') TEST_LAMBDA_PYTHON_DOWNLOAD_FROM_S3 = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_triggered_by_sqs_download_s3_file.py') BATCH_DELETE_BODY = """ <Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Object> <Key>%s</Key> </Object> <Object> <Key>%s</Key> </Object> </Delete> """ class PutRequest(Request): """ Class to handle putting with urllib """ def __init__(self, *args, **kwargs): return Request.__init__(self, *args, **kwargs) def get_method(self, *args, **kwargs): return 'PUT' class TestS3(unittest.TestCase): def setUp(self): self.s3_client = aws_stack.connect_to_service('s3') self.sqs_client = aws_stack.connect_to_service('sqs') def test_create_bucket_via_host_name(self): body = """<?xml version="1.0" encoding="UTF-8"?> <CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <LocationConstraint>eu-central-1</LocationConstraint> </CreateBucketConfiguration>""" headers = aws_stack.mock_aws_request_headers('s3') bucket_name = 'test-%s' % short_uid() headers['Host'] = '%s.s3.amazonaws.com' % bucket_name response = requests.put(config.TEST_S3_URL, data=body, headers=headers, verify=False) self.assertEquals(response.status_code, 200) response = self.s3_client.get_bucket_location(Bucket=bucket_name) self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) self.assertIn('LocationConstraint', response) def test_bucket_policy(self): # create test bucket self.s3_client.create_bucket(Bucket=TEST_BUCKET_NAME_WITH_POLICY) # put bucket policy policy = { 'Version': '2012-10-17', 'Statement': { 'Action': ['s3:GetObject'], 'Effect': 'Allow', 'Resource': 'arn:aws:s3:::bucketName/*', 'Principal': { 'AWS': ['*'] } } } response = self.s3_client.put_bucket_policy( Bucket=TEST_BUCKET_NAME_WITH_POLICY, Policy=json.dumps(policy) ) self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 204) # retrieve and check policy config saved_policy = self.s3_client.get_bucket_policy(Bucket=TEST_BUCKET_NAME_WITH_POLICY)['Policy'] self.assertEqual(json.loads(saved_policy), policy) def test_s3_put_object_notification(self): bucket_name = 'notif-%s' % short_uid() key_by_path = 'key-by-hostname' key_by_host = 'key-by-host' queue_url, queue_attributes = self._create_test_queue() self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name) self.s3_client.put_bucket_versioning(Bucket=bucket_name, 
VersioningConfiguration={'Status': 'Enabled'}) # put an object where the bucket_name is in the path obj = self.s3_client.put_object(Bucket=bucket_name, Key=key_by_path, Body='something') # put an object where the bucket_name is in the host # it doesn't care about the authorization header as long as it's present headers = {'Host': '{}.s3.amazonaws.com'.format(bucket_name), 'authorization': 'some_token'} url = '{}/{}'.format(config.TEST_S3_URL, key_by_host) # verify=False must be set as this test fails on travis because of an SSL error non-existent locally response = requests.put(url, data='something else', headers=headers, verify=False) self.assertTrue(response.ok) self.assertEqual(self._get_test_queue_message_count(queue_url), '2') response = self.sqs_client.receive_message(QueueUrl=queue_url) messages = [json.loads(to_str(m['Body'])) for m in response['Messages']] record = messages[0]['Records'][0] self.assertIsNotNone(record['s3']['object']['versionId']) self.assertEquals(record['s3']['object']['versionId'], obj['VersionId']) # clean up self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Disabled'}) self.sqs_client.delete_queue(QueueUrl=queue_url) self._delete_bucket(bucket_name, [key_by_path, key_by_host]) def test_s3_upload_fileobj_with_large_file_notification(self): bucket_name = 'notif-large-%s' % short_uid() queue_url, queue_attributes = self._create_test_queue() self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name) # has to be larger than 64MB to be broken up into a multipart upload file_size = 75000000 large_file = self.generate_large_file(file_size) download_file = new_tmp_file() try: self.s3_client.upload_file(Bucket=bucket_name, Key=large_file.name, Filename=large_file.name) self.assertEqual(self._get_test_queue_message_count(queue_url), '1') # ensure that the first message's eventName is ObjectCreated:CompleteMultipartUpload messages = self.sqs_client.receive_message(QueueUrl=queue_url, AttributeNames=['All']) message = json.loads(messages['Messages'][0]['Body']) self.assertEqual(message['Records'][0]['eventName'], 'ObjectCreated:CompleteMultipartUpload') # download the file, check file size self.s3_client.download_file(Bucket=bucket_name, Key=large_file.name, Filename=download_file) self.assertEqual(os.path.getsize(download_file), file_size) # clean up self.sqs_client.delete_queue(QueueUrl=queue_url) self._delete_bucket(bucket_name, large_file.name) finally: # clean up large files large_file.close() rm_rf(large_file.name) rm_rf(download_file) def test_s3_multipart_upload_with_small_single_part(self): # In a multipart upload "Each part must be at least 5 MB in size, except the last part." 
# https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html bucket_name = 'notif-large-%s' % short_uid() key_by_path = 'key-by-hostname' queue_url, queue_attributes = self._create_test_queue() self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name) # perform upload self._perform_multipart_upload(bucket=bucket_name, key=key_by_path, zip=True) self.assertEqual(self._get_test_queue_message_count(queue_url), '1') # clean up self.sqs_client.delete_queue(QueueUrl=queue_url) self._delete_bucket(bucket_name, [key_by_path]) def test_invalid_range_error(self): bucket_name = 'range-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) self.s3_client.create_bucket(Bucket=bucket_name) self.s3_client.put_object(Bucket=bucket_name, Key='steve', Body=b'is awesome') try: self.s3_client.get_object(Bucket=bucket_name, Key='steve', Range='bytes=1024-4096') except ClientError as e: self.assertEqual(e.response['Error']['Code'], 'InvalidRange') # clean up self._delete_bucket(bucket_name, ['steve']) def test_range_key_not_exists(self): bucket_name = 'range-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) self.s3_client.create_bucket(Bucket=bucket_name) with self.assertRaises(ClientError) as ctx: self.s3_client.get_object(Bucket=bucket_name, Key='key', Range='bytes=1024-4096') self.assertIn('NoSuchKey', str(ctx.exception)) # clean up self._delete_bucket(bucket_name) def test_upload_key_with_hash_prefix(self): bucket_name = 'hash-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) key_name = '#key-with-hash-prefix' content = b'test 123' self.s3_client.put_object(Bucket=bucket_name, Key=key_name, Body=content) downloaded_object = self.s3_client.get_object(Bucket=bucket_name, Key=key_name) downloaded_content = to_str(downloaded_object['Body'].read()) self.assertEqual(to_str(downloaded_content), to_str(content)) # clean up self._delete_bucket(bucket_name, [key_name]) with self.assertRaises(Exception): self.s3_client.head_object(Bucket=bucket_name, Key=key_name) def test_s3_multipart_upload_acls(self): bucket_name = 'test-bucket-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name, ACL='public-read') def check_permissions(key, expected_perms): grants = self.s3_client.get_object_acl(Bucket=bucket_name, Key=key)['Grants'] grants = [g for g in grants if 'AllUsers' in g.get('Grantee', {}).get('URI', '')] self.assertEquals(len(grants), 1) permissions = grants[0]['Permission'] permissions = permissions if isinstance(permissions, list) else [permissions] self.assertEquals(len(permissions), expected_perms) # perform uploads (multipart and regular) and check ACLs self.s3_client.put_object(Bucket=bucket_name, Key='acl-key0', Body='something') check_permissions('acl-key0', 1) self._perform_multipart_upload(bucket=bucket_name, key='acl-key1') check_permissions('acl-key1', 1) self._perform_multipart_upload(bucket=bucket_name, key='acl-key2', acl='public-read-write') check_permissions('acl-key2', 2) def test_s3_presigned_url_upload(self): key_by_path = 'key-by-hostname' bucket_name = 'notif-large-%s' % short_uid() queue_url, queue_attributes = self._create_test_queue() self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name) self._perform_presigned_url_upload(bucket=bucket_name, key=key_by_path) self.assertEqual(self._get_test_queue_message_count(queue_url), '1') # clean up self.sqs_client.delete_queue(QueueUrl=queue_url) self._delete_bucket(bucket_name, [key_by_path]) def 
test_s3_get_response_default_content_type(self): # When no content type is provided by a PUT request # 'binary/octet-stream' should be used # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) # put object object_key = 'key-by-hostname' client.put_object(Bucket=bucket_name, Key=object_key, Body='something') url = client.generate_presigned_url( 'get_object', Params={'Bucket': bucket_name, 'Key': object_key}) # get object and assert headers response = requests.get(url, verify=False) self.assertEqual(response.headers['content-type'], 'binary/octet-stream') # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_put_presigned_url_metadata(self): # Object metadata should be passed as query params via presigned URL # https://github.com/localstack/localstack/issues/544 bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) metadata = { 'foo': 'bar' } # put object object_key = 'key-by-hostname' url = client.generate_presigned_url( 'put_object', Params={'Bucket': bucket_name, 'Key': object_key, 'Metadata': metadata}) # append metadata manually to URL (this is not easily possible with boto3, as "Metadata" cannot # be passed to generate_presigned_url, and generate_presigned_post works differently) # get object and assert metadata is present response = requests.put(url, data='content 123', verify=False) self.assertLess(response.status_code, 400) # response body should be empty, see https://github.com/localstack/localstack/issues/1317 self.assertEqual('', to_str(response.content)) response = client.head_object(Bucket=bucket_name, Key=object_key) self.assertEquals('bar', response.get('Metadata', {}).get('foo')) # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_put_metadata_underscores(self): # Object metadata keys should accept keys with underscores # https://github.com/localstack/localstack/issues/1790 bucket_name = 'test-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) # put object object_key = 'key-with-metadata' metadata = {'test_meta_1': 'foo', '__meta_2': 'bar'} self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo') metadata_saved = self.s3_client.head_object(Bucket=bucket_name, Key=object_key)['Metadata'] self.assertEqual(metadata, metadata_saved) # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_object_expiry(self): # handle s3 object expiry # https://github.com/localstack/localstack/issues/1685 bucket_name = 'test-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) # put object object_key = 'key-with-metadata' metadata = {'test_meta_1': 'foo', '__meta_2': 'bar'} self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo', Expires=datetime.datetime.now(timezone('GMT')) - datetime.timedelta(hours=1)) # try to fetch an object which is already expired self.assertRaises(Exception, self.s3_client.get_object, Bucket=bucket_name, Key=object_key.lower()) self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo', Expires=datetime.datetime.now(timezone('GMT')) + datetime.timedelta(hours=1)) # try to fetch has not been expired yet. 
resp = self.s3_client.get_object(Bucket=bucket_name, Key=object_key) self.assertIn('Expires', resp) # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_presigned_url_expired(self): bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) # put object and CORS configuration object_key = 'key-by-hostname' client.put_object(Bucket=bucket_name, Key=object_key, Body='something') # get object and assert headers url = client.generate_presigned_url( 'get_object', Params={'Bucket': bucket_name, 'Key': object_key}, ExpiresIn=2 ) # retrieving it before expiry resp = requests.get(url, verify=False) self.assertEqual(resp.status_code, 200) self.assertEqual(to_str(resp.content), 'something') # waiting for the url to expire time.sleep(3) resp = requests.get(url, verify=False) self.assertEqual(resp.status_code, 403) url = client.generate_presigned_url( 'get_object', Params={'Bucket': bucket_name, 'Key': object_key}, ExpiresIn=120 ) resp = requests.get(url, verify=False) self.assertEqual(resp.status_code, 200) self.assertEqual(to_str(resp.content), 'something') # clean up self._delete_bucket(bucket_name, [object_key]) def test_bucket_availability(self): bucket_name = 'test_bucket_lifecycle' returned_empty_lifecycle = s3_listener.get_lifecycle(bucket_name) self.assertRegexpMatches(returned_empty_lifecycle._content, r'The bucket does not exist') response = s3_listener.get_replication(bucket_name) self.assertRegexpMatches(response._content, r'The bucket does not exist') response = s3_listener.get_encryption(bucket_name) self.assertRegexpMatches(response._content, r'The bucket does not exist') response = s3_listener.get_object_lock(bucket_name) self.assertRegexpMatches(response._content, r'The bucket does not exist') def test_range_header_body_length(self): # Test for https://github.com/localstack/localstack/issues/1952 object_key = 'sample.bin' bucket_name = 'test-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) chunk_size = 1024 with io.BytesIO() as data: data.write(os.urandom(chunk_size * 2)) data.seek(0) self.s3_client.upload_fileobj(data, bucket_name, object_key) range_header = 'bytes=0-%s' % (chunk_size - 1) resp = self.s3_client.get_object(Bucket=bucket_name, Key=object_key, Range=range_header) content = resp['Body'].read() self.assertEquals(len(content), chunk_size) # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_get_response_content_type_same_as_upload_and_range(self): bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) # put object object_key = '/foo/bar/key-by-hostname' content_type = 'foo/bar; charset=utf-8' client.put_object(Bucket=bucket_name, Key=object_key, Body='something ' * 20, ContentType=content_type) url = client.generate_presigned_url( 'get_object', Params={'Bucket': bucket_name, 'Key': object_key} ) # get object and assert headers response = requests.get(url, verify=False) self.assertEqual(response.headers['content-type'], content_type) # get object using range query and assert headers response = requests.get(url, headers={'Range': 'bytes=0-18'}, verify=False) self.assertEqual(response.headers['content-type'], content_type) self.assertEqual(to_str(response.content), 'something something') # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_get_get_object_headers(self): object_key = 'sample.bin' bucket_name = 'test-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) 
chunk_size = 1024 with io.BytesIO() as data: data.write(os.urandom(chunk_size * 2)) data.seek(0) self.s3_client.upload_fileobj(data, bucket_name, object_key) range_header = 'bytes=0-%s' % (chunk_size - 1) resp = self.s3_client.get_object(Bucket=bucket_name, Key=object_key, Range=range_header) self.assertEqual(resp.get('AcceptRanges'), 'bytes') self.assertIn('x-amz-request-id', resp['ResponseMetadata']['HTTPHeaders']) self.assertIn('x-amz-id-2', resp['ResponseMetadata']['HTTPHeaders']) self.assertIn('content-language', resp['ResponseMetadata']['HTTPHeaders']) self.assertIn('cache-control', resp['ResponseMetadata']['HTTPHeaders']) self.assertIn('content-encoding', resp['ResponseMetadata']['HTTPHeaders']) # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_head_response_content_length_same_as_upload(self): bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) body = 'something body \n \n\r' # put object object_key = 'key-by-hostname' client.put_object(Bucket=bucket_name, Key=object_key, Body=body, ContentType='text/html; charset=utf-8') url = client.generate_presigned_url( 'head_object', Params={'Bucket': bucket_name, 'Key': object_key} ) # get object and assert headers response = requests.head(url, verify=False) self.assertEqual(response.headers['content-length'], str(len(body))) # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_put_object_chunked_newlines(self): # Test for https://github.com/localstack/localstack/issues/1571 bucket_name = 'test-bucket-%s' % short_uid() object_key = 'data' self.s3_client.create_bucket(Bucket=bucket_name) body = 'Hello\r\n\r\n\r\n\r\n' headers = """ Authorization: %s Content-Type: audio/mpeg X-Amz-Content-Sha256: STREAMING-AWS4-HMAC-SHA256-PAYLOAD X-Amz-Date: 20190918T051509Z X-Amz-Decoded-Content-Length: %s """ % (aws_stack.mock_aws_request_headers('s3')['Authorization'], len(body)) headers = dict([[field.strip() for field in pair.strip().split(':', 1)] for pair in headers.strip().split('\n')]) data = ('d;chunk-signature=af5e6c0a698b0192e9aa5d9083553d4d241d81f69ec62b184d05c509ad5166af\r\n' + '%s\r\n0;chunk-signature=f2a50a8c0ad4d212b579c2489c6d122db88d8a0d0b987ea1f3e9d081074a5937\r\n') % body # put object url = '%s/%s/%s' % (config.TEST_S3_URL, bucket_name, object_key) req = PutRequest(url, to_bytes(data), headers) urlopen(req, context=ssl.SSLContext()).read() # get object and assert content length downloaded_object = self.s3_client.get_object(Bucket=bucket_name, Key=object_key) download_file_object = to_str(downloaded_object['Body'].read()) self.assertEqual(len(str(download_file_object)), len(body)) self.assertEqual(str(download_file_object), body) # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_put_object_on_presigned_url(self): bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) body = 'something body' # get presigned URL object_key = 'test-presigned-key' url = client.generate_presigned_url( 'put_object', Params={'Bucket': bucket_name, 'Key': object_key} ) # put object response = requests.put(url, data=body, verify=False) self.assertEqual(response.status_code, 200) # get object and compare results downloaded_object = client.get_object(Bucket=bucket_name, Key=object_key) download_object = downloaded_object['Body'].read() self.assertEqual(to_str(body), to_str(download_object)) # clean up self._delete_bucket(bucket_name, [object_key]) def 
test_s3_post_object_on_presigned_post(self): bucket_name = 'test-presigned-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) body = 'something body' # get presigned URL object_key = 'test-presigned-post-key' presigned_request = client.generate_presigned_post( Bucket=bucket_name, Key=object_key, ExpiresIn=60) # put object files = {'file': body} response = requests.post(presigned_request['url'], data=presigned_request['fields'], files=files, verify=False) self.assertIn(response.status_code, [200, 204]) # get object and compare results downloaded_object = client.get_object(Bucket=bucket_name, Key=object_key) download_object = downloaded_object['Body'].read() self.assertEqual(to_str(body), to_str(download_object)) # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_presigned_post_success_action_status_201_response(self): bucket_name = 'test-presigned-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) body = 'something body' # get presigned URL object_key = 'key-${filename}' presigned_request = client.generate_presigned_post( Bucket=bucket_name, Key=object_key, Fields={'success_action_status': 201}, ExpiresIn=60 ) files = {'file': ('my-file', body)} response = requests.post(presigned_request['url'], data=presigned_request['fields'], files=files, verify=False) # test expected_response_content = """ <PostResponse> <Location>{location}</Location> <Bucket>{bucket}</Bucket> <Key>{key}</Key> <ETag>{etag}</ETag> </PostResponse> """.format( location='http://localhost/key-my-file', bucket=bucket_name, key='key-my-file', etag='d41d8cd98f00b204e9800998ecf8427f' ) self.assertEqual(response.status_code, 201) self.assertEqual(response.text, expected_response_content) # clean up self._delete_bucket(bucket_name, ['key-my-file']) def test_s3_presigned_post_expires(self): bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) # presign a post with a short expiry time object_key = 'test-presigned-post-key' presigned_request = client.generate_presigned_post( Bucket=bucket_name, Key=object_key, ExpiresIn=2 ) # sleep so it expires time.sleep(3) # attempt to use the presigned request files = {'file': 'file content'} response = requests.post(presigned_request['url'], data=presigned_request['fields'], files=files, verify=False) self.assertEqual(response.status_code, 400) self.assertTrue('ExpiredToken' in response.text) # clean up self._delete_bucket(bucket_name) def test_s3_delete_response_content_length_zero(self): bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) for encoding in None, 'gzip': # put object object_key = 'key-by-hostname' client.put_object(Bucket=bucket_name, Key=object_key, Body='something', ContentType='text/html; charset=utf-8') url = client.generate_presigned_url( 'delete_object', Params={'Bucket': bucket_name, 'Key': object_key} ) # get object and assert headers headers = {} if encoding: headers['Accept-Encoding'] = encoding response = requests.delete(url, headers=headers, verify=False) self.assertEqual(response.headers['content-length'], '0', f'Unexpected response Content-Length for encoding {encoding}') # clean up self._delete_bucket(bucket_name, [object_key]) def test_delete_object_tagging(self): bucket_name = 'test-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name, ACL='public-read') object_key = 'test-key-tagging' 
self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='something') # get object and assert response url = '%s/%s/%s' % (config.TEST_S3_URL, bucket_name, object_key) response = requests.get(url, verify=False) self.assertEqual(response.status_code, 200) # delete object tagging self.s3_client.delete_object_tagging(Bucket=bucket_name, Key=object_key) # assert that the object still exists response = requests.get(url, verify=False) self.assertEqual(response.status_code, 200) # clean up self._delete_bucket(bucket_name, [object_key]) def test_delete_non_existing_keys(self): bucket_name = 'test-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) object_key = 'test-key-nonexistent' self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='something') response = self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': object_key}, {'Key': 'dummy1'}, {'Key': 'dummy2'}]}) self.assertEqual(len(response['Deleted']), 3) self.assertNotIn('Errors', response) # clean up self._delete_bucket(bucket_name) def test_bucket_exists(self): # Test setup bucket = 'test-bucket-%s' % short_uid() s3_client = aws_stack.connect_to_service('s3') s3_client.create_bucket(Bucket=bucket) s3_client.put_bucket_cors( Bucket=bucket, CORSConfiguration={ 'CORSRules': [{'AllowedMethods': ['GET', 'POST', 'PUT', 'DELETE'], 'AllowedOrigins': ['localhost']}] } ) response = s3_client.get_bucket_cors(Bucket=bucket) self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) # Cleanup s3_client.delete_bucket(Bucket=bucket) def test_s3_uppercase_names(self): # bucket name should be case-insensitive bucket_name = 'TestUpperCase-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) # key name should be case-sensitive object_key = 'camelCaseKey' self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='something') self.s3_client.get_object(Bucket=bucket_name, Key=object_key) self.assertRaises(Exception, self.s3_client.get_object, Bucket=bucket_name, Key=object_key.lower()) def test_s3_get_response_headers(self): bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) # put object and CORS configuration object_key = 'key-by-hostname' client.put_object(Bucket=bucket_name, Key=object_key, Body='something') client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ 'CORSRules': [{ 'AllowedMethods': ['GET', 'PUT', 'POST'], 'AllowedOrigins': ['*'], 'ExposeHeaders': [ 'ETag', 'x-amz-version-id' ] }] }, ) # get object and assert headers url = client.generate_presigned_url( 'get_object', Params={'Bucket': bucket_name, 'Key': object_key} ) response = requests.get(url, verify=False) self.assertEquals(response.headers['Access-Control-Expose-Headers'], 'ETag,x-amz-version-id') # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_get_response_header_overrides(self): # Signed requests may include certain header overrides in the querystring # https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) # put object object_key = 'key-by-hostname' client.put_object(Bucket=bucket_name, Key=object_key, Body='something') # get object and assert headers expiry_date = 'Wed, 21 Oct 2015 07:28:00 GMT' url = client.generate_presigned_url( 'get_object', Params={ 'Bucket': bucket_name, 'Key': object_key, 'ResponseCacheControl': 'max-age=74', 'ResponseContentDisposition': 
'attachment; filename="foo.jpg"', 'ResponseContentEncoding': 'identity', 'ResponseContentLanguage': 'de-DE', 'ResponseContentType': 'image/jpeg', 'ResponseExpires': expiry_date} ) response = requests.get(url, verify=False) self.assertEqual(response.headers['cache-control'], 'max-age=74') self.assertEqual(response.headers['content-disposition'], 'attachment; filename="foo.jpg"') self.assertEqual(response.headers['content-encoding'], 'identity') self.assertEqual(response.headers['content-language'], 'de-DE') self.assertEqual(response.headers['content-type'], 'image/jpeg') # Note: looks like depending on the environment/libraries, we can get different date formats... possible_date_formats = ['2015-10-21T07:28:00Z', expiry_date] self.assertIn(response.headers['expires'], possible_date_formats) # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_copy_md5(self): bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) # put object src_key = 'src' client.put_object(Bucket=bucket_name, Key=src_key, Body='something') # copy object dest_key = 'dest' response = client.copy_object( Bucket=bucket_name, CopySource={ 'Bucket': bucket_name, 'Key': src_key }, Key=dest_key ) self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) # Create copy object to try to match s3a setting Content-MD5 dest_key2 = 'dest' url = client.generate_presigned_url( 'copy_object', Params={'Bucket': bucket_name, 'CopySource': {'Bucket': bucket_name, 'Key': src_key}, 'Key': dest_key2} ) request_response = requests.put(url, verify=False) self.assertEqual(request_response.status_code, 200) # Cleanup self._delete_bucket(bucket_name, [src_key, dest_key, dest_key2]) def test_s3_invalid_content_md5(self): bucket_name = 'test-bucket-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) # put object with invalid content MD5 hashes = { '__invalid__': 'InvalidDigest', '000': 'InvalidDigest', 'not base64 encoded checksum': 'InvalidDigest', # InvalidDigest 'MTIz': 'BadDigest' # "123" base64 encoded } for hash, error in hashes.items(): with self.assertRaises(Exception) as ctx: self.s3_client.put_object(Bucket=bucket_name, Key='test-key', Body='something', ContentMD5=hash) self.assertIn(error, str(ctx.exception)) # Cleanup self.s3_client.delete_bucket(Bucket=bucket_name) def test_s3_upload_download_gzip(self): bucket_name = 'test-bucket-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) data = '1234567890 ' * 100 # Write contents to memory rather than a file. 
upload_file_object = BytesIO() with gzip.GzipFile(fileobj=upload_file_object, mode='w') as filestream: filestream.write(data.encode('utf-8')) # Upload gzip self.s3_client.put_object(Bucket=bucket_name, Key='test.gz', ContentEncoding='gzip', Body=upload_file_object.getvalue()) # Download gzip downloaded_object = self.s3_client.get_object(Bucket=bucket_name, Key='test.gz') download_file_object = BytesIO(downloaded_object['Body'].read()) with gzip.GzipFile(fileobj=download_file_object, mode='rb') as filestream: downloaded_data = filestream.read().decode('utf-8') self.assertEqual(downloaded_data, data) def test_set_external_hostname(self): bucket_name = 'test-bucket-%s' % short_uid() key = 'test.file' hostname_before = config.HOSTNAME_EXTERNAL config.HOSTNAME_EXTERNAL = 'foobar' try: content = 'test content 123' acl = 'public-read' self.s3_client.create_bucket(Bucket=bucket_name) # upload file response = self._perform_multipart_upload(bucket=bucket_name, key=key, data=content, acl=acl) expected_url = '%s://%s:%s/%s/%s' % (get_service_protocol(), config.HOSTNAME_EXTERNAL, config.PORT_S3, bucket_name, key) self.assertEqual(expected_url, response['Location']) # fix object ACL - currently not directly support for multipart uploads self.s3_client.put_object_acl(Bucket=bucket_name, Key=key, ACL=acl) # download object via API downloaded_object = self.s3_client.get_object(Bucket=bucket_name, Key=key) self.assertEqual(to_str(downloaded_object['Body'].read()), content) # download object directly from download link download_url = response['Location'].replace('%s:' % config.HOSTNAME_EXTERNAL, 'localhost:') response = safe_requests.get(download_url) self.assertEqual(response.status_code, 200) self.assertEqual(to_str(response.content), content) finally: config.HOSTNAME_EXTERNAL = hostname_before def test_s3_website_errordocument(self): # check that the error document is returned when configured bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) client.put_object(Bucket=bucket_name, Key='error.html', Body='This is the error document') client.put_bucket_website( Bucket=bucket_name, WebsiteConfiguration={'ErrorDocument': {'Key': 'error.html'}} ) url = client.generate_presigned_url( 'get_object', Params={'Bucket': bucket_name, 'Key': 'nonexistent'} ) response = requests.get(url, verify=False) self.assertEqual(response.status_code, 200) self.assertEqual(response.text, 'This is the error document') # cleanup client.delete_object(Bucket=bucket_name, Key='error.html') client.delete_bucket(Bucket=bucket_name) # check that normal responses are returned for bucket with index configuration, but not error document bucket_name = 'test-bucket-%s' % short_uid() client.create_bucket(Bucket=bucket_name) client.put_bucket_website( Bucket=bucket_name, WebsiteConfiguration={'IndexDocument': {'Suffix': 'index.html'}} ) url = client.generate_presigned_url( 'get_object', Params={'Bucket': bucket_name, 'Key': 'nonexistent'} ) response = requests.get(url, verify=False) self.assertEqual(response.status_code, 404) # cleanup client.delete_bucket(Bucket=bucket_name) # check that normal responses are returned for bucket without configuration bucket_name = 'test-bucket-%s' % short_uid() client.create_bucket(Bucket=bucket_name) url = client.generate_presigned_url( 'get_object', Params={'Bucket': bucket_name, 'Key': 'nonexistent'} ) response = requests.get(url, verify=False) self.assertEqual(response.status_code, 404) # cleanup client.delete_bucket(Bucket=bucket_name) def 
test_s3_website_errordocument_missing(self): # check that 404 is returned when error document is configured but missing bucket_name = 'test-bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket_name) client.put_bucket_website( Bucket=bucket_name, WebsiteConfiguration={'ErrorDocument': {'Key': 'error.html'}} ) url = client.generate_presigned_url( 'get_object', Params={'Bucket': bucket_name, 'Key': 'nonexistent'} ) response = requests.get(url, verify=False) self.assertEqual(response.status_code, 404) client.delete_bucket(Bucket=bucket_name) def test_s3_event_notification_with_sqs(self): key_by_path = 'aws/bucket=2020/test1.txt' bucket_name = 'notif-sqs-%s' % short_uid() queue_url, queue_attributes = self._create_test_queue() self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name) self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Enabled'}) body = 'Lorem ipsum dolor sit amet, ... ' * 30 # put an object self.s3_client.put_object(Bucket=bucket_name, Key=key_by_path, Body=body) self.assertEqual(self._get_test_queue_message_count(queue_url), '1') rs = self.sqs_client.receive_message(QueueUrl=queue_url) record = [json.loads(to_str(m['Body'])) for m in rs['Messages']][0]['Records'][0] download_file = new_tmp_file() self.s3_client.download_file(Bucket=bucket_name, Key=key_by_path, Filename=download_file) self.assertEqual(record['s3']['object']['size'], os.path.getsize(download_file)) # clean up self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Disabled'}) self.sqs_client.delete_queue(QueueUrl=queue_url) self._delete_bucket(bucket_name, [key_by_path]) def test_s3_delete_object_with_version_id(self): test_1st_key = 'aws/s3/testkey1.txt' test_2nd_key = 'aws/s3/testkey2.txt' body = 'Lorem ipsum dolor sit amet, ... ' * 30 self.s3_client.create_bucket(Bucket=TEST_BUCKET_WITH_VERSIONING) self.s3_client.put_bucket_versioning(Bucket=TEST_BUCKET_WITH_VERSIONING, VersioningConfiguration={'Status': 'Enabled'}) # put 2 objects rs = self.s3_client.put_object(Bucket=TEST_BUCKET_WITH_VERSIONING, Key=test_1st_key, Body=body) self.s3_client.put_object(Bucket=TEST_BUCKET_WITH_VERSIONING, Key=test_2nd_key, Body=body) version_id = rs['VersionId'] # delete 1st object with version rs = self.s3_client.delete_objects(Bucket=TEST_BUCKET_WITH_VERSIONING, Delete={'Objects': [{'Key': test_1st_key, 'VersionId': version_id}]}) deleted = rs['Deleted'][0] self.assertEqual(deleted['Key'], test_1st_key) self.assertEqual(deleted['VersionId'], version_id) rs = self.s3_client.list_object_versions(Bucket=TEST_BUCKET_WITH_VERSIONING) object_versions = [object['VersionId'] for object in rs['Versions']] self.assertNotIn(version_id, object_versions) # clean up self.s3_client.put_bucket_versioning(Bucket=TEST_BUCKET_WITH_VERSIONING, VersioningConfiguration={'Status': 'Disabled'}) self._delete_bucket(TEST_BUCKET_WITH_VERSIONING, [test_1st_key, test_2nd_key]) def test_etag_on_get_object_call(self): self.s3_client.create_bucket(Bucket=TEST_BUCKET_NAME_2) body = 'Lorem ipsum dolor sit amet, ... 
' * 30 rs = self.s3_client.put_object(Bucket=TEST_BUCKET_NAME_2, Key=TEST_KEY_2, Body=body) etag = rs['ETag'] rs = self.s3_client.get_object( Bucket=TEST_BUCKET_NAME_2, Key=TEST_KEY_2 ) self.assertIn('ETag', rs) self.assertEqual(etag, rs['ETag']) self.assertEqual(rs['ContentLength'], len(body)) rs = self.s3_client.get_object( Bucket=TEST_BUCKET_NAME_2, Key=TEST_KEY_2, Range='bytes=0-{}'.format(TEST_GET_OBJECT_RANGE - 1) ) self.assertIn('ETag', rs) self.assertEqual(etag, rs['ETag']) self.assertEqual(rs['ContentLength'], TEST_GET_OBJECT_RANGE) # clean up self._delete_bucket(TEST_BUCKET_NAME_2, [TEST_KEY_2]) def test_get_object_versioning(self): bucket_name = 'bucket-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) rs = self.s3_client.list_object_versions( Bucket=bucket_name, EncodingType='url' ) self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200) self.assertEqual(rs['Name'], bucket_name) # clean up self._delete_bucket(bucket_name, []) def test_bucket_versioning(self): self.s3_client.create_bucket(Bucket=TEST_BUCKET_WITH_VERSIONING) self.s3_client.put_bucket_versioning(Bucket=TEST_BUCKET_WITH_VERSIONING, VersioningConfiguration={'Status': 'Enabled'}) result = self.s3_client.get_bucket_versioning(Bucket=TEST_BUCKET_WITH_VERSIONING) self.assertEqual(result['Status'], 'Enabled') def test_get_bucket_versioning_order(self): bucket_name = 'version-order-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Enabled'}) self.s3_client.put_object(Bucket=bucket_name, Key='test', Body='body') self.s3_client.put_object(Bucket=bucket_name, Key='test', Body='body') self.s3_client.put_object(Bucket=bucket_name, Key='test2', Body='body') rs = self.s3_client.list_object_versions( Bucket=bucket_name, ) self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200) self.assertEqual(rs['Name'], bucket_name) self.assertEqual(rs['Versions'][0]['IsLatest'], True) self.assertEqual(rs['Versions'][2]['IsLatest'], True) def test_upload_big_file(self): bucket_name = 'bucket-big-file-%s' % short_uid() key1 = 'test_key1' key2 = 'test_key1' self.s3_client.create_bucket(Bucket=bucket_name) body1 = '\x01' * 10000000 rs = self.s3_client.put_object(Bucket=bucket_name, Key=key1, Body=body1) self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200) body2 = 'a' * 10000000 rs = self.s3_client.put_object(Bucket=bucket_name, Key=key2, Body=body2) self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200) rs = self.s3_client.head_object(Bucket=bucket_name, Key=key1) self.assertEqual(rs['ContentLength'], len(body1)) rs = self.s3_client.head_object(Bucket=bucket_name, Key=key2) self.assertEqual(rs['ContentLength'], len(body2)) # clean up self._delete_bucket(bucket_name, [key1, key2]) def test_s3_put_more_than_1000_items(self): self.s3_client.create_bucket(Bucket=TEST_BUCKET_NAME_2) for i in range(0, 1010, 1): body = 'test-' + str(i) key = 'test-key-' + str(i) self.s3_client.put_object(Bucket=TEST_BUCKET_NAME_2, Key=key, Body=body) # trying to get the last item of 1010 items added. resp = self.s3_client.get_object(Bucket=TEST_BUCKET_NAME_2, Key='test-key-1009') self.assertEqual(to_str(resp['Body'].read()), 'test-1009') # trying to get the first item of 1010 items added. 
resp = self.s3_client.get_object(Bucket=TEST_BUCKET_NAME_2, Key='test-key-0') self.assertEqual(to_str(resp['Body'].read()), 'test-0') resp = self.s3_client.list_objects(Bucket=TEST_BUCKET_NAME_2, MaxKeys=1010) self.assertEqual(len(resp['Contents']), 1010) resp = self.s3_client.list_objects(Bucket=TEST_BUCKET_NAME_2) self.assertEqual(len(resp['Contents']), 1000) next_marker = resp['NextMarker'] # Second list resp = self.s3_client.list_objects(Bucket=TEST_BUCKET_NAME_2, Marker=next_marker) self.assertEqual(len(resp['Contents']), 10) def test_s3_multipart_upload_file(self): def upload(size_in_mb, bucket): file_name = '{}.tmp'.format(short_uid()) path = '{}'.format(file_name) with open(path, 'wb') as f: f.seek(int(size_in_mb * 1e6)) f.write(b'\0') f.flush() self.s3_client.upload_file( path, bucket, f'{file_name}', ExtraArgs={'StorageClass': 'DEEP_ARCHIVE'} ) os.remove(path) bucket_name = 'bucket-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) upload(1, bucket_name) upload(9, bucket_name) upload(15, bucket_name) s3_resource = aws_stack.connect_to_resource('s3') objects = s3_resource.Bucket(bucket_name).objects.all() keys = [] for obj in objects: keys.append(obj.key) self.assertEqual(obj.storage_class, 'DEEP_ARCHIVE') self._delete_bucket(bucket_name, keys) def test_cors_with_single_origin_error(self): client = self._get_test_client() BUCKET_CORS_CONFIG = { 'CORSRules': [{ 'AllowedOrigins': ['https://localhost:4200'], 'AllowedMethods': ['GET', 'PUT'], 'MaxAgeSeconds': 3000, 'AllowedHeaders': ['*'], }] } client.create_bucket(Bucket='my-s3-bucket') client.put_bucket_cors(Bucket='my-s3-bucket', CORSConfiguration=BUCKET_CORS_CONFIG) # create signed url url = client.generate_presigned_url( ClientMethod='put_object', Params={ 'Bucket': 'my-s3-bucket', 'Key': '424f6bae-c48f-42d8-9e25-52046aecc64d/document.pdf', 'ContentType': 'application/pdf', 'ACL': 'bucket-owner-full-control' }, ExpiresIn=3600 ) result = requests.put(url, data='something', verify=False, headers={'Origin': 'https://localhost:4200', 'Content-Type': 'application/pdf'}) self.assertEqual(result.status_code, 200) BUCKET_CORS_CONFIG = { 'CORSRules': [{ 'AllowedOrigins': ['https://localhost:4200', 'https://localhost:4201'], 'AllowedMethods': ['GET', 'PUT'], 'MaxAgeSeconds': 3000, 'AllowedHeaders': ['*'], }] } client.put_bucket_cors(Bucket='my-s3-bucket', CORSConfiguration=BUCKET_CORS_CONFIG) # create signed url url = client.generate_presigned_url( ClientMethod='put_object', Params={ 'Bucket': 'my-s3-bucket', 'Key': '424f6bae-c48f-42d8-9e25-52046aecc64d/document.pdf', 'ContentType': 'application/pdf', 'ACL': 'bucket-owner-full-control' }, ExpiresIn=3600 ) result = requests.put(url, data='something', verify=False, headers={'Origin': 'https://localhost:4200', 'Content-Type': 'application/pdf'}) self.assertEqual(result.status_code, 200) result = requests.put(url, data='something', verify=False, headers={'Origin': 'https://localhost:4201', 'Content-Type': 'application/pdf'}) self.assertEqual(result.status_code, 200) def test_s3_put_object_notification_with_lambda(self): bucket_name = 'bucket-%s' % short_uid() function_name = 'func-%s' % short_uid() table_name = 'table-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) testutil.create_lambda_function( handler_file=TEST_LAMBDA_PYTHON_ECHO, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36 ) aws_stack.create_dynamodb_table( table_name=table_name, partition_key='uuid' ) self.s3_client.put_bucket_notification_configuration( Bucket=bucket_name, 
NotificationConfiguration={ 'LambdaFunctionConfigurations': [ { 'LambdaFunctionArn': aws_stack.lambda_function_arn(function_name), 'Events': ['s3:ObjectCreated:*'] } ] } ) # put an object obj = self.s3_client.put_object(Bucket=bucket_name, Key=table_name, Body='something..') etag = obj['ETag'] time.sleep(2) table = aws_stack.connect_to_resource('dynamodb').Table(table_name) def check_table(): rs = table.scan() self.assertEqual(len(rs['Items']), 1) return rs rs = retry(check_table, retries=4, sleep=3) record = rs['Items'][0] self.assertEqual(record['data']['s3']['bucket']['name'], bucket_name) self.assertEqual(record['data']['s3']['object']['eTag'], etag) # clean up self._delete_bucket(bucket_name, [table_name]) lambda_client = aws_stack.connect_to_service('lambda') lambda_client.delete_function(FunctionName=function_name) dynamodb_client = aws_stack.connect_to_service('dynamodb') dynamodb_client.delete_table(TableName=table_name) def test_s3_put_object_notification_with_sns_topic(self): bucket_name = 'bucket-%s' % short_uid() topic_name = 'topic-%s' % short_uid() queue_name = 'queue-%s' % short_uid() key_name = 'bucket-key-%s' % short_uid() sns_client = aws_stack.connect_to_service('sns') self.s3_client.create_bucket(Bucket=bucket_name) queue_url = self.sqs_client.create_queue(QueueName=queue_name)['QueueUrl'] topic_arn = sns_client.create_topic(Name=topic_name)['TopicArn'] sns_client.subscribe(TopicArn=topic_arn, Protocol='sqs', Endpoint=aws_stack.sqs_queue_arn(queue_name)) self.s3_client.put_bucket_notification_configuration( Bucket=bucket_name, NotificationConfiguration={ 'TopicConfigurations': [ { 'TopicArn': topic_arn, 'Events': ['s3:ObjectCreated:*'] } ] } ) # Put an object # This will trigger an event to sns topic, sqs queue will get a message since it's a subscriber of topic self.s3_client.put_object(Bucket=bucket_name, Key=key_name, Body='body content...') time.sleep(2) def get_message(q_url): resp = self.sqs_client.receive_message(QueueUrl=q_url) m = resp['Messages'][0] self.sqs_client.delete_message( QueueUrl=q_url, ReceiptHandle=m['ReceiptHandle'] ) return json.loads(m['Body']) message = retry(get_message, retries=3, sleep=2, q_url=queue_url) # We got a notification message in sqs queue (from s3 source) self.assertEqual(message['Type'], 'Notification') self.assertEqual(message['TopicArn'], topic_arn) self.assertEqual(message['Subject'], 'Amazon S3 Notification') r = json.loads(message['Message'])['Records'][0] self.assertEqual(r['eventSource'], 'aws:s3') self.assertEqual(r['s3']['bucket']['name'], bucket_name) self.assertEqual(r['s3']['object']['key'], key_name) # clean up self._delete_bucket(bucket_name, [key_name]) self.sqs_client.delete_queue(QueueUrl=queue_url) sns_client.delete_topic(TopicArn=topic_arn) def test_s3_get_deep_archive_object(self): bucket_name = 'bucket-%s' % short_uid() object_key = 'key-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) # put DEEP_ARCHIVE object self.s3_client.put_object( Bucket=bucket_name, Key=object_key, Body='body data', StorageClass='DEEP_ARCHIVE' ) with self.assertRaises(ClientError) as ctx: self.s3_client.get_object( Bucket=bucket_name, Key=object_key ) self.assertIn('InvalidObjectState', str(ctx.exception)) # clean up self._delete_bucket(bucket_name, [object_key]) def test_s3_get_deep_archive_object_restore(self): bucket_name = 'bucket-%s' % short_uid() object_key = 'key-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) # put DEEP_ARCHIVE object self.s3_client.put_object( Bucket=bucket_name, 
Key=object_key, Body='body data', StorageClass='DEEP_ARCHIVE' ) with self.assertRaises(ClientError) as ctx: self.s3_client.get_object( Bucket=bucket_name, Key=object_key ) self.assertIn('InvalidObjectState', str(ctx.exception)) # put DEEP_ARCHIVE object self.s3_client.restore_object( Bucket=bucket_name, Key=object_key, RestoreRequest={ 'Days': 30, 'GlacierJobParameters': { 'Tier': 'Bulk' }, 'Tier': 'Bulk', }, ) response = self.s3_client.get_object( Bucket=bucket_name, Key=object_key ) self.assertIn('etag', response.get('ResponseMetadata').get('HTTPHeaders')) # clean up self._delete_bucket(bucket_name, [object_key]) def test_encoding_notification_messages(self): key = 'a@b' bucket_name = 'notif-enc-%s' % short_uid() queue_url = self.sqs_client.create_queue(QueueName='testQueue')['QueueUrl'] queue_attributes = self.sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['QueueArn']) self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name) # put an object where the bucket_name is in the path self.s3_client.put_object(Bucket=bucket_name, Key=key, Body='something') response = self.sqs_client.receive_message(QueueUrl=queue_url) self.assertEqual(json.loads(response['Messages'][0]['Body'])['Records'][0]['s3']['object']['key'], 'a%40b') # clean up self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]}) def test_s3_batch_delete_objects_using_requests(self): bucket_name = 'bucket-%s' % short_uid() object_key_1 = 'key-%s' % short_uid() object_key_2 = 'key-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) self.s3_client.put_object(Bucket=bucket_name, Key=object_key_1, Body='This body document') self.s3_client.put_object(Bucket=bucket_name, Key=object_key_2, Body='This body document') base_url = '{}://{}:{}'.format(get_service_protocol(), config.LOCALSTACK_HOSTNAME, config.PORT_S3) url = '{}/{}?delete='.format(base_url, bucket_name) r = requests.post(url=url, data=BATCH_DELETE_BODY % (object_key_1, object_key_2)) self.assertEqual(r.status_code, 200) s3_resource = aws_stack.connect_to_resource('s3') bucket = s3_resource.Bucket(bucket_name) total_keys = sum(1 for _ in bucket.objects.all()) self.assertEqual(total_keys, 0) # clean up self._delete_bucket(bucket_name, []) def test_presigned_url_signature_authentication(self): client = self._get_test_client() OBJECT_KEY = 'test.txt' OBJECT_DATA = 'this should be found in when you download {}.'.format(OBJECT_KEY) BUCKET = 'presign-testing' client.create_bucket(Bucket=BUCKET) presign_url = client.generate_presigned_url( 'put_object', Params={'Bucket': BUCKET, 'Key': OBJECT_KEY}, ExpiresIn=3 ) # Valid request response = requests.put(presign_url, data=OBJECT_DATA) self.assertEqual(response.status_code, 200) # Invalid request response = requests.put(presign_url, data=OBJECT_DATA, headers={'Content-Type': 'my-fake-content/type'}) self.assertEqual(response.status_code, 403) # Expired request time.sleep(3) response = requests.put(presign_url, data=OBJECT_DATA) self.assertEqual(response.status_code, 403) client.delete_object(Bucket=BUCKET, Key=OBJECT_KEY) client.delete_bucket(Bucket=BUCKET) def test_precondition_failed_error(self): bucket = 'bucket-%s' % short_uid() client = self._get_test_client() client.create_bucket(Bucket=bucket) client.put_object(Bucket=bucket, Key='foo', Body=b'{"foo": "bar"}') # this line makes localstack crash: try: client.get_object(Bucket=bucket, Key='foo', IfMatch='"not good etag"') except ClientError as e: self.assertEqual(e.response['Error']['Code'], 
'PreconditionFailed') self.assertEqual(e.response['Error']['Message'], 'At least one of the pre-conditions you ' 'specified did not hold') client.delete_object(Bucket=bucket, Key='foo') client.delete_bucket(Bucket=bucket) def test_cors_configurtaions(self): client = self._get_test_client() bucket = 'test-cors' object_key = 'index.html' url = '{}/{}/{}'.format(config.get_edge_url(), bucket, object_key) BUCKET_CORS_CONFIG = { 'CORSRules': [{ 'AllowedOrigins': [config.get_edge_url()], 'AllowedMethods': ['GET', 'PUT'], 'MaxAgeSeconds': 3000, 'AllowedHeaders': ['x-amz-tagging'], }] } client.create_bucket(Bucket=bucket) client.put_bucket_cors(Bucket=bucket, CORSConfiguration=BUCKET_CORS_CONFIG) client.put_object(Bucket=bucket, Key=object_key, Body='<h1>Index</html>') response = requests.get(url, headers={'Origin': config.get_edge_url(), 'Content-Type': 'text/html'}) self.assertEqual(response.status_code, 200) self.assertIn('Access-Control-Allow-Origin'.lower(), response.headers) self.assertEqual(response.headers['Access-Control-Allow-Origin'], config.get_edge_url()) self.assertIn('Access-Control-Allow-Methods'.lower(), response.headers) self.assertIn('GET', response.headers['Access-Control-Allow-Methods']) self.assertIn('Access-Control-Allow-Headers', response.headers) self.assertEqual(response.headers['Access-Control-Allow-Headers'], 'x-amz-tagging') self.assertIn('Access-Control-Max-Age'.lower(), response.headers) self.assertEqual(response.headers['Access-Control-Max-Age'], '3000') self.assertIn('Access-Control-Allow-Credentials'.lower(), response.headers) self.assertEqual(response.headers['Access-Control-Allow-Credentials'].lower(), 'true') BUCKET_CORS_CONFIG = { 'CORSRules': [{ 'AllowedOrigins': ['https://anydomain.com'], 'AllowedMethods': ['GET', 'PUT'], 'MaxAgeSeconds': 3000, 'AllowedHeaders': ['x-amz-tagging'], }] } client.put_bucket_cors(Bucket=bucket, CORSConfiguration=BUCKET_CORS_CONFIG) response = requests.get(url, headers={'Origin': config.get_edge_url(), 'Content-Type': 'text/html'}) self.assertEqual(response.status_code, 200) self.assertNotIn('Access-Control-Allow-Origin'.lower(), response.headers) self.assertNotIn('Access-Control-Allow-Methods'.lower(), response.headers) self.assertNotIn('Access-Control-Allow-Headers', response.headers) self.assertNotIn('Access-Control-MaxAge', response.headers) # cleaning client.delete_object(Bucket=bucket, Key=object_key) client.delete_bucket(Bucket=bucket) def test_s3_download_object_with_lambda(self): bucket_name = 'bucket-%s' % short_uid() function_name = 'func-%s' % short_uid() key = 'key-%s' % short_uid() self.s3_client.create_bucket(Bucket=bucket_name) self.s3_client.put_object(Bucket=bucket_name, Key=key, Body='something..') testutil.create_lambda_function( handler_file=TEST_LAMBDA_PYTHON_DOWNLOAD_FROM_S3, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36, envvars=dict({ 'BUCKET_NAME': bucket_name, 'OBJECT_NAME': key, 'LOCAL_FILE_NAME': '/tmp/' + key, }) ) lambda_client = aws_stack.connect_to_service('lambda') lambda_client.invoke(FunctionName=function_name, InvocationType='Event') retry(testutil.check_expected_lambda_log_events_length, retries=10, sleep=3, function_name=function_name, expected_length=1) # clean up self._delete_bucket(bucket_name, [key]) lambda_client.delete_function(FunctionName=function_name) def test_putobject_with_multiple_keys(self): client = self._get_test_client() bucket = 'bucket-%s' % short_uid() key_by_path = 'aws/key1/key2/key3' client.create_bucket(Bucket=bucket) client.put_object( Body=b'test', 
Bucket=bucket, Key=key_by_path ) # Cleanup self._delete_bucket(bucket, key_by_path) def test_terraform_request_sequence(self): reqs = load_file(os.path.join(os.path.dirname(__file__), 'files', 's3.requests.txt')) reqs = reqs.split('---') for req in reqs: header, _, body = req.strip().partition('\n\n') req, _, headers = header.strip().partition('\n') headers = {h.split(':')[0]: h.partition(':')[2].strip() for h in headers.split('\n')} method, path, _ = req.split(' ') url = '%s%s' % (config.get_edge_url(), path) result = getattr(requests, method.lower())(url, data=body, headers=headers) self.assertLess(result.status_code, 400) # --------------- # HELPER METHODS # --------------- @staticmethod def generate_large_file(size): # https://stackoverflow.com/questions/8816059/create-file-of-particular-size-in-python filename = 'large_file_%s' % uuid.uuid4() f = open(filename, 'wb') f.seek(size - 1) f.write(b'\0') f.close() return open(filename, 'r') def _create_test_queue(self): queue_url = self.sqs_client.create_queue(QueueName=TEST_QUEUE_FOR_BUCKET_WITH_NOTIFICATION)['QueueUrl'] queue_attributes = self.sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['QueueArn']) return queue_url, queue_attributes def _create_test_notification_bucket(self, queue_attributes, bucket_name): self.s3_client.create_bucket(Bucket=bucket_name) self.s3_client.put_bucket_notification_configuration( Bucket=bucket_name, NotificationConfiguration={ 'QueueConfigurations': [ { 'QueueArn': queue_attributes['Attributes']['QueueArn'], 'Events': ['s3:ObjectCreated:*'] } ] } ) def _get_test_queue_message_count(self, queue_url): queue_attributes = self.sqs_client.get_queue_attributes( QueueUrl=queue_url, AttributeNames=['ApproximateNumberOfMessages'] ) return queue_attributes['Attributes']['ApproximateNumberOfMessages'] def _delete_bucket(self, bucket_name, keys=[]): keys = keys if isinstance(keys, list) else [keys] objects = [{'Key': k} for k in keys] if objects: self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': objects}) self.s3_client.delete_bucket(Bucket=bucket_name) def _perform_multipart_upload(self, bucket, key, data=None, zip=False, acl=None): kwargs = {'ACL': acl} if acl else {} multipart_upload_dict = self.s3_client.create_multipart_upload(Bucket=bucket, Key=key, **kwargs) upload_id = multipart_upload_dict['UploadId'] # Write contents to memory rather than a file. data = data or (5 * short_uid()) data = to_bytes(data) upload_file_object = BytesIO(data) if zip: upload_file_object = BytesIO() with gzip.GzipFile(fileobj=upload_file_object, mode='w') as filestream: filestream.write(data) response = self.s3_client.upload_part(Bucket=bucket, Key=key, Body=upload_file_object, PartNumber=1, UploadId=upload_id) multipart_upload_parts = [{'ETag': response['ETag'], 'PartNumber': 1}] return self.s3_client.complete_multipart_upload( Bucket=bucket, Key=key, MultipartUpload={'Parts': multipart_upload_parts}, UploadId=upload_id ) def _perform_presigned_url_upload(self, bucket, key): client = self._get_test_client() url = client.generate_presigned_url( 'put_object', Params={'Bucket': bucket, 'Key': key} ) url = url + '&X-Amz-Credential=x&X-Amz-Signature=y' requests.put(url, data='something', verify=False) def _get_test_client(self): return boto3.client( 's3', endpoint_url=config.get_edge_url(), aws_access_key_id=TEST_AWS_ACCESS_KEY_ID, aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY )
41.718584
119
0.638751
[ "Apache-2.0" ]
Josemaralves/localstack
tests/integration/test_s3.py
70,713
Python
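The LocalStack test file above drives S3 through boto3 plus repository-specific helpers. As a minimal, self-contained sketch of the presigned-URL upload pattern those tests rely on, the snippet below assumes a LocalStack edge endpoint at http://localhost:4566, dummy credentials, and illustrative bucket/key names; none of these values are taken from the tests themselves.

# Minimal sketch of the presigned-URL upload flow exercised in the tests above.
# Endpoint, credentials and names are illustrative assumptions.
import boto3
import requests

s3 = boto3.client(
    's3',
    endpoint_url='http://localhost:4566',   # assumed LocalStack edge endpoint
    region_name='us-east-1',
    aws_access_key_id='test',
    aws_secret_access_key='test',
)

bucket, key = 'presign-demo-bucket', 'demo.txt'
s3.create_bucket(Bucket=bucket)

url = s3.generate_presigned_url(
    'put_object', Params={'Bucket': bucket, 'Key': key}, ExpiresIn=60
)
resp = requests.put(url, data=b'hello from a presigned URL')
assert resp.status_code == 200

body = s3.get_object(Bucket=bucket, Key=key)['Body'].read()
assert body == b'hello from a presigned URL'

Running this requires a LocalStack container listening on that port; against real AWS the PUT would additionally need to match the signed headers.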
# Generated by Django 2.2.1 on 2019-11-18 10:44 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0020_auto_20191112_1048'), ] operations = [ migrations.AlterModelOptions( name='payment', options={'ordering': ('date',), 'verbose_name': 'betaling', 'verbose_name_plural': 'betalinger'}, ), ]
22.722222
109
0.611247
[ "MPL-2.0", "MPL-2.0-no-copyleft-exception" ]
OS2bos/OS2bos
backend/core/migrations/0021_auto_20191118_1144.py
409
Python
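The migration above only alters model options. For illustration, this is roughly the kind of Meta block on the Payment model that such a migration reflects; the ordering and verbose names come from the migration itself, while the field definitions are invented placeholders, not the actual OS2bos model.

# Hypothetical model whose Meta options correspond to the AlterModelOptions above.
from django.db import models

class Payment(models.Model):
    date = models.DateField()                                   # assumed field
    amount = models.DecimalField(max_digits=14, decimal_places=2)  # assumed field

    class Meta:
        ordering = ('date',)
        verbose_name = 'betaling'
        verbose_name_plural = 'betalinger'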
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jan 10 19:37:19 2018 @author: kaustabh """ #making a single list representing all sides in a serial fashion r = ['w1','w2','w3','w4','w5','w6','w7','w8','w9','b1','b2','b3','r1','r2','r3','g1','g2','g3','o1','o2','o3','b4','b5','b6','r4','r5','r6','g4','g5','g6','o4','o5','o6','b7','b8','b9','r7','r8','r9','g7','g8','g9','o7','o8','o9', 'y1','y2','y3','y4','y5','y6','y7','y8','y9'] real = r[:] # cube is the list and we can't use random.shuffle because certain colors stay together def shift(r,L): #r is cube list ; L is 12 element array to be shifted t1 = r[L[0]] t2 = r[L[1]] t3 = r[L[2]] r[L[0]] , r[L[3]] = r[L[3]] , r[L[0]] r[L[1]] , r[L[4]] = r[L[4]] , r[L[1]] r[L[2]] , r[L[5]] = r[L[5]] , r[L[2]] r[L[3]] , r[L[6]] = r[L[6]] , r[L[3]] r[L[4]] , r[L[7]] = r[L[7]] , r[L[4]] r[L[5]] , r[L[8]] = r[L[8]] , r[L[5]] r[L[6]] , r[L[9]] = r[L[9]] , r[L[6]] r[L[7]] , r[L[10]] = r[L[10]] , r[L[7]] r[L[8]] , r[L[11]] = r[L[11]] , r[L[8]] r[L[9]] = t1 r[L[10]] = t2 r[L[11]] = t3 return r def rotate(r, L): #a face will also rotate t1 = r[L[0]] t2 = r[L[7]] r[L[0]] , r[L[6]] = r[L[6]] , r[L[0]] r[L[7]] , r[L[5]] = r[L[5]] , r[L[7]] r[L[6]] , r[L[4]] = r[L[4]] , r[L[6]] r[L[3]] , r[L[5]] = r[L[5]] , r[L[3]] r[L[2]] , r[L[4]] = r[L[4]] , r[L[2]] r[L[1]] , r[L[3]] = r[L[3]] , r[L[1]] r[L[2]] = t1 r[L[1]] = t2 return def right_c(r): L = [33,34,35,36,37,38,39,40,41,42,43,44] rt = [45,48,51,52,53,50,47,46] rotate(r,rt) return shift(r, L) def right_ac(r): L = [33,34,35,36,37,38,39,40,41,42,43,44] L.reverse() rt = [45,48,51,52,53,50,47,46] rt.reverse() rotate(r,rt) return shift(r, L) def left_ac(r): rt = [0,1,2,5,8,7,6,3] rotate(r,rt) L = [9,10,11,12,13,14,15,16,17,18,19,20] return shift(r, L) def left_c(r): rt = [0,1,2,5,8,7,6,3] rt.reverse() rotate(r,rt) L = [9,10,11,12,13,14,15,16,17,18,19,20] L.reverse() return shift(r, L) def up_c(r): rt = [9,21,33,34,35,23,11,10] rotate(r, rt) L = [0,3,6,12,24,36,45,48,51,18,30,42] return shift(r, L) def up_ac(r): rt = [9,21,33,34,35,23,11,10] rt.reverse() rotate(r, rt) L = [0,3,6,12,24,36,45,48,51,18,30,42] L.reverse() return shift(r, L) def down_ac(r): rt = [15,16,17,29,41,40,39,27] rotate(r,rt) L = [2,5,8,14,26,38,47,50,53,20,32,44] return shift(r, L) def down_c(r): rt = [15,16,17,29,41,40,39,27] rt.reverse() rotate(r,rt) L = [2,5,8,14,26,38,47,50,53,20,32,44] L.reverse() return shift(r, L) def front_c(r): rt = [12,24,36,37,38,26,14,13] rotate(r,rt) L = [11,23,35,45,46,47,39,27,15,8,7,6] return shift(r, L) def front_ac(r): rt = [12,24,36,37,38,26,14,13] rt.reverse() rotate(r,rt) L = [11,23,35,45,46,47,39,27,15,8,7,6] L.reverse() return shift(r, L) def back_c(r): rt= [18,30,42,43,44,32,20,19] rotate(r,rt) L = [0,3,6,9,21,33,45,48,51,15,27,39] return shift(r, L) def back_ac(r): rt= [18,30,42,43,44,32,20,19] rt.reverse() rotate(r,rt) L = [0,3,6,9,21,33,45,48,51,15,27,39] L.reverse() return shift(r, L) def shuffle(r): import random for i in range(random.randint(17,32)): random.choice([right_c(r),left_c(r),up_c(r),down_c(r),front_c(r),back_c(r),back_ac(r),right_ac(r),left_ac(r),up_ac(r),front_ac(r),down_ac(r)]) return r def reset(): return real[:] def display(r): for j in range(15): for i in range(9): if j == 0 : if i == 0 : print(" ") while i<4: print(" ", end='') i += 1 if i == 5: print(r[9]+" "+r[21]+" "+r[33],end='\n') if j == 1 : while i<4: print(" ", end='') i += 1 if i == 5: print(r[10]+" "+r[22]+" "+r[34],end='\n') if j == 2 : while i<4: print(" ", end='') i += 1 if i == 5: print(r[11]+" "+r[23]+" 
"+r[35],end='\n') if j == 3 : if i == 0: print(" ") if j == 4 : if i==0 : print(r[0]+" "+r[3]+" "+r[6]+" ",end='') if i==3 : print(r[12]+" "+r[24]+" "+r[36]+" ",end='') if i==6 : print(" "+r[45]+" "+r[48]+" "+r[51],end='\n') if j == 5 : if i==0 : print(r[1]+" "+r[4]+" "+r[7]+" ",end='') if i==3 : print(r[13]+" "+r[25]+" "+r[37]+" ",end='') if i==6 : print(" "+r[46]+" "+r[49]+" "+r[52],end='\n') if j == 6 : if i==0 : print(r[2]+" "+r[5]+" "+r[8]+" ",end='') if i==3 : print(r[15]+" "+r[26]+" "+r[38]+" ",end='') if i==6 : print(" "+r[47]+" "+r[50]+" "+r[53],end='\n') if j == 7 : if i==0: print(" ") if j == 8 : while i<4: print(" ", end='') i += 1 if i == 5: print(r[15]+" "+r[27]+" "+r[39],end='\n') if j == 9 : while i<4: print(" ", end='') i += 1 if i == 5: print(r[16]+" "+r[28]+" "+r[40],end='\n') if j == 10 : while i<4: print(" ", end='') i += 1 if i == 5: print(r[17]+" "+r[29]+" "+r[41],end='\n') if j == 11 : if i==0 : print(" ") if j == 12 : while i<4: print(" ", end='') i += 1 if i == 5: print(r[18]+" "+r[30]+" "+r[42],end='\n') if j == 13 : while i<4: print(" ", end='') i += 1 if i == 5: print(r[19]+" "+r[31]+" "+r[43],end='\n') if j == 14 : while i<4: print(" ", end='') i += 1 if i == 5: print(r[20]+" "+r[32]+" "+r[44],end='\n') if i == 8: print(" ") print(" ------------------------------------- ") return ' '.join(r) print(r) display(r) left_ac(r) left_ac(r) display(r) left_ac(r) left_ac(r) display(r)
24.701639
276
0.335678
[ "MIT" ]
KaustabhGanguly/RemixRubiks
Rubikmovement.py
7,534
Python
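A quick way to sanity-check the move functions in the cube script above is to verify that they compose to the identity. The sketch below assumes it is appended to (or run inside) the same module, since it calls reset() and the turn functions directly.

# Four identical quarter turns should return the cube to its previous state.
state = reset()
before = state[:]
for _ in range(4):
    right_c(state)
assert state == before

# A clockwise turn followed by its anti-clockwise counterpart should also cancel out.
up_c(state)
up_ac(state)
assert state == before

Both checks pass because shift() and rotate() applied with a reversed index list undo their forward application.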
from pavilion import result_parsers import yaml_config as yc import re import sre_constants class Regex(result_parsers.ResultParser): """Find matches to the given regex in the given file. The matched string or strings are returned as the result.""" def __init__(self): super().__init__(name='regex', description="Find data using a basic regular " "expression.") self.range_re = re.compile('(-?[0-9]*\.?[0-9]*):(-?.*)') def get_config_items(self): config_items = super().get_config_items() config_items.extend([ yc.StrElem( 'regex', required=True, help_text="The python regex to use to search the given file. " "See: 'https://docs.python.org/3/library/re.html' " "You can use single quotes in YAML to have the " "string interpreted literally. IE '\\n' is a '\\' " "and an 'n'." ), # Use the built-in matches element. result_parsers.MATCHES_ELEM, yc.StrElem( 'threshold', default="", help_text="If a threshold is defined, 'True' will be returned " "if greater than or equal to that many instances " "of the specified word are found. If fewer " "instances are found, 'False' is returned. The " "value must be an integer greater than zero." ), yc.ListElem( 'expected', sub_elem=yc.StrElem(), help_text="Expected value(s) and/or range(s). If provided, " "the result will be 'True' if all of the found " "values (determined by the 'results' value) are " "within the expected range(s) or value(s). " "Otherwise, the result is 'False'. Supports " "integers and floats." ) ]) return config_items def _check_args(self, regex=None, match_type=None, threshold=None, expected=None): try: re.compile(regex) except (ValueError, sre_constants.error) as err: raise result_parsers.ResultParserError( "Invalid regular expression: {}".format(err)) if not isinstance(expected, list): raise result_parsers.ResultParserError( "Expected should be a list.") if threshold: try: int(threshold) except ValueError as err: raise result_parsers.ResultParserError( "Non-integer value provided for 'threshold'.") if int(threshold) < 0: raise result_parsers.ResultParserError( "'threshold' must be a non-negative integer.") if expected: raise result_parsers.ResultParserError( "'threshold' and 'expected' cannot be used at the same " "time.") for item in expected: test_list = [] if ':' in item: test_list = list(self.range_re.search(item).groups()) else: test_list = [ item ] none_used = False for test_item in test_list: if test_item is '': if not none_used: none_used = True else: raise result_parsers.ResultParserError( "No values provided in range: {}" .format(test_list)) else: try: # If the value is an int, it seems to work better to # cast it as a float first, just in case it is a float. float(test_item) except ValueError as err: raise result_parsers.ResultParserError( "Invalid value: {}".format(test_item) ) if len(test_list) > 1: if '.' in test_list[0]: low = float(test_list[0]) elif test_list[0] != '': low = int(test_list[0]) if '.' in test_list[1]: high = float(test_list[1]) elif test_list[1] != '': high = int(test_list[1]) # Check for range specification as # (<lesser value>:<greater value>) if '' not in test_list and high < low: raise result_parsers.ResultParserError( "Invalid range: {}".format(item)) def __call__(self, test, file, regex=None, match_type=None, threshold=None, expected=None): regex = re.compile(regex) matches = [] for line in file.readlines(): # Find all non-overlapping matches and return them as a list. # if more than one capture is used, list contains tuples of # captured strings. 
            matches.extend(regex.findall(line))

        # Test if the number of matches meets the specified threshold.
        if threshold and int(threshold) > 0:
            return len(matches) >= int(threshold)
        elif match_type == result_parsers.MATCH_FIRST:
            matches = None if not matches else matches[0]
        elif match_type == result_parsers.MATCH_LAST:
            matches = None if not matches else matches[-1]
        elif match_type == result_parsers.MATCH_ALL:
            pass
        else:
            raise result_parsers.ResultParserError(
                "Invalid 'matches' value '{}'".format(match_type)
            )

        # If no expected values were given, just return the matches.
        if not expected:
            return matches

        # Otherwise, test whether each found value falls within any of the
        # specified expected values or ranges.
        if not isinstance(matches, list):
            matches = [matches]

        ret_vals = []
        for match in matches:
            if '.' in match:
                match = float(match)
            elif match != '':
                match = int(match)

            for exp in expected:
                if ':' not in exp:
                    # Not a range; check for an exact match.
                    expect = exp
                    if '.' in expect:
                        expect = float(expect)
                    elif expect != '':
                        expect = int(expect)
                    if match == expect:
                        ret_vals.append(True)
                else:
                    # Check whether the found value is in this range. An empty
                    # bound means the range is open on that side.
                    low, high = self.range_re.search(exp).groups()
                    if '.' in low:
                        low = float(low)
                    elif low != '':
                        low = int(low)
                    if '.' in high:
                        high = float(high)
                    elif high != '':
                        high = int(high)

                    if low == '' and match <= high:
                        ret_vals.append(True)
                    elif high == '' and match >= low:
                        ret_vals.append(True)
                    elif low != '' and high != '' and low <= match <= high:
                        ret_vals.append(True)

        return ret_vals
39.06
79
0.46979
[ "BSD-3-Clause" ]
ubccr/pavilion2
lib/pavilion/plugins/results/regex.py
7,812
Python
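The 'expected' option of the regex parser above accepts exact values and low:high ranges with open ends. The standalone sketch below re-implements just that range check for illustration; it mirrors the range_re pattern but is not the plugin's actual return logic, which accumulates booleans per match.

# Simplified re-implementation of the expected-value/range check.
import re

range_re = re.compile(r'(-?[0-9]*\.?[0-9]*):(-?.*)')

def in_expected(value, expected_items):
    """Return True if value equals any listed exact value or falls in any
    low:high range; an empty bound means the range is open on that side."""
    for item in expected_items:
        if ':' not in item:
            if value == float(item):
                return True
            continue
        low, high = range_re.search(item).groups()
        if (low == '' or value >= float(low)) and (high == '' or value <= float(high)):
            return True
    return False

assert in_expected(5, ['1:10'])
assert in_expected(-2.5, [':0'])
assert not in_expected(42, ['3', '0:10'])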
#define functions that will extract the data from SDSS based on an input RA/DEC from astroquery.sdss import SDSS from astropy import coordinates as coords import pandas as pd from astroquery.ned import Ned import matplotlib.pyplot as plt from astropy.convolution import convolve, Box1DKernel import numpy as np from astropy import units as u def ra_dec_format(val): """ Ra/Dec string formatting Converts the input string format of a right ascension/ declination coordinate to one recognizable by astroquery Args: val (str): string; an ra/dec expression formatted as "005313.81 +130955.0". Returns: string: the ra/dec coordinates re-formatted as "00h53m13.81s +13d09m55.0s" """ #ra hour = val[0:2] min_ = val[2:4] sec = val[4:9] ra = hour+'h'+min_+'m'+sec+'s' #dec deg = val[9:13] min_d = val[13:15] sec_d = val[15:] dec = deg+'d'+min_d+'m'+sec_d+'s' return ra+" "+dec def extractor(position): """ This function extracts the information from the SDSS database and returns a pandas dataframe with the query region. Please ensure that the 'position' input is formatted as '005313.81 +130955.0 extractor(str) --> pd.DataFrame """ # convert the input position argument to the format recognized by astroquery.SDSS # position=ra_dec_format(position) # query the region and get the data position = ra_dec_format(position) pos = coords.SkyCoord(position, frame='icrs') data = SDSS.query_region(pos, spectro=True) return data.to_pandas() def downloader(data): """ This function uses extracted information in order to dwonaload spectra, separating the data from th SDSS and BOSS. downloader(pd.Dataframe) --> [list(fits)] """ #create a empty list spec_list=[] # iteration over the pandas for i in range(len(data)): results = SDSS.query_specobj(plate = data['plate'][i], mjd = data['mjd'][i], fiberID = data['fiberID'][i]) # try if it can download the data (SDSS) try: spec = SDSS.get_spectra(matches=results)[0] spec_list.append(spec) # if it cant download, is because is from (BOSS) except: results.remove_column("instrument") results.add_column(name="instrument", col="eboss") # replace the instrument column spec = SDSS.get_spectra(matches=results)[0] spec_list.append(spec) return spec_list # test=downloader(result) # print(test) # define a function which grabs the object's redshift from the Ned database (better calibration)- needed for plotting in the object's rest-frame def redshift(position): # make sure to format the input position argument such that it is recognizable by astroquery.Ned # position=ra_dec_format(position) position = ra_dec_format(position) pos=coords.SkyCoord(position, frame='icrs') # create a position object ned_results=Ned.query_region(pos,equinox="J2000", radius=2*u.arcsecond) # query the database z=ned_results[0][6] # grab the redshift value from the query results return z # define a function that transforms an objects wavelength array into the object's rest-frame def redshift_correct(z, wavelengths): # takes as input the redshift and the array of wavelengths wavelengths_corrected = wavelengths/(z+1) return wavelengths_corrected # define a function that transforms the results of downloader() into an array of data which will be plotted def transform_data(spec_list, z): # takes as input a list of (I think?) fits files results and the redshift of the object # iterate over each file and grab the important data #fluxes={} # containers for each of the data arrays to be plotted ( will be lists of lists/arrays) #wavelengths={} #inverse_variances={} # <- dictionaries! 
    spec_dict = {'flux': [], 'wavelength': [], '1sigma': []}

    for spec in spec_list:
        flux_array = []
        wavelength_array = []
        sigma_array = []
        data = spec[1].data  # the data HDU of the fits file

        # store the appropriate columns in the designated containers-
        # each row of the data HDU is a single wavelength bin
        for j in range(data.shape[0]):
            flux_array.append(data[j][0])
            # the wavelengths (transformed from the log scale)
            wavelengths_uncorrected = 10**data[j][1]
            # scale the wavelengths to the rest-frame
            wavelength_array.append(redshift_correct(z, wavelengths_uncorrected))
            inverse_variance = data[j][2]  # the inverse variance of the flux
            # the one-sigma uncertainty associated with the flux value
            sigma = 1 / inverse_variance**0.5
            sigma_array.append(sigma)

        # smooth the fluxes using a boxcar kernel
        smoothed_flux = convolve(flux_array, Box1DKernel(9))

        spec_dict['flux'].append(smoothed_flux)
        spec_dict['wavelength'].append(wavelength_array)
        spec_dict['1sigma'].append(sigma_array)

    # return the dictionary with three keys (flux, wavelength and 1sigma);
    # each key holds one array per spectrum in spec_list
    return spec_dict


def plot_spec(spec_dict, radec, z):
    # takes as input the dictionary holding the data, the radec, and the redshift
    for i in range(len(spec_dict['wavelength'])):
        # extract data
        wavelength = spec_dict['wavelength'][i]
        sigma = spec_dict['1sigma'][i]
        flux = spec_dict['flux'][i]

        # instantiate a figure object
        plt.figure()
        plt.title('{}; z={}'.format(radec, z))
        plt.xlabel(r"Rest-frame Wavelength [$\AA$]")
        plt.ylabel(r"Flux [$10^{-17}$ erg$^{-1}$s$^{-1}$cm$^{-2}$$\AA^{-1}$]")
        plt.plot(wavelength, flux)  # plot the actual data

        # create upper and lower bounds on the uncertainty region
        sigma_upper = np.add(flux, sigma)
        sigma_lower = np.subtract(flux, sigma)
        plt.fill_between(wavelength, sigma_lower, sigma_upper, color='grey', alpha=0.5)
        plt.show()


# TEST
radec = '223812.39 +213203.4'
z = redshift(radec)
data = extractor(radec)
spec_list = downloader(data)
spec_data = transform_data(spec_list, z)
plot_spec(spec_data, radec, z)
34.722222
147
0.666764
[ "MIT" ]
sofiapasquini/Code-Astro-Group-23-Project
exampledoc/Extractor.py
6,875
Python
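The rest-frame correction used throughout the extractor above is one line of arithmetic: observed wavelengths are divided by (1 + z). A tiny standalone check with made-up numbers:

# Illustrative check of the redshift correction; values are invented.
import numpy as np

z = 0.5
observed = np.array([4000.0, 6000.0, 9000.0])   # Angstroms, observed frame
rest = observed / (1 + z)
print(rest)   # -> approximately [2666.67, 4000., 6000.]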