from sqlalchemy.orm import Session
import random
from . import models, schemas
def get_corpse(db: Session, corpse_name: str):
return (db.query(models.Corpse)
.filter(models.Corpse.corpse_name == corpse_name)
.first())
def create_corpse(db: Session, corpse_name: str, img: bytes):
db_corpse = models.Corpse(corpse_name=corpse_name, img=img)
db.add(db_corpse)
db.commit()
db.refresh(db_corpse)
return db_corpse
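# A corpse is complete once its stage reaches 4 (see get_num_corpses below);
# update_corpse advances the stage and re-closes the corpse so the next
# contributor can claim it.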
def update_corpse(db: Session, corpse_name: str, img: bytes):
db_corpse = get_corpse(db, corpse_name=corpse_name)
if db_corpse and db_corpse.stage < 4:
db_corpse.img = img
db_corpse.is_open = False
        db_corpse.stage = db_corpse.stage + 1  # mustn't use +=
db.commit()
return (True, db_corpse)
return (False, db_corpse)
def get_num_corpses(db: Session, complete: bool = False) -> int:
if complete:
filter_by = models.Corpse.stage == 4
else:
filter_by = models.Corpse.stage < 4
return (db.query(models.Corpse)
.filter(filter_by)
.count())
def close_corpse(db: Session, corpse_name: str):
db_corpse = get_corpse(db, corpse_name=corpse_name)
if db_corpse:
        db_corpse.is_open = False  # the field is is_open (see update_corpse); .open was a bug
db.commit()
return True
return False
def get_rand_incomplete_corpses(db: Session, n: int):
    # Query.values() was removed in SQLAlchemy 1.4; with_entities() is the
    # supported way to fetch just these two columns.
    incomplete = (db.query(models.Corpse)
                  .filter(models.Corpse.stage < 4, ~models.Corpse.is_open)
                  .with_entities(models.Corpse.corpse_name, models.Corpse.stage)
                  .all())
    n = min(n, len(incomplete))
    return random.sample(incomplete, n)
def get_rand_complete_corpse(db: Session):
    complete = (db.query(models.Corpse)
                .filter(models.Corpse.stage >= 4)
                .with_entities(models.Corpse.corpse_name)
                .all())
    if not complete:
        # avoid random.choice() raising when nothing is complete yet
        return None
    choice = random.choice(complete)
    return choice.corpse_name
def get_stage(db: Session, corpse_name: str):
db_corpse = get_corpse(db, corpse_name=corpse_name)
    return db_corpse.stage if db_corpse else -1
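# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of driving the CRUD helpers above. It assumes a
# `SessionLocal` sessionmaker in a sibling `database` module (that name is
# hypothetical), that models.Corpse has the fields used above, and that
# `stage` defaults to 0 on creation.
#
# from .database import SessionLocal  # hypothetical session factory
#
# db = SessionLocal()
# try:
#     corpse = create_corpse(db, "midnight-garden", img=b"...png bytes...")
#     ok, corpse = update_corpse(db, "midnight-garden", img=b"...next segment...")
#     print(get_stage(db, "midnight-garden"))  # 1 after one successful update
# finally:
#     db.close()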
#!python
# Author: Thomas Berg <[email protected]>
# License: BSD
import os
import shutil
# For this script to run, SCons must be installed (http://www.scons.org),
# along with Microsoft VS2008.
# Purpose of this program: Generate all files needed for a C++ build, compile
# it with scons, and demonstrate a crash in python.exe on Windows.
# For a description of the problem, see:
# http://old.nabble.com/SCons-Python-crashing-on-Windows---dereferencing-NULL-td22576049.html
#
# The crash happens more or less frequently, depending on which machine
# you run it on, so it may need to run a few times before you get a crash
# (or you may not get it at all).
#==========================================================================
# Template for the SConscript file
sconscript = """
#!python
import os
import glob
tools = ['mslink', 'msvc', 'mslib', 'msvs', 'install']
# We use VS2008 (9.0), but other versions might work too
env = Environment(tools = tools, MSVC_VERSION = '9.0')
# Enable tempfile munging for CXXCOM:
env['CXXCOM'] = '${TEMPFILE("$CXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CXXFLAGS $CCFLAGS $_CCCOMCOM")}'
# Lower the threshold for when the tempfile munging is triggered:
env['MAXLINELENGTH'] = 5
# Some random C++ compiler flags
env.AppendUnique(CCFLAGS = ['/EHsc'])
env.AppendUnique(CCFLAGS = ['/Zc:forScope'])
env.AppendUnique(CCFLAGS = ['/GR'])
env.AppendUnique(CCFLAGS = ['/GF'])
env.AppendUnique(CCFLAGS = ['/FC'])
env.AppendUnique(CCFLAGS = ['/fp:precise'])
env.AppendUnique(CCFLAGS = ['/Oxityb2'])
env.AppendUnique(CCFLAGS = ['/MD'])
env.AppendUnique(LINKFLAGS = ['/SUBSYSTEM:CONSOLE'])
no_programs = %(no_programs)s
programs = ['program_%%s_' %% i for i in range(0, no_programs)]
abspath = env.Dir('.').srcnode().abspath
for p in programs:
src = glob.glob(os.path.join(abspath, '%%s*.cpp' %% p))
name = p[:-1]
program = env.Program(name, src)
env.Default(program)
"""
#==========================================================================
def main():
no_programs = 1000
files_per_program = 15
    jobs = 24  # Note from author: I have 6 cores with hyperthreading (12 virtual cores); -j24 triggered the crash.
programs = ['program_%s_' % i for i in range(0, no_programs)]
runs = 10
for i in range(0, runs):
# Clear generated stuff from previous runs
if os.path.exists('sconstestcase'):
shutil.rmtree('sconstestcase')
os.makedirs('sconstestcase')
# Generate a SConstruct file
with open('sconstestcase/SConstruct', 'w') as f:
f.write(sconscript % dict(no_programs = no_programs))
# Generate a bunch of files to be grouped into programs
for program in programs:
# A few numbered files specific for each program
for i in range(0, files_per_program):
with open('sconstestcase/%s%s.cpp' % (program, i), 'w') as f:
# Include some C++ headers to slow down the compilation a bit
f.write('#include <iostream>\n')
f.write('#include <string>\n')
f.write('#include <vector>\n')
f.write('#include <algorithm>\n')
f.write('int test%s() { int a = 0; a += %s; return a;}\n' % (i, i))
# Create the main file for the program
with open('sconstestcase/%smain.cpp' % program, 'w') as f:
for i in range(0, files_per_program):
                f.write('int test%s();\n' % i)
f.write('int main(int argc, char* argv[]){\n int sum = 0;')
for i in range(0, files_per_program):
f.write(' sum += test%s();\n' % i)
f.write(' return sum;\n}\n')
os.system('cd sconstestcase && scons.bat -j%s' % jobs)
if __name__ == '__main__':
main()
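# --- How to run (hedged note, not part of the original script) ---
# From a VS2008 command prompt with SCons on PATH, invoking this file with
# `python <this script>` should generate ./sconstestcase and run
# `scons.bat -j24` ten times, per the `runs` and `jobs` settings above.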
# -*- coding: utf-8 -*-
import time
def generate_sdp(self, ip, audio_port, rtp_profiles, session_description=" "):
sdp = ""
#Protocol Version ("v=") https://tools.ietf.org/html/rfc4566#section-5.1 (always 0 for us)
sdp += "v=0\r\n"
#Origin ("o=") https://tools.ietf.org/html/rfc4566#section-5.2
username = "-"
sess_id = int(time.time())
sess_version = 0
nettype = "IN"
addrtype = "IP4"
sdp += "o=" + username + " " + str(sess_id) + " " + str(sess_version) + " " + nettype + " " + addrtype + " " + ip + "\r\n"
#Session Name ("s=") https://tools.ietf.org/html/rfc4566#section-5.3
sdp += "s=" + session_description + "\r\n"
#Connection Information ("c=") https://tools.ietf.org/html/rfc4566#section-5.7
sdp += "c=" + nettype + " " + addrtype + " " + ip + "\r\n"
#Timing ("t=") https://tools.ietf.org/html/rfc4566#section-5.9
sdp += "t=0 0\r\n"
#Media Descriptions ("m=") https://tools.ietf.org/html/rfc4566#section-5.14
sdp += "m=audio " + str(audio_port) + " RTP/AVP"
for rtp_profile in rtp_profiles:
sdp += " " + str(rtp_profile)
sdp += "\r\n"
sdp += "a=sendrecv\r\n"
    return sdp
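# --- Example output (illustrative, not from the original source) ---
# Since `self` is unused in the body, the method can be exercised standalone;
# the IP, port, and RTP payload types below are made-up sample values
# (0 = PCMU, 8 = PCMA in the static RTP/AVP profile).
#
# sdp = generate_sdp(None, "192.0.2.10", 49170, [0, 8], "demo")
# print(sdp)
# Expected shape of the result (sess_id varies with time.time(), and every
# line is CRLF-terminated):
#   v=0
#   o=- 1600000000 0 IN IP4 192.0.2.10
#   s=demo
#   c=IN IP4 192.0.2.10
#   t=0 0
#   m=audio 49170 RTP/AVP 0 8
#   a=sendrecv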
class CuboidParams():
def __init__(self):
        # TODO: collect all the relevant parameters in this single place!
        # PARAMS FOR THE APPROACH PRIMITIVE
self.approach_grasp_xy = [0.1, 0.1, 0.09, 0.036195386201143265]
self.approach_grasp_h = [0.1, 0.0, 0.0, 0.0]
self.approach_duration = [500, 1000, 1500, 2000]
self.approach_clip_pos = [0.03,0.03,0.03,0.015]
# PARAMS FOR THE RESET PRIMITIVE
self.reset_grasp_xy = [0.1,0.2]
self.reset_grasp_h = [0.0,0.05]
self.reset_duration = [200,400]
self.reset_clip_pos = [0.03,0.015]
# PARAMS FOR THE GRASP FLOOR PRIMITIVE:
self.grasp_xy = -0.002748661208897829
self.grasp_h = -0.004752709995955229
self.gain_xy_pre = 431.3075256347656
self.gain_z_pre = 350.3428955078125
self.init_dur = 100
self.gain_xy_final = 952.9378662109375
self.gain_z_final = 1364.1202392578125
self.pos_gain_impedance = 0.06652811169624329
self.force_factor = 0.613079309463501
class CubeLvl1Params():
def __init__(self,env):
        # TODO: collect all the relevant parameters in this single place!
        # PARAMS FOR THE APPROACH PRIMITIVE
self.approach_grasp_xy = [0.1, 0.1, 0.09, 0.036195386201143265]
self.approach_grasp_h = [0.1, 0.0, 0.0, 0.0]
self.approach_duration = [250, 500, 750, 1000]
self.approach_clip_pos = [0.03,0.03,0.03,0.015]
# PARAMS FOR THE RESET PRIMITIVE
self.reset_grasp_xy = [0.1,0.2]
self.reset_grasp_h = [0.0,0.05]
self.reset_duration = [200,400]
self.reset_clip_pos = [0.03,0.015]
# PARAMS FOR THE GRASP FLOOR PRIMITIVE:
self.grasp_xy_floor = -0.002748661208897829-0.04
self.grasp_h_floor = 0.0
self.gain_xy_pre_floor = 294.8115234375
self.gain_z_pre_floor = 249.4430389404297
self.init_dur_floor = 100
if (env.simulation):
self.gain_xy_final_floor = 693.0245697498322
self.gain_z_final_floor = 744.9281394481659
self.pos_gain_impedance_floor = 0.015
else:
self.gain_xy_final_floor = 294.8115234375
self.gain_z_final_floor = 249.4430389404297
self.pos_gain_impedance_floor = 0.032040052115917206
self.force_factor_floor = 0.613079309463501
class CubeLvl2Params():
def __init__(self, env):
        # TODO: collect all the relevant parameters in this single place!
        # PARAMS FOR THE APPROACH PRIMITIVE
self.approach_grasp_xy = [0.15, 0.15, 0.15, 0.09, 0.05, 0.0] #0.036195386201143265
self.approach_grasp_h = [0.15, 0.1 , 0.0, 0.0, 0.0, 0.0]
self.approach_duration = [250, 500, 750, 850, 1100, 1250]#[500, 1000, 1500, 2000]
self.approach_clip_pos = [0.03,0.03,0.03,0.03,0.015,0.015]
# PARAMS FOR THE RESET PRIMITIVE
self.reset_grasp_xy = [0.1,0.2]
self.reset_grasp_h = [0.0,0.05]
self.reset_duration = [200,400]
self.reset_clip_pos = [0.03,0.015]
# PARAMS FOR THE LIFT PRIMITIVE:
if (env.simulation):
self.grasp_normal_int_gain = 0.007945765256881714
self.grasp_xy_lift = -0.002748661208897829-0.04+0.0034238076210021985 + 0.019999999999999997#-0.02229524075984955
self.grasp_h_lift = 0.0+0.02 -0.02838870149105787#-0.012001985907554625
else:
self.grasp_normal_int_gain = 0.007708187699317932
self.grasp_xy_lift = -0.002748661208897829-0.04 + 0.019999999999999997
self.grasp_h_lift = 0.0 -0.017401267290115353
self.gain_xy_pre_lift = 294.8115234375
self.gain_z_pre_lift = 249.4430389404297
self.init_dur_lift = 100
self.gain_xy_ground_lift = 294.8115234375
self.gain_z_ground_lift = 249.4430389404297
self.pos_gain_impedance_ground_lift = 0.032040052115917206
        # TODO: might want to consider changing this on the real platform, too.
self.switch_mode_lift = 0.02
self.clip_height_lift = 0.0881337970495224
if (env.simulation):
self.switch_mode_lift = 0.035
self.gain_xy_ground_lift = 693.0245697498322
self.gain_z_ground_lift = 744.9281394481659
self.pos_gain_impedance_ground_lift = 0.032040052115917206
self.gain_xy_lift_lift = 693.0245697498322#906.2994122505188 #400.0
self.gain_z_lift_lift = 744.9281394481659#762.2084140777588 #722.6458370685577
self.pos_gain_impedance_lift_lift = 0.01481801524758339#0.015682869404554364 #0.06
self.force_factor_lift = 0.32288771867752075#0.7260927557945251 #0.7495793342590333
else:
self.gain_xy_lift_lift = 752.3177862167358
self.gain_z_lift_lift = 1158.7008893489838
self.pos_gain_impedance_lift_lift = 0.02216305151581764
self.force_factor_lift = 0.6339841365814209
class CubeLvl4Params():
def __init__(self, env):
        # TODO: collect all the relevant parameters in this single place!
        # PARAMS FOR THE APPROACH PRIMITIVE
self.approach_grasp_xy = [0.15, 0.15, 0.15, 0.09, 0.05, 0.0] #0.036195386201143265
self.approach_grasp_h = [0.15, 0.1 , 0.0, 0.0, 0.0, 0.0]
self.approach_duration = [150, 300, 450, 600, 750, 900]#[250, 500, 750, 850, 1100, 1250]#[500, 1000, 1500, 2000]
self.approach_clip_pos = [0.03,0.03,0.03,0.03,0.015,0.015]
# PARAMS FOR THE RESET PRIMITIVE
self.reset_grasp_xy = [0.05,0.1, 0.1, 0.1, 0.1, 0.1, 0.15]
self.reset_grasp_h = [0.0, 0.0, 0.05,0.1, 0.15,0.2, 0.15]
self.reset_duration = [50, 100, 150, 200, 250, 300, 350]#[200,400,600]
self.reset_clip_pos = [0.015,0.015,0.015, 0.015,0.015, 0.03, 0.03]
# # PARAMS FOR THE RESET PRIMITIVE
# self.reset_grasp_xy = [0.1,0.2,0.2]
# self.reset_grasp_h = [0.0,0.05,0.2]
# self.reset_duration = [100,200,350]#[200,400,600]
# self.reset_clip_pos = [0.03,0.015,0.015]
# PARAMS FOR THE GRASP FLOOR PRIMITIVE:
self.grasp_xy_floor = -0.002748661208897829-0.04
self.grasp_h_floor = 0.0
self.gain_xy_pre_floor = 294.8115234375
self.gain_z_pre_floor = 249.4430389404297
self.init_dur_floor = 100
self.gain_xy_final_floor = 294.8115234375
self.gain_z_final_floor = 249.4430389404297
self.pos_gain_impedance_floor = 0.032040052115917206
self.force_factor_floor = 0.613079309463501
if (env.simulation):
self.grasp_xy_floor = -0.002748661208897829-0.04+0.0034238076210021985 + 0.019999999999999997
self.grasp_h_floor = 0.0+0.02 -0.02838870149105787
self.gain_xy_pre_floor = 294.8115234375
self.gain_z_pre_floor = 249.4430389404297
self.init_dur_floor = 100
self.gain_xy_final_floor = 693.0245697498322
self.gain_z_final_floor = 744.9281394481659
self.pos_gain_impedance_floor = 0.015#0.032040052115917206
self.force_factor_floor = 0.613079309463501
# PARAMS FOR THE LIFT PRIMITIVE:
self.grasp_xy_lift = -0.002748661208897829-0.04
self.grasp_h_lift = 0.0
self.gain_xy_pre_lift = 294.8115234375
self.gain_z_pre_lift = 249.4430389404297
self.init_dur_lift = 100
self.gain_xy_ground_lift = 294.8115234375
self.gain_z_ground_lift = 249.4430389404297
self.pos_gain_impedance_ground_lift = 0.032040052115917206
self.switch_mode_lift = 0.02
self.gain_xy_lift_lift = 725.27587890625
self.gain_z_lift_lift = 915.2654418945312
self.pos_gain_impedance_lift_lift = 0.024435650557279587
self.clip_height_lift = 0.0881337970495224
self.force_factor_lift = 0.613079309463501
#PARAMS ROTATE GROUND
        # NOTE: THE REAL-PLATFORM PARAMS ARE NOT FULLY TUNED YET
if (env.simulation):
self.grasp_xy_rotate_ground = 0.0022661592811346054 - 0.04 + 0.0012392520904541023#-0.007273907661437987#+ 0.0012835693359374983#+ 0.00017907857894897475
self.grasp_h_rotate_ground = -0.0036394810304045677 + 0.006026389598846434 #0.009729766845703126#0.0013717865943908697#+ 0.006139404773712158
else:
self.grasp_xy_rotate_ground = 0.0022661592811346054 - 0.02 -0.004453001022338867#3+ 0.008769088983535766#+ 0.015980666875839232
self.grasp_h_rotate_ground = 0.0#-0.0036394810304045677 + 0.011232354640960694#-0.00455500602722168
self.gain_xy_pre_rotate_ground = 294.8115234375
self.gain_z_pre_rotate_ground = 249.4430389404297
self.init_dur_rotate_ground = 100
if (env.simulation):
self.gain_xy_final_rotate_ground = 556.7359089851379#803.606367111206#400.0#562.4382019042969
self.gain_z_final_rotate_ground = 1211.8926048278809#773.1327414512634#1124.0761876106262#1127.987027168274
self.pos_gain_impedance_rotate_ground = 0.0326704466342926#0.03204612851142883#0.03113249659538269#0.03760003924369812
self.force_factor_rotate_ground = 0.9432543694972992#1.11380175948143#0.3808450520038605#0.21552527211606504 # perhaps use 0.6 instead of 1.11
self.force_factor_ground_rot_rotate_ground = 0.2#0.14088733196258546#0.2#0.12187141180038452
self.gain_scheduling = 20.0
else:
self.gain_xy_final_rotate_ground = 736.6782665252686#499.13966059684753#605.1593139767647#704.6562671661377#611.3788452148438
self.gain_z_final_rotate_ground = 1000.9935677051544#1197.0452547073364#1243.352460861206#988.0030393600464#600.0
self.pos_gain_impedance_rotate_ground = 0.026462331414222717#0.013593301996588706#0.02067116618156433#0.011825856864452361#0.0#0.024435650557279587
self.force_factor_rotate_ground = 0.6#0.42808679342269895#0.38862146139144893#0.3449445694684983#0.61
self.force_factor_ground_rot_rotate_ground = 0.16#0.13#0.2#0.2#0.163668292760849#0.15#0.07469592690467834#0.05#0.02#0.075
self.gain_scheduling = 20.0
# PARAMS ROTATE LIFT
if (env.simulation):
self.grasp_xy_rotate_lift = 0.0022661592811346054 - 0.04 + 0.00303934335708618#-0.02
self.grasp_h_rotate_lift = 0.0 -0.017098468542099 #-0.00025067448616027804
else:
self.grasp_xy_rotate_lift = 0.0022661592811346054 - 0.04 -0.00622488021850586#+ 0.009622406959533692
self.grasp_h_rotate_lift = 0.0 + 0.014247066974639896#+ 0.005761625766754149
self.gain_xy_pre_rotate_lift = 294.8115234375
self.gain_z_pre_rotate_lift = 249.4430389404297
self.init_dur_rotate_lift = 100
if (env.simulation):
self.gain_xy_final_rotate_lift = 1007.8075528144836#1294.4734156131744
self.gain_z_final_rotate_lift = 1220.6658482551575#1028.2374143600464
self.pos_gain_impedance_rotate_lift = 0.02185524344444275#0.01539862297475338
self.force_factor_rotate_lift = 0.6349194526672364#1.0
self.force_factor_center_rotate_lift = 0.6068137288093567
self.force_factor_rot_rotate_lift = 0.02#0.34193327307701116
self.target_height_rot_lift_rotate_lift = 0.09
else:
self.gain_xy_final_rotate_lift = 940.2498722076416#820.5114454030991
self.gain_z_final_rotate_lift = 1295.9117591381073#1133.7310016155243
self.pos_gain_impedance_rotate_lift = 0.026721017956733706#0.02095555767416954
self.force_factor_rotate_lift = 0.886374345421791#1.0
self.force_factor_center_rotate_lift = 0.6068137288093567
self.force_factor_rot_rotate_lift = 0.01#0.02
self.target_height_rot_lift_rotate_lift = 0.09
self.clip_height_rotate_lift = 0.0881337970495224
# PARAMS FOR THE LIFT PRIMITIVE UNDER KEEPING ORIENTATION
self.orient_int_orient_gain = 0.019696855545043947#0.0
self.orient_int_pos_gain = 0.009067282676696778#0.0
if (env.simulation):
self.orient_int_orient_gain = 0.0005 #0.00127358540892601#
self.orient_int_pos_gain = 0.0051201748847961425#0.005
self.orient_grasp_xy_lift = -0.002748661208897829-0.04+0.0034238076210021985 + 0.019999999999999997#-0.02229524075984955
self.orient_grasp_h_lift = 0.0+0.02 -0.02838870149105787#-0.012001985907554625
else:
self.orient_grasp_xy_lift = -0.002748661208897829-0.04 + 0.012545742988586423
self.orient_grasp_h_lift = 0.0 + 0.002259004116058349
self.orient_gain_xy_pre_lift = 294.8115234375
self.orient_gain_z_pre_lift = 249.4430389404297
self.orient_init_dur_lift = 100
self.orient_gain_xy_ground_lift = 294.8115234375
self.orient_gain_z_ground_lift = 249.4430389404297
self.orient_pos_gain_impedance_ground_lift = 0.032040052115917206
self.orient_switch_mode_lift = 0.035
if (env.simulation):
self.orient_pos_gain_impedance_ground_lift = 0.01#0.0125
self.orient_gain_xy_ground_lift = 693.0245697498322
self.orient_gain_z_ground_lift = 744.9281394481659
self.orient_switch_mode_lift = 0.05
self.orient_gain_xy_lift_lift = 700#842.1571612358093#672.3513275384903#829.543000459671
self.orient_gain_z_lift_lift = 750#1378.0078768730164#1425.8808374404907#1101.177817583084
self.orient_pos_gain_impedance_lift_lift = 0.0125#0.01641496576368809#0.015692157968878746#0.0197468575835228#0.015233442336320877
self.orient_clip_height_lift = 0.0881337970495224
self.orient_force_factor_lift = 0.59#0.35#0.5970059156417846#0.4772495344281197#0.5929181218147278
self.orient_force_factor_rot_lift = 0.004292513728141785#0.0001#0.0014684607088565826#0.0005483480170369149#0.004665868282318115
else:
self.orient_gain_xy_lift_lift = 600.0
self.orient_gain_z_lift_lift = 900.0
self.orient_pos_gain_impedance_lift_lift = 0.020851443633437158
self.orient_clip_height_lift = 0.0881337970495224
self.orient_force_factor_lift = 0.35
self.orient_force_factor_rot_lift = 0.0017775391042232514#0.0011627759784460067
        self.orient_force_factor_rot_ground = 0.0
from collections import defaultdict
from typing import Dict
def profile(count_matrix: Dict) -> Dict:
"""Generate the profile matrix of the given count matrix
Arguments:
count_matrix {Dict} -- motif count matrix
Returns:
Dict -- profile matrix of the counts matrix
Example:
>>> count_matrix = {'A': [2, 2, 0, 0, 0, 0, 9, 1, 1, 1, 3, 0],
... 'C': [1, 6, 0, 0, 0, 0, 0, 4, 1, 2, 4, 6],
... 'G': [0, 0, 10, 10, 9, 9, 1, 0, 0, 0, 0, 0],
... 'T': [7, 2, 0, 0, 1, 1, 0, 5, 8, 7, 3, 4]}
>>> profile_mat = profile(count_matrix)
>>> for k, v in profile_mat.items():
... print(k, v)
A [0.2, 0.2, 0.0, 0.0, 0.0, 0.0, 0.9, 0.1, 0.1, 0.1, 0.3, 0.0]
C [0.1, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4, 0.1, 0.2, 0.4, 0.6]
G [0.0, 0.0, 1.0, 1.0, 0.9, 0.9, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0]
T [0.7, 0.2, 0.0, 0.0, 0.1, 0.1, 0.0, 0.5, 0.8, 0.7, 0.3, 0.4]
"""
# the number of motifs is the sum of all counts in any column - since all columns should be derived
# from the same number of motifs, simply sum the first column to get the number of motifs
n_motifs = sum([count_matrix['A'][0], count_matrix['C'][0], count_matrix['G'][0], count_matrix['T'][0]])
profile = defaultdict(list)
for k, v in count_matrix.items():
profile[k] = list(map(lambda x: x / n_motifs, v))
    return dict(profile)
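# The docstring above is doctest-formatted; a minimal way to verify the
# example is to run this module directly:
if __name__ == "__main__":
    import doctest
    doctest.testmod()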
"""Models for ripper app."""
import urllib.parse
import urllib.request
from bs4 import BeautifulSoup
from bs4.dammit import EncodingDetector
import requests
import re
from dateutil.parser import parse
from dateutil.tz import gettz
from django.db import models
from django.urls import reverse as reverse_url
from django.core.validators import URLValidator
from django.utils import timezone
from .rip_tools import adjust_anchors
from .rip_tools import remove_tags
from .rip_tools import TargetTag
from .rip_tools import get_url_domain
from .rip_tools import url_is_valid
from .exceptions import URLDoesNotExist
TZINFOS = {
"EST": gettz("US/Eastern"),
"est": gettz("US/Eastern"),
"ET": gettz("US/Eastern"),
"et": gettz("US/Eastern"),
}
PARAGRAPHS_AND_HEADERS = ["p", "h1", "h2", "h3", "h4", "h5", "h6"]
def get_model_for_url(url):
DOMAIN_TO_CLASS = {
"cnn.com": CNNArticle,
"nytimes.com": NYTArticle,
"washingtonpost.com": WAPOArticle,
"politico.com": PoliticoArticle,
"thehill.com": HillArticle,
}
domain = get_url_domain(url)
return DOMAIN_TO_CLASS.get(domain, Article)
class ArticleQuerySet(models.QuerySet):
"""Custom QuerySet for Article model."""
def get_by_url(self, url):
"""Gets an Article instance using the passed URL."""
try:
# Attempt to find the URL with no schema first, which should be more common
article = self.get(url_no_schema=url)
except self.model.DoesNotExist:
# If that doesn't work, try .get with raw URL
# If exception raises from here, let the calling method handle it.
article = self.get(url=url)
return article
class ArticleManager(models.Manager):
"""Custom Manager for Article model."""
def get_queryset(self):
return ArticleQuerySet(self.model, using=self._db)
def get_by_url(self, url):
return self.get_queryset().get_by_url(url)
class Article(models.Model):
"""Content for an Article, including logic to grab its contents via URL
and produce output to be displayed within the site.
"""
objects = ArticleManager()
_REMOVE_TAGS_BASE = ["figure", "img", "aside", "script"]
REMOVE_TAGS = []
# Article contents
title = models.CharField(max_length=255, null=True, blank=True)
summary = models.TextField(null=True, blank=True)
byline = models.TextField(null=True, blank=True)
time_str = models.CharField(max_length=255, null=True, blank=True)
body_content = models.TextField(null=True, blank=True)
article_time = models.DateTimeField(null=True, blank=True)
manual = models.BooleanField(default=False)
# URL and Domain details
url = models.TextField(null=True, blank=True, validators=[URLValidator()])
url_no_schema = models.TextField(null=True, blank=True)
RIP_TYPE_UNKNOWN = "UNKNOWN"
RIP_TYPE_CNN = "CNN"
RIP_TYPE_NYT = "NYT"
RIP_TYPE_WAPO = "WAPO"
RIP_TYPE_POLITICO = "POLITICO"
RIP_TYPE_HILL = "HILL"
RIP_TYPE_CHOICES = (
(RIP_TYPE_UNKNOWN, "<unknown>"),
(RIP_TYPE_CNN, "CNN"),
(RIP_TYPE_NYT, "New York Times"),
(RIP_TYPE_WAPO, "Washington Post"),
(RIP_TYPE_POLITICO, "Politico"),
(RIP_TYPE_HILL, "The Hill"),
)
rip_type = models.CharField(
max_length=9, choices=RIP_TYPE_CHOICES, default=RIP_TYPE_UNKNOWN
)
# Timing
rip_time = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"{self.rip_type} - {self.title}"
@property
def time_str_for_template(self):
return f'"{self.time_str}"'
def get_absolute_url(self):
return reverse_url("ripper:article_detail", args=(self.url_no_schema,))
def parse_url(self):
self.url_no_schema = re.sub(r"^https?://", "", self.url)
def process(self):
"""Defines all steps to fully process this article,
from the URL to final content.
"""
self.parse_url()
soup = self.get_the_soup()
# Remove the base tags all articles should not have...
remove_tags(soup, *self._REMOVE_TAGS_BASE)
# ...then specific tags defined for an article subclass
remove_tags(soup, *self.REMOVE_TAGS)
self.extract_content(soup)
self.parse_time_str()
def parse_time_str(self):
"""Uses dateutil to attempt to parse the time_str into a datetime object."""
if not self.time_str:
# Short circuit
return
test_str = self.time_str.lower()
if "update" in test_str:
test_str = re.search(r"updated?(.*)$", test_str).group(1)
test_str = test_str.replace(" et", " est").replace(" est", " EST")
try:
parsed = parse(test_str, tzinfos=TZINFOS)
except ValueError as exc:
print(exc)
return
self.article_time = parsed
def get_the_soup(self):
if self.url is None:
# Early exit
return
resp = requests.get(self.url, proxies=urllib.request.getproxies())
http_encoding = (
resp.encoding
if "charset" in resp.headers.get("content-type", "").lower()
else None
)
html_encoding = EncodingDetector.find_declared_encoding(
resp.content, is_html=True
)
encoding = html_encoding or http_encoding
soup = BeautifulSoup(resp.content, "lxml", from_encoding=encoding)
adjust_anchors(soup)
return soup
def extract_content(self, soup):
"""Extracts article contents.
Method should be overwritten by subclassed proxy models,
with a `super()` call to get shared elements.
"""
self.title = soup.title.text
@property
def summary_part(self):
if self.summary:
return self.summary
soup = BeautifulSoup(self.body_content, "lxml")
return soup.find_all("p")[0].text
def copy_article(self, other_article):
"""Given another Article instance, copy relevant article details
to this instance.
Useful when re-processing an article: a temporary one is generated, but not
saved in the database.
"""
# List of fields that should be copied from `instance`.
COPYABLE_FIELDS = [
"title",
"summary",
"byline",
"time_str",
"body_content",
"article_time",
"rip_type",
]
for field in COPYABLE_FIELDS:
setattr(self, field, getattr(other_article, field))
self.rip_time = timezone.now()
class NYTArticle(Article):
"""Article from New York Times."""
class Meta:
proxy = True
def extract_content(self, soup):
"""Extraction logic for New York Times articles."""
super().extract_content(soup)
self.rip_type = self.RIP_TYPE_NYT
self.title = soup.find("h1", {"itemprop": "headline"}).text
summary_tag = soup.find("p", {"id": "article-summary"})
if summary_tag:
self.summary = summary_tag.text
# Pick out sections in the body that we want
body_section = soup.find("section", {"name": "articleBody"})
body_content = []
for div in body_section.find_all("div", {"class": "StoryBodyCompanionColumn"}):
body_content.extend([x for x in div.find_all(PARAGRAPHS_AND_HEADERS)])
self.body_content = "\n".join([str(x) for x in body_content])
# Byline. Reserve the tag in order to find time data relative to it.
byline_tag = soup.find("p", {"itemprop": "author"})
self.byline = byline_tag.text
# Find the time portions relative to the byline.
time_tags = None
for parent in byline_tag.parents:
time_tags = parent.find_all("time")
if time_tags:
break
if time_tags is not None:
self.time_str = " ".join([x.text for x in time_tags])
class WAPOArticle(Article):
"""Article from the Washington Post."""
class Meta:
proxy = True
REMOVE_TAGS = [
# These appear to all be wrappers for advertisements, and occasionally for figures.
TargetTag("div", {"class": "cb"}),
# Interstitials are used for links to other articles, breaking the article flow.
TargetTag("p", {"class": "interstitial"}),
]
def extract_content(self, soup):
"""Extraction logic for Washington Post articles."""
super().extract_content(soup)
self.rip_type = self.RIP_TYPE_WAPO
# Main parts
self.title = soup.find("h1", {"class": "font--headline"}).text
self.body_content = soup.find("div", {"class": "article-body"}).prettify()
# There's a lot of extra text inside the author-names section, mainly tooltip info.
# To get the simpler name output, we have to build it manually.
authors = soup.find("div", {"class": "author-names"})
byline = "By "
author_names = []
for author in authors.find_all("span", {"class": "author-name"}):
author_names.append(author.text)
self.byline = byline + (
", ".join(author_names) if author_names else "<Unknown>"
)
self.time_str = soup.find("div", {"class": "display-date"}).text
class CNNArticle(Article):
"""Article from CNN."""
class Meta:
proxy = True
REMOVE_TAGS = [
# Shit-ton of ad embeds.
TargetTag("div", {"class": "ad"}),
TargetTag("div", {"class": "el__embedded"}),
# Instagram shit
TargetTag("div", {"class": "el__leafmedia--instagram-aside"}),
# "Read more" sections at the end
TargetTag("div", {"class": "zn-body__read-more-outbrain"}),
TargetTag("div", {"class": "zn-body__read-more"}),
# More ad bullshit
TargetTag("ul", {"class": "cn-zoneAdContainer"}),
]
def extract_content(self, soup):
"""Extraction logic for CNN articles."""
super().extract_content(soup)
self.rip_type = self.RIP_TYPE_CNN
# CNN puts all their paragraphs in `div.zn-body__paragraph` tags, which is dumb
# Replace each one with a simpler `p` tag, by changing that tag's name
# (the class is irrelevant, as we don't make changes based on it)
for graf in soup.find_all("div", {"class": "zn-body__paragraph"}):
graf.name = "p"
# Main parts
self.title = soup.find("h1", {"class": "pg-headline"}).text
self.summary = ""
self.body_content = soup.find("section", {"id": "body-text"}).prettify()
# Byline. Reserve the tag in order to find time data relative to it.
self.byline = soup.find("span", {"class": "metadata__byline__author"}).text
self.time_str = soup.find("p", {"class": "update-time"}).text
class PoliticoArticle(Article):
"""Article from Politico."""
class Meta:
proxy = True
REMOVE_TAGS = [
# The topbar sits in the middle of stuff for some reason.
# Probably not going to end up grabbing it, but just in case.
TargetTag("div", {"class": "pop-up-bar"}),
# Some sections that look like story content are "below" and "comments" sections.
TargetTag("section", {"class": "below-article-section"}),
TargetTag("section", {"class": "comments-section"}),
# Some "section"s are used for ads.
"section[data-ad-section]",
# Literally an "ad" div. Good job.
TargetTag("div", {"class": "ad"}),
]
def extract_content(self, soup):
"""Extraction logic for Politico articles."""
super().extract_content(soup)
self.rip_type = self.RIP_TYPE_POLITICO
# Title
self.title = (
soup.find("h2", {"class": "headline"})
or soup.find("span", {"itemprop": "headline"})
).text
# Summary
summary = soup.find("p", {"class": "dek"})
if summary is not None:
self.summary = summary.text
# Body
body_content = soup.select("p.story-text__paragraph") or soup.select(
"p:not(.byline):not(.timestamp)"
)
self.body_content = "\n".join([str(x) for x in body_content])
# Byline
self.byline = (
soup.find("p", {"class": "story-meta__authors"})
or soup.find("p", {"class": "byline"})
).text
# Time string
self.time_str = (
soup.find("p", {"class": "story-meta__timestamp"})
or soup.find("time", {"itemprop": "datePublished"})
).text
class HillArticle(Article):
"""Article from The Hill."""
REMOVE_TAGS = [TargetTag("span", {"class": "rollover-people-block"})]
class Meta:
proxy = True
def extract_content(self, soup):
"""Extraction logic for The Hill articles."""
super().extract_content(soup)
self.rip_type = self.RIP_TYPE_HILL
self.title = soup.find("h1", {"id": "page-title"}).text
# Byline and timestr are joined in the same element.
# Split out the time_str, then use that to strip it from the byline text
byline = soup.find("span", {"class": "submitted-by"})
self.time_str = byline.find("span", {"class": "submitted-date"}).text
self.byline = byline.text.replace(self.time_str, "").strip("- ")
# Most of the body content can be found in `p` tags,
# but a summary may be available in a preceding `div`,
# which can mess things up.
# Fortunately, it appears to all be wrapped up in some deep-nested
# `div.field-item` tag beneath `div.field-name-body`. So we'll look there.
body = soup.select("article.node-article div.field-name-body div.field-item")[0]
self.summary = body.select("div:first-child")[0].text
# body = soup.find("article", {"class": "node-article"})
self.body_content = "\n".join(
[str(x) for x in body.find_all(PARAGRAPHS_AND_HEADERS)]
)
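# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of ripping an article with the models above,
# assuming a configured Django project; the URL is a placeholder.
#
# url = "https://www.nytimes.com/some/article.html"  # hypothetical
# model_cls = get_model_for_url(url)   # -> NYTArticle for nytimes.com
# article = model_cls(url=url)
# article.process()                    # fetch, clean, and extract content
# article.save()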
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test PauliSumOp """
import unittest
from itertools import product
from test.python.opflow import QiskitOpflowTestCase
import numpy as np
from scipy.sparse import csr_matrix
from qiskit import QuantumCircuit, transpile
from qiskit.circuit import Parameter, ParameterVector
from qiskit.opflow import (
CX,
CircuitStateFn,
DictStateFn,
H,
I,
One,
OperatorStateFn,
PauliSumOp,
SummedOp,
X,
Y,
Z,
Zero,
)
from qiskit.quantum_info import Pauli, PauliTable, SparsePauliOp
class TestPauliSumOp(QiskitOpflowTestCase):
"""PauliSumOp tests."""
def test_construct(self):
"""constructor test"""
sparse_pauli = SparsePauliOp(Pauli("XYZX"), coeffs=[2.0])
coeff = 3.0
pauli_sum = PauliSumOp(sparse_pauli, coeff=coeff)
self.assertIsInstance(pauli_sum, PauliSumOp)
self.assertEqual(pauli_sum.primitive, sparse_pauli)
self.assertEqual(pauli_sum.coeff, coeff)
self.assertEqual(pauli_sum.num_qubits, 4)
def test_add(self):
"""add test"""
pauli_sum = 3 * X + Y
self.assertIsInstance(pauli_sum, PauliSumOp)
expected = PauliSumOp(3.0 * SparsePauliOp(Pauli("X")) + SparsePauliOp(Pauli("Y")))
self.assertEqual(pauli_sum, expected)
pauli_sum = X + Y
summed_op = SummedOp([X, Y])
self.assertEqual(pauli_sum, summed_op)
a = Parameter("a")
b = Parameter("b")
actual = a * PauliSumOp.from_list([("X", 2)]) + b * PauliSumOp.from_list([("Y", 1)])
expected = SummedOp(
[PauliSumOp.from_list([("X", 2)], a), PauliSumOp.from_list([("Y", 1)], b)]
)
self.assertEqual(actual, expected)
def test_mul(self):
"""multiplication test"""
target = 2 * (X + Z)
self.assertEqual(target.coeff, 1)
self.assertListEqual(target.primitive.to_list(), [("X", (2 + 0j)), ("Z", (2 + 0j))])
target = 0 * (X + Z)
self.assertEqual(target.coeff, 0)
self.assertListEqual(target.primitive.to_list(), [("X", (1 + 0j)), ("Z", (1 + 0j))])
beta = Parameter("β")
target = beta * (X + Z)
self.assertEqual(target.coeff, 1.0 * beta)
self.assertListEqual(target.primitive.to_list(), [("X", (1 + 0j)), ("Z", (1 + 0j))])
def test_adjoint(self):
"""adjoint test"""
pauli_sum = PauliSumOp(SparsePauliOp(Pauli("XYZX"), coeffs=[2]), coeff=3)
expected = PauliSumOp(SparsePauliOp(Pauli("XYZX")), coeff=6)
self.assertEqual(pauli_sum.adjoint(), expected)
pauli_sum = PauliSumOp(SparsePauliOp(Pauli("XYZY"), coeffs=[2]), coeff=3j)
expected = PauliSumOp(SparsePauliOp(Pauli("XYZY")), coeff=-6j)
self.assertEqual(pauli_sum.adjoint(), expected)
pauli_sum = PauliSumOp(SparsePauliOp(Pauli("X"), coeffs=[1]))
self.assertEqual(pauli_sum.adjoint(), pauli_sum)
pauli_sum = PauliSumOp(SparsePauliOp(Pauli("Y"), coeffs=[1]))
self.assertEqual(pauli_sum.adjoint(), pauli_sum)
pauli_sum = PauliSumOp(SparsePauliOp(Pauli("Z"), coeffs=[1]))
self.assertEqual(pauli_sum.adjoint(), pauli_sum)
pauli_sum = (Z ^ Z) + (Y ^ I)
self.assertEqual(pauli_sum.adjoint(), pauli_sum)
def test_equals(self):
"""equality test"""
self.assertNotEqual((X ^ X) + (Y ^ Y), X + Y)
self.assertEqual((X ^ X) + (Y ^ Y), (Y ^ Y) + (X ^ X))
theta = ParameterVector("theta", 2)
pauli_sum0 = theta[0] * (X + Z)
pauli_sum1 = theta[1] * (X + Z)
expected = PauliSumOp(
SparsePauliOp(Pauli("X")) + SparsePauliOp(Pauli("Z")),
coeff=1.0 * theta[0],
)
self.assertEqual(pauli_sum0, expected)
self.assertNotEqual(pauli_sum1, expected)
def test_tensor(self):
"""Test for tensor operation"""
with self.subTest("Test 1"):
pauli_sum = ((I - Z) ^ (I - Z)) + ((X - Y) ^ (X + Y))
expected = (I ^ I) - (I ^ Z) - (Z ^ I) + (Z ^ Z) + (X ^ X) + (X ^ Y) - (Y ^ X) - (Y ^ Y)
self.assertEqual(pauli_sum, expected)
with self.subTest("Test 2"):
pauli_sum = (Z + I) ^ Z
expected = (Z ^ Z) + (I ^ Z)
self.assertEqual(pauli_sum, expected)
with self.subTest("Test 3"):
pauli_sum = Z ^ (Z + I)
expected = (Z ^ Z) + (Z ^ I)
self.assertEqual(pauli_sum, expected)
def test_permute(self):
"""permute test"""
pauli_sum = PauliSumOp(SparsePauliOp((X ^ Y ^ Z).primitive))
expected = PauliSumOp(SparsePauliOp((X ^ I ^ Y ^ Z ^ I).primitive))
self.assertEqual(pauli_sum.permute([1, 2, 4]), expected)
def test_compose(self):
"""compose test"""
target = (X + Z) @ (Y + Z)
expected = 1j * Z - 1j * Y - 1j * X + I
self.assertEqual(target, expected)
observable = (X ^ X) + (Y ^ Y) + (Z ^ Z)
state = CircuitStateFn((CX @ (X ^ H @ X)).to_circuit())
self.assertAlmostEqual((~OperatorStateFn(observable) @ state).eval(), -3)
def test_to_matrix(self):
"""test for to_matrix method"""
target = (Z + Y).to_matrix()
expected = np.array([[1.0, -1j], [1j, -1]])
np.testing.assert_array_equal(target, expected)
def test_str(self):
"""str test"""
target = 3.0 * (X + 2.0 * Y - 4.0 * Z)
expected = "3.0 * X\n+ 6.0 * Y\n- 12.0 * Z"
self.assertEqual(str(target), expected)
alpha = Parameter("α")
target = alpha * (X + 2.0 * Y - 4.0 * Z)
expected = "1.0*α * (\n 1.0 * X\n + 2.0 * Y\n - 4.0 * Z\n)"
self.assertEqual(str(target), expected)
def test_eval(self):
"""eval test"""
target0 = (2 * (X ^ Y ^ Z) + 3 * (X ^ X ^ Z)).eval("000")
target1 = (2 * (X ^ Y ^ Z) + 3 * (X ^ X ^ Z)).eval(Zero ^ 3)
expected = DictStateFn({"110": (3 + 2j)})
self.assertEqual(target0, expected)
self.assertEqual(target1, expected)
phi = 0.5 * ((One + Zero) ^ 2)
zero_op = (Z + I) / 2
one_op = (I - Z) / 2
h1 = one_op ^ I
h2 = one_op ^ (one_op + zero_op)
h2a = one_op ^ one_op
h2b = one_op ^ zero_op
self.assertEqual((~OperatorStateFn(h1) @ phi).eval(), 0.5)
self.assertEqual((~OperatorStateFn(h2) @ phi).eval(), 0.5)
self.assertEqual((~OperatorStateFn(h2a) @ phi).eval(), 0.25)
self.assertEqual((~OperatorStateFn(h2b) @ phi).eval(), 0.25)
pauli_op = (Z ^ I ^ X) + (I ^ I ^ Y)
mat_op = pauli_op.to_matrix_op()
full_basis = ["".join(b) for b in product("01", repeat=pauli_op.num_qubits)]
for bstr1, bstr2 in product(full_basis, full_basis):
self.assertEqual(pauli_op.eval(bstr1).eval(bstr2), mat_op.eval(bstr1).eval(bstr2))
def test_exp_i(self):
"""exp_i test"""
# TODO: add tests when special methods are added
pass
def test_to_instruction(self):
"""test for to_instruction"""
target = ((X + Z) / np.sqrt(2)).to_instruction()
qc = QuantumCircuit(1)
qc.u(np.pi / 2, 0, np.pi, 0)
qc_out = transpile(target.definition, basis_gates=["u"])
self.assertEqual(qc_out, qc)
def test_to_pauli_op(self):
"""test to_pauli_op method"""
target = X + Y
self.assertIsInstance(target, PauliSumOp)
expected = SummedOp([X, Y])
self.assertEqual(target.to_pauli_op(), expected)
def test_getitem(self):
"""test get item method"""
target = X + Z
self.assertEqual(target[0], PauliSumOp(SparsePauliOp(X.primitive)))
self.assertEqual(target[1], PauliSumOp(SparsePauliOp(Z.primitive)))
def test_len(self):
"""test len"""
target = X + Y + Z
self.assertEqual(len(target), 3)
def test_reduce(self):
"""test reduce"""
target = X + X + Z
self.assertEqual(len(target.reduce()), 2)
def test_to_spmatrix(self):
"""test to_spmatrix"""
target = X + Y
expected = csr_matrix([[0, 1 - 1j], [1 + 1j, 0]])
self.assertEqual((target.to_spmatrix() - expected).nnz, 0)
def test_from_list(self):
"""test from_list"""
target = PauliSumOp.from_list(
[
("II", -1.052373245772859),
("IZ", 0.39793742484318045),
("ZI", -0.39793742484318045),
("ZZ", -0.01128010425623538),
("XX", 0.18093119978423156),
]
)
expected = (
-1.052373245772859 * (I ^ I)
+ 0.39793742484318045 * (I ^ Z)
- 0.39793742484318045 * (Z ^ I)
- 0.01128010425623538 * (Z ^ Z)
+ 0.18093119978423156 * (X ^ X)
)
self.assertEqual(target, expected)
def test_matrix_iter(self):
"""Test PauliSumOp dense matrix_iter method."""
labels = ["III", "IXI", "IYY", "YIZ", "XYZ", "III"]
coeffs = np.array([1, 2, 3, 4, 5, 6])
table = PauliTable.from_labels(labels)
coeff = 10
op = PauliSumOp(SparsePauliOp(table, coeffs), coeff)
for idx, i in enumerate(op.matrix_iter()):
self.assertTrue(np.array_equal(i, coeff * coeffs[idx] * Pauli(labels[idx]).to_matrix()))
def test_matrix_iter_sparse(self):
"""Test PauliSumOp sparse matrix_iter method."""
labels = ["III", "IXI", "IYY", "YIZ", "XYZ", "III"]
coeffs = np.array([1, 2, 3, 4, 5, 6])
coeff = 10
table = PauliTable.from_labels(labels)
op = PauliSumOp(SparsePauliOp(table, coeffs), coeff)
for idx, i in enumerate(op.matrix_iter(sparse=True)):
self.assertTrue(
np.array_equal(i.toarray(), coeff * coeffs[idx] * Pauli(labels[idx]).to_matrix())
)
if __name__ == "__main__":
unittest.main()
from enum import Enum
from random import choices
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
class Direction(Enum):
Up = 1
Right = 2
Down = 3
Left = 4
class Position():
x = 0
y = 0
direction = Direction.Right
def __init__(self, x, y, direction = Direction.Right):
self.x = x
self.y = y
self.direction = direction
class Step():
direction = Direction.Right
sensedDistance = 0
realPosition = Position(0, 0)
def __init__(self, direction, sensedDistance, position):
self.direction = direction
self.sensedDistance = sensedDistance
self.realPosition = position
N = 10
map = []
epsilon = 1/(N*N*N*N)
# x = row
# y = column
currentPosition = Position(5, 2)
# steps = [Step(direction=Direction.Up, sensedDistance=8, position=Position(5, 5))]
# normal
# steps = [Step(direction=Direction.Right, sensedDistance=0, position=Position(5, 6)),
# Step(direction=Direction.Up, sensedDistance=0, position=Position(0, 6)),
# Step(direction=Direction.Left, sensedDistance=0, position=Position(0, 0)),
# Step(direction=Direction.Down, sensedDistance=5, position=Position(4, 0)), # robot gets kidnapped here
# Step(direction=Direction.Right, sensedDistance=4, position=Position(4, 6)),
# Step(direction=Direction.Down, sensedDistance=1, position=Position(8, 5)),
# Step(direction=Direction.Right, sensedDistance=0, position=Position(8, 9)),
# Step(direction=Direction.Up, sensedDistance=1, position=Position(4, 9))]
# kidnapped
steps = [Step(direction=Direction.Right, sensedDistance=0, position=Position(5, 6)),
Step(direction=Direction.Up, sensedDistance=0, position=Position(0, 6)),
Step(direction=Direction.Left, sensedDistance=0, position=Position(0, 0)), # robot gets kidnapped here
Step(direction=Direction.Down, sensedDistance=0, position=Position(9, 4)),
Step(direction=Direction.Right, sensedDistance=0, position=Position(9, 9)),
Step(direction=Direction.Up, sensedDistance=1, position=Position(4, 9)),
Step(direction=Direction.Left, sensedDistance=5, position=Position(4, 5)),
Step(direction=Direction.Down, sensedDistance=1, position=Position(8, 5))]
def normalize(matrix):
retVal = matrix.copy()
retVal = retVal - retVal.mean()
retVal = retVal / np.abs(retVal).max()
return retVal
def plotData(step, prior, posterior):
global epsilon, map, currentPosition
fig = plt.figure(dpi=500)
# fig.tight_layout()
gs = gridspec.GridSpec(1, 3, width_ratios=[1, 1.07, 1.07])
# =======
# Map
# =======
ax = fig.add_subplot(gs[0])
ax.matshow(map, vmin=0, vmax=1, cmap='Greys')
plt.title('Map', x=0.5, y=1.2)
ticks = np.arange(0, N, 1)
plt.grid(which='major', axis='both', linestyle=':', color='black')
for i in range(len(map)):
for j in range(len(map)):
if currentPosition.x == i and currentPosition.y == j:
ax.text(j, i, "R", ha="center", va="center", color="red", weight='bold')
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(range(0, N))
ax.set_yticklabels(range(0, N))
# =======
# Prior
# =======
ax = fig.add_subplot(gs[1])
data = normalize(prior)
im = ax.matshow(data, vmin=np.min(data), vmax=np.max(data))
plt.title('Prior', x=0.5, y=1.2)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(im, cax=cax)
ticks = np.arange(0, N, 1)
plt.grid(which='major', axis='both', linestyle=':', color='black')
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(range(0, N))
ax.set_yticklabels(range(0, N))
# =======
# Posterior
# =======
ax = fig.add_subplot(gs[2])
data = normalize(posterior)
im = ax.matshow(data, vmin=np.min(data), vmax=np.max(data))
plt.title('Posterior', x=0.5, y=1.2)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(im, cax=cax)
ticks = np.arange(0, N, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(range(0, N))
ax.set_yticklabels(range(0, N))
# plt.tight_layout()
plt.show()
fig.savefig(f"step{step}.png")
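# Prediction step of the histogram (Bayes) filter: for every free cell, sum
# P(step size) * oldPosterior over the cells the robot could have started
# from, skipping any move that would have passed through a wall.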
def calcPrior(oldPosterior, direction):
global epsilon, map
steps = [3, 4, 5, 6, 7]
stepProbability = [0, 0, 0, 0.1, 0.2, 0.4, 0.2, 0.1]
prior = np.full((N, N), 0.0)
# x = row = i
# y = column = j
# for each row
for i in range(0, N):
# for each column
for j in range(0, N):
# horizontal movement
if direction == Direction.Right or direction == Direction.Left:
# if this row contains a wall
                if 1 in map[i, :]:
# if robot moves to the right
if direction == Direction.Right:
# start from the left side and calc prior up to a wall
for stepSize in steps:
# wall found, restart right from the wall
if map[i, j] == 1:
break
else:
if j - stepSize >= 0:
skip = False
for t in range(0, stepSize + 1):
if j - t >= 0 and map[i, j - t] == 1:
skip = True
break
if not skip:
if j - stepSize >= 0:
prior[i, j] += oldPosterior[i, j - stepSize] * stepProbability[stepSize]
else:
break
# robot moves to the left
else:
                        # calc the prior cell by cell up to a wall
for stepSize in steps:
# wall found, restart right from the wall
if map[i, j] == 1:
break
else:
if j + stepSize < N:
skip = False
for t in range(0, stepSize + 1):
if j + t < N and map[i, j + t] == 1:
skip = True
break
if not skip:
if j + stepSize < N:
prior[i, j] += oldPosterior[i, j + stepSize] * stepProbability[stepSize]
else:
break
# no wall in this row
else:
# normal calculation
for stepSize in steps:
if direction == Direction.Right:
if j - stepSize >= 0:
prior[i, j] += oldPosterior[i, j - stepSize] * stepProbability[stepSize]
if direction == Direction.Left:
if j + stepSize < N:
prior[i, j] += oldPosterior[i, j + stepSize] * stepProbability[stepSize]
# vertical movement
elif direction == Direction.Down or direction == Direction.Up:
                # if this column contains a wall
                if 1 in map[:, j]:
# robot moves up
if direction == Direction.Up:
                        # walk the column and calc the prior up to a wall
for stepSize in steps:
# wall found, restart right from the wall
if map[i, j] == 1:
break
else:
if i + stepSize < N:
skip = False
for t in range(0, stepSize + 1):
if i + t < N and map[i + t, j] == 1:
skip = True
break
if not skip:
if i + stepSize < N:
prior[i, j] += oldPosterior[i + stepSize, j] * stepProbability[stepSize]
else:
break
# robot moves down
else:
                        # walk the column and calc the prior up to a wall
for stepSize in steps:
# wall found, restart right from the wall
if map[i, j] == 1:
break
else:
if i - stepSize >= 0:
skip = False
for t in range(0, stepSize + 1):
if 0 <= i - t and map[i - t, j] == 1:
skip = True
break
if not skip:
                                        if i - stepSize >= 0:
                                            prior[i, j] += oldPosterior[i - stepSize, j] * stepProbability[stepSize]
else:
break
# no wall in this row
else:
# normal calculation
for stepSize in steps:
if direction == Direction.Up:
if i + stepSize < N:
prior[i, j] += oldPosterior[i + stepSize, j] * stepProbability[stepSize]
if direction == Direction.Down:
if i - stepSize >= 0:
prior[i, j] += oldPosterior[i - stepSize, j] * stepProbability[stepSize]
return prior
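# Correction step: weight the prior by the sensor model P(z | distance to the
# nearest wall), where the sensed distance is off by at most +/-2 cells. The
# epsilon floor keeps every cell reachable, which is what lets the filter
# recover in the kidnapped-robot scenario; the result is normalized to sum to 1.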
def calcPosterior(sensorValue, direction, prior):
global epsilon, map
sensorProbability = {
sensorValue - 2: 0.1,
sensorValue - 1: 0.2,
sensorValue: 0.4,
sensorValue + 1: 0.2,
sensorValue + 2: 0.1
}
posterior = np.full((N, N), 0.0)
for i in range(0, N):
for j in range(0, N):
# horizontal movement
if direction == Direction.Right or direction == Direction.Left:
# if this row contains a wall
                if 1 in map[i, :]:
# if robot moves to the right
if direction == Direction.Right:
# wall found, restart right from the next field
if map[i, j] == 1:
continue
else:
# is there a wall in range?
for k in range(max(0, sensorValue - 2), sensorValue + 2 + 1):
if j + k + 1 == N or (j + k + 1 < N and map[i, j + k + 1] == 1):
posterior[i, j] = prior[i, j] * sensorProbability[k]
# left
elif direction == Direction.Left:
# wall found, restart right from the next field
if map[i, j] == 1:
continue
else:
# is there a wall in range?
for k in range(max(0, sensorValue - 2), sensorValue + 2 + 1):
if j - k - 1 == -1 or (j - k - 1 >= 0 and map[i, j - k - 1] == 1):
posterior[i, j] = prior[i, j] * sensorProbability[k]
# no wall in this row
else:
# normal calculation
if direction == Direction.Right:
for k in range(sensorValue - 2, sensorValue + 2 + 1):
if N > N - k - 1 >= 0:
posterior[i, N - k - 1] = prior[i, N - k - 1] * sensorProbability[k]
if direction == Direction.Left:
for k in range(sensorValue - 2, sensorValue + 2 + 1):
if 0 <= k < N:
posterior[i, k] = prior[i, k] * sensorProbability[k]
#vertical movement
elif direction == Direction.Down or direction == Direction.Up:
# if this column contains a wall
                if 1 in map[:, j]:
# robot moves up
if direction == Direction.Up:
# wall found, restart right from the next field
if map[i, j] == 1:
continue
else:
# is there a wall in range?
for k in range(max(0, sensorValue - 2), sensorValue + 2 + 1):
if i - k - 1 == -1 or (i - k - 1 >= 0 and map[i - k - 1, j] == 1):
posterior[i, j] = prior[i, j] * sensorProbability[k]
# robot moves down
else:
# wall found, restart right from the next field
if map[i, j] == 1:
continue
else:
# is there a wall in range?
for k in range(max(0, sensorValue - 2), sensorValue + 2 + 1):
if i + k + 1 == N or (i + k + 1 < N and map[i + k + 1, j] == 1):
posterior[i, j] = prior[i, j] * sensorProbability[k]
# no wall in this column
else:
# normal calculation
if direction == Direction.Up:
for k in range(max(0, sensorValue - 2), sensorValue + 2 + 1):
if 0 <= k < N:
posterior[k, j] = prior[k, j] * sensorProbability[k]
if direction == Direction.Down:
for k in range(sensorValue - 2, sensorValue + 2 + 1):
if N > N - k - 1 >= 0:
posterior[N - k - 1, j] = prior[N - k - 1, j] * sensorProbability[k]
posterior[posterior < epsilon] = epsilon
posterior = posterior / np.sum(posterior)
return posterior
def getSensorDerivation():
# z_t
population = [-2, -1, 0, 1, 2]
weights = [0.1, 0.2, 0.4, 0.2, 0.1]
return choices(population, weights)[0]
def getStepSize():
# x_t
population = [3, 4, 5, 6, 7]
weights = [0.1, 0.2, 0.4, 0.2, 0.1]
return choices(population, weights)[0]
def doStep(direction):
    global currentPosition  # the module-level Position above ('realPos' was never defined)
    stepSize = getStepSize()
    print(f'Moving {stepSize} in Direction {direction}')
    # x is the row index and y the column index (see the convention above),
    # so Up/Down change x and Left/Right change y; clamp at the outer walls.
    if direction == Direction.Up:
        if currentPosition.x - stepSize < 0:
            stepSize = currentPosition.x
            print(f'robot hit upper wall, moved only {stepSize}')
        currentPosition.x = currentPosition.x - stepSize
    elif direction == Direction.Right:
        if currentPosition.y + stepSize > N - 1:
            stepSize = N - 1 - currentPosition.y
            print(f'robot hit right wall, moved only {stepSize}')
        currentPosition.y = currentPosition.y + stepSize
    elif direction == Direction.Down:
        if currentPosition.x + stepSize > N - 1:
            stepSize = N - 1 - currentPosition.x
            print(f'robot hit lower wall, moved only {stepSize}')
        currentPosition.x = currentPosition.x + stepSize
    elif direction == Direction.Left:
        if currentPosition.y - stepSize < 0:
            stepSize = currentPosition.y
            print(f'robot hit left wall, moved only {stepSize}')
        currentPosition.y = currentPosition.y - stepSize
def senseDistance(direction):
    global currentPosition
    # Count free cells from the robot to the nearest wall (or map border)
    # in the given direction; x = row, y = column.
    distance = 0
    if direction == Direction.Up:
        for i in range(1, currentPosition.x + 1):
            if map[currentPosition.x - i, currentPosition.y] == 0:
                distance += 1
            else:
                break
    elif direction == Direction.Right:
        for i in range(1, N - currentPosition.y):
            if map[currentPosition.x, currentPosition.y + i] == 0:
                distance += 1
            else:
                break
    elif direction == Direction.Down:
        for i in range(1, N - currentPosition.x):
            if map[currentPosition.x + i, currentPosition.y] == 0:
                distance += 1
            else:
                break
    elif direction == Direction.Left:
        for i in range(1, currentPosition.y + 1):
            if map[currentPosition.x, currentPosition.y - i] == 0:
                distance += 1
            else:
                break
    return distance
def main():
global map, currentPosition
map = np.empty((N, N))
map[:] = 0
map[1, 1] = 1
map[1, 8] = 1
map[2, 9] = 1
map[3, 4] = 1
map[5, 7] = 1
map[6, 8] = 1
map[7, 2] = 1
map[7, 3] = 1
map[7, 7] = 1
probabilities = np.full((N, N), 1 / (N * N - np.sum(map)))
i = 0
plotData(i, probabilities, probabilities)
for step in steps:
i += 1
# 1. take random step
# doStep(step.direction)
currentPosition = step.realPosition
        # 2. calculate prior
prior = calcPrior(probabilities, step.direction)
# 3. get sensor values
# distance = senseDistance(step.direction) + getSensorDerivation()
distance = step.sensedDistance
        # 4. calculate posterior
posterior = calcPosterior(distance, step.direction, prior)
# probabilities[map == 1] = 0
plotData(i, prior, posterior)
probabilities = posterior
if __name__ == "__main__":
main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 31 21:42:33 2019
@author: george
"""
import sys
import numpy as np
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.cluster import KMeans
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import BernoulliNB
def logisticRegressionCv2(Xtrain = None, ytrain = None, Xtest = None,
ytest = None, Cs = [10], penalty = 'l1',
solver = 'saga', scoring = 'f1'):
model = LogisticRegressionCV(Cs = Cs, penalty = penalty, random_state = 0,
solver = solver, scoring = scoring)\
.fit(Xtrain, ytrain)
probTrain = model.predict_proba( Xtrain )[:, 1]
probTest = model.predict_proba( Xtest )[:, 1]
params = {'model': model, 'probTrain': probTrain, 'probTest': probTest}
return params, probTest, probTrain
def neural_nets(Xtrain = None, ytrain = None, Xtest = None,
ytest = None, h_l_s = (5, 3, 2), cv = 2,
scoring = 'f1'):
sgd = MLPClassifier( hidden_layer_sizes = h_l_s, early_stopping = True,
random_state = 0)
param_grid = {'alpha' : [0.001, 0.01, 0.1 , 1, 10, 100, 1000, 10000] }
model = GridSearchCV( sgd, param_grid = param_grid,
n_jobs = -1,
scoring = scoring, cv = cv).fit(Xtrain, ytrain)
probTrain = model.predict_proba( Xtrain )[:, 1]
probTest = model.predict_proba( Xtest )[:, 1]
params = {'model': model, 'probTrain': probTrain, 'probTest': probTest}
return params, probTest, probTrain
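# Cluster-then-classify: fit KMeans on the pooled train+test features, then
# train a separate logistic regression per cluster; adaR == 1 rescales the
# candidate Cs by the cluster size before fitting.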
def kmeansLogRegr( Xtrain = None, ytrain = None, Xtest = None,
ytest = None, Cs = [10], penalty = 'l1',
solver = 'saga', scoring = 'f1', n_clusters = 2,
adaR = 1):
#CLUSTER WITH KMEANS
kmeans = KMeans(n_clusters = n_clusters, random_state = 0).\
fit( np.concatenate(( Xtrain, Xtest ), axis = 0) )
#TAKE THE LABELS
labelsTrain = kmeans.labels_[0: Xtrain.shape[0]]
labelsTest = kmeans.labels_[ Xtrain.shape[0]:]
#TRAIN LOGISTIC REGRESSION
models = []
probTrain = []
probTest = []
for i in np.arange( n_clusters ):
indxTr = np.where(labelsTrain == i)[0]
indxTest = np.where( labelsTest == i)[0]
        if adaR == 1:
            Csnew = (np.array(Cs)/len(indxTr)).tolist()
        else:
            Csnew = Cs  # without this branch, Csnew was undefined when adaR != 1
params, _, _ = logisticRegressionCv2(Xtrain = Xtrain[indxTr],
ytrain = ytrain[indxTr],
ytest = ytest[indxTest],
Xtest = Xtest[indxTest],
Cs = Csnew, penalty = penalty,
solver = solver, scoring = scoring)
models.append( params['model'] )
probTrain.append( params['probTrain'] )
probTest.append( params['probTest'] )
params = {'models': models,'labelsTrain': labelsTrain,
'labelsTest': labelsTest, 'probTrain': probTrain,
'probTest': probTest}
return params
def randomforests(Xtrain = None, ytrain = None, Xtest = None,
ytest = None, cv = 2, scoring = 'f1'):
"RANDOM FOREST CLASSIFIER"
param_grid = {'n_estimators' : [10, 50, 100, 150, 200, 250, 300, 350,
400, 500, 700, 900] }
forest = RandomForestClassifier()
model = GridSearchCV( forest, param_grid = param_grid,
n_jobs = -1,
scoring = scoring, cv = cv).\
fit(Xtrain, ytrain) #fit model
probTrain = model.predict_proba( Xtrain )[:, 1]
probTest = model.predict_proba( Xtest )[:, 1]
params = {'model': model, 'probTrain': probTrain, 'probTest': probTest}
return params, probTest, probTrain
def xboost(Xtrain = None, ytrain = None, Xtest = None,
           ytest = None, cv = 2, scoring = 'f1'):
    "ADABOOST CLASSIFIER (the name notwithstanding, this fits AdaBoost)"
param_grid = {'n_estimators' : [10, 50, 100, 150, 200, 250, 300, 350,
400, 500, 700, 900]}
ada = AdaBoostClassifier()
model = GridSearchCV( ada, param_grid = param_grid,
n_jobs = -1,
scoring = scoring, cv = cv).\
fit(Xtrain, ytrain) #fit model
probTrain = model.predict_proba( Xtrain )[:, 1]
probTest = model.predict_proba( Xtest )[:, 1]
params = {'model': model, 'probTrain': probTrain, 'probTest': probTest}
return params, probTest, probTrain
def gradboost(Xtrain = None, ytrain = None, Xtest = None,
ytest = None, cv = 2, scoring = 'f1'):
"RANDOM FOREST CLASSIFIER"
param_grid = {'n_estimators' : [10, 50, 100, 150, 200, 250, 300, 350,
400, 500, 700, 900]}
grad = GradientBoostingClassifier(subsample = 0.5, max_features = 'sqrt',
learning_rate = 0.01, max_depth = 5)
model = GridSearchCV( grad, param_grid = param_grid,
n_jobs = -1,
scoring = scoring, cv = cv).\
fit(Xtrain, ytrain) #fit model
probTrain = model.predict_proba( Xtrain )[:, 1]
probTest = model.predict_proba( Xtest )[:, 1]
params = {'model': model, 'probTrain': probTrain, 'probTest': probTest}
return params, probTest, probTrain
def kmeansBNB( Xtrain = None, ytrain = None, Xtest = None,
ytest = None, n_clusters = 2):
#CLUSTER WITH KMEANS
kmeans = KMeans(n_clusters = n_clusters, random_state = 0).\
fit( np.concatenate(( Xtrain, Xtest ), axis = 0) )
#TAKE THE LABELS
labelsTrain = kmeans.labels_[0: Xtrain.shape[0]]
labelsTest = kmeans.labels_[ Xtrain.shape[0]:]
#TRAIN NaiveBNB
models = []
probTrain = []
probTest = []
for i in np.arange( n_clusters ):
indxTr = np.where(labelsTrain == i)[0]
indxTest = np.where( labelsTest == i)[0]
        bnb = BernoulliNB(alpha = 1)
        bnb.fit(Xtrain[indxTr], ytrain[indxTr])
        probTrainNB = bnb.predict_proba(Xtrain[indxTr])[:, 1]
        probTestNB = bnb.predict_proba(Xtest[indxTest])[:, 1]
models.append( bnb )
probTrain.append( probTrainNB )
probTest.append( probTestNB )
params = {'models': models,'labelsTrain': labelsTrain,
'labelsTest': labelsTest, 'probTrain': probTrain,
'probTest': probTest}
return params |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.fields.hstore
from django import VERSION as django_version
from django.contrib.postgres.operations import HStoreExtension
from django.db import migrations, models
try:
from django.contrib.gis.geos.libgeos import geos_version_info
HAS_GEOS = geos_version_info()["version"] >= "3.3.0"
except (ImportError, OSError):
HAS_GEOS = False
if HAS_GEOS:
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = []
operations = [HStoreExtension()]
operations += [
migrations.CreateModel(
name="Chef",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("slug", models.SlugField()),
("first_name", models.CharField(max_length=60)),
("last_name", models.CharField(max_length=60)),
("uuid_id", models.UUIDField()),
("email_address", models.EmailField(max_length=60)),
("twitter_profile", models.URLField(max_length=60)),
],
),
migrations.CreateModel(
name="Topping",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("name", models.CharField(max_length=60)),
],
),
migrations.CreateModel(
name="Pizza",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("name", models.CharField(max_length=50)),
(
"price",
models.DecimalField(null=True, max_digits=4, decimal_places=2),
),
("gluten_free", models.BooleanField(default=False)),
("vegan", models.BooleanField()),
("description", models.TextField(blank=True)),
(
"thickness",
models.CharField(
max_length=50,
choices=[(0, b"thin"), (1, b"thick"), (2, b"deep dish")],
),
),
("baked_on", models.DateTimeField()),
("expiration", models.DateField()),
(
"chef",
models.ForeignKey(
to="tests.Chef",
on_delete=models.CASCADE,
related_name="invented_pizzas",
),
),
(
"critic",
models.ForeignKey(
to="tests.Chef",
null=True,
on_delete=models.CASCADE,
related_name="reviewed_pizzas",
),
),
("toppings", models.ManyToManyField(to="tests.Topping")),
("rating", models.PositiveSmallIntegerField()),
("unique_comment", models.TextField(unique=True)),
],
),
]
if HAS_GEOS:
pizzeria_fields = [
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("hq", django.contrib.gis.db.models.fields.PointField(srid=4326)),
(
"directions",
django.contrib.gis.db.models.fields.LineStringField(srid=4326),
),
("floor_plan", django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
(
"locations",
django.contrib.gis.db.models.fields.MultiPointField(srid=4326),
),
(
"routes",
django.contrib.gis.db.models.fields.MultiLineStringField(srid=4326),
),
(
"delivery_areas",
django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326),
),
(
"all_the_things",
django.contrib.gis.db.models.fields.GeometryCollectionField(srid=4326),
),
]
if django_version >= (1, 9, 0):
pizzeria_fields.append(
("rast", django.contrib.gis.db.models.fields.RasterField(srid=4326))
)
operations += [migrations.CreateModel(name="Pizzeria", fields=pizzeria_fields)]
specialtypizza_fields = [
(
"id",
models.AutoField(
verbose_name="ID", serialize=False, auto_created=True, primary_key=True
),
),
("name", models.CharField(max_length=50)),
(
"toppings",
django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=20), size=4
),
),
("metadata", django.contrib.postgres.fields.hstore.HStoreField()),
("price_range", django.contrib.postgres.fields.IntegerRangeField()),
("sales", django.contrib.postgres.fields.BigIntegerRangeField()),
("available_on", django.contrib.postgres.fields.DateTimeRangeField()),
("season", django.contrib.postgres.fields.DateRangeField()),
]
if django_version >= (1, 9, 0):
specialtypizza_fields.append(
("nutritional_values", django.contrib.postgres.fields.JSONField())
)
operations += [
migrations.CreateModel(name="SpecialtyPizza", fields=specialtypizza_fields)
]
|
"""Tests of session module"""
import pytest
try:
from unittest import mock
except ImportError:
import mock
from rasterio.session import DummySession, AWSSession, Session, OSSSession, GSSession, SwiftSession
def test_base_session_hascreds_notimpl():
"""Session.hascreds must be overridden"""
assert Session.hascreds({}) is NotImplemented
def test_base_session_get_credential_options_notimpl():
"""Session.get_credential_options must be overridden"""
assert Session().get_credential_options() is NotImplemented
def test_dummy_session():
"""DummySession works"""
sesh = DummySession()
assert sesh._session is None
assert sesh.get_credential_options() == {}
def test_aws_session_class():
"""AWSSession works"""
sesh = AWSSession(aws_access_key_id='foo', aws_secret_access_key='bar')
assert sesh._session
assert sesh.get_credential_options()['AWS_ACCESS_KEY_ID'] == 'foo'
assert sesh.get_credential_options()['AWS_SECRET_ACCESS_KEY'] == 'bar'
def test_aws_session_class_session():
"""AWSSession works"""
boto3 = pytest.importorskip("boto3")
sesh = AWSSession(session=boto3.session.Session(aws_access_key_id='foo', aws_secret_access_key='bar'))
assert sesh._session
assert sesh.get_credential_options()['AWS_ACCESS_KEY_ID'] == 'foo'
assert sesh.get_credential_options()['AWS_SECRET_ACCESS_KEY'] == 'bar'
def test_aws_session_class_unsigned():
"""AWSSession works"""
pytest.importorskip("boto3")
sesh = AWSSession(aws_unsigned=True, region_name='us-mountain-1')
assert sesh.get_credential_options()['AWS_NO_SIGN_REQUEST'] == 'YES'
assert sesh.get_credential_options()['AWS_REGION'] == 'us-mountain-1'
def test_aws_session_class_profile(tmpdir, monkeypatch):
"""Confirm that profile_name kwarg works."""
pytest.importorskip("boto3")
credentials_file = tmpdir.join('credentials')
credentials_file.write("[testing]\n"
"aws_access_key_id = foo\n"
"aws_secret_access_key = bar\n"
"aws_session_token = baz")
monkeypatch.setenv('AWS_SHARED_CREDENTIALS_FILE', str(credentials_file))
monkeypatch.setenv('AWS_SESSION_TOKEN', 'ignore_me')
sesh = AWSSession(profile_name='testing')
assert sesh._session
assert sesh.get_credential_options()['AWS_ACCESS_KEY_ID'] == 'foo'
assert sesh.get_credential_options()['AWS_SECRET_ACCESS_KEY'] == 'bar'
assert sesh.get_credential_options()['AWS_SESSION_TOKEN'] == 'baz'
monkeypatch.undo()
def test_aws_session_class_endpoint():
"""Confirm that endpoint_url kwarg works."""
pytest.importorskip("boto3")
sesh = AWSSession(endpoint_url="example.com")
assert sesh.get_credential_options()['AWS_S3_ENDPOINT'] == 'example.com'
def test_session_factory_unparsed():
"""Get a DummySession for unparsed paths"""
sesh = Session.from_path("/vsicurl/lolwut")
assert isinstance(sesh, DummySession)
def test_session_factory_empty():
"""Get a DummySession for no path"""
sesh = Session.from_path("")
assert isinstance(sesh, DummySession)
def test_session_factory_local():
"""Get a DummySession for local paths"""
sesh = Session.from_path("file:///lolwut")
assert isinstance(sesh, DummySession)
def test_session_factory_unknown():
"""Get a DummySession for unknown paths"""
sesh = Session.from_path("https://fancy-cloud.com/lolwut")
assert isinstance(sesh, DummySession)
def test_session_factory_s3():
"""Get an AWSSession for s3:// paths"""
pytest.importorskip("boto3")
sesh = Session.from_path("s3://lol/wut")
assert isinstance(sesh, AWSSession)
def test_session_factory_s3_no_boto3(monkeypatch):
"""Get an AWSSession for s3:// paths"""
pytest.importorskip("boto3")
with monkeypatch.context() as mpctx:
mpctx.setattr("rasterio.session.boto3", None)
sesh = Session.from_path("s3://lol/wut")
assert isinstance(sesh, DummySession)
def test_session_factory_s3_kwargs():
"""Get an AWSSession for s3:// paths with keywords"""
pytest.importorskip("boto3")
sesh = Session.from_path("s3://lol/wut", aws_access_key_id='foo', aws_secret_access_key='bar')
assert isinstance(sesh, AWSSession)
assert sesh._session.get_credentials().access_key == 'foo'
assert sesh._session.get_credentials().secret_key == 'bar'
def test_foreign_session_factory_dummy():
sesh = Session.from_foreign_session(None)
assert isinstance(sesh, DummySession)
def test_foreign_session_factory_s3():
boto3 = pytest.importorskip("boto3")
aws_session = boto3.Session(aws_access_key_id='foo', aws_secret_access_key='bar')
sesh = Session.from_foreign_session(aws_session, cls=AWSSession)
assert isinstance(sesh, AWSSession)
assert sesh._session.get_credentials().access_key == 'foo'
assert sesh._session.get_credentials().secret_key == 'bar'
def test_requester_pays():
"""GDAL is configured with requester pays"""
sesh = AWSSession(aws_access_key_id='foo', aws_secret_access_key='bar', requester_pays=True)
assert sesh._session
assert sesh.get_credential_options()['AWS_REQUEST_PAYER'] == 'requester'
def test_oss_session_class():
"""OSSSession works"""
oss_session = OSSSession(
oss_access_key_id='foo',
oss_secret_access_key='bar',
oss_endpoint='null-island-1')
assert oss_session._creds
assert oss_session.get_credential_options()['OSS_ACCESS_KEY_ID'] == 'foo'
assert oss_session.get_credential_options()['OSS_SECRET_ACCESS_KEY'] == 'bar'
def test_session_factory_oss_kwargs():
"""Get an OSSSession for oss:// paths with keywords"""
sesh = Session.from_path("oss://lol/wut", oss_access_key_id='foo', oss_secret_access_key='bar')
assert isinstance(sesh, OSSSession)
assert sesh.get_credential_options()['OSS_ACCESS_KEY_ID'] == 'foo'
assert sesh.get_credential_options()['OSS_SECRET_ACCESS_KEY'] == 'bar'
def test_google_session_ctor_no_arg():
session = GSSession()
assert not session._creds
def test_gs_session_class():
"""GSSession works"""
gs_session = GSSession(
google_application_credentials='foo')
assert gs_session._creds
assert gs_session.get_credential_options()['GOOGLE_APPLICATION_CREDENTIALS'] == 'foo'
assert gs_session.hascreds({'GOOGLE_APPLICATION_CREDENTIALS': 'foo'})
def test_swift_session_class():
"""SwiftSession works"""
swift_session = SwiftSession(
swift_storage_url='foo',
swift_auth_token='bar',)
assert swift_session._creds
assert swift_session.get_credential_options()['SWIFT_STORAGE_URL'] == 'foo'
assert swift_session.get_credential_options()['SWIFT_AUTH_TOKEN'] == 'bar'
def test_swift_session_by_user_key():
def mock_init(self, session=None,
swift_storage_url=None, swift_auth_token=None,
swift_auth_v1_url=None, swift_user=None, swift_key=None):
self._creds = {'SWIFT_STORAGE_URL':'foo',
'SWIFT_AUTH_TOKEN':'bar'}
with mock.patch('rasterio.session.SwiftSession.__init__', new=mock_init):
swift_session = SwiftSession(
swift_auth_v1_url='foo',
swift_user='bar',
swift_key='key')
assert swift_session._creds
assert swift_session.get_credential_options()['SWIFT_STORAGE_URL'] == 'foo'
assert swift_session.get_credential_options()['SWIFT_AUTH_TOKEN'] == 'bar'
def test_session_factory_swift_kwargs():
"""Get an SwiftSession for /vsiswift/bucket/key with keywords"""
sesh = Session.from_path("/vsiswift/lol/wut", swift_storage_url='foo', swift_auth_token='bar')
assert isinstance(sesh, DummySession)
def test_session_aws_or_dummy_aws():
"""Get an AWSSession when boto3 is available"""
boto3 = pytest.importorskip("boto3")
assert isinstance(Session.aws_or_dummy(), AWSSession)
def test_session_aws_or_dummy_dummy(monkeypatch):
"""Get a DummySession when boto3 is not available"""
boto3 = pytest.importorskip("boto3")
with monkeypatch.context() as mpctx:
mpctx.setattr("rasterio.session.boto3", None)
assert isinstance(Session.aws_or_dummy(), DummySession)
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for tf_agents.bandits.metrics.tf_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.metrics import tf_metrics
from tf_agents.bandits.policies import constraints
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
def compute_optimal_reward(unused_observation):
return tf.constant(10.0)
def compute_optimal_action(unused_observation):
return tf.constant(5, dtype=tf.int32)
class SimpleThresholdConstraint(constraints.BaseConstraint):
def __init__(self, time_step_spec, action_spec, batch_size, threshold,
name=None):
self.batch_size = batch_size
self.threshold = threshold
super(SimpleThresholdConstraint, self).__init__(
time_step_spec, action_spec, name='SimpleThresholdConstraint')
def __call__(self, observation, actions=None):
"""Returns the probability of input actions being feasible."""
if actions is None:
actions = tf.range(
self._action_spec.minimum, self._action_spec.maximum + 1)
actions = tf.reshape(actions, [1, -1])
actions = tf.tile(actions, [self.batch_size, 1])
feasibility_prob = tf.cast(tf.greater(actions, self.threshold), tf.float32)
return feasibility_prob
class TFMetricsTest(parameterized.TestCase, tf.test.TestCase):
def _create_trajectory(self):
return trajectory.Trajectory(observation=(),
action=(tf.constant(1)),
policy_info=(),
reward=tf.constant(1.0),
discount=tf.constant(1.0),
step_type=ts.StepType.FIRST,
next_step_type=ts.StepType.LAST)
def _create_batched_trajectory(self, batch_size):
return trajectory.Trajectory(observation=(),
action=tf.range(batch_size, dtype=tf.int32),
policy_info=(),
reward=tf.range(batch_size, dtype=tf.float32),
discount=tf.ones(batch_size),
step_type=ts.StepType.FIRST,
next_step_type=ts.StepType.LAST)
def _create_test_trajectory(self, batch_size):
num_actions = tf.cast(batch_size / 2, dtype=tf.int32)
action_tensor = tf.concat([
tf.range(num_actions, dtype=tf.int32),
tf.range(num_actions, dtype=tf.int32)], axis=-1)
return trajectory.Trajectory(observation=tf.ones(batch_size),
action=action_tensor,
policy_info=(),
reward=tf.range(batch_size, dtype=tf.float32),
discount=tf.ones(batch_size),
step_type=ts.StepType.FIRST,
next_step_type=ts.StepType.LAST)
def _create_batched_trajectory_with_reward_dict(self, batch_size):
reward_dict = {
'reward': tf.range(batch_size, dtype=tf.float32),
'constraint': tf.range(batch_size, dtype=tf.float32),
}
return trajectory.Trajectory(observation=(),
action=tf.range(batch_size, dtype=tf.int32),
policy_info=(),
reward=reward_dict,
discount=tf.ones(batch_size),
step_type=ts.StepType.FIRST,
next_step_type=ts.StepType.LAST)
@parameterized.named_parameters(
('RegretMetricName', tf_metrics.RegretMetric, compute_optimal_reward,
'RegretMetric'),
('SuboptimalArmsMetricName', tf_metrics.SuboptimalArmsMetric,
compute_optimal_action, 'SuboptimalArmsMetric')
)
def testName(self, metric_class, fn, expected_name):
metric = metric_class(fn)
self.assertEqual(expected_name, metric.name)
@parameterized.named_parameters([
('TestRegret', tf_metrics.RegretMetric,
compute_optimal_reward, 9),
('TestSuboptimalArms',
tf_metrics.SuboptimalArmsMetric, compute_optimal_action, 1),
])
def testRegretMetric(self, metric_class, fn, expected_result):
traj = self._create_trajectory()
metric = metric_class(fn)
self.evaluate(metric.init_variables())
traj_out = metric(traj)
deps = tf.nest.flatten(traj_out)
with tf.control_dependencies(deps):
result = metric.result()
result_ = self.evaluate(result)
self.assertEqual(result_, expected_result)
@parameterized.named_parameters([
('TestRegretBatched', tf_metrics.RegretMetric,
compute_optimal_reward, 8, 6.5),
('TestSuboptimalArmsBatched',
tf_metrics.SuboptimalArmsMetric, compute_optimal_action, 8, 7.0 / 8.0),
])
def testRegretMetricBatched(self, metric_class, fn, batch_size,
expected_result):
traj = self._create_batched_trajectory(batch_size)
metric = metric_class(fn)
self.evaluate(metric.init_variables())
traj_out = metric(traj)
deps = tf.nest.flatten(traj_out)
with tf.control_dependencies(deps):
result = metric.result()
result_ = self.evaluate(result)
self.assertEqual(result_, expected_result)
def testRegretMetricWithRewardDict(
self, metric_class=tf_metrics.RegretMetric, fn=compute_optimal_reward,
batch_size=8, expected_result=6.5):
traj = self._create_batched_trajectory_with_reward_dict(batch_size)
metric = metric_class(fn)
self.evaluate(metric.init_variables())
traj_out = metric(traj)
deps = tf.nest.flatten(traj_out)
with tf.control_dependencies(deps):
result = metric.result()
result_ = self.evaluate(result)
self.assertEqual(result_, expected_result)
@parameterized.named_parameters([
('TestConstraintViolationTh1', 8, 1, 0.5),
('TestConstraintViolationTh2', 8, 2, 0.75),
])
def testConstraintViolationMetric(
self, batch_size, threshold, expected_result):
traj = self._create_test_trajectory(batch_size)
    num_actions = batch_size // 2
obs_spec = tensor_spec.TensorSpec([], tf.float32)
time_step_spec = ts.time_step_spec(obs_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions-1)
stc = SimpleThresholdConstraint(
time_step_spec, action_spec, batch_size=batch_size,
threshold=threshold)
metric = tf_metrics.ConstraintViolationsMetric(constraint=stc)
self.evaluate(metric.init_variables())
traj_out = metric(traj)
deps = tf.nest.flatten(traj_out)
with tf.control_dependencies(deps):
result = metric.result()
result_ = self.evaluate(result)
self.assertEqual(result_, expected_result)
def testDistanceFromGreedyMetric(self):
batch_size = 11
num_actions = 12
traj = self._create_batched_trajectory(batch_size)
def estimated_reward_fn(unused_observation):
return tf.stack([tf.range(num_actions, dtype=tf.float32)] * batch_size)
metric = tf_metrics.DistanceFromGreedyMetric(estimated_reward_fn)
self.evaluate(metric.init_variables())
traj_out = metric(traj)
deps = tf.nest.flatten(traj_out)
with tf.control_dependencies(deps):
result = metric.result()
result_ = self.evaluate(result)
self.assertEqual(result_, 6)
if __name__ == '__main__':
tf.test.main()
|
# -*- coding: utf-8 -*-
import uuid
import domain_model
from domain_model import DomainModel
from domain_model import DomainModelWithUuid
from misc_test_utils import copy_dict_with_key_removed
from misc_test_utils import domain_model_validate_internals_test
from misc_test_utils import domain_model_validation_test
from misc_test_utils import misc_test_utils
import pytest
def test_copy_dict_with_key_removed__creates_copy_of_dict():
test_dict = {"key1": 1, "key2": 2}
actual = copy_dict_with_key_removed(test_dict)
assert actual == test_dict
assert actual is not test_dict
def test_copy_dict_with_key_removed__removes_key_if_specified():
test_dict = {"key1": 1, "key2": 2}
actual = copy_dict_with_key_removed(test_dict, key_to_remove="key2")
assert actual == {"key1": 1}
def test_domain_model_validation_test__creates_DomainModel_object(mocker):
mocked_init = mocker.patch.object(
domain_model.DomainModel, "__init__", autospec=True, return_value=None
)
domain_model_validation_test(DomainModel)
mocked_init.assert_called_once()
def test_domain_model_validation_test__calls_validate_with_no_expected_error(mocker):
mocked_validate = mocker.patch.object(
domain_model.DomainModelWithUuid, "validate", autospec=True
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
domain_model_validation_test(DomainModelWithUuid, "uuid", test_uuid)
mocked_validate.assert_called_once()
def test_domain_model_validation_test__calls_validate_with_no_expected_error__and_uuid_passed_as_additional_kwarg(
mocker,
):
mocked_validate = mocker.patch.object(
domain_model.DomainModelWithUuid, "validate", autospec=True
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
domain_model_validation_test(
DomainModelWithUuid, additional_kwargs={"uuid": test_uuid}
)
mocked_validate.assert_called_once()
def test_domain_model_validation_test__catches_error(mocker):
spied_raises = mocker.spy(misc_test_utils.pytest, "raises")
expected_error = ValueError()
mocker.patch.object(
domain_model.DomainModelWithUuid,
"validate",
autospec=True,
side_effect=expected_error,
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
domain_model_validation_test(
DomainModelWithUuid, "uuid", test_uuid, expected_error=ValueError
)
spied_raises.assert_called_once()
def test_domain_model_validation_test__catches_error_with_text(mocker):
spied_raises = mocker.spy(misc_test_utils.pytest, "raises")
expected_text = "test"
expected_error = ValueError(expected_text)
mocker.patch.object(
domain_model.DomainModelWithUuid,
"validate",
autospec=True,
side_effect=expected_error,
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
domain_model_validation_test(
DomainModelWithUuid,
"uuid",
test_uuid,
expected_error=ValueError,
expected_texts_in_error=expected_text,
)
spied_raises.assert_called_once()
def test_domain_model_validation_test__raises_assertion_error_if_single_expected_text_is_not_in_expected_error(
mocker,
):
expected_text = "test"
expected_error = ValueError()
mocker.patch.object(
domain_model.DomainModelWithUuid,
"validate",
autospec=True,
side_effect=expected_error,
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
with pytest.raises(AssertionError):
domain_model_validation_test(
DomainModelWithUuid,
"uuid",
test_uuid,
expected_error=ValueError,
expected_texts_in_error=expected_text,
)
def test_domain_model_validation_test__raises_assertion_error_if_one_of_multiple_expected_texts_not_in_expected_error(
mocker,
):
expected_texts = ["test1", "test2"]
expected_error = ValueError("test1")
mocker.patch.object(
domain_model.DomainModelWithUuid,
"validate",
autospec=True,
side_effect=expected_error,
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
with pytest.raises(AssertionError):
domain_model_validation_test(
DomainModelWithUuid,
"uuid",
test_uuid,
expected_error=ValueError,
expected_texts_in_error=expected_texts,
)
def test_domain_model_validate_internals_test__creates_DomainModel_object(mocker):
mocked_init = mocker.patch.object(
domain_model.DomainModel, "__init__", autospec=True, return_value=None
)
domain_model_validate_internals_test(DomainModel)
mocked_init.assert_called_once()
def test_domain_model_validate_internals_test__calls_validate_with_no_expected_error(
mocker,
):
mocked_validate = mocker.patch.object(
domain_model.DomainModelWithUuid, "validate_internals", autospec=True
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
domain_model_validate_internals_test(DomainModelWithUuid, "uuid", test_uuid)
mocked_validate.assert_called_once()
def test_domain_model_validate_internals_test__calls_validate_with_no_expected_error__and_uuid_passed_as_additional_kwarg(
mocker,
):
mocked_validate = mocker.patch.object(
domain_model.DomainModelWithUuid, "validate_internals", autospec=True
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
domain_model_validate_internals_test(
DomainModelWithUuid, additional_kwargs={"uuid": test_uuid}
)
mocked_validate.assert_called_once()
def test_domain_model_validate_internals_test__catches_error(mocker):
spied_raises = mocker.spy(misc_test_utils.pytest, "raises")
expected_error = ValueError()
mocker.patch.object(
domain_model.DomainModelWithUuid,
"validate_internals",
autospec=True,
side_effect=expected_error,
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
domain_model_validate_internals_test(
DomainModelWithUuid, "uuid", test_uuid, expected_error=ValueError
)
spied_raises.assert_called_once()
def test_domain_model_validate_internals_test__catches_error_with_text(mocker):
spied_raises = mocker.spy(misc_test_utils.pytest, "raises")
expected_text = "test"
expected_error = ValueError(expected_text)
mocker.patch.object(
domain_model.DomainModelWithUuid,
"validate_internals",
autospec=True,
side_effect=expected_error,
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
domain_model_validate_internals_test(
DomainModelWithUuid,
"uuid",
test_uuid,
expected_error=ValueError,
expected_texts_in_error=expected_text,
)
spied_raises.assert_called_once()
def test_domain_model_validate_internals_test__raises_assertion_error_if_single_expected_text_is_not_in_expected_error(
mocker,
):
expected_text = "test"
expected_error = ValueError()
mocker.patch.object(
domain_model.DomainModelWithUuid,
"validate_internals",
autospec=True,
side_effect=expected_error,
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
with pytest.raises(AssertionError):
domain_model_validate_internals_test(
DomainModelWithUuid,
"uuid",
test_uuid,
expected_error=ValueError,
expected_texts_in_error=expected_text,
)
def test_domain_model_validate_internals_test__raises_assertion_error_if_one_of_multiple_expected_texts_not_in_expected_error(
mocker,
):
expected_texts = ["test1", "test2"]
expected_error = ValueError("test1")
mocker.patch.object(
domain_model.DomainModelWithUuid,
"validate_internals",
autospec=True,
side_effect=expected_error,
)
test_uuid = uuid.UUID("abc8a386-b6e0-47ed-a752-f2721545f3c6")
with pytest.raises(AssertionError):
domain_model_validate_internals_test(
DomainModelWithUuid,
"uuid",
test_uuid,
expected_error=ValueError,
expected_texts_in_error=expected_texts,
)
|
frase = str(input('Type a sentence: ')).strip().upper()
cont = frase.count('A')
pos1 = (frase.find('A')+1)
print('The letter A appears {} times in the sentence.'.format(cont))
print('The first letter A appeared at position {}.'.format(pos1))
print('The last letter A appeared at position {}.'.format(frase.rfind('A')+1))
|
import torch
class OTR():
def __init__(self):
self.name = "name"
class NormalisedRatio(OTR):
def __init__(self):
super(NormalisedRatio, self).__init__()
def __call__(self, y_1, y_0):
ratio = (y_1 - y_0) / y_0
        return torch.sigmoid(ratio)
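# Minimal usage sketch (values illustrative): the transform squashes the
# relative change (y_1 - y_0) / y_0 into (0, 1).
#
#     ntr = NormalisedRatio()
#     ntr(torch.tensor([2.0]), torch.tensor([1.0]))  # sigmoid(1.0) ~= 0.7311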
|
#!/usr/bin/env python
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2016-2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
"""
Script that runs through all the setup and samples.
"""
__author__ = 'VMware, Inc.'
import pyVim.connect
from vmware.vapi.vsphere.client import create_vsphere_client
from samples.vsphere.common import sample_util
from samples.vsphere.vcenter.setup import testbed
from samples.vsphere.vcenter.setup import setup_cli
from samples.vsphere.vcenter.setup.testbed_setup import cleanup as testbed_cleanup
from samples.vsphere.vcenter.setup.testbed_setup import setup as testbed_setup
from samples.vsphere.vcenter.setup.testbed_setup import validate as testbed_validate
from samples.vsphere.vcenter.vm.main import VMSetup
from samples.vsphere.common.ssl_helper import get_unverified_context, \
get_unverified_session
# Parse command line params for setup script
args = setup_cli.build_arg_parser().parse_args()
_testbed = testbed.get()
# If VC/ESX/NFS Server IPs are passed as arguments,
# then override testbed.py values
if args.vcenterserver:
_testbed.config['SERVER'] = args.vcenterserver
if args.vcenterpassword:
_testbed.config['PASSWORD'] = args.vcenterpassword
if args.esxhost1:
_testbed.config['ESX_HOST1'] = args.esxhost1
if args.esxhost2:
_testbed.config['ESX_HOST2'] = args.esxhost2
if args.esxpassword:
_testbed.config['ESX_PASS'] = args.esxpassword
if args.nfsserver:
_testbed.config['NFS_HOST'] = args.nfsserver
print(_testbed.to_config_string())
# Connect to VIM API Endpoint on vCenter system
context = None
if args.skipverification:
context = get_unverified_context()
service_instance = pyVim.connect.SmartConnect(host=_testbed.config['SERVER'],
user=_testbed.config['USERNAME'],
pwd=_testbed.config['PASSWORD'],
sslContext=context)
# Connect to vAPI Endpoint on vCenter system
session = get_unverified_session() if args.skipverification else None
client = create_vsphere_client(server=_testbed.config['SERVER'],
username=_testbed.config['USERNAME'],
password=_testbed.config['PASSWORD'],
session=session)
context = sample_util.Context(_testbed, service_instance, client)
context.option['DO_TESTBED_SETUP'] = args.testbed_setup
context.option['DO_TESTBED_VALIDATE'] = args.testbed_validate
context.option['DO_TESTBED_CLEANUP'] = args.testbed_cleanup
context.option['DO_TESTBED_ISO_CLEANUP'] = args.iso_cleanup
context.option['DO_SAMPLES_SETUP'] = args.samples_setup
context.option['DO_SAMPLES'] = args.samples
context.option['DO_SAMPLES_INCREMENTAL'] = args.samples_incremental
context.option['DO_SAMPLES_CLEANUP'] = args.samples_cleanup
context.option['SKIP_VERIFICATION'] = args.skipverification
print(context.to_option_string())
###############################################################################
# Testbed Setup
###############################################################################
vm_setup = VMSetup(context)
# Setup testbed
if context.option['DO_TESTBED_SETUP']:
# Clean up in case of past failures
vm_setup.cleanup()
testbed_cleanup(context)
testbed_setup(context)
# Validate testbed
if (context.option['DO_TESTBED_SETUP'] or
context.option['DO_TESTBED_VALIDATE'] or
context.option['DO_SAMPLES_SETUP'] or
context.option['DO_SAMPLES']):
if not testbed_validate(context):
exit(0)
print(context.testbed.to_entities_string())
###############################################################################
# Sample Run and Cleanup
###############################################################################
# Run Sample
if context.option['DO_SAMPLES']:
vm_setup.setup(context)
vm_setup.run()
# Cleanup after sample run
if context.option['DO_SAMPLES_CLEANUP']:
vm_setup.cleanup()
###############################################################################
# Testbed Cleanup
###############################################################################
# Teardown testbed.
if context.option['DO_TESTBED_CLEANUP']:
vm_setup.cleanup()
testbed_cleanup(context)
|
# cluster from AttentionXML
import os
import tqdm
import joblib
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix
from sklearn.preprocessing import normalize
from sklearn.datasets import load_svmlight_file
from sklearn.preprocessing import MultiLabelBinarizer
def get_sparse_feature(feature_file, label_file):
sparse_x, _ = load_svmlight_file(feature_file, multilabel=True)
sparse_labels = [i.replace('\n', '').split() for i in open(label_file)]
return normalize(sparse_x), np.array(sparse_labels)
def build_tree_by_level(sparse_data_x, sparse_data_y, eps: float, max_leaf: int, levels: list, groups_path):
print('Clustering')
sparse_x, sparse_labels = get_sparse_feature(sparse_data_x, sparse_data_y)
mlb = MultiLabelBinarizer()
sparse_y = mlb.fit_transform(sparse_labels)
joblib.dump(mlb, groups_path+'mlb')
print('Getting Labels Feature')
labels_f = normalize(csr_matrix(sparse_y.T) @ csc_matrix(sparse_x))
print(F'Start Clustering {levels}')
levels, q = [2**x for x in levels], None
for i in range(len(levels)-1, -1, -1):
if os.path.exists(F'{groups_path}-Level-{i}.npy'):
print(F'{groups_path}-Level-{i}.npy')
labels_list = np.load(F'{groups_path}-Level-{i}.npy', allow_pickle=True)
q = [(labels_i, labels_f[labels_i]) for labels_i in labels_list]
break
if q is None:
q = [(np.arange(labels_f.shape[0]), labels_f)]
while q:
labels_list = np.asarray([x[0] for x in q])
assert sum(len(labels) for labels in labels_list) == labels_f.shape[0]
if len(labels_list) in levels:
level = levels.index(len(labels_list))
print(F'Finish Clustering Level-{level}')
np.save(F'{groups_path}-Level-{level}.npy', np.asarray(labels_list))
else:
print(F'Finish Clustering {len(labels_list)}')
next_q = []
for node_i, node_f in q:
if len(node_i) > max_leaf:
next_q += list(split_node(node_i, node_f, eps))
else:
np.save(F'{groups_path}-last.npy', np.asarray(labels_list))
q = next_q
print('Finish Clustering')
return mlb
def split_node(labels_i: np.ndarray, labels_f: csr_matrix, eps: float):
n = len(labels_i)
c1, c2 = np.random.choice(np.arange(n), 2, replace=False)
centers, old_dis, new_dis = labels_f[[c1, c2]].toarray(), -10000.0, -1.0
l_labels_i, r_labels_i = None, None
while new_dis - old_dis >= eps:
dis = labels_f @ centers.T # N, 2
partition = np.argsort(dis[:, 1] - dis[:, 0])
l_labels_i, r_labels_i = partition[:n//2], partition[n//2:]
old_dis, new_dis = new_dis, (dis[l_labels_i, 0].sum() + dis[r_labels_i, 1].sum()) / n
centers = normalize(np.asarray([np.squeeze(np.asarray(labels_f[l_labels_i].sum(axis=0))),
np.squeeze(np.asarray(labels_f[r_labels_i].sum(axis=0)))]))
return (labels_i[l_labels_i], labels_f[l_labels_i]), (labels_i[r_labels_i], labels_f[r_labels_i])
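# Sketch (assumption: tiny random input) of what split_node computes: a single
# balanced 2-means split over L2-normalized label features. Defined but never
# called, purely for illustration.
def _demo_split_node():
    rng = np.random.RandomState(0)
    feats = normalize(csr_matrix(rng.rand(6, 4)))      # 6 labels, 4 features
    left, right = split_node(np.arange(6), feats, 1e-4)
    # each side receives n // 2 = 3 label indices plus their feature rows
    print(left[0], right[0])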
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, required=False, default='eurlex4k')
parser.add_argument('--tree', action='store_true')
parser.add_argument('--id', type=str, required=False, default='0')
args = parser.parse_args()
if __name__ == '__main__':
dataset = args.dataset
if dataset == '670k':
mlb = build_tree_by_level('./data/Amazon-670K/train_v1.txt',
'./data/Amazon-670K/train_labels.txt',
1e-4, 100, [], './data/Amazon-670K/label_group'+args.id)
groups = np.load(f'./data/Amazon-670K/label_group{args.id}-last.npy', allow_pickle=True)
new_group = []
for group in groups:
new_group.append([mlb.classes_[i] for i in group])
np.save(f'./data/Amazon-670K/label_group{args.id}.npy', np.array(new_group))
elif dataset == '500k':
mlb = build_tree_by_level('./data/Wiki-500K/train.txt',
'./data/Wiki-500K/train_labels.txt',
1e-4, 8, [11, 14, 17], './data/Wiki-500K/groups')
        groups = np.load(f'./data/Wiki-500K/groups-last.npy', allow_pickle=True)
new_group = []
for group in groups:
new_group.append([mlb.classes_[i] for i in group])
np.save(f'./data/Wiki-500K/label_group{args.id}.npy', np.array(new_group))
|
"""LogTools Log viewer application
By BigBird who like to Code
https://github.com/bigbirdcode/logtools
"""
import argparse
import io
import os
import pathlib
import sys
from textwrap import dedent
from typing import Any, NoReturn
import wx
# Python has 2 types of calls:
# - direct call, like: python main.py
# - package call, like: python -m cliptools
# Below quite ugly code will handle these
if __name__ == "__main__" and __package__ is None:
# This was a direct call
# package information is missing, and relative imports will fail
# this hack imitates the package behavior and add outer dir to the path
__package__ = "logtools" # pylint: disable=redefined-builtin
logtools_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if logtools_dir not in sys.path:
sys.path.insert(0, logtools_dir)
del logtools_dir # clean up global name space
# Now relative import is ok
# pylint: disable=wrong-import-position
from logtools.gui_main_frame import MainFrame
from logtools.log_data import LogData
from logtools.log_patterns import LogPatterns
DEFAULT_PATTERNS_YML = "logtools_default_patterns.yml"
FOLDER_HELP = """
If pattern_file is not provided then logtools will try to read the file
'logtools_default_patterns.yml' from the following possible locations:
1. actual folder 2. user's home folder 3. user's Documents folder
"""
def errormessage(msg: str) -> NoReturn:
"""
    Print an error message either to the console or to a dialog box, then exit.
    When there is a problem with the argparse parameters, or a provided file
    does not exist, we can still show it to the user regardless of how the app
    was started.
"""
if sys.executable.endswith("pythonw.exe"):
app = wx.App()
dlg = wx.MessageDialog(None, msg, "LogTools Error", wx.OK | wx.ICON_ERROR)
dlg.Center()
dlg.ShowModal()
dlg.Destroy()
app.Destroy()
else:
print(msg)
sys.exit(1)
def parse_arguments() -> Any:
"""
    Parse command line arguments and check for errors
    Notes:
        argparse does not handle non-CLI usage well;
        the newly added exit_on_error=False parameter is buggy,
        and it can also throw various types of exceptions,
        so the only option is to redirect output and capture SystemExit.
        Ugly as hell, sorry.
"""
parser = argparse.ArgumentParser(
description="Log file viewer.",
epilog=FOLDER_HELP,
)
parser.add_argument("log_files", type=pathlib.Path, nargs="+", help="log files to display")
parser.add_argument(
"-p", "--pattern_file", type=pathlib.Path, help="pattern file in strict YAML format"
)
args = None
was_error = False
bkp_stdout, bkp_stderr = sys.stdout, sys.stderr
output = io.StringIO()
sys.stdout, sys.stderr = output, output
try:
args = parser.parse_args()
except (SystemExit, Exception): # pylint: disable=broad-except
was_error = True
finally:
sys.stdout, sys.stderr = bkp_stdout, bkp_stderr
if was_error:
errormessage(output.getvalue())
return args
def check_logfiles(log_files: list[pathlib.Path]) -> None:
"""
Check the log files
"""
for log_file in log_files:
if not log_file.is_file():
errormessage(f"{log_file} log file not found!")
def read_patterns(args: Any) -> LogPatterns:
"""
Locate and check the pattern file
"""
pattern_file = pathlib.Path(DEFAULT_PATTERNS_YML) # initial value, it will be overwritten
if args.pattern_file:
pattern_file = args.pattern_file
if not pattern_file.is_file():
errormessage(f"{pattern_file} pattern file not found!")
else:
default_pattern_file = pattern_file
if default_pattern_file.is_file():
pattern_file = default_pattern_file
elif (pathlib.Path.home() / default_pattern_file).is_file():
pattern_file = pathlib.Path.home() / default_pattern_file
elif (pathlib.Path.home() / "Documents" / default_pattern_file).is_file():
pattern_file = pathlib.Path.home() / "Documents" / default_pattern_file
else:
errormessage("Pattern file not found!\n" + dedent(FOLDER_HELP))
log_patterns = LogPatterns(pattern_file)
return log_patterns
def main() -> None:
"""
Main function, starting point as usual
"""
args = parse_arguments()
check_logfiles(args.log_files)
log_patterns = read_patterns(args)
log_data = LogData(log_patterns, args.log_files)
# Making GUI
app = wx.App(0)
frame = MainFrame(None, log_data)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
if __name__ == "__main__":
main()
|
import os
import shutil
from pathlib import Path
from pprint import pprint
import csv
import time
from PIL import Image
from datetime import datetime
import re
import numpy as np
from typing import List
import cv2
import torch
from pytube import YouTube
from facenet_pytorch import MTCNN
from utils import (
variance_of_laplacian,
load_images2,
clean_string,
read_dict_from_csv,
save_dict_to_csv2,
)
# frame extraction: save roughly number_of_images_to_log evenly spaced frames
def video_to_frames(
input_loc: str, output_loc: str, number_of_images_to_log=1000
) -> List[str]:
"""
https://stackoverflow.com/questions/33311153/python-extracting-and-saving-video-frames
Function to extract frames from input video file
and save them as separate frames in an output directory.
Args:
input_loc: Input video file.
output_loc: Output directory to save the frames.
    Returns:
        List of file paths of the saved frames.
"""
try:
os.mkdir(output_loc)
except OSError:
pass
# Log the time
time_start = time.time()
# Start capturing the feed
cap = cv2.VideoCapture(input_loc)
# Find the number of frames
video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
print("Number of frames: ", video_length)
count = 0
print("Converting video..\n")
    # calculate how many frames we will save
log_interval = video_length // number_of_images_to_log
if log_interval < 1:
log_interval = 1
print("log_interval ", log_interval)
# Start converting the video
images_paths = []
while cap.isOpened():
# Extract the frame
ret, frame = cap.read()
        if not ret:
            # end of stream or decode error: stop instead of spinning forever
            cap.release()
            break
if count % log_interval == 0:
# Write the results back to output location.
image_path = output_loc + "/%#05d.jpg" % (count + 1)
cv2.imwrite(image_path, frame)
images_paths.append(image_path)
count = count + 1
# If there are no more frames left
if count > (video_length - 1):
# Log the time again
time_end = time.time()
# Release the feed
cap.release()
# Print stats
print("Done extracting frames.\n%d frames extracted" % count)
print("It took %d seconds forconversion." % (time_end - time_start))
break
return images_paths
# remove blurred images
def remove_blure_images(dir_path):
images_paths = load_images2(dir_path)
for image_path in images_paths:
image = cv2.imread(image_path)
if check_blure_image(image):
print(image_path)
def check_blure_image(image: np.ndarray) -> bool:
blure = False
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fm = variance_of_laplacian(gray)
if fm < 1000:
blure = True
return blure
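# Note: variance_of_laplacian is the classic focus measure (variance of the
# image's Laplacian response); the fixed cutoff of 1000 used above is an
# empirical guess and will likely need tuning per camera and resolution.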
# find all faces in images
def find_faces(
dir_path: str,
face_dir_path: str,
device: torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu"),
) -> List[str]:
mtcnn = MTCNN(keep_all=True, device=device, thresholds=[0.8, 0.85, 0.85])
images_paths = load_images2(dir_path)
face_dir_path = Path(face_dir_path)
face_dir_path.mkdir(exist_ok=True)
faces_images_paths = []
for image_path in images_paths:
image_name = Path(image_path).stem
image = Image.open(image_path)
bboxes, _ = mtcnn.detect(image)
if isinstance(bboxes, np.ndarray):
for bbox_idx, bbox in enumerate(bboxes):
face_bbox = image.crop(bbox)
bbox_str = ",".join(["{:.2f}".format(x) for x in bbox])
face_bbox_path = face_dir_path.joinpath(
f"image_name_{image_name}_bbox_idx_{bbox_idx}_bboxcord_{bbox_str}.jpg"
)
face_bbox.save(face_bbox_path)
faces_images_paths.append(face_bbox_path)
                # break: we only want the biggest face in the image
break
return faces_images_paths
def download_yt_video(yt_video_url: str, yt_video_save_dir: str) -> str:
yt_video = YouTube(yt_video_url)
max_resolution_tag = 0
max_resolution = 0
yt_video_filename = clean_and_define_video_name(
yt_video.streams[0].default_filename
)
for stream in yt_video.streams:
if stream.resolution:
resolution = int(stream.resolution[:-1])
tag = stream.itag
if max_resolution < resolution:
max_resolution = resolution
max_resolution_tag = tag
yt_video.streams.get_by_itag(max_resolution_tag).download(
yt_video_save_dir, yt_video_filename
)
return yt_video_filename
def clean_and_define_video_name(start_video_name: str) -> str:
start_video_name = clean_string(start_video_name)
start_video_name += ".mp4"
now = datetime.now()
date_time = now.strftime("%d,%m,%Y,,%H,%M,%S")
start_video_name = f"date_time_{date_time}_title_{start_video_name}"
return start_video_name
def full_pipe_line(data_dict: dict) -> None:
yt_video_url, class_name = data_dict["url"], data_dict["face"]
dataset_dir_path = "face_dataset"
frames_base_dir = "frames"
yt_video_save_dir = "."
# download video
yt_video_filename = download_yt_video(
yt_video_url=yt_video_url,
yt_video_save_dir=yt_video_save_dir,
)
# frames
frame_dir = Path(frames_base_dir).joinpath(Path(yt_video_filename).stem)
Path(frame_dir).mkdir(exist_ok=True, parents=True)
frames_paths = video_to_frames(yt_video_filename, str(frame_dir))
# rm video
    try:
        Path(yt_video_filename).unlink()
except FileNotFoundError:
pass
# faces
face_dir = Path(dataset_dir_path).joinpath(class_name)
face_dir.mkdir(exist_ok=True, parents=True)
faces_paths = find_faces(frame_dir, face_dir)
# rm frames
shutil.rmtree(frame_dir)
if __name__ == "__main__":
data_dicts = read_dict_from_csv("data.csv")
for data_dict in data_dicts:
full_pipe_line(data_dict)
|
from rest_framework import status
from lego.apps.notifications import constants
from lego.apps.notifications.models import Announcement
from lego.apps.users.models import AbakusGroup, User
from lego.utils.test_utils import BaseAPITestCase
class NotificationSettingsViewSetTestCase(BaseAPITestCase):
fixtures = [
"test_abakus_groups.yaml",
"test_users.yaml",
"test_notification_settings.yaml",
]
def setUp(self):
self.url = "/api/v1/notification-settings/"
self.user = User.objects.get(pk=2)
def test_no_auth(self):
response = self.client.get(self.url)
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.post(
self.url,
{"notificationType": "weekly_mail", "enabled": True, "channels": ["email"]},
)
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_list(self):
        self.client.force_authenticate(self.user)
        response = self.client.get(self.url)
        self.assertEquals(response.status_code, status.HTTP_200_OK)
def test_alternatives(self):
self.client.force_authenticate(self.user)
response = self.client.get(f"{self.url}alternatives/")
self.assertEquals(
response.json(),
{
"notificationTypes": constants.NOTIFICATION_TYPES,
"channels": constants.CHANNELS,
},
)
def test_change_setting(self):
self.client.force_authenticate(self.user)
response = self.client.post(
self.url, {"notificationType": "weekly_mail", "enabled": True}
)
self.assertEquals(
response.json(),
{
"notificationType": "weekly_mail",
"enabled": True,
"channels": ["email", "push"],
},
)
def test_change_setting_defaults(self):
"""Make sure a new setting is created with correct defaults"""
self.client.force_authenticate(self.user)
response = self.client.post(
self.url, {"notificationType": constants.MEETING_INVITE}
)
self.assertEquals(
response.json(),
{
"notificationType": constants.MEETING_INVITE,
"enabled": True,
"channels": constants.CHANNELS,
},
)
class AnnouncementViewSetTestCase(BaseAPITestCase):
fixtures = [
"test_abakus_groups.yaml",
"test_users.yaml",
"test_events.yaml",
"test_companies.yaml",
"test_announcements.yaml",
]
def setUp(self):
self.url = "/api/v1/announcements/"
admin_group = AbakusGroup.objects.get(name="Webkom")
self.authorized_user = User.objects.get(pk=9)
self.authorized_user_2 = User.objects.get(pk=3)
admin_group.add_user(self.authorized_user)
admin_group.add_user(self.authorized_user_2)
self.unauthorized_user = User.objects.get(pk=1)
self.unsent_announcement = Announcement.objects.get(pk=5)
self.unsent_not_own_announcement = Announcement.objects.get(pk=4)
def test_unauthorized_create(self):
"""
An unauthorized user should not be able to create an Announcement
"""
self.client.force_authenticate(self.unauthorized_user)
response = self.client.post(
self.url, {"message": "test_message", "groups": [2], "events": [1]}
)
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_authorized_create(self):
"""
An authorized user should be able to create an announcement
"""
self.client.force_authenticate(self.authorized_user)
message = "test message"
response = self.client.post(
self.url,
{"message": message, "groups": [2], "events": [1], "fromGroup": 11},
)
self.assertEquals(response.status_code, status.HTTP_201_CREATED)
self.assertEquals(response.data["from_group"]["id"], 11)
self.assertEquals(response.data["message"], message)
self.assertEquals(len(response.data["groups"]), 1)
self.assertEquals(response.data["groups"][0]["id"], 2)
self.assertEquals(len(response.data["events"]), 1)
self.assertEquals(response.data["events"][0]["id"], 1)
def test_authorized_create_from_nonexistent_group(self):
"""
An authorized user should not be able to create an announcement with sender as
nonexisting group
"""
self.client.force_authenticate(self.authorized_user)
response = self.client.post(
self.url,
{"message": "test_message", "groups": [2], "events": [1], "fromGroup": 29},
)
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_authorized_create_invalid_recipient_groups(self):
"""
An authorized user should not be able to create an announcement with recipient
as invalid group
"""
self.client.force_authenticate(self.authorized_user)
response = self.client.post(
self.url,
{"message": "test_message", "groups": [28], "events": [3], "fromGroup": 11},
)
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_authorized_patch(self):
"""
It is not possible to patch an announcement
"""
self.client.force_authenticate(self.authorized_user)
response = self.client.patch(
self.url,
{
"id": self.unsent_announcement.id,
"message": "test_message",
"groups": [3],
"events": [1],
},
)
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_authorized_list_own(self):
"""
An authorized user should be able to list announcements created by self
"""
self.client.force_authenticate(self.authorized_user_2)
response = self.client.get(self.url)
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data["results"]), 1)
self.assertEquals(response.data["results"][0]["id"], 6)
def test_authorized_detail_not_own(self):
"""
An authorized user should not be able to list details about an announcement
created by another user.
"""
self.client.force_authenticate(self.authorized_user)
response = self.client.get(f"{self.url}1/")
self.assertEquals(response.status_code, status.HTTP_404_NOT_FOUND)
def test_authorized_detail_own(self):
"""
An authorized user should be able to list details about an announcement
created by self.
"""
self.client.force_authenticate(self.authorized_user)
response = self.client.get(f"{self.url}5/")
self.assertEquals(response.status_code, status.HTTP_200_OK)
def test_unauthorized_list(self):
"""
An unauthorized user should not be able to list announcements
"""
self.client.force_authenticate(self.unauthorized_user)
response = self.client.get(self.url)
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_send_own_announcement_authorized(self):
"""
An authorized user can send an Announcement created by self once
"""
self.client.force_authenticate(self.authorized_user)
response = self.client.post(f"{self.url}{self.unsent_announcement.id}/send/")
self.assertEquals(response.status_code, status.HTTP_202_ACCEPTED)
self.assertTrue(Announcement.objects.get(pk=self.unsent_announcement.id).sent)
def test_send_not_own_announcement_authorized(self):
"""
An authorized user can not send an Announcement created by another user
"""
self.client.force_authenticate(self.authorized_user)
response = self.client.post(
f"{self.url}{self.unsent_not_own_announcement.id}/send/"
)
self.assertEquals(response.status_code, status.HTTP_404_NOT_FOUND)
def test_send_announcement_unauthorized(self):
"""
An unauthorized user can not send an Announcement
"""
self.client.force_authenticate(self.unauthorized_user)
response = self.client.post(f"{self.url}{self.unsent_announcement.id}/send/")
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
|
# This source code is licensed under the license found in the
# LICENSE file in the {root}/privgem/tabular/ppgm directory of this source tree.
#
# This code has been modified from the version at
# https://github.com/BorealisAI/private-data-generation/tree/master/models/Private_PGM/mbi
# Modifications copyright (C) 2021-present, Kasra Hosseini.
#
# Copy of the header from https://github.com/BorealisAI/private-data-generation/tree/master/models/Private_PGM/mbi
#
# This source code is licensed under the license found in the
# LICENSE file in the {root}/models/Private_PGM/ directory of this source tree.
from .domain import Domain
from .dataset import Dataset
from .factor import Factor
from .graphical_model import GraphicalModel
from .inference import FactoredInference
|
# Demonstrate the use of acceleration test
import sys
import os
import numpy as np
from fastsim import simdrive, vehicle, cycle
def create_accel_cyc(length_in_seconds=300, spd_mph=89.48, grade=0.0, hz=10):
"""
Create a synthetic Drive Cycle for acceleration targeting.
    Defaults to a 300 second acceleration cycle. Length should be adjusted based on target acceleration time
    and initial vehicle acceleration time, so that time isn't wasted on cycles that are needlessly long.
spd_mph @ 89.48 FASTSim XL version mph default speed for acceleration cycles
grade @ 0 and hz @ 10 also matches XL version settings
"""
mphPerMps = 2.23694
cycMps = [(1/mphPerMps)*float(spd_mph)]*(length_in_seconds*hz)
cycMps[0] = 0.
cycMps = np.asarray(cycMps)
cycSecs = np.arange(0, length_in_seconds, 1./hz)
cycGrade = np.asarray([float(grade)]*(length_in_seconds*hz))
cycRoadType = np.zeros(length_in_seconds*hz)
cyc = {'cycMps': cycMps, 'cycSecs': cycSecs, 'cycGrade': cycGrade, 'cycRoadType':cycRoadType}
return cyc
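# Worked example of the arithmetic above: spd_mph = 89.48 converts to
# 89.48 / 2.23694 ~= 40.0 m/s, so the default 300 s cycle at 10 Hz yields
# 3000 samples of 40.0 m/s with the first sample forced to 0 m/s as the
# launch point.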
def main():
    # loop over the first 26 vehicles in the default database
    for i in range(1, 27):
veh = vehicle.Vehicle(i)
accel_cyc = cycle.Cycle(std_cyc_name=None,
cyc_dict=create_accel_cyc())
accel_out = simdrive.SimAccelTest(cyc=accel_cyc, veh=veh)
accel_out.sim_drive()
        achvd_0_to_acc_speed_secs = simdrive.SimDrivePost(accel_out).get_output()['ZeroToSixtyTime_secs']
        print('vehicle {}: acceleration [s] {:.3f}'.format(i, achvd_0_to_acc_speed_secs))
if __name__=='__main__':
main() |
#!/usr/bin/env python
# Wenchang Yang ([email protected])
# Wed Aug 7 12:49:43 EDT 2019
from .accessor import LinearRegressAccessor
|
# -*- coding: utf-8 -*-
"""
Anki Add-on: HTML Cleaner
Entry point for the add-on into Anki
Please don't edit this if you don't know what you're doing.
Copyright: (c) Glutanimate 2017
License: GNU AGPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
"""
from .html_cleaner import editor, browser
|
import abc
import asyncio
import threading
__all__ = ()
class Wait(abc.ABC):
__slots__ = ()
@abc.abstractmethod
def _make_event(self):
raise NotImplementedError()
@abc.abstractmethod
    def _make(self, manage, event):
raise NotImplementedError()
def __call__(self, manage, event = None):
if not event:
event = self._make_event()
self._make(manage, event)
return event
class Asyncio(Wait):
__slots__ = ()
def _make_event(self):
return asyncio.Event()
def _make(self, manage, event):
coroutine = event.wait()
loop = asyncio.get_event_loop()
task = loop.create_task(coroutine)
callback = lambda task: manage()
task.add_done_callback(callback)
class Threading(Wait):
__slots__ = ()
def _make_event(self):
return threading.Event()
def _make(self, manage, event):
def callback():
event.wait()
manage()
        thread = threading.Thread(target = callback)
thread.start()
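# Minimal usage sketch (assumes nothing beyond this module): the call arms an
# event and schedules `manage` to run once the event is set.
#
#     def manage():
#         print("event fired")
#
#     waiter = Threading()
#     event = waiter(manage)   # spawns a thread blocked on event.wait()
#     event.set()              # unblocks it; the thread then calls manage()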
|
import pytest
from vidispine.errors import NotFound
def test_delete(vidispine, cassette, collection):
vidispine.collection.delete(collection)
assert cassette.all_played
def test_non_existent_collection(vidispine, cassette):
with pytest.raises(NotFound) as err:
vidispine.collection.delete('VX-1000000')
assert cassette.all_played
err.match(r'Not Found: DELETE')
|
import torch
from torch_geometric.utils import contains_isolated_nodes
def test_contains_isolated_nodes():
row = torch.tensor([0, 1, 0])
col = torch.tensor([1, 0, 0])
assert not contains_isolated_nodes(torch.stack([row, col], dim=0))
assert contains_isolated_nodes(torch.stack([row, col], dim=0), num_nodes=3)
row = torch.tensor([0, 1, 2, 0])
col = torch.tensor([1, 0, 2, 0])
assert contains_isolated_nodes(torch.stack([row, col], dim=0))
|
# -*- coding: utf-8 -*-
from twisted.internet import reactor, protocol, ssl
from twisted.mail import imap4
# from twisted.internet import defer
# from twisted.python import log
# log.startLogging(open("/tmp/twisted.log","w"))
# defer.setDebugging(True)
from . import debug #@UnresolvedImport # pylint: disable-msg=F0401
class SimpleIMAP4Client(imap4.IMAP4Client):
greetDeferred = None
def serverGreeting(self, caps):
debug("[SimpleIMAP4Client] serverGreeting: %s" %caps)
self.serverCapabilities = caps
if self.greetDeferred is not None:
self.greetDeferred(self)
class SimpleIMAP4ClientFactory(protocol.ReconnectingClientFactory):
protocol = SimpleIMAP4Client
def __init__(self, e2session, username, factory):
self.maxDelay = 30
self.noisy = True
self.ctx = factory
self.e2session = e2session
self.username = username
def buildProtocol(self, addr):
debug("[SimpleIMAP4ClientFactory] building protocol: %s" %addr)
pr = self.protocol(contextFactory = self.ctx)
pr.factory = self
pr.greetDeferred = self.e2session.onConnect
auth = imap4.CramMD5ClientAuthenticator(self.username)
pr.registerAuthenticator(auth)
return pr
def startedConnecting(self, connector):
debug("[SimpleIMAP4ClientFactory] startedConnecting")
def clientConnectionFailed(self, connector, reason):
# debug("[SimpleIMAP4ClientFactory] clientConnectionFailed: %s" %reason.getErrorMessage())
self.e2session.onConnectionFailed(reason)
protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
def clientConnectionLost(self, connector, reason):
# debug("[SimpleIMAP4ClientFactory] clientConnectionLost: %s" %reason.getErrorMessage())
self.e2session.onConnectionLost(reason)
protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def createFactory(e2session, username, hostname, port):
debug("createFactory: for %s@%s:%s" %(username, hostname, port))
f2 = ssl.ClientContextFactory()
factory = SimpleIMAP4ClientFactory(e2session, username, f2)
if port == 993:
reactor.connectSSL(hostname, port, factory, f2) #@UndefinedVariable # pylint: disable-msg=E1101
else:
reactor.connectTCP(hostname, port, factory) #@UndefinedVariable # pylint: disable-msg=E1101
debug("createFactory: factory started")
return factory |
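# Illustrative usage sketch (hypothetical e2session object, not from the
# original source; it is expected to provide onConnect, onConnectionFailed
# and onConnectionLost callbacks):
#
#   factory = createFactory(e2session, "user", "imap.example.org", 993)
#
# Port 993 connects via SSL; any other port falls back to plain TCP.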
# subsystem mem
# memory layout:
# 0, value0, 0, value1, ..., value(N-1), 0, 0, 0, 0
# init imm
# initialize imm bytes of random-access memory. imm <= 256.
# memory should be cleared before loading.
# clean
# works only after init. cleans the random-access memory area.
# clean fast
# doesn't clear values. works only when the next object does not require a clean memory area.
# @set imm_value ...addresses
# set constant or use command to random-access memory
# imm_value:
# any digits
# set constant
# input
# set input data.
# print
# print data.
# address:
# +address or -address
# add or sub. sign can exist only once and works only with digit values.
# digits
# address
# var_name
# uses pointer variable
# @w_move in_var ...addresses
# set value from a variable to random-access memory. currently addresses accept only variables.
# @w_moveadd in_var ...addresses
# @w_movesub in_var ...addresses
# @w_copy in_var ...addresses
# @w_copyadd in_var ...addresses
# @w_copysub in_var ...addresses
# @r_move address ...out_vars
# move value from random-access memory to variables. address is the same as in @w_move.
# @r_moveadd address ...out_vars
# @r_movesub address ...out_vars
# @r_copy address ...out_vars
# @r_copyadd address ...out_vars
# @r_copysub address ...out_vars
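# example (illustrative, inferred from the grammar documented above; the
# exact invocation syntax in a tobf source file may differ):
#   init 16            initialize 16 cells of random-access memory
#   @set 65 0          store the constant 65 at address 0
#   @set input 1       store one byte of input at address 1
#   @r_move 0 x        move the value at address 0 into variable x
#   clean              clear the memory area again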
from typing import cast, Union, List, Tuple, Dict, Callable
from tobf import Tobf
from base import separate_sign, SubsystemBase
class Subsystem_Memory(SubsystemBase):
"""random-access memory area"""
def mem2_clean(self, fast=False):
# + eos
size = self.mem_size_ + 2
if fast:
return
self._main.put(">" * self.offset())
for i in range(size):
self._main.put("[-]>" * 2)
self._main.put("<" * self.offset(size * 2))
def __init__(self, tobf: Tobf, _name: str, args: List[Union[str, int]], instantiate: Callable[[int, SubsystemBase], int]):
super().__init__(tobf, _name)
self._main = cast(Tobf, self._main)
if len(args) > 0:
_cells = self._main.valueof(args[0])
else:
_cells = 16
self.mem_size_ = _cells
_size = (_cells + 2) * 2
self.resize(_size)
instantiate(self.size(), self)
self.def_const("size", self.mem_size_)
    def array_size(self) -> int:
        return self.mem_size_
    def is_readable_array(self) -> bool:
        return True
    def is_writable_array(self) -> bool:
        return True
    def readable_vars(self) -> List[str]:
        return ["first", "last"]
    def writable_vars(self) -> List[str]:
        return ["first", "last"]
def has_short_move(self, dsts: List[str]) -> bool:
if self.array_size() < 2:
return False
for dst in dsts:
sign, dst = separate_sign(dst)
if not self._main.is_sub(dst):
return False
target = cast(SubsystemBase, self._main.get_instance(dst))
if not target.is_writable_array():
return False
if self.array_size() < 2 or target.array_size() < 2:
return False
for i in range(1, min(self.array_size(), target.array_size())):
if (self.addressof(str(i)) - self.addressof(str(i - 1))
!= target.addressof(str(i)) - target.addressof(str(i - 1))):
return False
return True
def has_short_copy(self, dsts: List[str]) -> bool:
return self.has_short_move(dsts)
def put_short_array_move(self, dsts: List[str], copy=False):
if not self.has_short_move(dsts):
raise Exception(f"destination is not compatible")
base = self.addressof("0")
sizes = set([])
        dsts2: List[Tuple[int, str, SubsystemBase, int]] = []
for dst in dsts:
sign, dst = separate_sign(dst)
sub = cast(SubsystemBase, self._main.get_instance(dst))
size = min(sub.array_size(), self.array_size())
sizes |= set([size])
dsts2.append((
size,
sign, sub,
sub.addressof("0") - base))
sizes = sorted(list(sizes))
for i, size in enumerate(sizes):
base_idx = 0 if i == 0 else sizes[i - 1]
size2 = size - base_idx
# n = 2
# 0, v0, n -> 1, v1, n-1 -> 1, v2, n-2 -> 0
self._main.put(">" * self.offset(base_idx * 2 + 2))
self._main.put("+" * size2)
self._main.put("[-[>>+<<-]<")
for dst_size, sign, dst, dst_addr in dsts2:
if sign == "":
self._main.put_at(dst_addr, "[-]")
self._main.put("[")
if copy:
self._main.put(">+<")
for dst_size, sign, dst, dst_addr in dsts2:
o = "-" if sign == "-" else "+"
self._main.put_at(dst_addr, o)
self._main.put("-]")
if copy:
self._main.put(">[<+>-]<")
self._main.put(">+>>]")
self._main.put("<<[-<<]")
self._main.put("<" * self.offset(base_idx * 2))
def put_short_array_copy(self, dsts: List[str]):
self.put_short_array_move(dsts, copy=True)
def has_var(self, name: str) -> bool:
if self._main.is_val(name):
idx = self._main.valueof(name)
if idx in range(self.mem_size_):
return True
return name in ["first", "last"]
def addressof(self, value: str) -> int:
"""direct I/O interface. index as variable"""
if self.mem_size_ == 0:
raise Exception(f"empty array has no readable address")
if value == "first":
return self.offset(1)
if value == "last":
return self.offset(1 + (self.mem_size_ - 1) * 2)
if self._main.is_val(value):
idx = self._main.valueof(value)
if idx in range(self.mem_size_):
return self.offset(1 + idx * 2)
raise Exception(f"can not get address of {value}")
def has_ins(self, name: str, args: list) -> bool:
return (name in [
"clean",
"@clear"]
or len(args) == 1
and name == "init"
or len(args) > 1
and name in [
"@set",
"@w_copy", "@w_move",
"@r_copy", "@r_move",
"@w_copy", "@w_copyadd", "@w_copysub",
"@w_move", "@w_moveadd", "@w_movesub",
"@r_copy", "@r_copyadd", "@r_copysub",
"@r_move", "@r_moveadd", "@r_movesub"]
or super().has_ins(name, args))
def put_clean(self, args: list):
self.mem2_clean("fast" in args)
def put(self, ins_name: str, args: list, tmps: List[int]):
if ins_name == "init":
return
if ins_name == "clean":
self.put_clean(args)
return
if ins_name == "@clear":
            self.mem2_clean("fast" in args)
return
if (len(args) < 2
and ins_name in [
"@set", "@w_copy", "@w_move",
"@w_copy", "@w_copyadd", "@w_copysub",
"@w_move", "@w_moveadd", "@w_movesub",
"@r_copy", "@r_copyadd", "@r_copysub",
"@r_move", "@r_moveadd", "@r_movesub"
]):
raise Exception(f"error: {ins_name} {args}")
# aliases
if ins_name in ["@w_copy", "@w_move"]:
if self._main.is_val(args[0]):
ins_name = "@set"
if ins_name in ["@w_copyadd", "@w_copysub"]:
sign = "+" if ins_name == "@w_copyadd" else "-"
ins_name = "@w_copy"
args = [args[0]] + [sign + x for x in args[1:]]
if ins_name in ["@w_moveadd", "@w_movesub"]:
sign = "+" if ins_name == "@w_moveadd" else "-"
ins_name = "@w_move"
args = [args[0]] + [sign + x for x in args[1:]]
if ins_name in ["@r_copyadd", "@r_copysub"]:
sign = "+" if ins_name == "@r_copyadd" else "-"
ins_name = "@r_copy"
args = [args[0]] + [sign + x for x in args[1:]]
if ins_name in ["@r_moveadd", "@r_movesub"]:
sign = "+" if ins_name == "@r_moveadd" else "-"
ins_name = "@r_move"
args = [args[0]] + [sign + x for x in args[1:]]
if ins_name == "@set":
if self._main.is_var(args[0]):
raise Exception("[mem:@set var ...out] is not implemented")
value = self._main.valueof(args[0])
for dst in args[1:]:
dst_sign, dst = separate_sign(dst)
if self._main.is_val(dst):
idx = self._main.valueof(dst)
dst_addr = self.offset(idx * 2 + 1)
self._main.put_set(value, [f"{dst_sign}#{dst_addr}"])
elif self._main.is_var(dst):
dst_addr = self._main.addressof(dst)
tmp = self._main.get_nearest_tmp(tmps, [self.offset()])
self._main.put_copy(dst_addr, [f"+#{self.offset(2)}"])
self._main.put(">" * self.offset())
self._main.put(""">>[[>>+<<-]+>>-]<""")
self._main.put_set(value, f"{dst_sign}#0")
self._main.put("""<[-<<]""")
self._main.put("<" * self.offset())
return
if ins_name in ["@w_move", "@w_copy"]:
if not self._main.is_var(args[0]):
raise Exception(f"[mem:{ins_name} val ...out] is not implemented")
addr = self._main.addressof(args[0])
if ins_name == "@w_copy":
self._main.put_copy(addr, [f"+#{self.offset(4)}"])
else:
self._main.put_move(addr, [f"+#{self.offset(4)}"])
out_vars = args[1:]
tmp = self._main.get_nearest_tmp(tmps, [addr])
for i in range(len(out_vars)):
name = out_vars[i]
sign, name = separate_sign(name)
if self._main.is_var(name):
dst_addr = self._main.addressof(name)
self._main.put_copy(dst_addr, [f"+#{self.offset(2)}"])
# for next destination
if i < len(out_vars) - 1:
self._main.put_copy(self.offset(4), [f"+#{self.offset()}"])
self._main.put(">" * self.offset())
self._main.put(""">>[->>[>>+<<-]<<[>>+<<-]+>>]""")
if sign == "":
self._main.put("""<[-]>""")
self._main.put(""">>[<<<""")
self._main.put("-" if sign == "-" else "+")
self._main.put(""">>>-]<<<<[-<<]""")
self._main.put("<" * self.offset())
if i < len(out_vars) - 1:
self._main.put_move(tmp, [f"+#{self.offset(4)}"])
continue
if self._main.is_val(name):
dst_idx = self._main.valueof(name)
self._main.put_at(self.offset(4), "[")
self._main.put_at(self.offset(1 + dst_idx * 2), "+")
if i + 1 < len(out_vars):
self._main.put_at(self.offset(2), "+")
self._main.put_at(self.offset(4), "-]")
if i + 1 < len(out_vars):
self._main.put_move(self.offset(2), [f"+#{self.offset(4)}"])
continue
return
if ins_name in ["@r_move", "@r_copy"]:
src = args[0]
if len(list(filter(self._main.is_var, args[1:]))) != len(args[1:]):
raise Exception(f"unknown instruction: [mem:{ins_name}] with non-variable destinations")
# static addressing
if self._main.is_val(src):
idx = self._main.valueof(src)
addr = self.offset(idx * 2 + 1)
if ins_name == "@r_copy":
self._main.put_copy(addr, args[1:])
else:
self._main.put_move(addr, args[1:])
elif self._main.is_var(src):
idx_addr = self._main.addressof(src)
self._main.put_copy(idx_addr, [f"+#{self.offset(2)}"])
self._main.put(">" * self.offset())
self._main.put(""">>[[>>+<<-]+>>-]""")
if ins_name == "@r_copy":
self._main.put("""<[>>>+<<<-]>>>[<<+<+>>>-]<<""")
else:
self._main.put("""<[>+<-]>""")
self._main.put("""<<[->>>>[<<+>>-]<<<<<<]>>>>[<<+>>-]<<<<""")
self._main.put("<" * self.offset())
self._main.put_move(self.offset(2), args[1:])
return
raise Exception(f"error unknown: {ins_name} {args}")
|
"""
Plotting Data Module
Contains the general class definition and the subclasses of the Clawpack
data objects specific to plotting.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import copy
import numpy as np
import re
import logging
import clawpack.clawutil.data as clawdata
import time
import clawpack.pyclaw.controller
# ============================================================================
# Subclass ClawPlotData containing data for plotting results
# ============================================================================
class ClawPlotData(clawdata.ClawData):
"""ClawPlotData class
Data subclass containing plot data.
"""
# ========== Initialization routine ======================================
def __init__(self, controller=None):
"""Initialize a PlotData object
"""
# Initialize the data object and read the data files
super(ClawPlotData,self).__init__()
# default values of attributes:
if controller:
controller.plotdata = self
# inherit some values from controller
self.add_attribute('rundir',copy.copy(controller.rundir))
self.add_attribute('outdir',copy.copy(controller.outdir))
if len(controller.frames)>0:
for i,frame in enumerate(controller.frames):
self.framesoln_dict[str(i)] = frame
self.add_attribute('format',copy.copy(controller.output_format))
else:
self.add_attribute('rundir',os.getcwd()) # uses *.data from rundir
self.add_attribute('outdir',os.getcwd()) # where to find fort.* files
self.add_attribute('format','ascii')
# This should eventually replace all need for recording the above
# information
self.add_attribute('output_controller', None)
self.output_controller = clawpack.pyclaw.controller.OutputController(
self.outdir, file_format=self.format)
self.add_attribute('plotdir',os.getcwd()) # directory for plots *.png, *.html
self.add_attribute('overwrite',True) # ok to overwrite old plotdir?
self.add_attribute('plotter','matplotlib') # backend for plots
self.add_attribute('msgfile','') # where to write error messages
self.add_attribute('verbose',True) # verbose output?
self.add_attribute('ion',False) # call ion() or ioff()?
self.add_attribute('printfigs',True)
self.add_attribute('print_format','png')
self.add_attribute('print_framenos','all') # which frames to plot
self.add_attribute('print_gaugenos','all') # which gauges to plot
self.add_attribute('print_fignos','all') # which figures to plot each frame
self.add_attribute('iplotclaw_fignos','all') # which figures to plot interactively
self.add_attribute('latex',True) # make latex files for figures
self.add_attribute('latex_fname','plots') # name of latex file
self.add_attribute('latex_title','Clawpack Results')
self.add_attribute('latex_framesperpage','all') # number of frames on each page
self.add_attribute('latex_framesperline',2) # number of frames on each line
self.add_attribute('latex_figsperline','all') # number of figures on each line
self.add_attribute('latex_makepdf',False) # run pdflatex on latex file
self.add_attribute('html',True) # make html files for figures
self.add_attribute('html_index_fname','_PlotIndex.html') # name of html index file
self.add_attribute('html_index_title','Plot Index') # title at top of index page
self.add_attribute('html_homelink',None) # link to here from top of _PlotIndex.html
self.add_attribute('html_movie','JSAnimation') # make html with java script for movie
self.add_attribute('html_movie_width', 500) # width of movie
self.add_attribute('html_eagle',False) # use EagleClaw titles on html pages?
self.add_attribute('kml',False) # make kml plots and a kml file for figures
self.add_attribute('kml_index_fname','_GoogleEarth') # name of html index file
self.add_attribute('kml_publish',None)
self.add_attribute('kml_name',"GeoClaw")
self.add_attribute('kml_starttime',None)
self.add_attribute('kml_tz_offset',None)
self.add_attribute('kml_time_scale',1.0) # Multiply by factor to get seconds
self.add_attribute('kml_map_topo_to_latlong',None)
self.add_attribute('kml_user_files',[])
self.add_attribute('gif_movie',False) # make animated gif movie of frames
self.add_attribute('setplot',False) # Execute setplot.py in plot routine
self.add_attribute('mapc2p',None) # function to map computational
# points to physical
self.add_attribute('beforeframe',None) # function called before all plots
# in each frame are done
self.add_attribute('afterframe',None) # function called after all plots
# in each frame are done
self.add_attribute('plotfigure_dict',{})
try:
from collections import OrderedDict # new in Python 2.7
d = OrderedDict()
except:
d = {}
self.add_attribute('otherfigure_dict',d)
self.add_attribute('framesoln_dict',{}) # dictionary for holding framesoln
# objects associated with plots
self.add_attribute('gaugesoln_dict',{}) # dictionary for holding gaugesoln
# objects associated with plots
self.add_attribute('save_frames',True) # True ==> Keep a copy of any frame
# read in. False ==> Clear the frame
# solution dictionary before adding
# another solution
        self.add_attribute('save_figures',True)    # True ==> Keep a copy of any figure
# created. False ==> Clear the
# figure dictionary before adding
# another solution
self.add_attribute('refresh_gauges',False) # False ==> don't re-read gaugesoln if
# already in gaugesoln_dict
self.add_attribute('timeframes_framenos',None)
self.add_attribute('timeframes_frametimes',None)
self.add_attribute('timeframes_fignos',None)
self.add_attribute('timeframes_fignames',None)
self.add_attribute('gauges_gaugenos',None)
self.add_attribute('gauges_fignos',None)
self.add_attribute('gauges_fignames',None)
# Parallel capabilities
        # Run multiple processes dividing up the frames that need to be plotted
self.add_attribute('parallel', False)
# Default to OMP_NUM_THREADS available if defined
self.add_attribute('num_procs', None)
self.add_attribute('proc_frames', None)
self.add_attribute('_parallel_todo', None)
self._next_FIG = 1000
self._fignames = []
self._fignos = []
self._mode = 'unknown'
self._figname_from_num = {}
self._otherfignames = []
def new_plotfigure(self, name=None, figno=None, type='each_frame'):
"""
Create a new figure for Clawpack plots.
If type='each_frame' it is a figure that will be plotted
for each time frame.
If type='multi_frame' it is a figure that will be plotted based on
all the frames, such as x-t plots or time series. (Not yet implemented)
"""
if (self._mode != 'iplotclaw') and (name in self._fignames):
print('*** Warning, figure named %s has already been created' % name)
if (self._mode != 'iplotclaw') and (figno in self._fignos):
print('*** Warning, figure number %s has already been created' % figno)
if figno is None:
self._next_FIG += 1
figno = self._next_FIG
if name is None:
name = "FIG%s" % figno
if name in self._fignames:
print("*** Error in new_plotfigure: Figure name already used... ",name)
raise Exception("Figure name already used")
elif figno in self._fignos:
print("*** Error in new_plotfigure: Figure number already used... ",figno)
raise Exception("Figure number already used")
self._fignames.append(name)
self._fignos.append(figno)
plotfigure = ClawPlotFigure(name, figno, type, self)
if not self.save_figures:
self.plotfigure_dict.clear()
self.plotfigure_dict[name] = plotfigure
self._figname_from_num[figno] = name
return plotfigure
def getframe(self,frameno,outdir=None,refresh=False):
"""
ClawPlotData.getframe:
Return an object of class Solution containing the solution
for frame number frameno.
If refresh == True then this frame is read from the fort
files, otherwise it is read from the fort files only if the
        dictionary self.framesoln_dict has no key frameno. If it does, the
frame has previously been read and the dictionary value is returned.
"""
from clawpack.pyclaw import solution
framesoln_dict = self.framesoln_dict
if outdir is None:
outdir = self.outdir
outdir = os.path.abspath(outdir)
key = (frameno, outdir)
if refresh or (key not in framesoln_dict):
framesoln = solution.Solution(frameno,path=outdir,file_format=self.format)
if not self.save_frames:
framesoln_dict.clear()
framesoln_dict[key] = framesoln
if key != frameno:
print(' Reading Frame %s at t = %g from outdir = %s' \
% (frameno,framesoln.t,outdir))
else:
print(' Reading Frame %s at t = %g ' \
% (frameno,framesoln.t))
else:
framesoln = self.framesoln_dict[key]
return framesoln
def clearfigures(self):
"""
Clear all plot parameters specifying figures, axes, items.
Does not clear the frames of solution data already read in.
For that use clearframes.
"""
self.plotfigure_dict.clear()
self._fignames = []
self._fignos = []
self._next_FIG = 1000
self._otherfignames = []
def clearframes(self, framenos='all'):
"""
Clear one or more frames from self.framesoln_dict.
Need to add outdir option!
"""
if isinstance(framenos, int):
framenos = [framenos] # turn into a list
if framenos=='all':
self.framesoln_dict.clear()
print('Cleared all frames')
else:
for frameno in framenos:
                xxx = self.framesoln_dict.pop(frameno,None)
if xxx is None:
print('No frame data to clear for frame ',frameno)
else:
print('Cleared data for frame ',frameno)
def getgauge(self, gauge_id, outdir=None, verbose=True):
r"""Read in the gauge labeled with `gaugeno` in path `outdir`
:Note:
The behavior of this function has changed to actually only read in the
requested gauge id rather than all of the gauges. The dictionary
`gaugesoln_dict` remains the same.
:Input:
- *gauge_id* - (int) The gauge id of the gauge to be read in.
- *outdir* - (path) Path to output directory containing gauge files.
Defaults to this data object's `self.outdir`.
        - *verbose* - (bool) Verbose console output, default is `True`.
:Output:
- (clawpack.amrclaw.GaugeSolution) The read in gauge solution either
          from the `gaugesoln_dict` or from file. If something went wrong then
the routine prints a warning and returns `None`.
"""
# Construct path to file
if outdir is None:
outdir = self.outdir
outdir = os.path.abspath(outdir)
# Reread gauge data file
key = (gauge_id, outdir)
if self.refresh_gauges or (key not in self.gaugesoln_dict):
try:
# Read gauge solution:
import clawpack.pyclaw.gauges as gauges
self.gaugesoln_dict[key] = gauges.GaugeSolution(
gauge_id=gauge_id, path=outdir)
if verbose:
print("Read in gauge %s." % gauge_id)
except Exception as e:
import warnings
warnings.warn(str(e))
return None
return self.gaugesoln_dict[key]
def plotframe(self, frameno):
from clawpack.visclaw import frametools
frametools.plotframe(frameno, self)
def printframes(self, verbose=True):
#from clawpack.visclaw import frametools
#frametools.printframes(self, verbose)
print("*** printframes is deprecated. Use plotpages.plotclaw_driver")
print("*** for added capabilities.")
raise DeprecationWarning("The method 'printframes' is deprecated.")
def fignos(self):
"""
Return a list of the figure numbers actually used.
Useful in afterframe function for example to loop over all
figures and do something.
"""
return self._fignos
def mode(self):
"""
Return self._mode, which is set internally to
'iplotclaw' if Iplotclaw is in use,
'printframes' if printframes is being used
Useful in afterframe function if you want to do different things
for interactive or print modes.
"""
return self._mode
def iplotclaw(self):
"""
Return True if interactive plotting with iplotclaw is being done.
"""
return (self._mode == 'iplotclaw')
def getfigure(self,figname):
try:
plotfigure = self.plotfigure_dict[figname]
except:
raise Exception('Error accessing plotfigure_dict[%s]' % figname)
return plotfigure
def getaxes(self,axesname,figname=None):
found = True
if not figname:
found = False
for fig in self._fignames:
plotfigure = self.getfigure(fig)
if axesname in plotfigure._axesnames:
if found == True: # already found!
print('*** Ambiguous... must specify figname')
print(' try getaxes(axesname, figname)')
return None
figname = fig
found = True
if not found:
print('*** No axes found with name = ',axesname)
return None
try:
plotfigure = self.getfigure(figname)
plotaxes = plotfigure.plotaxes_dict[axesname]
except:
print('*** Error accessing plotaxes[%s]' % axesname)
print('*** figname = %s' % figname)
return None
return plotaxes
def getitem(self,itemname,axesname=None,figname=None):
found = True
if not figname:
# search over all figures looking for the item
found = False
for fign in self._fignames:
plotfigure = self.getfigure(fign)
if not axesname:
# search over all axes looking for the item
for axesn in plotfigure._axesnames:
plotaxes = self.getaxes(axesn,fign)
if itemname in plotaxes._itemnames:
if found == True: # already found!
print('*** Ambiguous... must specify figname and/or axesname')
print(' try getitem(itemname, axesname, figname)')
return None
axesname = axesn
figname = fign
found = True
else:
# axesname was specified (but not figname)
plotaxes = self.getaxes(axesname,fign)
if itemname in plotaxes._itemnames:
if found == True: # already found!
print('*** Ambiguous... must specify figname and/or axesname')
print(' try getitem(itemname, axesname, figname)')
return None
figname = fign
found = True
elif not axesname:
# figname was specified but not axesname.
# search over all axes looking for the item
found = False
plotfigure = self.getfigure(figname)
for axesn in plotfigure._axesnames:
plotaxes = self.getaxes(axesn,figname)
if itemname in plotaxes._itemnames:
if found == True: # already found!
print('*** Ambiguous... must specify axesname')
print(' try getitem(itemname, axesname, figname)')
return None
axesname = axesn
found = True
if not found:
print('*** No item found with name = ',itemname)
return None
try:
plotaxes = self.getaxes(axesname,figname)
plotitem = plotaxes.plotitem_dict[itemname]
except:
print('*** Error accessing plotitem[%s]' % itemname)
print('*** figname = ',figname)
print('*** axesname = ',axesname)
return None
return plotitem
def showitems(self):
fignames = self._fignames
print("\n\nCurrent plot figures, axes, and items:")
print("---------------------------------------")
for figname in fignames:
plotfigure = self.getfigure(figname)
s = " figname = %s, figno = %s" % (figname, plotfigure.figno)
if not plotfigure._show:
s = s + " [Not showing]"
print(s)
axesnames = plotfigure._axesnames
for axesname in axesnames:
plotaxes = self.getaxes(axesname,figname)
s = " axesname = %s, axescmd = %s" \
% (axesname, plotaxes.axescmd)
if not plotaxes._show:
s = s + " [Not showing]"
print(s)
for itemname in plotaxes._itemnames:
plotitem = self.getitem(itemname,axesname,figname)
plot_type = plotitem.plot_type
s = " itemname = %s, plot_type = %s" \
% (itemname,plot_type)
if not plotitem._show:
s = s + " [Not showing]"
print(s)
print(" ")
def getq(self,frameno):
solution = self.getframe(frameno)
patches = solution.patches
if len(patches) > 1:
print('*** Warning: more than 1 patch, q on patch[0] is returned')
q = patches[0].q
return q
def new_otherfigure(self, name=None, fname=None):
"""
Create a new figure for Clawpack plots.
For figures not repeated each frame.
"""
if (self._mode != 'iplotclaw') and (name in self._fignames):
print('*** Warning, figure named %s has already been created' % name)
if name is None:
if fname is None:
raise Exception("Need to provide name in new_otherfigure")
else:
name = fname
if name in self._otherfignames:
print("*** Error in new_otherfigure: Figure name already used... ",name)
raise Exception("Figure name already used")
self._otherfignames.append(name)
otherfigure = ClawOtherFigure(name,self)
self.otherfigure_dict[name] = otherfigure
otherfigure.fname = fname
return otherfigure
def set_outdirs(self):
"""
Make a list of all outdir's for all plotitem's in the order they
are first used.
"""
outdir_list = []
for figname in self._fignames:
plotfigure = self.plotfigure_dict[figname]
if not plotfigure._show:
continue # skip to next figure
for axesname in plotfigure._axesnames:
plotaxes = plotfigure.plotaxes_dict[axesname]
if not plotaxes._show:
continue # skip to next axes
for itemname in plotaxes._itemnames:
plotitem = plotaxes.plotitem_dict[itemname]
if not plotitem._show:
continue # skip to next item
if plotitem.outdir is not None:
outdir = plotitem.outdir
else:
outdir = self.outdir
if outdir not in outdir_list:
outdir_list.append(outdir)
self._outdirs = outdir_list
return self
# ============================================================================
# Subclass ClawPlotFigure containing data for plotting a figure
# ============================================================================
class ClawPlotFigure(clawdata.ClawData):
"""
Data subclass containing plot data needed to plot a single figure.
This may consist of several ClawPlotAxes objects.
"""
# ========================================================================
# Initialization routine
# ========================================================================
def __init__(self, name, figno, fig_type, plotdata):
"""
Initialize a ClawPlotFigure object
"""
super(ClawPlotFigure, self).__init__()
self._plotdata = plotdata # parent ClawPlotData object
self.add_attribute('name',name)
self.add_attribute('figno',figno)
self.add_attribute('kwargs',{})
self.add_attribute('clf_each_frame',True)
self.add_attribute('clf_each_gauge',True)
self._axesnames = []
self.add_attribute('show',True)
self._show = True
self.add_attribute('plotaxes_dict', {})
self.add_attribute('type',fig_type) # = 'each_frame' or 'each_run' or 'each_gauge'
self.add_attribute('use_for_kml',False)
self.add_attribute('kml_gauge_name','Gauge')
self.add_attribute('kml_dpi',200)
self.add_attribute('kml_xlimits',None)
self.add_attribute('kml_ylimits',None)
self.add_attribute('kml_use_figure_limits',True)
self.add_attribute('kml_tile_images',False)
self.add_attribute('kml_colorbar',None)
self.add_attribute('kml_use_for_initial_view',False)
self.add_attribute('kml_show_figure',False)
self.add_attribute('kml_maxlevel',20)
self.add_attribute('kml_figsize',None) # Figure size; specify to get rid of aliasing
self._next_AXES = 0
def new_plotaxes(self, name=None, type='each_frame'):
"""
Create a new axes that will be plotted in this figure.
If type='each_frame' it is an axes that will be plotted
for each time frame.
If type='multi_frame' it is an axes that will be plotted based on
all the frames, such as x-t plots or time series. (Not yet implemented)
If type='empty' it is created without doing any plots using the
pyclaw tools. Presumably the user will create a plot within an
afteraxes command, for example.
"""
if name is None:
self._next_AXES += 1
name = "AXES%s" % self._next_AXES
if name in self._axesnames:
print('*** Warning, axes named %s has already been created' % name)
if name not in self._axesnames:
self._axesnames.append(name)
plotaxes = ClawPlotAxes(name, self)
self.plotaxes_dict[name] = plotaxes
plotaxes.type = type
return plotaxes
def gethandle(self):
_handle = getattr(self,'_handle',None)
return _handle
# ============================================================================
# Subclass ClawPlotAxes containing data for plotting axes within a figure
# ============================================================================
class ClawPlotAxes(clawdata.ClawData):
"""
Data subclass containing plot data needed to plot a single axes.
This may consist of several ClawPlotItem objects.
"""
# ========================================================================
# Initialization routine
# ========================================================================
def __init__(self, name, plotfigure):
"""
Initialize a ClawPlotAxes object
"""
super(ClawPlotAxes, self).__init__()
self._plotfigure = plotfigure # figure this item is on
self._plotdata = plotfigure._plotdata # parent ClawPlotData object
self.add_attribute('name',name)
self.add_attribute('title',name)
self.add_attribute('title_with_t',True) # creates title of form 'title at time t = ...'
self.add_attribute('axescmd','subplot(1,1,1)')
self.add_attribute('beforeaxes',None)
self.add_attribute('afteraxes',None)
self.add_attribute('xlimits',None)
self.add_attribute('ylimits',None)
self.add_attribute('skip_patches_outside_xylimits',None)
self.add_attribute('scaled',False) # true so x- and y-axis scaled same
self.add_attribute('image',False) # true so x- and y-axis scaled same
# and plot bounds tight
self.add_attribute('plotitem_dict', {})
self.add_attribute('type','each_frame')
self._itemnames = []
self.add_attribute('show',True)
self._show = True
self._handle = None
self._next_ITEM = 0
self.add_attribute('figno', self._plotfigure.figno)
# attributes for gauge plots
self.add_attribute('time_label', 'time') # for time axis in gauges
self.add_attribute('time_label_kwargs', {}) # kwargs for xlabel cmd
self.add_attribute('time_scale', 1) # multiplicative factor to rescale t
# e.g. 1/3600. from sec to hours
def new_plotitem(self, name=None, plot_type=None):
# Create a new entry in self.plotitem_dict
if name is None:
self._next_ITEM += 1
name = "ITEM%s" % self._next_ITEM
if name not in self._itemnames:
self._itemnames.append(name)
plotitem = ClawPlotItem(name, plot_type, plotaxes=self)
self.plotitem_dict[name] = plotitem
return plotitem
    def get_plotdata(self):
        return getattr(self, '_plotdata', None)
    def get_plotfigure(self):
        return getattr(self, '_plotfigure', None)
    def gethandle(self):
        return getattr(self, '_handle', None)
# ============================================================================
# Subclass ClawPlotItem containing data for plotting a single object
# ============================================================================
class ClawPlotItem(clawdata.ClawData):
"""
Data subclass containing plot data needed to plot a single object.
This may be a single curve, set of points, contour plot, etc.
"""
# ========================================================================
# Initialization routine
# ========================================================================
def __init__(self, name, plot_type, plotaxes):
"""
Initialize a ClawPlotItem object
"""
super(ClawPlotItem, self).__init__()
self._plotaxes = plotaxes # axes this item is on
self._plotfigure = plotaxes._plotfigure # figure this item is on
self._plotdata = plotaxes._plotfigure._plotdata # parent ClawPlotData object
        try:
            num_dim = int(plot_type[0])   # first character of plot_type should be num_dim
        except:
            print('*** Error: could not determine num_dim from plot_type = ',plot_type)
            raise
self.add_attribute('num_dim',num_dim)
self.add_attribute('name',name)
self.add_attribute('figno',plotaxes.figno)
self.add_attribute('outdir',None) # indicates data comes from
# self._plotdata.outdir
self.add_attribute('plot_type',plot_type)
self.add_attribute('plot_var',0)
self.add_attribute('data_show',True)
self.add_attribute('MappedGrid',None) # False to plot on comput. patch even
# if _plotdata.mapc2p is not None.
self.add_attribute('mapc2p',None) # function to map computational
# points to physical (over-rides
# plotdata.mapc2p if set for item
self.add_attribute('afterpatch',None) # function called after each patch is
# plotted within each single plotitem.
self.add_attribute('afteritem',None) # function called after the item is
# plotted for each frame
self.add_attribute("show",True) # False => suppress showing this item
self._show = True # Internal
self._current_pobj = None
self.add_attribute('params',{}) # dictionary to hold optional parameters
if num_dim == 1:
self.add_attribute('plotstyle','-')
self.add_attribute('color',None)
self.add_attribute('kwargs',{})
amr_attributes = """show color kwargs data_show plotstyle""".split()
for a in amr_attributes:
self.add_attribute('amr_%s' % a, [])
if plot_type == '1d_fill_between':
zero_function = lambda current_data: 0.
self.add_attribute('plot_var2',zero_function)
self.add_attribute('fill_where',None)
if plot_type == '1d_from_2d_data':
self.add_attribute('map_2d_to_1d',None)
self.add_attribute('amr_plotstyle',[])
elif num_dim == 2:
# default values specifying this single plot:
self.add_attribute('plot_type',plot_type)
self.add_attribute('celledges_show',0)
self.add_attribute('celledges_color','k')
self.add_attribute('patch_bgcolor','w')
self.add_attribute('patchedges_show',0)
self.add_attribute('patchedges_color','k')
self.add_attribute('add_colorbar',False)
self.add_attribute('colorbar_shrink',None)
self.add_attribute('colorbar_label',None)
self.add_attribute('colorbar_ticks', None)
self.add_attribute('colorbar_tick_labels',None)
self.add_attribute('colorbar_kwargs',{})
self.add_attribute('kwargs',{})
amr_attributes = """celledges_show celledges_color data_show
patch_bgcolor patchedges_show patchedges_color kwargs""".split()
for a in amr_attributes:
self.add_attribute('amr_%s' % a, [])
if plot_type == '2d_pcolor':
from clawpack.visclaw import colormaps
self.add_attribute('pcolor_cmap',colormaps.yellow_red_blue)
self.add_attribute('pcolor_cmin',None)
self.add_attribute('pcolor_cmax',None)
elif plot_type == '2d_imshow':
from clawpack.visclaw import colormaps
self.add_attribute('imshow_cmap',colormaps.yellow_red_blue)
self.add_attribute('imshow_cmin',None)
self.add_attribute('imshow_cmax',None)
elif plot_type in ['2d_contour', '2d_contourf']:
self.add_attribute('contour_nlevels',20)
self.add_attribute('contour_levels',None)
self.add_attribute('contour_min',None)
self.add_attribute('contour_max',None)
self.add_attribute('contour_show',1)
self.add_attribute('contour_colors','k')
self.add_attribute('contour_cmap',None)
amr_attributes = """show colors cmap data_show""".split()
for a in amr_attributes:
self.add_attribute('amr_contour_%s' % a, [])
if plot_type == '2d_contourf':
self.add_attribute('fill_cmap',None)
self.add_attribute('fill_cmin',None)
self.add_attribute('fill_cmax',None)
self.add_attribute('fill_colors',None)
# Note either fill_cmap or fill_colors must be None
elif plot_type == '2d_schlieren':
from clawpack.visclaw import colormaps
self.add_attribute('schlieren_cmap',colormaps.schlieren_grays)
self.add_attribute('schlieren_cmin',None)
self.add_attribute('schlieren_cmax',None)
elif plot_type == '2d_patch':
self.add_attribute('max_density',None)
self.celledges_show = True
self.patchedges_show = True
elif plot_type == '2d_quiver':
self.add_attribute('quiver_var_x',None)
self.add_attribute('quiver_var_y',None)
self.add_attribute('quiver_coarsening',1)
self.add_attribute('quiver_key_show',False)
self.add_attribute('quiver_key_label_x',0.15)
self.add_attribute('quiver_key_label_y',0.95)
self.add_attribute('quiver_key_units','')
self.add_attribute('quiver_key_scale',None)
self.add_attribute('quiver_key_kwargs',{})
amr_attributes = """coarsening key_show key_label_x key_label_y
key_scale key_kwargs data_show""".split()
for a in amr_attributes:
self.add_attribute('amr_quiver_%s' % a, [])
elif plot_type == '2d_gtiff':
pass
else:
print('*** Warning 2d plot type %s not recognized' % plot_type)
elif num_dim == 3:
raise NotImplementedError('ClawPlotItem not yet set up for num_dim = 3')
else:
raise Warning('Unrecognized plot_type in ClawPlotItem')
def getframe(self,frameno,refresh=False):
"""
ClawPlotItem.getframe:
Return an object of class Solution containing the solution
for frame number frameno.
If refresh == True then this frame is read from the fort
files, otherwise it is read from the fort files only if the
        dictionary self.framesoln_dict has no key frameno. If it does, the
frame has previously been read and the dictionary value is returned.
"""
plotdata = self._plotdata
outdir = self.outdir
framesoln = plotdata.getframe(frameno, outdir,refresh=refresh)
return framesoln
def getgauge(self,gauge):
"""
ClawPlotItem.getgauge:
Return an object of class GaugeSolution containing the solution
for gauge number gaugeno.
If self.refresh_gauges == True then this gauge is read from the
fort.gauge file, otherwise it is read only if the
        dictionary self.gaugesoln_dict has no key gaugeno. If it does, the
gauge has previously been read and the dictionary value is returned.
"""
plotdata = self._plotdata
outdir = self.outdir
gaugesoln = plotdata.getgauge(gauge, outdir)
return gaugesoln
# ============================================================================
# Subclass ClawOtherFigure containing data for plotting a figure
# ============================================================================
class ClawOtherFigure(clawdata.ClawData):
"""
Data subclass containing plot data needed to plot a single figure.
For figures that are not produced each frame.
"""
# ========================================================================
# Initialization routine
# ========================================================================
def __init__(self, name, plotdata):
"""
Initialize a ClawOtherFigure object
"""
super(ClawOtherFigure, self).__init__()
self._plotdata = plotdata # parent ClawPlotData object
self.add_attribute('name',name)
self.add_attribute('fname',None) # name of png file
self.add_attribute('makefig',None) # function invoked to create figure
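# Illustrative usage sketch (follows common Clawpack setplot conventions;
# the attribute values below are hypothetical, not taken from this module):
#
#   plotdata = ClawPlotData()
#   plotdata.outdir = '_output'
#   plotfigure = plotdata.new_plotfigure(name='Solution', figno=1)
#   plotaxes = plotfigure.new_plotaxes()
#   plotaxes.xlimits = [0., 1.]
#   plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
#   plotitem.plot_var = 0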
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import patterns, url
urlpatterns = patterns(
'pagseguro.views',
url(
r'^$', 'receive_notification', name='pagseguro_receive_notification'
),
)
|
from collections import abc
from pathlib import Path
import time
import re
from threading import Thread
from api import audio
from api import gui
from init_phase import init_phase
from viability_phase import viability_phase
from evaluation_phase import create_datetime_subdir, evaluation_phase
from evaluation_phase import load_all_learning_profiles
from phase_utils import retrieve_best_learning_profiles, EvaluationMode
from phase_utils import PresentationMode, get_specific_learning_profiles
from phase_utils import sort_by_score
from user_phase import user_phase
from presentation_phase import presentation_phase
def _phase_header(header):
n = len(header)
print()
print("#" * (n + 8))
print(f"# {'=' * (n + 4)} #")
print(f"# = {header} = #")
print(f"# {'=' * (n + 4)} #")
print("#" * (n + 8))
print()
def _pick_learning_profiles(learning_profiles: list):
# Show info
n = len(learning_profiles)
print(f"There are {n} Learning Profiles.")
# Prompt for starting index
start_index = -1
print(f"Pick a starting index between 0 and {n-1}:")
while True:
try:
start_index = int(input("> "))
            if start_index >= 0 and start_index < n:
break
except ValueError:
continue
# Prompt for stopping index
stop_index = -1
print(f"Pick a stopping index between {start_index} and {n-1}:")
while True:
try:
stop_index = int(input("> "))
            if stop_index >= start_index and stop_index < n:
break
except ValueError:
continue
return learning_profiles[start_index:(stop_index+1)]
def _pick_multiple_learning_profiles(learning_profiles: list):
# Initial prompt
print("Pick what Learning Profiles to evaluate.")
indexed_lps = {i: lp for i, lp in enumerate(learning_profiles)}
picked_inds = []
while True:
# Print unpicked LPs
print(
"Learning Profiles to pick from: (Train DS, Test DS, AL, ML, "
"hyperparameters)")
if len(picked_inds) == len(indexed_lps):
print("\t-")
else:
for i, lp in indexed_lps.items():
if i not in picked_inds:
print(f"\t{i}: {lp.get_name()}")
# Print picked LPs
print("Picked Learning Profiles:")
if not picked_inds:
print("\t-")
else:
for i in sorted(picked_inds):
print(f"\t{i}: {indexed_lps[i].get_id()}")
# Input prompt
print("Enter indices on format 'i' or 'i-j'.")
print("Drop staged Learning Profiles with 'drop i'.")
print("Write 'done' when you are done.")
# Handle input
try:
idx = input("> ")
if idx == "done": # Check if done
break
elif bool(re.match("^[0-9]+-[0-9]+$", idx)): # Check if range
span_str = idx.split("-")
picked_inds += [i for i in range(
int(span_str[0]), int(span_str[1]) + 1)
if i not in picked_inds]
elif bool(re.match("^drop [0-9]+$", idx)):
picked_inds.remove(int(idx.split()[1]))
elif int(idx) in indexed_lps.keys() \
and int(idx) not in picked_inds: # Check if singular
picked_inds.append(int(idx))
except ValueError:
continue
return [indexed_lps[i] for i in picked_inds]
def _nested_dict_ids(nested):
for _, value in nested.items():
if isinstance(value, abc.Mapping):
yield from _nested_dict_ids(value)
elif isinstance(value, abc.Iterable):
for lp in value:
yield lp.get_id()
else:
raise ValueError(f"Invalid structure (value was '{value}')")
def _best_learning_profiles(input_dir: Path, learning_profiles: list,
n_lps_per_cat: int):
# Load learning profile descriptions and choose best
lp_descs = load_all_learning_profiles(input_dir)
lp_descs_best = retrieve_best_learning_profiles(lp_descs, n_lps_per_cat)
# Use descriptions to retrieve actual learning profiles
return [lp for lp in learning_profiles
if lp.get_id() in _nested_dict_ids(lp_descs_best)]
def model_selection_process(data_dir: Path, output_dir: Path,
sliding_window_length: int,
batch_size: int, num_iterations: int,
seed_percent: float, n_threads: int):
"""
Runs the model selection process.
Args:
data_dir (Path): The directory where all `.csv` and
`.npy` files are located.
output_dir (Path): A directory where all Learning
Profile results will be stored.
sliding_window_length (int): The sliding window size to use.
batch_size (int): The batch size to use.
num_iterations (int): Number of batches to process.
seed_percent (float): Percent of initial seed data
to use before applying Active Learning.
n_threads (int): The number of threads to use.
"""
########################
# Initialization Phase #
########################
_phase_header("INIT PHASE")
learning_profiles = init_phase(
data_dir,
sliding_window_length=sliding_window_length,
batch_size=batch_size,
model_eval=False
)
##########
# Filter #
##########
_phase_header("LEARNING PROFILE FILTER")
filtered_learning_profiles = _pick_learning_profiles(learning_profiles)
###################
# Viability Phase #
###################
_phase_header("VIABILITY PHASE")
viability_phase(filtered_learning_profiles, num_iterations,
seed_percent, n_threads)
####################
# Evaluation Phase #
####################
_phase_header("EVALUATION PHASE")
stamped_output_dir = create_datetime_subdir(output_dir)
evaluation_phase(stamped_output_dir, filtered_learning_profiles)
print("Evaluated successfully!")
# Done
_phase_header("DONE")
def model_evaluation_process(data_dir: Path, input_dir: Path, output_dir: Path,
audio_dir: Path, sliding_window_length: int,
batch_size: int, num_iterations: int,
seed_percent: float, audio_file_ext: str,
n_lps_per_category_item: int):
"""
Runs the model evaluation process.
Args:
data_dir (Path): The directory where all `.csv` and
`.npy` files are located.
input_dir (Path): A directory with Learning Profile results from
the model_selection process.
output_dir (Path): A directory where all Learning
Profile results will be stored.
audio_dir (Path): A directory where all audio files are located.
sliding_window_length (int): The sliding window size to use.
        batch_size (int): The batch size to use.
num_iterations (int): Number of batches to process.
seed_percent (float): Percent of initial seed data
to use before applying Active Learning.
audio_file_ext (str): File extension of the audio files
in `data_dir`.
n_lps_per_category_item (int): The number of
best-performing-learning-profiles per
presentation-mode-category-item (a method/dataset from any of these
categories AL,ML,DS) to continue with from the
model selection phase.
Raises:
FileNotFoundError: If `input_dir` is not a valid directory.
"""
########################
# Initialization Phase #
########################
_phase_header("INIT PHASE")
learning_profiles = init_phase(
data_dir,
sliding_window_length=sliding_window_length,
batch_size=batch_size,
model_eval=True
)
##########
# Filter #
##########
_phase_header("LEARNING PROFILE FILTER")
# Validity check
if not input_dir.is_dir():
raise FileNotFoundError(f"Not a directory: '{input_dir}'")
# Get best learning profiles
filtered_learning_profiles = _best_learning_profiles(
input_dir, learning_profiles, n_lps_per_category_item)
# Pick what learning profiles to evaluate
picked_learning_profiles = _pick_multiple_learning_profiles(
filtered_learning_profiles)
##############
# User Phase #
##############
_phase_header("USER PHASE")
# Initialize audio
audio.init()
# User phase wrapper
def _user_phase_thread_func():
for _ in user_phase(
picked_learning_profiles, audio_dir,
num_iterations, seed_percent,
audio_file_ext):
pass
# Start application
_app = Thread(target=_user_phase_thread_func)
print("Starting User Phase thread...")
_app.start()
# Drive GUI
while _app.is_alive():
time.sleep(.01) # Allow other threads to breathe
gui.update_windows()
print("The GUI loop on main thread was exited " +
"since the User Phase thread was stopped!")
# Exit GUI
print("Destroying GUI...")
gui.destroy()
print("The GUI was successfully destroyed!")
# Deinitialize audio
audio.deinit()
####################
# Evaluation Phase #
####################
_phase_header("EVALUATION PHASE")
stamped_output_dir = create_datetime_subdir(output_dir)
evaluation_phase(stamped_output_dir, picked_learning_profiles)
print("Evaluated successfully!")
# Done
_phase_header("DONE")
def _get_sorted_specific_learning_profiles(lps, eval_mode, pres_mode,
n_lps_per_category_item):
sorted_lps = []
# Get all specific learning profiles
for spec_lps in get_specific_learning_profiles(
lps, pres_mode):
# For each attribute, sort learning profiles by score
# and choose n_lps_per_category_item nr of models per category item
for lp in sort_by_score(spec_lps, eval_mode,
n_lps_per_category_item):
sorted_lps.append(lp)
return sorted_lps
def presentation_process(learning_profile_dir: Path, n_lps: int):
"""
    Runs the presentation process.
Args:
learning_profile_dir (Path): A directory with Learning Profile results
from either model_selection or model_evaluation.
n_lps (int): Max number of Learning Profiles to include in plot,
chooses the best performing ones.
(-1 all Learning Profiles included).
"""
# get profiles
lps_desc = load_all_learning_profiles(learning_profile_dir)
# Copy
lps_present = lps_desc
######################
# Presentation Phase #
######################
_phase_header("PRESENTATION PHASE")
print(f"In total there are {len(lps_desc)} Learning profiles.")
# Setup variables
quit = False
picked_eval = None
eval_modes = [eval_mode for eval_mode in EvaluationMode]
pres_modes = [pres_mode for pres_mode in PresentationMode]
# Input loop to gather plot settings
while True:
_phase_header("PLOT SETTINGS")
###################
# Evaluation mode #
###################
# Input prompt
print("Pick evaluation mode by writing the index of the desired"
" evaluation mode.")
for idx, eval_mode in enumerate(EvaluationMode):
print(f"{idx}:\t{eval_mode}")
print("Write 'exit' to quit.")
# Handle evaluation mode input
while True:
try:
idx = input("> ")
if "exit" == idx:
quit = True
break
elif int(idx) >= 0 and int(idx) < len(EvaluationMode):
picked_eval = eval_modes[int(idx)]
break
except ValueError:
continue
if quit:
break
#####################
# Presentation mode #
#####################
# Input prompt
print("Pick presentation mode by writing the index of the wanted"
" presentation mode.")
print("ML = Machine learning, AL = Active learning,"
" DS = Dataset.")
for idx, pres_mode in enumerate(PresentationMode):
print(f"{idx}:\t{pres_mode}")
print(
f"Write 'all' to present {n_lps if n_lps > -1 else len(lps_desc)}"
" learning profiles."
" (No presentation-mode-filtering)")
print("Write 'exit' to quit.")
# Handle presentation mode input
while True:
try:
idx = input("> ")
if "exit" == idx:
quit = True
break
elif "all" == idx:
lps_present = lps_desc
presentation_phase(learning_profiles=lps_present,
eval=picked_eval,
nr_models=n_lps)
break
elif int(idx) >= 0 and int(idx) < len(PresentationMode):
# Set nr of learning profiles per category item
n_lps_per_category_item = None
while True:
print("Write the number of "
"best-performing-learning-profiles"
" per presentation-mode-category-item "
"(a method/dataset from any of these "
"categories AL,ML,DS) "
"to apply presentation-mode-filtering"
"(-1 means all Learning Profiles included)")
n = input("> ")
if "exit" == n:
quit = True
break
elif int(n) == -1 or int(n) > 0:
n_lps_per_category_item = int(n)
break
if quit:
break
# Filter learning profiles given the arguments
lps_present = _get_sorted_specific_learning_profiles(
lps_desc, picked_eval, pres_modes[int(idx)],
n_lps_per_category_item)
# Run presentation phase to plot the results
presentation_phase(learning_profiles=lps_present,
eval=picked_eval,
nr_models=n_lps)
break
except ValueError:
continue
if quit:
break
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
I = cv2.imread('masoleh.jpg')
# notice that OpenCV uses BGR instead of RGB!
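# channel 0 is Blue, 1 is Green, 2 is Red; each copy below zeroes the other
# two channels so only a single colour plane remains.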
B = I.copy()
B[:, :, 1:] = 0
G = I.copy()
G[:, :, ::2] = 0
R = I.copy()
R[:, :, :2] = 0
cv2.imshow('win1', I)
while 1:
k = cv2.waitKey()
if k == ord('o'):
cv2.imshow('win1', I)
elif k == ord('b'):
cv2.imshow('win1', B)
elif k == ord('g'):
cv2.imshow('win1', G)
elif k == ord('r'):
cv2.imshow('win1', R)
elif k == ord('q'):
cv2.destroyAllWindows()
break
|
# Standard libraries
import os
from datetime import datetime as dt
from datetime import timedelta
# External libraries
import pandas as pd
class LoadProfileData:
def __init__(self,settings, logger):
self.logger = logger
self.settings = settings
# Read all csv files
self.read_files()
self.group_kw = self.consumer_data.groupby('cust_type').sum()['kw'].to_dict()
self.timeseries, self.timeseriesdict = [],{}
for group, load in self.group_kw.items():
group_timeseries = [mult*load for mult in self.dataframe_dict[group]]
self.timeseriesdict[group] = group_timeseries
if self.timeseries == []:
self.timeseries = group_timeseries
else:
self.timeseries = [sum(x) for x in zip(self.timeseries,group_timeseries)]
self.logger.info('Profile object instantiated !!')
def get_data(self,pv_pen: float, date):
self.timeseries_pv, self.timeseriesdict_pv = [],{}
self.pvgeneration = []
# find out the peak and peak_index
self.peak = max(self.timeseries)
self.peak_index = self.timeseries.index(self.peak)
for group, load in self.group_kw.items():
load_at_peak = self.dataframe_dict[group][self.peak_index]*load
base_load = [el*load for el in self.dataframe_dict[group]]
solar_gen = [load_at_peak*pv_pen/100*el for el in self.solar_data]
net_load = [x[0] - x[1] for x in zip(base_load,solar_gen)]
self.timeseriesdict_pv[group] = net_load
if self.timeseries_pv == []:
self.timeseries_pv = net_load
self.pvgeneration = solar_gen
else:
self.timeseries_pv = [sum(x) for x in zip(self.timeseries_pv,net_load)]
self.pvgeneration = [sum(x) for x in zip(self.pvgeneration,solar_gen)]
        # Let's arrange in descending order to plot the load duration curve
self.sorted_timeseries_pv, self.ids = zip(*sorted(zip(self.timeseries_pv, \
range(len(self.timeseries_pv))),reverse=True))
self.sorted_timeseriesdict_pv = {}
for group, array in self.timeseriesdict_pv.items():
temp_array = [array[index] for index in self.ids]
self.sorted_timeseriesdict_pv[group] = temp_array
data_len = len(self.sorted_timeseries_pv)
self.sorted_timeseriesdict_pv['TimeStamp'] = [index*100/data_len for index in range(data_len)]
# Separate load data for a day
self.datelist = [dt(self.settings['year'],1,1,0,0,0) \
+ timedelta(minutes=self.settings['time_step(min)'])*i \
for i in range(len(self.timeseries_pv))]
if ":" in date:
self.date = dt.strptime(date,'%Y-%m-%d %H:%M:%S')
else:
self.date = dt.strptime(date,'%Y-%m-%d')
self.daily_data = {'TimeStamp':[el for el in self.datelist
if el.day==self.date.day and el.month==self.date.month]}
for group, array in self.timeseriesdict_pv.items():
temp_array = []
for date, load in zip(self.datelist,array):
if date.day == self.date.day:
temp_array.append(load)
self.daily_data[group] = temp_array
# Sample load duration data
self.sorted_sample_dict = {}
for group, array in self.sorted_timeseriesdict_pv.items():
chunk_size = int(len(array)/1000) if len(array)>3000 else 1
self.sorted_sample_dict[group] = [array[index] for index in range(0,len(array),chunk_size)]
# Get statistics about load profile
absolute_load = [abs(value) for value in self.timeseries_pv]
max_net_gen = 'NA' if pv_pen==0 else -round(min(self.timeseries_pv)/1000,2)
load_factor = round(sum(self.timeseries_pv)/(len(self.timeseries_pv)*max(self.timeseries_pv)),2)
        col_names = ['Peak load (MW)','Minimum load (MW)', 'Maximum solar generation (MW)', \
                'Maximum Net Generation (MW)', 'Load factor']
val_list = [round(max(self.timeseries_pv)/1000,2),
round(min(absolute_load)/1000,2),
round(max(self.pvgeneration)/1000,2),
max_net_gen,
load_factor]
time_list = [self.timeseries_pv.index(max(self.timeseries_pv)),
absolute_load.index(min(absolute_load)),
self.pvgeneration.index(max(self.pvgeneration)),
self.timeseries_pv.index(min(self.timeseries_pv)),
'NA']
if max_net_gen == 'NA': time_list[3] = 'NA'
for id, value in enumerate(time_list):
if value != 'NA':
time_list[id] = self.datelist[value]
self.df_stat = pd.DataFrame({'Parameters': col_names,'Value': val_list, 'Time': time_list})
return self.sorted_sample_dict, self.daily_data, self.df_stat
def read_files(self):
# Make sure all file exists
fileslist = ['residential.csv','commercial.csv','industrial.csv',
'agricultural.csv','consumer.csv','solarmult.csv']
file_path = os.path.join(self.settings['project_path'], \
self.settings['active_project'],'Profile')
if not set(fileslist).issubset(set(os.listdir(file_path))):
raise Exception(f"At least one of the file in list {','.join(fileslist)} \
is missing in folder path {file_path}")
file_dict = {'residential':'residential.csv',
'commercial':'commercial.csv',
'industrial':'industrial.csv',
'agricultural':'agricultural.csv'}
self.dataframe_dict = {}
for consumertype, filename in file_dict.items():
self.dataframe_dict[consumertype] = list(pd.read_csv(os.path.join(file_path, \
filename),header=None)[0])
self.consumer_data = pd.read_csv(os.path.join(file_path,'consumer.csv'))
self.solar_data = list(pd.read_csv(os.path.join(file_path,'solarmult.csv'),header=None)[0])
self.logger.info(f'Files read successfully from folder path {file_path}')
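# Illustrative usage sketch (the settings keys match those referenced in the
# code above; the values and the logger object are hypothetical):
#
#   settings = {'project_path': 'projects', 'active_project': 'demo',
#               'year': 2020, 'time_step(min)': 15}
#   profile = LoadProfileData(settings, logger)
#   duration, daily, stats = profile.get_data(pv_pen=30, date='2020-06-01')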
|
from setuptools import setup, find_packages
from os.path import join, dirname
setup(
name='smartRM',
version='1.0',
author='Andrew Denisevich',
author_email='[email protected]',
packages=find_packages(),
long_description=open(join(dirname(__file__), 'README')).read()
) |
import os
import time
from dataclasses import dataclass
from datetime import datetime
from typing import Annotated
from apischema import deserialize, serialize
from apischema.metadata import conversion
# Set UTC timezone for example
os.environ["TZ"] = "UTC"
time.tzset()
def datetime_from_timestamp(timestamp: int) -> datetime:
return datetime.fromtimestamp(timestamp)
date = datetime(2017, 9, 2)
assert deserialize(datetime, 1504310400, conversion=datetime_from_timestamp) == date
@dataclass
class Foo:
bar: int
baz: int
def sum(self) -> int:
return self.bar + self.baz
@property
def diff(self) -> int:
return int(self.bar - self.baz)
assert serialize(Foo, Foo(0, 1)) == {"bar": 0, "baz": 1}
assert serialize(Foo, Foo(0, 1), conversion=Foo.sum) == 1
assert serialize(Foo, Foo(0, 1), conversion=Foo.diff) == -1
# conversions can be specified using Annotated
assert serialize(Annotated[Foo, conversion(serialization=Foo.sum)], Foo(0, 1)) == 1
|
'''
This file contains the important functions that are imported within the module
'''
import numpy as np
import matplotlib.pyplot as plt
from time import time
import os
import glob
from astropy.io import fits
from functools import reduce
from scipy.interpolate import LSQUnivariateSpline as spline
from scipy.interpolate import UnivariateSpline
from scipy.signal import savgol_filter
from scipy.signal import gaussian
from scipy.stats import binned_statistic
from scipy.ndimage import filters
def fold_data(Time , Flux, Period):
'''
Function to fold the time, and flux for a given period
Parameters
----------
Time: numpy array
Time series as a numpy array
Flux: numpy array
Flux series as a numpy array
Period: float
The value of period to which data is folded
Returns
-------
array, array
        The folded time sorted in ascending order, and the flux arranged to match
'''
FoldedTime = Time%Period
ArrangeIndex = np.array(FoldedTime).argsort()
ArrangedTime = FoldedTime[ArrangeIndex]
ArrangedFlux = Flux[ArrangeIndex]
return ArrangedTime, ArrangedFlux
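# A minimal usage sketch for fold_data on synthetic data (values assumed for
# illustration only); guarded so it runs just when this module is executed directly.
if __name__ == "__main__":
    _time = np.linspace(0, 25, 1000)
    _flux = np.sin(2*np.pi*_time/2.5)
    _phase, _folded_flux = fold_data(_time, _flux, 2.5)
    assert _phase.shape == _folded_flux.shape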
def FindLocalMaxima(Data, NData=4):
'''
This function finds the value of the local maxima
Input Parameter:
----------------
Data: numpy array
Data where the local maxima is to be found.
NData: integer
Number of neighboring data points to be considered.
Returns
-------
array
an array of index for the night
'''
    Index = np.zeros(len(Data)).astype(bool)  # np.bool is removed in modern NumPy
for counter in range(len(Data)):
StartIndex = counter-NData
if StartIndex<0:
StartIndex = 0
StopIndex = counter+NData+1
if StopIndex>len(Data):
StopIndex=len(Data)
Index[counter] = Data[counter]>0.999999*max(Data[StartIndex:StopIndex])
return Index
def RunningResidual(Time, Flux, NumBins):
    '''
    Function computes the running standard error of the flux in bins
    Parameters
    ----------
    Time: array of float
        time array corresponding to the flux values
    Flux: array of float
        flux array whose scatter is to be estimated
    NumBins: integer
        number of bins the data is divided into
    Returns
    -------------
    array
        standard error of the flux within each bin
    '''
NumPoints = int(len(Time)/NumBins)
CurrentSTD = []
for i in range(NumBins):
Start = i*NumPoints
Stop = (i+1)*NumPoints
CurrentSTD.append(np.std(Flux[Start:Stop])/(np.sqrt(NumPoints)))
CurrentSTD = np.array(CurrentSTD)
return CurrentSTD
def moving_average(series, sigma=5, NumPoint=75):
'''
Function yields the moving average of the data
Parameters
------------
series: array of float
array for which the moving average is to be determined
sigma: float
Standard deviation used to construct the normal function
NumPoint: integer
Number of points for generating the gaussian function
Returns
-------------
    array, array
        the gaussian-weighted moving average and the corresponding variance
'''
b = gaussian(NumPoint, sigma)
average = filters.convolve1d(series, b/b.sum())
var = filters.convolve1d(np.power(series-average,2), b/b.sum())
return average, var
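# A short sketch of moving_average on noisy synthetic data (assumed values):
# the gaussian kernel (width sigma, NumPoint samples) smooths the series, and
# the same convolution of squared residuals gives a local variance estimate.
if __name__ == "__main__":
    _y = np.sin(np.linspace(0, 10, 500)) + np.random.normal(0, 0.1, 500)
    _avg, _var = moving_average(_y, sigma=5, NumPoint=75)
    assert _avg.shape == _y.shape and np.all(_var >= 0)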
def FindQuality(Time, Data, CutOff=6.0, NIter=2):
'''
Function to find quality based on all the data
Parameter
----------
Time: array
The time series of the data
Data: array
The data series for finding the outliers
CutOff: float
The cutoff value for finding the threshold for the cutoff
NIter: int
        The number of iterations used when finding the outliers
Returns
------------
    Boolean array that is True for data points passing the outlier cut
'''
NanIndex = np.logical_or(np.isnan(Time),np.isnan(Data))
SelectIndex = ~NanIndex
for IterCount in range(NIter):
        _, var = moving_average(Data[SelectIndex])
spl = UnivariateSpline(Time[SelectIndex], Data[SelectIndex], w=1.0/np.sqrt(var))
trend = spl(Time)
Residual = Data- trend
STD = np.std(Residual[SelectIndex])
Value = np.abs(Residual)/STD
SelectIndex = np.logical_and(SelectIndex, Value<CutOff)
return SelectIndex
def ParseFile(Location):
'''
    This function parses the Search Parameters initialization file
Input
#####################################
Location of the search initialization file
Output
#####################################
The parameters in dictionary format
'''
with open(Location,'r') as f:
Data = f.readlines()
ValueDict = {}
for Line in Data[1:]:
LineItem = Line.split("#")[0]
Key, Value = LineItem.split(":")
ValueDict[Key] = Value.replace(" ", "")
return ValueDict
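# Sketch of the init-file format ParseFile expects (inferred from the parser
# above): the first line is skipped, then "Key: Value  # optional comment".
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as _f:
        _f.write("# Search parameters\nPeriod: 1.5  # days\nNIter: 4  # passes\n")
    print(ParseFile(_f.name))  # -> {'Period': '1.5', 'NIter': '4'}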
def ReadTxtData(Location, TargetName):
'''
This function reads the input file
Input
#####################################
Location: Path to the folder containing the light curve.
TargetName: Name of the target for identifying the files.
Output
#####################################
Name of the parameters
Values of the parameters
'''
if len(Location) <1 or len(TargetName)<1:
raise NameError("No location or target available")
FileList = glob.glob(Location+"/*%s*.txt*" %TargetName)
NumFiles = len(FileList)
if NumFiles == 0:
raise NameError("No Files found")
AllData = []
for Counter,FileItem in enumerate(FileList):
#Headers
if Counter ==0 :
Header = open(FileItem,'r').readline().upper()
CSVFileFlag = "," in Header
if CSVFileFlag:
TempParameter = Header.split(",")
else:
TempParameter = Header.split("\t")
ParamName = []
for Param in TempParameter:
ParamName.append(Param.replace(" ", "").replace("#","").replace("\n",""))
try:
Data = np.loadtxt(FileItem,skiprows=1, delimiter=",")
        except ValueError:
Data = np.loadtxt(FileItem, skiprows=0)
AllData.extend(Data)
AllData = np.array(AllData)
ParamName = np.array(ParamName)
return ParamName, AllData
def ReadFitsData(Location, TargetName, version=1):
'''
This function reads the input file from Cambridge Pipeline
Parameter
------------
Location: string
Path to the folder containing the light curve.
TargetName: string
Name of the target used to identifying the files.
Either SpeculoosID or GAIAID is expected
version: int
Version of the dataproduct being used. Version 1 is
different from version 2.
Yields
---------
Name of the parameters
Values of the parameters
'''
if len(Location) <1 or len(TargetName)<1:
raise NameError("No location or target available")
FileList = glob.glob(Location+"/*%s*.fits*" %TargetName)
NumFiles = len(FileList)
if NumFiles == 0:
raise NameError("No Files found")
AllData = []
if version==1:
ParamName = ["TIME", "FLUX", "AIRMASS", "FWHM", \
"DX", "DY", "FWHM_X", "FWHM_Y", "SKY"]
elif version==2:
ParamName = ["TIME", "FLUX", "COMP_LC1", \
"COMP_LC2", "COMP_LC3", "AIRMASS", "FWHM", \
"DX", "DY", "FWHM_X", "FWHM_Y"]
for Counter,FileItem in enumerate(FileList):
FitsFile = fits.open(FileItem, memmap='r')
Time = FitsFile[1].data["JD-OBS"]
#Generate the array to save the data
CurrentData = np.zeros((len(Time), len(ParamName)))
if version==1:
CurrentData[:,0] = Time
CurrentData[:,1] = FitsFile[20].data[0,:]
CurrentData[:,2] = FitsFile[1].data['RA_MOVE']
CurrentData[:,3] = FitsFile[1].data['DEC_MOVE']
CurrentData[:,4] = FitsFile[1].data['PSF_A_1']
CurrentData[:,5] = FitsFile[1].data['PSF_B_1']
CurrentData[:,6] = FitsFile[1].data["AIRMASS"]
input("Trying to figure out the content of the fits file...")
plt.figure()
plt.plot(CurrentData[:,0] , CurrentData[:,1], "ko")
plt.plot(CurrentData[:,0] , FitsFile[24].data[0,:], "rd")
#plt.plot(FitsFile[1].data['TMID'], CurrentData[:,1], "ro")
plt.show()
elif version ==2:
CurrentData[:,0] = Time
CurrentData[:,1] = FitsFile[3].data[:,0]
CurrentData[:,2] = FitsFile[3].data[:,1]
CurrentData[:,3] = FitsFile[3].data[:,2]
CurrentData[:,4] = FitsFile[3].data[:,3]
CurrentData[:,5] = FitsFile[1].data["AIRMASS"]
CurrentData[:,6] = FitsFile[1].data["FWHM"]
CurrentData[:,7] = FitsFile[1].data["RA_MOVE"]
CurrentData[:,8] = FitsFile[1].data["DEC_MOVE"]
CurrentData[:,9] = FitsFile[1].data["PSF_A_5"]
CurrentData[:,10] = FitsFile[1].data["PSF_B_5"]
AllData.extend(CurrentData)
AllData = np.array(AllData)
ParamName = np.array(ParamName)
return ParamName, AllData
def TransitBoxModel(Time, T0=None, TDur=None, Delta=1):
'''
This function creates a box shaped transit
Parameters:
============
Time: numpy array
Array of time vector for which the transit is to be evaluated
T0: float
The mid point of the transit in unit of Time
TDur: float
        Transit Duration in days
    Delta: float
        Depth of the box transit (flux units)
Returns
==========
A vector of transit the same size as time
'''
TransitIndex = np.abs((Time-T0))<TDur/2
TransitModel = np.zeros(len(Time))
TransitModel[TransitIndex]-=Delta
return TransitModel
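# Minimal sketch: a 2-hour box transit centred at t = 0.5 d with depth 0.01
# (all values assumed for illustration).
if __name__ == "__main__":
    _t = np.linspace(0, 1, 1000)
    _model = TransitBoxModel(_t, T0=0.5, TDur=2.0/24.0, Delta=0.01)
    assert _model.min() == -0.01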
def SVDSolver(A, b, T0, TDur, Combination):
'''
Returns the least square coefficients based on basis matrix
using Singular Value Decomposition
Parameters
----------
    A: (N,M) sized array which serves as the basis matrix
Flux: N size array
Flux series
T0: The mid transit time
TDur: The transit Duration
Combination: The columns used for the getting the combination vector
Returns
--------
    CalcCoef, Uncertainty, Residual, Model, DetrendedModel, T0, TDur, Combination
        the least-squares coefficients, their uncertainties, the residual,
        the full and detrended models, and the pass-through inputs
'''
b = b.T
N, M = np.shape(A)
U,S,V = np.linalg.svd(A, full_matrices=False)
S = np.diag(S)
S[S==0] = 1.0e10
W = 1./S
CalcCoef = reduce(np.matmul,[U.T, b, W, V])
Cov = reduce(np.matmul,[V.T,W*W,V])
Residual = np.sum((np.matmul(A,CalcCoef)-b)**2.0)
ChiSquaredReduced = Residual/(N-M)
Cov = ChiSquaredReduced*Cov
Uncertainty = np.sqrt(np.diag(Cov))
Model = np.dot(A,CalcCoef)
DetrendedCoef = np.copy(CalcCoef)
DetrendedCoef[-2] = 0.0
DetrendedModel = np.dot(A, DetrendedCoef)
return CalcCoef, Uncertainty, Residual, Model, \
DetrendedModel,T0, TDur, Combination
def SplineFlattening(Time, Flux, period, NIter = 4, StdCutOff=2.5, poly=3, knot=1):
'''
    This fits a spline to the data with iterative outlier rejection
'''
TimeCopy = np.copy(Time)#[~OutliersIndex]
FluxCopy = np.copy(Flux)#[~OutliersIndex]
KnotSpacing = knot #increase ChunkSize or decrease ChunkSize
PolyDeg = int(poly)
for i in range(NIter):
NumOrbits = int((TimeCopy[-1]-TimeCopy[0])/period)
if NumOrbits<1:
NumOrbits=1
ChunkSize = KnotSpacing*len(TimeCopy)/NumOrbits
N = int(len(Time)/ChunkSize)
Location = [int((i+0.5)*ChunkSize) for i in range(0,N)]
knots = TimeCopy[Location]
spl = spline(TimeCopy, FluxCopy, knots, k=PolyDeg)
FluxPred = spl(TimeCopy)
Residual = FluxCopy-FluxPred
Std = np.std(Residual)
GoodIndex = np.abs(Residual)<StdCutOff*Std
TimeCopy = TimeCopy[GoodIndex]
FluxCopy = FluxCopy[GoodIndex]
FluxPred = spl(Time)
return FluxPred
def GetID(Name, IdType=None):
'''
    Method to convert between SPECULOOS ID and GAIA ID
'''
#Loading the database
    Data = np.loadtxt("database/Targets.csv", delimiter=",", skiprows=1, dtype=str)
SpName = Data[:,0]
SpName = np.array([Item.upper() for Item in SpName])
    GaiaID = Data[:,2].astype(int)
if "SPECULOOS" in IdType.upper():
return GaiaID[SpName == Name.upper()][0]
elif "GAIA" in IdType.upper():
return SpName[GaiaID==int(Name)][0]
else:
return "Not Found"
|
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import absolute_import
import sys
import unittest
import datadog_api_client.v2
try:
from datadog_api_client.v2.model import permission_attributes
except ImportError:
permission_attributes = sys.modules[
'datadog_api_client.v2.model.permission_attributes']
try:
from datadog_api_client.v2.model import permissions_type
except ImportError:
permissions_type = sys.modules[
'datadog_api_client.v2.model.permissions_type']
from datadog_api_client.v2.model.permission import Permission
class TestPermission(unittest.TestCase):
"""Permission unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPermission(self):
"""Test Permission"""
# FIXME: construct object with mandatory attributes with example values
# model = Permission() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from fn_ldap_multidomain_utilities.util.helper import LDAPUtilitiesHelper
from ast import literal_eval
from ldap3.extend.microsoft.addMembersToGroups import ad_add_members_to_groups as ad_add_members_to_groups
PACKAGE_NAME = "fn_ldap_multidomain_utilities"
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'ldap_md_utilities_add_to_groups''"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get(PACKAGE_NAME, {})
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get(PACKAGE_NAME, {})
@function("ldap_md_utilities_add_to_groups")
def _ldap_md_utilities_add_to_groups_function(self, event, *args, **kwargs):
"""Function: A function that allows adding multiple users to multiple groups"""
try:
# Get the wf_instance_id of the workflow this Function was called in
wf_instance_id = event.message["workflow_instance"]["workflow_instance_id"]
yield StatusMessage("Starting 'ldap_md_utilities_add_to_groups' running in workflow '{0}'".format(wf_instance_id))
# Get the function parameters:
ldap_md_domain_name = kwargs.get("ldap_md_domain_name") # text
ldap_md_multiple_user_dn = kwargs.get("ldap_md_multiple_user_dn") # text
ldap_md_multiple_group_dn = kwargs.get("ldap_md_multiple_group_dn") # text
log = logging.getLogger(__name__)
log.info("ldap_md_domain_name: %s", ldap_md_domain_name)
log.info("ldap_md_multiple_user_dn: %s", ldap_md_multiple_user_dn)
log.info("ldap_md_multiple_group_dn: %s", ldap_md_multiple_group_dn)
yield StatusMessage("Function Inputs OK")
            # Instantiate helper (which gets appconfigs from file)
helper = LDAPUtilitiesHelper(self.options, ldap_md_domain_name)
log.info("[app.config] -ldap_server: %s", helper.LDAP_SERVER)
log.info("[app.config] -ldap_user_dn: %s", helper.LDAP_USER_DN)
yield StatusMessage("Appconfig Settings OK")
##############################################
if not helper.LDAP_IS_ACTIVE_DIRECTORY:
raise FunctionError("This function only supports an Active Directory connection. Make sure ldap_is_active_directory is set to True in the app.config file")
try:
# Try converting input to an array
ldap_md_multiple_user_dn = literal_eval(ldap_md_multiple_user_dn)
ldap_md_multiple_group_dn = literal_eval(ldap_md_multiple_group_dn)
except Exception:
raise ValueError("""ldap_md_multiple_user_dn and ldap_md_multiple_group_dn must be a string repersenation of an array e.g. "['dn=Accounts Group,dc=example,dc=com', 'dn=IT Group,dc=example,dc=com']" """)
            # Instantiate LDAP Server and Connection
c = helper.get_ldap_connection()
try:
# Bind to the connection
c.bind()
except Exception as err:
raise ValueError("Cannot connect to LDAP Server. Ensure credentials are correct\n Error: {0}".format(err))
# Inform user
msg = "Connected to {0}".format("Active Directory")
yield StatusMessage(msg)
res = False
try:
yield StatusMessage("Attempting to add user(s) to group(s)")
                # perform the ad_add_members_to_groups operation
res = ad_add_members_to_groups(c, ldap_md_multiple_user_dn, ldap_md_multiple_group_dn, True)
# Test: res = 'ad_add_members_to_groups(c, ' + str(ldap_md_multiple_user_dn) + ', ' + str(ldap_md_multiple_group_dn) + ', True)'
except Exception:
raise ValueError("Ensure all user and group DNs exist")
finally:
# Unbind connection
c.unbind()
##############################################
results = {
"success": res,
"domain_name": ldap_md_domain_name,
"users_dn": ldap_md_multiple_user_dn,
"groups_dn": ldap_md_multiple_group_dn
}
yield StatusMessage("Finished 'ldap_md_utilities_add_to_groups' that was running in workflow '{0}'".format(wf_instance_id))
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception:
yield FunctionError()
|
strIN = input("String to calculate the length: ")
print(f'Length: {len(strIN)}')
|
"""
<<<<<<<<<<Base Class Initializers>>>>>>>>>>>
-> Unlike C++ and Java, Python doesn't automatically call base class initializers.
-> __init__ is treated just like any other method.
-> If a subclass defines __init__, it must explicitly call the base class implementation for it to be run.
"""
"""
<<<<<<<<<Type Inspection>>>>>>>>>>
isinstance()
------------------
Determines if an object is an instance of a type.
Takes an object as its first argument and a type as its second.
Returns True if the first argument is an instance of the second.
examples:
-> print(isinstance(5, int)) -> True
-> print(isinstance('Or Hasson!', str)) -> True
-> print(isinstance(4.999, bytes)) -> False
<<<<<<<<Checking Multiple Types>>>>>>>>>>>>>
isinstance(obj, (type_a, type_b, type_c))
"""
"""
<<<<<<<<<<Type Checks in Python>>>>>>>>>>>>>>
isinstance() can be used for type checking in Python
Some people consider type checking a sign of poor design.
Sometimes, though, a type check is the easiest way to solve a problem.
"""
"""
<<<<<<<< issubclass() >>>>>>>>>>
Operates on types to check for sub/superclass relationships.
Determines if one class is a subclass of another.
Takes two arguments, both of which must be types.
Returns True if the first argument is a subclass of the second.
"""
|
import re
import stanza
import deplacy
config = {
'processors': 'tokenize,pos,lemma,depparse',
'lang': 'en',
}
nlp = stanza.Pipeline(**config)
string = "The Old Kingdom is the period in the third millennium also known as the 'Age of the Pyramids' or 'Age of the Pyramid Builders' as it includes the great 4th Dynasty when King Sneferu perfected the art of pyramid building and the pyramids of Giza were constructed under the kings Khufu, Khafre, and Menkaure. "
string = "He doesn't like candy."
def preprocess_string(string):
try:
(start, stop) = (re.search("See also", string)).span()
string = string[:start]
except AttributeError:
pass
    string = re.sub(r"\((.*?)\) ", "", string)  # Remove parenthetical asides
return string
string = preprocess_string(string)
doc = nlp(string)
print(nlp(string))
deplacy.render(doc)
import spacy
nlp_spacy = spacy.load("en_core_web_lg")
str1 = nlp_spacy("implies")
str2 = nlp_spacy("also known")
str3 = nlp_spacy("as it includes")
print(str1.similarity(str2))
print(str1.similarity(str3))
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 10 18:05:15 2018
@author: LocalAdmin
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
def read_stereo_image(im="stereo_image_explorer.bmp"):
cv_im = cv2.imread(im);
imgL = cv_im[0:96, 126:252, :];
imgR = cv_im[0:96, 0:126, :];
return [imgL, imgR]
def simple_stereo(imgL, imgR, max_disparity=30):
W = imgL.shape[1];
H = imgL.shape[0];
# create the disparities image:
Disparities = np.zeros([H, W]);
# loop over the image
for x in range(W):
# in the left border of the left image, not all disparities can be investigated:
max_disp = np.min([x, max_disparity]);
        # imgR[y, x-max_disp:x+1] runs from the largest disparity down to 0,
        # so list the disparities in the same (descending) order:
        disps = np.arange(max_disp, -1, -1);
for y in range(H):
# we can determine the differences in one go:
            # cast to int to avoid uint8 wrap-around in the subtraction:
            differences = np.abs(int(imgL[y,x,0]) - imgR[y, x-max_disp:x+1,0].astype(int));
# the minimal difference determines the disparity
disp_ind = np.argmin(differences);
disparity = disps[disp_ind];
Disparities[y, x] = disparity;
return Disparities;
def get_differences_curve(imgL, imgR, x, y, max_disparity=30):
# determine the disparities that will be investigated:
max_disp = np.min([x, max_disparity]);
    disps = np.arange(max_disp, -1, -1);  # match the descending order of the imgR slice
# we can determine the differences in one go:
    differences = np.abs(int(imgL[y,x,0]) - imgR[y, x-max_disp:x+1,0].astype(int));
# the minimal difference determines the disparity
disp_ind = np.argmin(differences);
disparity = disps[disp_ind];
return [differences, disps, disp_ind];
def windowed_stereo(imgL, imgR, max_disparity=30, window_half_size=3):
W = imgL.shape[1];
H = imgL.shape[0];
# create the disparities image:
Disparities = np.zeros([H, W]);
# loop over the image
for x in np.arange(window_half_size, W-window_half_size):
# in the left border of the left image, not all disparities can be investigated:
max_disp = np.min([x-window_half_size, max_disparity]);
if(max_disp >= 0):
disps = np.arange(0, max_disp+1);
differences = np.zeros([len(disps), 1]);
for y in np.arange(window_half_size, H-window_half_size):
window_left = imgL[y-window_half_size:y+window_half_size, x-window_half_size:x+window_half_size, 0];
for d in disps:
window_right = imgR[y-window_half_size:y+window_half_size, x-d-window_half_size:x-d+window_half_size, 0];
differences[d] = np.sum(np.abs(window_left.astype(float) - window_right.astype(float)));
# the minimal difference determines the disparity
disp_ind = np.argmin(differences);
disparity = disps[disp_ind];
Disparities[y, x] = disparity;
return Disparities;
def calculate_disparities(imgL, imgR, window_size=7, min_disp=0, num_disp=16):
# semi-global matching:
stereo = cv2.StereoSGBM_create(numDisparities = num_disp, blockSize = window_size);
disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0;
return disp;
def plot_relation_disparity_depth(f = 300, T_X = 0.10, max_disp = 64):
""" Focal length f is in pixels, T_X is in meters.
"""
disparities = np.arange(1, max_disp+1, 1);
depths = np.zeros(max_disp);
for disp in disparities:
depths[disp-1] = f * (T_X / disp);
plt.figure();
plt.plot(disparities, depths, 'o');
plt.xlabel('Disparity [px]')
plt.ylabel('Depth Z [m]')
plot_relation_disparity_depth(f = 140, T_X = 0.06, max_disp = 32)
#[imgL, imgR] = read_stereo_image();
#
#plt.figure();
#plt.subplot(121)
#plt.imshow(imgL);
#plt.title('Left');
#
#plt.subplot(122)
#plt.imshow(imgR);
#plt.title('Right');
#D = simple_stereo(imgL, imgR);
#plt.figure();
#plt.imshow(D, cmap='hot');
#plt.colorbar();
#plt.draw()
#
#print('Image size, width = {}, height = {}'.format(imgL.shape[1], imgL.shape[0]))
#[differences, disps, disp_ind] = get_differences_curve(imgL, imgR, 48, 64);
#plt.figure();
#plt.plot(disps, differences);
#plt.plot(disps[disp_ind], differences[disp_ind], 'x', markersize=10);
#plt.draw();
#
#D = windowed_stereo(imgL, imgR, max_disparity=30, window_half_size=3);
#plt.figure();
#plt.imshow(D, cmap='hot');
#plt.colorbar();
#plt.draw()
#
#D = calculate_disparities(imgL, imgR, window_size=7, min_disp=0, num_disp=16)
#plt.figure();
#plt.imshow(D, cmap='hot');
#plt.colorbar();
#plt.draw()
|
"""
Networks used in the main paper
"""
from mixmo.utils.logger import get_logger
from mixmo.networks import resnet, wrn
LOGGER = get_logger(__name__, level="DEBUG")
def get_network(config_network, config_args):
"""
Return a new instance of network
"""
# Available networks for tiny
if config_args["data"]["dataset"].startswith('tinyimagenet'):
network_factory = resnet.resnet_network_factory
elif config_args["data"]["dataset"].startswith('cifar'):
network_factory = wrn.wrn_network_factory
else:
raise NotImplementedError
LOGGER.warning(f"Loading network: {config_network['name']}")
return network_factory[config_network["name"]](
config_network=config_network,
config_args=config_args)
|
"""
Application module does all REST API operations on application endpoint
"""
import random
import urllib
from common import assert_status_code, assert_content_type_json, \
load_json_schema, assert_valid_schema, assert_valid_schema_error
from httpstatus import HTTP_OK, HTTP_NOT_FOUND, HTTP_CREATED, HTTP_BAD_REQUEST, HTTP_NO_CONTENT
class Application:
"""
Application does all REST API functions on application endpoint
"""
def __init__(self, config, session, developer_email):
self.config = config
self.session = session
self.global_application_url = self.config['api_url'] + '/apps'
if developer_email is not None:
self.application_url = config['api_url'] + '/developers/' + urllib.parse.quote(developer_email) + '/apps'
self.schemas = {
'application': load_json_schema('application.json'),
'applications': load_json_schema('applications.json'),
'applications-uuid': load_json_schema('applications-uuids.json'),
'error': load_json_schema('error.json'),
}
def generate_app_name(self, number):
"""
Returns generated test application name
"""
return f"testsuite-app{number}"
def _create(self, success_expected, new_application=None):
"""
Create new application to be used as test subject.
If no app data provided generate application with random data
"""
if new_application is None:
random_int = random.randint(0,99999)
new_application = {
"name" : self.generate_app_name(random_int),
# "displayName": "Testsuite app",
"callbackUrl": "1",
"attributes" : [
{
"name" : f"name{random_int}",
"value" : f"value{random_int}",
}
],
}
response = self.session.post(self.application_url, json=new_application)
if success_expected:
assert_status_code(response, HTTP_CREATED)
assert_content_type_json(response)
# Check if just created application matches with what we requested
assert_valid_schema(response.json(), self.schemas['application'])
self.assert_compare(response.json(), new_application)
else:
assert_status_code(response, HTTP_BAD_REQUEST)
assert_content_type_json(response)
assert_valid_schema(response.json(), self.schemas['error'])
return response.json()
def create_new(self, new_application=None):
"""
Create new application, if no app data provided generate application with random data
"""
return self._create(True, new_application)
def create_negative(self, application):
"""
Attempt to create new application which already exists, should fail
"""
return self._create(False, application)
def create_key_positive(self, app_name, api_product_name):
"""
Create new key with assigned api product
"""
new_key = {
"apiProducts": [ api_product_name ],
"status": "approved"
}
headers = self.session.headers
headers['content-type'] = 'application/json'
application_url = self.application_url + '/' + urllib.parse.quote(app_name)
response = self.session.put(application_url, headers=headers, json=new_key)
assert_status_code(response, HTTP_OK)
assert_content_type_json(response)
assert_valid_schema(response.json(), self.schemas['application'])
return response.json()
def get_all_global(self):
"""
Get global list of all application uuids
"""
response = self.session.get(self.global_application_url)
assert_status_code(response, HTTP_OK)
assert_content_type_json(response)
assert_valid_schema(response.json(), self.schemas['applications-uuid'])
return response.json()
def get_all_global_detailed(self):
"""
Get global list of all application names
"""
response = self.session.get(self.global_application_url + '?expand=true')
assert_status_code(response, HTTP_OK)
assert_content_type_json(response)
assert_valid_schema(response.json(), self.schemas['applications'])
# TODO testing of paginating response
# TODO filtering of apptype, expand, rows, startKey, status queryparameters to filter
def get_all(self):
"""
Get all application names of one developer
"""
response = self.session.get(self.application_url)
assert_status_code(response, HTTP_OK)
assert_content_type_json(response)
assert_valid_schema(response.json(), self.schemas['applications'])
return response.json()
def get_positive(self, app_name):
"""
Get existing application
"""
application_url = self.application_url + '/' + urllib.parse.quote(app_name)
response = self.session.get(application_url)
assert_status_code(response, HTTP_OK)
assert_content_type_json(response)
retrieved_application = response.json()
assert_valid_schema(retrieved_application, self.schemas['application'])
return retrieved_application
def get_by_uuid_positive(self, app_uuid):
"""
Get existing application by uuid
"""
application_url = self.global_application_url + '/' + urllib.parse.quote(app_uuid)
response = self.session.get(application_url)
assert_status_code(response, HTTP_OK)
assert_content_type_json(response)
retrieved_application = response.json()
assert_valid_schema(retrieved_application, self.schemas['application'])
return retrieved_application
def update_positive(self, application):
"""
Update existing application
"""
application_url = self.application_url + '/' + urllib.parse.quote(application['name'])
response = self.session.post(application_url, json=application)
assert_status_code(response, HTTP_OK)
assert_content_type_json(response)
updated_application = response.json()
assert_valid_schema(updated_application, self.schemas['application'])
return updated_application
def _change_status(self, app_name, status, expect_success):
"""
Update status of application
"""
headers = self.session.headers
headers['content-type'] = 'application/octet-stream'
application_url = (self.application_url + '/' +
urllib.parse.quote(app_name) + '?action=' + status)
response = self.session.post(application_url, headers=headers)
if expect_success:
assert_status_code(response, HTTP_NO_CONTENT)
assert response.content == b''
else:
assert_status_code(response, HTTP_BAD_REQUEST)
assert_valid_schema_error(response.json())
def change_status_approve_positive(self, app_name):
"""
        Update status of application to approved
"""
self._change_status(app_name, 'approve', True)
def change_status_revoke_positive(self, app_name):
"""
        Update status of application to revoked
"""
self._change_status(app_name, 'revoke', True)
def _delete(self, app_name, expected_success):
"""
Delete existing application
"""
response = self.session.delete(self.application_url + '/' + urllib.parse.quote(app_name))
if expected_success:
assert_status_code(response, HTTP_OK)
assert_content_type_json(response)
assert_valid_schema(response.json(), self.schemas['application'])
else:
assert_status_code(response, HTTP_NOT_FOUND)
assert_content_type_json(response)
return response.json()
def delete_positive(self, app_name):
"""
Delete existing application
"""
return self._delete(app_name, True)
def delete_negative(self, app_name):
"""
Attempt to delete application, which should fail
"""
return self._delete(app_name, False)
def delete_all_test_developer(self):
"""
        Delete all applications created by the test suite
"""
for i in range(self.config['entity_count']):
app_name = self.generate_app_name(i)
application_url = self.application_url + '/' + urllib.parse.quote(app_name)
self.session.delete(application_url)
def assert_compare(self, application_a, application_b):
"""
        Compares the minimum set of required fields that can be compared as-is between applications
"""
assert application_a['name'] == application_b['name']
# assert application_a['displayName'] == application_b['displayName']
assert application_a['callbackUrl'] == application_b['callbackUrl']
assert (application_a['attributes'].sort(key=lambda x: x['name'])
== application_b['attributes'].sort(key=lambda x: x['name']))
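def _example_usage():
    """
    Illustrative wiring only (a sketch, never called by the test suite):
    assumes a requests-style session and a config dict providing at least
    'api_url' and 'entity_count'; the values below are placeholders
    """
    import requests
    config = {'api_url': 'https://api.example.com/v1', 'entity_count': 5}
    app_api = Application(config, requests.Session(), '[email protected]')
    created = app_api.create_new()
    app_api.delete_positive(created['name'])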
|
import logging
import time as timer
import angr
lw = logging.getLogger("CustomSimProcedureWindows")
class GetTempFileNameA(angr.SimProcedure):
def decodeString(self, ptr):
fileName = self.state.mem[ptr].string.concrete
return fileName
def run(self, lpPathName, lpPrefixString, uUnique, lpTempFileName):
dirname = self.decodeString(lpPathName)
name = self.decodeString(lpPrefixString)[:3]
uid = self.state.solver.eval(uUnique)
if uid == 0:
uid = int(timer.time())
hexnum = "{0:0{1}x}".format(uid, 2)
if hasattr(dirname, "decode"):
try:
dirname = dirname.decode("utf-8")
            except UnicodeDecodeError:
dirname = dirname.decode("utf-8",errors="ignore")
if hasattr(name, "decode"):
try:
name = name.decode("utf-8")
            except UnicodeDecodeError:
name = name.decode("utf-8",errors="ignore")
fd = self.state.posix.open(
dirname + name + hexnum + ".TMP\0", self.state.solver.BVV(2, self.arch.bits)
)
newName = dirname + name + hexnum + ".TMP\0"
newName = self.state.solver.BVV(newName)
self.state.memory.store(lpTempFileName, newName)
return int(hexnum, 16)
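# A minimal hook-up sketch (not part of the original module): angr replaces
# calls to the named Windows import with this SimProcedure. "sample.exe" is a
# hypothetical binary path used purely for illustration.
if __name__ == "__main__":
    proj = angr.Project("sample.exe", auto_load_libs=False)
    proj.hook_symbol("GetTempFileNameA", GetTempFileNameA())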
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import UInt16, String
from sensor_msgs.msg import Joy
from math import atan2
import numpy as np
import time
class RobotArmControl(object):
def __init__(self):
self.msg = Joy()
self.torso_pos = 90
self.shoulder_pos = 45
self.elbow_pos = 15
self.wristrot_pos = 90
self.wristbend_pos = 10
self.claw_pos = 180
self.torso_state = 0
self.shoulder_state = 0
self.elbow_state = 0
self.wristrot_state = 0
self.wristbend_state = 0
self.claw_state = 0
self.demo_button = 0
self.all_buttons = []
self.threshold = 0.5
self.arduino_connected = 0
self.joy_sub = rospy.Subscriber("/joy",Joy, self.joy_callback)
self.chat_pub = rospy.Publisher("chatter", String, queue_size=1000)
self.torso_pub = rospy.Publisher("torso_servo", UInt16, queue_size=10)
self.shoulder_pub = rospy.Publisher("shoulder_servo", UInt16, queue_size=10)
self.elbow_pub = rospy.Publisher("elbow_servo", UInt16, queue_size=10)
self.wristrot_pub = rospy.Publisher("wristrot_servo", UInt16, queue_size=10)
self.wristbend_pub = rospy.Publisher("wristbend_servo", UInt16, queue_size=10)
self.claw_pub = rospy.Publisher("claw_servo", UInt16, queue_size=10)
self.raspi_connect_pub = rospy.Publisher("raspi_connect", UInt16, queue_size=10)
#self.arduino_connect_sub = rospy.Subscriber("arduino_connect", UInt16, self.arduino_callback)
#def arduino_callback(self,msg):
# if (msg.data)
def move_servos(self):
self.torso_pub.publish(self.torso_pos)
self.shoulder_pub.publish(self.shoulder_pos)
self.elbow_pub.publish(self.elbow_pos)
self.wristrot_pub.publish(self.wristrot_pos)
self.wristbend_pub.publish(self.wristbend_pos)
self.claw_pub.publish(self.claw_pos)
def joy_callback(self,msg):
self.msg = msg
self.update_buttons(msg)
self.update_servos(msg)
def update_buttons(self, msg):
self.demo_button = msg.buttons[0]
self.all_buttons = msg.buttons
#self.chat_pub.publish(str("Demo Button="+str(self.demo_button)))
def update_servos(self, msg):
# Update Torso
if (msg.axes[0] > self.threshold and self.torso_pos <=180):
self.torso_state = 1
elif (msg.axes[0] < -(self.threshold) and self.torso_pos >=0):
self.torso_state = -1
else:
self.torso_state = 0
# Update shoulder
if (msg.axes[7] > self.threshold and self.shoulder_pos <= 180):
self.shoulder_state = 1
elif (msg.axes[7] < -(self.threshold) and self.shoulder_pos >= 0):
self.shoulder_state = -1
else:
self.shoulder_state = 0
# Update elbow
if (msg.axes[1] > self.threshold and self.elbow_pos<=180):
self.elbow_state = 1
elif (msg.axes[1] < -(self.threshold) and self.elbow_pos>=0):
self.elbow_state = -1
else:
self.elbow_state = 0
# Update wristrot
if (msg.axes[3] > self.threshold and self.wristrot_pos<=180):
self.wristrot_state = 1*(-1) # invert rotation direction for this servo
elif (msg.axes[3] < -(self.threshold) and self.wristrot_pos>=0):
self.wristrot_state = -1*(-1)
else:
self.wristrot_state = 0
# Update wristbend
if (msg.axes[4] > self.threshold and self.wristbend_pos<=180):
self.wristbend_state = 1
elif (msg.axes[4] < -(self.threshold) and self.wristbend_pos>=0):
self.wristbend_state = -1
else:
self.wristbend_state = 0
# Update claw
trig = self.msg.axes[5]
if trig > self.threshold:
self.claw_pos=180
else:
self.claw_pos=30
def move_torso(self):
new_pos = UInt16()
new_pos = self.torso_pos + self.torso_state
self.torso_pos = new_pos
self.torso_pub.publish(self.torso_pos)
self.chat_pub.publish(str("Torso Angle="+str(self.torso_pos)))
def move_shoulder(self):
new_pos = UInt16()
new_pos = self.shoulder_pos + self.shoulder_state
self.shoulder_pos = new_pos
self.shoulder_pub.publish(self.shoulder_pos)
self.chat_pub.publish(str("Shoulder Angle="+str(self.shoulder_pos)))
def move_elbow(self):
new_pos = UInt16()
new_pos = self.elbow_pos + self.elbow_state
self.elbow_pos = new_pos
self.elbow_pub.publish(self.elbow_pos)
self.chat_pub.publish(str("Elbow Angle="+str(self.elbow_pos)))
def move_wristrot(self):
new_pos = UInt16()
new_pos = self.wristrot_pos + self.wristrot_state
self.wristrot_pos = new_pos
self.wristrot_pub.publish(self.wristrot_pos)
self.chat_pub.publish(str("WristRot Angle="+str(self.wristrot_pos)))
def move_wristbend(self):
new_pos = UInt16()
new_pos = self.wristbend_pos + self.wristbend_state
self.wristbend_pos = new_pos
self.wristbend_pub.publish(self.wristbend_pos)
self.chat_pub.publish(str("WristBend Angle="+str(self.wristbend_pos)))
def move_claw(self):
self.claw_pub.publish(self.claw_pos)
self.chat_pub.publish(str("Claw Angle="+str(self.claw_pos)))
def run_robot(self):
# Should constantly be updating joint positions based on teleop control states
mesg = str("===================== Running Arm ======================")
self.chat_pub.publish(mesg)
self.chat_pub.publish(str("Demo Button="+str(self.demo_button)))
self.move_torso()
self.move_shoulder()
self.move_elbow()
self.move_wristrot()
self.move_wristbend()
self.move_claw()
if self.demo_button==1:
self.run_demo()
def run_demo(self):
mesg = str("=================== Running Arm Demo ====================")
self.chat_pub.publish(mesg)
# Set up arm
self.torso_pos=90
self.shoulder_pos=100
self.elbow_pos=65
self.wristrot_pos=90
self.move_servos()
time.sleep(1)
# Adjust grabber above printing bed
self.torso_pos=180
self.wristbend_pos=90
self.claw_pos=180
self.move_servos()
time.sleep(0.5)
# Lower grabber to printing bed
self.shoulder_pos=140
self.elbow_pos=70
self.move_servos()
time.sleep(0.5)
# Grab object
        self.claw_pos=30  # move_servos() publishes the *_pos fields
self.move_servos()
time.sleep(0.5)
# Move grabber above trash can
        self.torso_pos=100
        self.shoulder_pos=100
        self.elbow_pos=35
        self.wristbend_pos=125
self.move_servos()
time.sleep(1)
# Let go
        self.claw_pos=180
        self.move_servos()
def main():
rospy.init_node('desktop_teleop_arm')
r = rospy.Rate(10)
robotarm = RobotArmControl()
# Wait for connection status from arduino
#while robotarm.arduino_connected==0:
# pass
robotarm.move_servos()
while not rospy.is_shutdown():
# Send connect status to arduino
raspi_connect_msg = UInt16()
        raspi_connect_msg.data = 1
robotarm.raspi_connect_pub.publish(raspi_connect_msg)
robotarm.run_robot()
r.sleep()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
|
import datetime
from docx.shared import Cm
from docxtpl import DocxTemplate, InlineImage
"""""
Работа с текстами
"""""
def get_context(company, result_sku_list):  # returns a dictionary of template arguments
return {
'retailer': company,
'sku_list': result_sku_list,
}
def from_template(company, result_sku_list, template, signature):
template = DocxTemplate(template)
context = get_context(company, result_sku_list) # gets the context used to render the document
img_size = Cm(15) # sets the size of the image
acc = InlineImage(template, signature, img_size)
context['acc'] = acc # adds the InlineImage object to the context
template.render(context)
template.save(company + '_' + str(datetime.datetime.now().date()) + '_report.docx')
def generate_report(company, result_sku_list):
template = 'report.docx'
signature = 'Major.png'
from_template(company, result_sku_list, template, signature)
def toFixed(numObj, digits=0):
return f"{numObj:.{digits}f}"
car_data = ("""
brand price year
Volvo 1.5 2017
Lada 0.5 2018
Audi 2.0 2018
""")
with open('data_text', 'w') as f:
f.write(car_data.strip())
# Read the file line by line
f = open('data_text')
data = ''
for line in f:
    data += line  # each line already ends with '\n'
f.close()
generate_report('MAJOR', data)
|
import os
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
import time
import datetime as dt
import secrets
import random
## TODO check in requirements
from flask import Flask, flash, jsonify, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
## TODO cs50.sql -> lib
from cs50 import SQL
import json
import bcrypt
import jinja2
import dateutil.parser
import requests
## BEGIN SETUP
## Replace this with the base URL of your instance
baseurl = "https://calliapp.dev"
app = Flask(__name__)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
## Replace this with your database
db = SQL("sqlite:///calli.db")
## Registers datetime as a Jinja template filter
app.add_template_filter(dt.datetime)
## BEGIN FLASK
## Landing page ##
@app.route("/")
def root():
return render_template("homenew.html")
## Docs ##
@app.route("/docs")
def docs():
return render_template("docs.html")
@app.route("/docs/cli")
def docs_cli():
return render_template("docs_cli.html")
@app.route("/docs/api")
def docs_api():
return render_template("docs_api.html")
@app.route("/docs/server")
def docs_server():
return render_template("docs_server.html")
@app.route("/docs/install-cli")
def docs_install():
return render_template("docs_install.html")
## WEBAPP ##
## TODO accept redirects from /register
@app.route("/dash", methods=["GET"])
def web_dash():
if request.method == "GET":
## If user logged in ##
try:
session['user_id']
except KeyError:
return redirect('/login')
else:
## Today ##
now = dt.datetime.utcnow().strftime('%s')
today_start = dt.datetime.now().strftime('%s')
today_end = (dt.datetime.combine((dt.datetime.today()+dt.timedelta(days=1)), dt.time(0))+dt.timedelta(hours=(-1*int(session['offset'])))).strftime("%s")
today_calendar = db.execute("SELECT * FROM calendar WHERE userid=(:userid) AND start BETWEEN (:start) AND (:end) ORDER BY start", userid=session['user_id'], start=today_start, end=str(int(today_end)-1))
for event in today_calendar:
event['start'] = (((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
event['end'] = (((dt.datetime.utcfromtimestamp(int(event['end'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
## Tomorrow ##
tomorrow_start = (dt.datetime.combine((dt.datetime.today()+dt.timedelta(days=1)), dt.time(0))+dt.timedelta(hours=(-1*int(session['offset'])))).strftime("%s")
tomorrow_end = (dt.datetime.combine((dt.datetime.today()+dt.timedelta(days=2)), dt.time(0))+dt.timedelta(hours=(-1*int(session['offset'])))).strftime("%s")
tomorrow_calendar = db.execute("SELECT * FROM calendar WHERE userid=(:userid) AND start BETWEEN (:start) AND (:end) ORDER BY start", userid=session['user_id'], start=tomorrow_start, end=str(int(tomorrow_end)-1))
for event in tomorrow_calendar:
event['start'] = (((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
event['end'] = (((dt.datetime.utcfromtimestamp(int(event['end'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
## Rest of the week ##
rest_start = (dt.datetime.combine((dt.datetime.today()+dt.timedelta(days=2)), dt.time(0))+dt.timedelta(hours=(-1*int(session['offset'])))).strftime("%s")
reset_end = (dt.datetime.combine((dt.datetime.today()+dt.timedelta(days=7)), dt.time(0))+dt.timedelta(hours=(-1*int(session['offset'])))).strftime("%s")
rest_calendar = db.execute("SELECT * FROM calendar WHERE userid=(:userid) AND start BETWEEN (:start) AND (:end) ORDER BY start", userid=session['user_id'], start=rest_start, end=str(int(reset_end)-1))
## Timezone deltas ##
for event in rest_calendar:
event['day'] = (((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%a %d %b %y"))
event['start'] = (((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
event['end'] = (((dt.datetime.utcfromtimestamp(int(event['end'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
return render_template("dash.html", userid=session['user_id'], username=session['username'], today_calendar=today_calendar, tomorrow_calendar=tomorrow_calendar, rest_calendar=rest_calendar, offset=session['offset'], token=session['token'], url=baseurl)
## TODO ##
else:
return "only get"
## TODO create
@app.route("/dash/create", methods=["GET", "POST"])
def dash_create():
if request.method == "GET":
try:
session['user_id']
except KeyError:
return redirect('/login')
else:
today_default = dt.datetime.now().date().strftime("%a %d %b %y")
time_default = dt.time(12).strftime("%I:%M %P")
duration_default = str(60)
return render_template("dash_create.html", today_default=today_default, time_default=time_default, duration_default=duration_default, userid=session['user_id'], username=session['username'], offset=session['offset'], token=session['token'])
elif request.method == "POST":
print(request.form.get('name'))
print(request.form.get('day'))
print(request.form.get('start'))
print(request.form.get('duration'))
print(request.form.get('info'))
## Start parsing the new event
event = {}
event['info'] = request.form.get('info')
event['name'] = request.form.get('name')
if request.form.get('day'):
try:
date = dateutil.parser.parse(request.form.get('day'), dayfirst=True)
except ValueError:
render_template("dash_create.html", show_error="please enter a valid date for the event", userid=session['user_id'], username=session['username'], offset=session['offset'], token=session['token'])
else:
date = dt.datetime.now()
if request.form.get('start'):
try:
time = dateutil.parser.parse(request.form.get('start'), dayfirst=True)
except ValueError:
render_template("dash_create.html", show_error="please enter a valid time for the event", userid=session['user_id'], username=session['username'], offset=session['offset'], token=session['token'])
else:
time = dt.datetime.combine(dt.datetime.now().date(), dt.time(12))
if request.form.get('duration'):
try:
duration = int(request.form.get('duration'))
except ValueError:
return render_template("dash_create.html", show_error="please enter an integer duration for the event, or select reminder", userid=session['user_id'], username=session['username'], offset=session['offset'], token=session['token'])
else:
duration = 60
start = ((dt.datetime.combine(date.date(), time.time()).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=(-1*int(session['offset']))))
event['start'] = start.strftime("%s")
if request.form.get('type') == "event":
end = (start + dt.timedelta(minutes=duration))
event['end'] = end.strftime("%s")
else:
event['end'] = str(0)
if request.form.get('type') == "event":
event['type'] = "E"
elif request.form.get('type') == 'reminder':
event['type'] = "R"
else:
event['type'] = "?"
r = lambda: random.randint(0,255)
existing = db.execute("SELECT eventhex FROM calendar WHERE userid=(:userid)", userid=session['user_id'])
eventhex = '@%02X%02X%02X' % (r(),r(),r())
## Check for eventhex collission ##
while any(d['eventhex'] == eventhex for d in existing):
eventhex = '@%02X%02X%02X' % (r(),r(),r())
## Create event ##
pending = db.execute("INSERT INTO calendar (userid, eventhex, type, name, start, end, info) VALUES (:userid, :eventhex, :etype, :name, :start, :end, :info)", userid=session['user_id'], eventhex=eventhex, etype=event['type'], name=event['name'], start=event['start'], end=event['end'], info=event['info'])
if pending == None:
return render_template("dash_create.html", show_error="event creation failed. please try again.", userid=session['user_id'], username=session['username'], offset=session['offset'], token=session['token'])
else:
return redirect("/dash")
@app.route("/dash/edit", methods=["GET", "POST"])
def dash_edit():
## If logged in
if request.method == "GET":
try:
session['user_id']
except KeyError:
return redirect('/login')
else:
## query event arg
eventhex = '@' + request.args.get('event').upper()
event = db.execute("SELECT * FROM calendar WHERE eventhex=:eventhex AND userid=:userid", eventhex=eventhex, userid=session['user_id'])[0]
## Convert to dt object and add user timezone offset
event['day'] = (((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%a %d %b %y"))
event['duration'] = (int(event['end']) - int(event['start']))/60
event['start'] = (((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
return render_template("dash_edit.html", event=event, userid=session['user_id'], username=session['username'], offset=session['offset'], token=session['token'], eventhex=request.args.get('event').lower())
elif request.method == "POST":
## get old event for error redirect
## TODO some new info isnt sent
eventhex = '@' + request.args.get('event').upper()
event = db.execute("SELECT * FROM calendar WHERE eventhex=:eventhex AND userid=:userid", eventhex=eventhex, userid=session['user_id'])[0]
old_start = ((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset'])))
old_end = ((dt.datetime.utcfromtimestamp(int(event['end'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset'])))
event['day'] = old_start.strftime("%a %d %b %y")
event['duration'] = (int(event['end']) - int(event['start']))/60
event['start'] = old_start.strftime("%I:%M%p")
## Start parsing new fields
if request.form.get("info"):
new_info = request.form.get('info')
else:
new_info = event['info']
## Duration
if request.form.get("duration"):
if event['type'] == "E":
try:
new_duration = int(request.form.get('duration'))
except ValueError:
return render_template("dash_edit.html", show_error="please enter an integer number of minutes for event duration", event=event, userid=session['user_id'], username=session['username'], offset=session['offset'], token=session['token'], eventhex=request.args.get('event').lower())
else:
## default to old offset
new_duration = int(event['duration'])
## Date
if request.form.get("day"):
try:
new_date = dateutil.parser.parse(request.form.get('day'), dayfirst=True)
except ValueError:
return render_template("dash_edit.html", show_error="please enter a valid date", event=event, userid=session['user_id'], username=session['username'], offset=session['offset'], token=session['token'], eventhex=request.args.get('event').lower())
else:
new_date = old_start
## Start
if request.form.get("start"):
try:
new_time = dateutil.parser.parse(request.form.get('start'), dayfirst=True)
except ValueError:
return render_template("dash_edit.html", show_error="please enter a valid start time", event=event, userid=session['user_id'], username=session['username'], offset=session['offset'], token=session['token'], eventhex=request.args.get('event').lower())
else:
new_time = old_start
## Combine new date and time to start ##
new_start = (dt.datetime.combine(new_date.date(), new_time.time())+dt.timedelta(hours=(-1*int(session['offset']))))
        ## If event, calculate end ##
if event['type'] == "E":
new_end = (new_start + dt.timedelta(minutes=int(new_duration))).strftime('%s')
else:
## Else its a remind ##
new_end = str(0)
updated_event = db.execute("UPDATE calendar SET start=:start, end=:end, info=:info WHERE eventhex=(:eventhex) AND userid=(:userid)", userid=session['user_id'], eventhex=eventhex, start=new_start.strftime('%s'), end=new_end, info=new_info)
## If DB updated one event ##
if updated_event == 1:
return redirect('/dash')
else:
return render_template("dash_edit.html", show_error="edit unsuccessful. please try again", event=event, userid=session['user_id'], username=session['username'], offset=session['offset'], token=session['token'], eventhex=request.args.get('event').lower())
@app.route("/dash/events")
def dash_events():
if request.method == "GET":
## If user logged in ##
try:
session['user_id']
except KeyError:
return redirect('/login')
else:
## get events until the end of time ##
today_start = dt.datetime.now().strftime('%s')
events_calendar = db.execute("SELECT * FROM calendar WHERE userid=(:userid) AND type='E' AND start>=(:start) ORDER BY start", userid=session['user_id'], start=today_start)
for event in events_calendar:
event['day'] = (((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%a %d %b %y"))
event['start'] = (((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
event['end'] = (((dt.datetime.utcfromtimestamp(int(event['end'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
return render_template("dash_events.html", userid=session['user_id'], username=session['username'], events_calendar=events_calendar, offset=session['offset'])
## TODO
else:
return "no"
@app.route("/dash/reminders")
def dash_remind():
if request.method == "GET":
try:
session['user_id']
except KeyError:
return redirect('/login')
else:
## get reminds from now to the end of time ##
today_start = dt.datetime.now().strftime('%s')
reminds_calendar = db.execute("SELECT * FROM calendar WHERE userid=(:userid) AND type='R' AND start>=(:start) ORDER BY start", userid=session['user_id'], start=today_start)
for event in reminds_calendar:
event['day'] = (((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%a %d %b %y"))
event['start'] = (((dt.datetime.utcfromtimestamp(int(event['start'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
event['end'] = (((dt.datetime.utcfromtimestamp(int(event['end'])).replace(tzinfo=dt.timezone.utc)) + dt.timedelta(hours=int(session['offset']))).strftime("%I:%M%p"))
return render_template("dash_reminds.html", userid=session['user_id'], username=session['username'], reminds_calendar=reminds_calendar, offset=session['offset'])
## TODO
else:
return "no"
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/login", methods=["GET", "POST"])
def web_login():
if request.method == "GET":
return render_template("login.html")
elif request.method == "POST":
## Clear session
session.clear()
session['offset'] = request.form.get('timeoffset')
        ## If username and/or password aren't supplied
if not request.form.get("username"):
return render_template("login.html", show_error="please provide a username")
elif not request.form.get("password"):
return render_template("login.html", show_error="please provide a password")
## Get the user
users = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
## If the username exists
if users:
## If the hash is right
if bcrypt.checkpw(request.form.get("password").encode('utf-8'), users[0]['hash']):
session["user_id"] = users[0]["userid"]
session["token"] = users[0]["token"]
session["username"] = users[0]["username"]
return redirect('/dash')
else:
return render_template("login.html", show_error="invalid username or password")
else:
return render_template("login.html", show_error="invalid username or password")
else:
return jsonify("method not allowed"), 405, {'ContentType':'application/json'}
@app.route("/register", methods=["GET", "POST"])
def web_register():
if request.method == "GET":
return render_template("register.html")
elif request.method == "POST":
## Clientside checks should prevent empty form submission
username = request.form.get("username")
password = request.form.get("password")
confirmation = request.form.get("confirmation")
if password != confirmation:
return render_template("register.html", show_error="passwords must match")
if not username or not password:
return render_template("register.html", show_error="enter a username and a password")
else:
            ## This is not guaranteed to be unique - but 42 random bytes (~336 bits of entropy) make a collision astronomically unlikely
token = secrets.token_urlsafe(42)
db.execute("INSERT INTO users (username, hash, token) VALUES (:username, :hash, :token)", username=username, hash=bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(5)), token=token)
## TODO redirect to dashboard
return redirect("/")
else:
return jsonify("method not allowed"), 405, {'ContentType':'application/json'}
## API ##
@app.route("/api/events", methods=["POST", "GET", "DELETE", "PATCH"])
def new_event():
auth = db.execute("SELECT * FROM users WHERE token=(:token)", token=request.headers['token'])
if auth:
## TODO move auth to before request.method
if request.method == "GET":
## If GET by start and end time
if request.args.get('start') and request.args.get('end'):
start = request.args.get('start')
end = request.args.get('end')
today_calendar = db.execute("SELECT * FROM calendar WHERE userid=(:userid) AND start BETWEEN (:start) AND (:end) ORDER BY start", userid=auth[0]['userid'], start=start, end=end)
return jsonify(today_calendar)
## If GET by event id
elif request.args.get('eventhex'):
print(request.args.get('eventhex'))
today_calendar = db.execute("SELECT * FROM calendar WHERE userid=(:userid) AND eventhex=(:eventhex)", userid=auth[0]['userid'], eventhex="@"+request.args.get('eventhex').upper())
return jsonify(today_calendar)
## TODO If GET by start time onwards
elif request.args.get('start'):
start = request.args.get('start')
today_calendar = db.execute("SELECT * FROM calendar WHERE userid=(:userid) AND start>=(:start) ORDER BY start", userid=auth[0]['userid'], start=start)
return jsonify(today_calendar)
            ## Otherwise the query parameters are missing or unsupported
            else:
                return jsonify("bad request"), 400, {'ContentType':'application/json'}
elif request.method == "POST":
## Load content ##
content = request.json
## Random hex lambda ##
r = lambda: random.randint(0,255)
existing = db.execute("SELECT eventhex FROM calendar WHERE userid=(:userid)", userid=auth[0]['userid'])
eventhex = '@%02X%02X%02X' % (r(),r(),r())
            ## Check for eventhex collision ##
while any(d['eventhex'] == eventhex for d in existing):
eventhex = '@%02X%02X%02X' % (r(),r(),r())
## If there is no end time ##
try:
content['end']
## End defaults to 0 (reminds) ##
except KeyError:
content['end'] = str(0)
## Create event ##
db.execute("INSERT INTO calendar (userid, eventhex, type, name, start, end, info) VALUES (:userid, :eventhex, :etype, :name, :start, :end, :info)", userid=auth[0]['userid'], eventhex=eventhex, etype=content['type'], name=content['name'], start=content['start'], end=content['end'], info=content['info'])
## Return the chosen eventhex ##
return json.dumps({'eventhex':eventhex}), 200, {'ContentType':'application/json'}
elif request.method == "DELETE":
content = request.json
## Set a counter for number of events deleted ##
deleted = 0
## Start deleting ##
for eventhex in content['hex']:
deleted += db.execute("DELETE FROM calendar WHERE userid=(:userid) AND eventhex=(:eventhex)", userid=auth[0]['userid'], eventhex=eventhex.upper())
## If all the events got deleted ##
if deleted == len(content['hex']):
                ## Return the successfully deleted events ##
                return json.dumps({'eventhex': content['hex']}), 200, {'ContentType':'application/json'}
else:
                ## Otherwise report the failure ##
return jsonify("failed"), 401, {'ContentType':'application/json'}
elif request.method == "PATCH":
## re-create the eventhex string
eventid = "@" + request.args.get('eventhex')
content = request.json
## Timestamp generation is all clientside
updated_event = db.execute("UPDATE calendar SET start=:start, end=:end, info=:info WHERE eventhex=(:eventid) AND userid=(:userid)", userid=auth[0]['userid'], eventid=eventid.upper(), start=content['start'], end=content['end'], info=content['info'])
if updated_event == 1:
return jsonify("success"), 204, {'ContentType':'application/json'}
else:
return jsonify("failed"), 404, {'ContentType':'application/json'}
else:
return jsonify("method not allowed"), 405, {'ContentType':'application/json'}
else:
return jsonify("unauthorized"), 401, {'ContentType':'application/json'}
@app.route("/api/login", methods=["POST"])
def login():
## Same as /login
if request.method == "POST":
content = request.json
users = db.execute("SELECT * FROM users WHERE username=(:username)", username=content['username'])
if users:
if bcrypt.checkpw(content['password'].encode('utf-8'), users[0]['hash']):
return jsonify(users[0]['token']), 200, {'ContentType':'application/json'}
else:
return jsonify("unauthorized"), 401, {'ContentType':'application/json'}
else:
return jsonify("method not allowed"), 405, {'ContentType':'application/json'}
if __name__ == "__main__":
app.run(host='0.0.0.0')
|
# Copyright 2018 Luddite Labs Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BaseBlock:
def __init__(self, indent=0, first_offset=None, child_indent=0,
parent_width=None, top_margin=0, bottom_margin=0):
"""Construct a block.
Args:
indent: Block indent.
first_offset: Extra limit for the first line
(will be subtracted from the width).
child_indent: Extra indent for the nested blocks.
parent_width: Width of the parent block. This block width will be
calculated as ``parent_width - indent``.
top_margin: Number of blank lines before the block's content.
bottom_margin: Number of blank lines after the block's content.
"""
self.indent = indent
self.first_offset = first_offset
self.child_indent = child_indent
self.width = parent_width - indent if parent_width else None
self.top_margin = top_margin
self.bottom_margin = bottom_margin
self.merge_to_new_line = False
def is_empty(self):
return True
def clear(self):
pass
def start_box(self):
pass
def end_box(self):
pass
def is_box_started(self):
return False
def add_boxed(self, text):
raise NotImplementedError
def add_text(self, text):
raise NotImplementedError
def reopen_box(self):
raise NotImplementedError
def get_lines(self):
raise NotImplementedError
class LineBlock(BaseBlock):
"""Text paragraph with indentation."""
def __init__(self, indent=0, first_offset=None, child_indent=0,
parent_width=None, top_margin=0, bottom_margin=0):
super(LineBlock, self).__init__(indent, first_offset, child_indent,
parent_width, top_margin, bottom_margin)
self.clear()
def clear(self):
self.lines = []
super(LineBlock, self).clear()
def is_empty(self):
return len(self.lines) == 0
def add_boxed(self, text):
self.add_text(text)
def add_text(self, text):
lines = text.split('\n')
if self.lines:
self.lines[-1] += lines[0]
del lines[0]
self.lines.extend(lines)
def reopen_box(self):
pass
def get_lines(self):
return iter(self.lines)
class WrapBlock(BaseBlock):
"""Text paragraph with wrapping and indentation features."""
#: Enclosing leading (open) chars.
enclose_start = """([{"'"""
#: Trailing punctuation chars.
punctuation_end = """,.:)];!?"'}"""
def __init__(self, indent=0, first_offset=None, child_indent=0,
parent_width=None, top_margin=0, bottom_margin=0):
super(WrapBlock, self).__init__(indent, first_offset, child_indent,
parent_width, top_margin, bottom_margin)
self.words = []
self._box = None
def clear(self):
"""Clear block."""
self.words = []
self._box = None
super(WrapBlock, self).clear()
def is_empty(self):
return len(self.words) == 0
def start_box(self):
"""Start block of words with disabled wrapping.
        All content added after calling this method will not be split to fit
        the :attr:`width`.
"""
if not self.is_box_started():
self._box = []
self.words.append(self._box)
def end_box(self):
"""Close block of words with disabled wrapping."""
if self._box:
self._handle_enclose()
self._box = None
def is_box_started(self):
return self._box is not None
def add_boxed(self, text):
"""Add text with protecting from wrapping.
Args:
text: Text to add.
Notes:
            Don't add multi-line text!
"""
if text == '\n':
self.words.append('\n')
else:
self.words.append([text])
self._handle_enclose()
def reopen_box(self):
"""Reopen last box."""
if self._box is None:
self._box = self.words[-1]
assert isinstance(self._box, list)
def _handle_enclose(self):
"""Helper method to handle enclose chars before last box.
        It is called after a non-empty box is added.
"""
# If there are at least two words and word before last is not a box
# we check for punctuation.
if len(self.words) > 1 and not isinstance(self.words[-2], list):
c = self.words[-2][-1]
if c in self.enclose_start:
merge = True
if c in ('"', "'"):
# If enclosing char occurs an even number of times in prev
# words then don't merge with last box.
#
# Example:
#
# Code extracted "from pytest/setup.py" bla.
#
# Code extracted from pytest/setup.py" bla.
count = 0
stop = len(self.words) - 2
for i, w in enumerate(self.words):
if i > stop:
break
if not isinstance(w, list):
count += w.count(c)
if count % 2 == 0:
merge = False
if merge:
self.words[-1].insert(0, self.words[-2])
del self.words[-2]
def add_text(self, text):
"""Add text to the block.
Args:
text: String which may contain line breaks.
Notes:
            If :meth:`start_box` was called then the text will be protected
            from wrapping, so don't add multi-line text in such a case.
"""
# Add word to box.
# Note: text must be without line breaks!
if self._box is not None:
self._box.append(text)
return
if not text or text == '\n':
return
elif text.startswith('\n'):
text = text[1:]
is_first_line = True
for line in text.splitlines():
if not line:
words = [u'\n', u'\n']
else:
words = [x for x in line.strip().split(' ') if x]
            # Handle punctuation chars if the prev word is boxed and the given
            # text starts with punctuation. Check only the very first line and word.
#
# It handles the following cases:
#
# <boxed>,text
# <boxed>, text
# <boxed>) text
# <boxed>), text
# <boxed>),text <- in this case whole [,text] will be 'boxed'.
# etc.
#
# In above cases we need to prevent spaces and line breaks
# between boxed word and punctuation. For that we move
# word with punctuation inside the boxed word.
if (self.words
and isinstance(self.words[-1], list)
and is_first_line
and words):
                # Whether the first word in the text has a leading space.
# If line is empty then we force True to skip extra processing.
leading_space = text[0].isspace() if line else True
# If there is a space then we do nothing - in this case
# this word will be separated by space as expected.
                # Otherwise, if the word starts with a punctuation char,
                # add it to the boxed word.
                # NOTE: we add the whole word, not only the punctuation
                # chars; this keeps the original formatting.
if not leading_space and words[0][0] in self.punctuation_end:
self.words[-1].append(words[0])
del words[0]
self.words.extend(words)
is_first_line = False
def get_lines(self):
"""Get result text lines.
Yields:
Text lines.
"""
# Do nothing for empty content.
if not self.words:
return
line = []
line_sz = 0
first_word = True
first_line = True
for word in self.words:
# Skip empty words and boxed lists.
if not word:
continue
if first_line:
if self.first_offset is None:
width = self.width
else:
width = self.width - self.first_offset
else:
width = self.width
# It's a protected from wrapping box of words, build result 'word'.
if isinstance(word, list):
word = ''.join(word)
if word == '\n':
word_sz = width + 1 # force new line
else:
word_sz = len(word) + (0 if first_word else 1) # 1 for space
if line_sz + word_sz <= width:
line_sz += word_sz
line.append(word)
first_word = False
else:
                # Yield the accumulated line, but skip a first line that is
                # still empty (it has not been filled yet).
if not first_line or line:
yield _join(line)
if word == '\n':
line = []
line_sz = 0
first_word = True
first_line = False
else:
# Recalc to have no +1 for possible space
# since we at line start.
word_sz = len(word)
line = [word]
line_sz = word_sz
first_line = False
yield _join(line)
def _join(words):
"""Join words into single line.
Args:
words: List of words.
Returns:
String with space separated words.
"""
return u' '.join(words) if words else u''
class BlockManager:
"""Blocks manager.
    It manages block options, indentation, and merging, and constructs the
    resulting content lines.
"""
def __init__(self, indent=0, width=None):
"""Construct manager.
Args:
indent: Initial indent.
width: Content width.
"""
self.indent = indent
self.width = width
self.lines = []
self._blocks = []
self._block_params = None
self._last_block = None
def clear(self):
self._blocks = []
self.lines = []
self._last_block = None
self._block_params = None
@property
def block(self):
return self._blocks[-1] if self._blocks else None
@property
def last_width(self):
"""Last available width."""
if self._blocks:
return self._blocks[-1].width
return self.width
# NOTE: self._blocks must be non-empty
def _dump_current_lines(self):
block = self._blocks[-1]
if block.is_empty():
return
prev_block = self._last_block
merge = (
(prev_block is not None and prev_block.bottom_margin is None)
or block.top_margin is None
)
# Auto calculate first line offset if not specified.
if merge and self.lines and block.first_offset is None:
# first_offset = len(last line) + 1 for space - indent
block.first_offset = len(self.lines[-1]) + 1 - block.indent
lines = block.get_lines()
# Merge with last line if prev block has None bottom margin
# or this block has None top margin.
#
# There are two ways to merge starting from new line.
# 1. Set block's merge_to_new_line=True.
# 2. Add empty line to the top of the block content.
# In this case the line will be skipped on merging and
# remaining lines will be appended from the new line:
#
# if block.width:
# block.add_text('\n\n')
# else:
# block.add_text('\n')
#
if merge and self.lines:
# If merging is required from the new line then do nothing.
if block.top_margin is None and block.merge_to_new_line:
pass
else:
# Merge only non-empty lines.
line = next(lines).lstrip()
if line:
self.lines[-1] += u' ' + line
# Add top margin only if there are lines.
elif self.lines and prev_block and not block.is_empty():
# At first make sure we have margin between blocks.
# Choose biggest one.
margin = max(block.top_margin, prev_block.bottom_margin)
# Add margin between prev content and this block.
self.lines.extend([u''] * margin)
offset = u' ' * block.indent
self.lines.extend(offset + x for x in lines)
block.clear()
self._last_block = block
def open_block(self, indent=0, first_offset=None, child_indent=0,
top_margin=0, bottom_margin=0, no_wrap=False, next=None):
"""Open new block.
If previous block is not closed then:
        * its content will be saved and the block will be cleared.
        * the new block inherits the indentation of the previous one.
Args:
indent: Block indent.
first_offset: Offset for the first line.
child_indent: Nested blocks extra indentation.
top_margin: Top margin (blank lines). If ``None`` then the block
will be merged with the previous block.
            bottom_margin: Bottom margin. If ``None`` then the next block will be
merged with this one.
no_wrap: Don't wrap content even if ``width`` is set.
next: Arguments for the next block. Keys are the same as for this
method. They will override parameters of the next
:meth:`open_block` call.
"""
if no_wrap:
cls = LineBlock
else:
cls = LineBlock if self.width is None else WrapBlock
# Add parent indent or initial one if there is no parent.
if self._blocks:
extra_indent = self.block.indent + self.block.child_indent
# If existing block has content then dump it.
self._dump_current_lines()
else:
extra_indent = self.indent
if self._block_params is None:
indent += extra_indent
block = cls(indent=indent, first_offset=first_offset,
child_indent=child_indent, parent_width=self.width,
top_margin=top_margin, bottom_margin=bottom_margin)
else:
kwargs = self._block_params
indent = kwargs.get('indent', indent) + extra_indent
kwargs['indent'] = indent
kwargs.setdefault('first_offset', first_offset)
kwargs.setdefault('child_indent', child_indent)
kwargs.setdefault('top_margin', top_margin)
kwargs.setdefault('bottom_margin', bottom_margin)
kwargs.setdefault('parent_width', self.width)
block = cls(**kwargs)
# Save next block params.
self._block_params = next
self._blocks.append(block)
def close_block(self):
"""Close block."""
if self._blocks:
self._dump_current_lines()
self._blocks.pop()
def close_all(self):
"""Close all remaining blocks."""
while self._blocks:
self.close_block()
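
# Minimal usage sketch (not part of the original module): wrap two paragraphs
# with BlockManager and print the result. The texts and the width of 30 are
# arbitrary illustration values.
if __name__ == '__main__':
    mgr = BlockManager(indent=0, width=30)
    mgr.open_block(bottom_margin=1)
    mgr.block.add_text('First paragraph with enough words to wrap.')
    mgr.open_block(indent=4)
    mgr.block.add_text('Second, indented paragraph.')
    mgr.close_all()
    print('\n'.join(mgr.lines))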
|
#!/usr/bin/env python
"""
Load API client for a Tool Registry Service (TRS) endpoint based
either on the GA4GH specification or an existing client library.
"""
import logging
from bravado.requests_client import RequestsClient
from ga4ghtest.core.config import trs_config
from .client import TRSClient
logger = logging.getLogger(__name__)
def _get_trs_opts(service_id):
"""
Look up stored parameters for tool registry services.
"""
return trs_config()[service_id]
def _init_http_client(service_id=None, opts=None):
"""
Initialize and configure HTTP requests client for selected service.
"""
if service_id:
opts = _get_trs_opts(service_id)
http_client = RequestsClient()
http_client.set_api_key(host=opts['host'],
api_key=opts['auth'],
param_in='header')
return http_client
class TRSInterface:
def toolsGet(self):
raise NotImplementedError
def metadataGet(self):
raise NotImplementedError
def toolsIdGet(self, tool_id):
raise NotImplementedError
def toolsIdVersionGet(self, tool_id, tool_version):
raise NotImplementedError
def toolsIdVersionsGet(self, tool_id):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):
raise NotImplementedError
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
raise NotImplementedError
class TRSAdapter(TRSInterface):
"""
Adapter class for TRS client functionality.
Args:
trs_client: ...
"""
def __init__(self, trs_client):
self.trs_client = trs_client
def toolsGet(self):
return self.trs_client.get_tools()
def metadataGet(self):
        return self.trs_client.get_tool_types()
def toolsIdGet(self, tool_id):
return self.trs_client.get_tool(tool_id)
def toolsIdVersionGet(self, tool_id, tool_version):
return self.trs_client.get_tool_version(tool_id, tool_version)
def toolsIdVersionsGet(self, tool_id):
return self.trs_client.get_tool_versions(tool_id)
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):
return self.trs_client.get_tool_descriptor(tool_id, tool_version, descriptor_type)
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):
return self.trs_client.get_relative_tool_descriptor(tool_id, tool_version, descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):
return self.trs_client.get_tool_tests(tool_id, tool_version, descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):
return self.trs_client.get_tools_with_relative_path(tool_id, tool_version, descriptor_type)
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
return self.trs_client.get_tool_container_specs(tool_id, tool_version)
def load_trs_client(service_id, http_client=None):
    """Return an API client for the selected tool registry service."""
    # NOTE: the http_client argument is accepted but not currently used.
    trs_client = TRSClient(service=_get_trs_opts(service_id))
return TRSAdapter(trs_client)
|
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability import distributions as tfd
import tensorflow as tf
from planet.control import discounted_return
from planet import tools, PLANNING, STATES
def delta_degree(x):
return tf.where(tf.abs(x) < 180, x, x - tf.sign(x) * 360)
# Constant cost weights used to gate the heading loss by command id.
costn0 = lambda: tf.constant(0.0)
costn1 = lambda: tf.constant(1.0)
def cross_entropy_method(
cell, objective_fn, state, info_cmd, obs_shape, action_shape, horizon,
amount=1000, topk=100, iterations=10, discount=0.99,
min_action=-1,
max_action=1): # state,info_cmd: shape(num_envs,4): next_command_id, goal_heading_degree, current_heading_degree,dist_to_intersection
obs_shape, action_shape = tuple(obs_shape), tuple(action_shape)
original_batch = tools.shape(tools.nested.flatten(state)[0])[0] # original_batch: num_envs
initial_state = tools.nested.map(lambda tensor: tf.tile(
tensor, [amount] + [1] * (tensor.shape.ndims - 1)), state)
extended_batch = tools.shape(tools.nested.flatten(initial_state)[0])[0]
use_obs = tf.zeros([extended_batch, horizon, 1], tf.bool)
obs = tf.zeros((extended_batch, horizon) + obs_shape)
length = tf.ones([extended_batch], dtype=tf.int32) * horizon
# info_cmd components
    info_cmd = tf.squeeze(info_cmd)  # shape (4,)
cmd_id, goal_heading_degree, current_heading_degree, dist_to_intersection = info_cmd[0], info_cmd[1], info_cmd[2], \
info_cmd[3]
def iteration(mean_and_stddev, _):
mean, stddev = mean_and_stddev
# Sample action proposals from belief.
normal = tf.random_normal((original_batch, amount, horizon) + action_shape)
action = normal * stddev[:, None] + mean[:, None]
action = tf.clip_by_value(action, min_action, max_action)
# Evaluate proposal actions.
action = tf.reshape(
action, (extended_batch, horizon) + action_shape)
(_, state), _ = tf.nn.dynamic_rnn(
cell, (0 * obs, action, use_obs), initial_state=initial_state)
# objectives
objectives = objective_fn(state) # shape: ['reward':shape(1000,12), 'angular_speed_degree':shape(1000,12), ...]
if not PLANNING:
reward = objectives['reward']
angular_speed = objectives['angular_speed_degree']
if PLANNING:
reward = objectives['reward']
angular_speed = objectives['angular_speed_degree']
forward_speed = objectives['forward_speed'] # m/s
collided = objectives['collided']
intersection_offroad = objectives['intersection_offroad']
intersection_otherlane = objectives['intersection_otherlane']
# ################# #1. define reward for planning
# return_ = discounted_return.discounted_return(
# reward, length, discount)[:, 0]
# total_return = tf.reshape(return_, (original_batch, amount))
if not PLANNING:
################## #2. define reward for planning
return_ = discounted_return.discounted_return(reward, length, discount)[:, 0] # shape: (1000,)
return_ = tf.reshape(return_, (original_batch, amount)) # shape: (1, 1000)
# threshold_degree = tf.where(dist_to_intersection<10, 9.0*(10 - dist_to_intersection), 0)
threshold_degree = tf.where(dist_to_intersection < 9, 9 * (9 - dist_to_intersection), 0)
angular_turn_ = discounted_return.discounted_return(angular_speed, length, 1.0)[:, 0] # shape: (1000,)
# angular_turn_abs = discounted_return.discounted_return(-tf.abs(angular_speed), length, 1.0)[:, 0]
# angular_turn_relative = tf.reduce_sum(-tf.abs(angular_speed[...,1:]-angular_speed[...,:-1]),axis=-1)
heading_loss = - tf.abs(delta_degree(goal_heading_degree - (current_heading_degree + angular_turn_))) * \
tf.case(
{tf.equal(cmd_id, 3): costn1, tf.equal(cmd_id, 2): costn1, tf.equal(cmd_id, 1): costn1},
default=costn0)
heading_loss_weighted = heading_loss * tf.where(heading_loss > threshold_degree - 90,
tf.ones((amount,)) * 0.3, tf.ones(
(amount,)) * 100.0) # + 0.3*angular_turn_relative # + 0.1*angular_turn_abs
return_heading = tf.reshape(heading_loss_weighted, (original_batch, amount))
total_return = return_ + return_heading # /90.0*12*4
if PLANNING:
################## #3. define reward for planning
rewards = forward_speed / 10.0 - 300.0 * tf.where(collided > 0.3, collided, tf.ones_like(
collided) * 0.0) - 20.0 * intersection_offroad - 10.0 * intersection_otherlane
return_ = discounted_return.discounted_return(rewards, length, discount)[:, 0] # shape: (1000,)
return_ = tf.reshape(return_, (original_batch, amount)) # shape: (1, 1000)
# threshold_degree = tf.where(dist_to_intersection<10, 9.0*(10 - dist_to_intersection), 0)
threshold_degree = tf.where(dist_to_intersection < 9, 9 * (9 - dist_to_intersection), 0)
angular_turn_ = discounted_return.discounted_return(angular_speed, length, 1.0)[:, 0] # shape: (1000,)
# angular_turn_abs = discounted_return.discounted_return(-tf.abs(angular_speed), length, 1.0)[:, 0]
# angular_turn_relative = tf.reduce_sum(-tf.abs(angular_speed[...,1:]-angular_speed[...,:-1]),axis=-1)
heading_loss = - tf.abs(delta_degree(goal_heading_degree - (current_heading_degree + angular_turn_))) * \
tf.case(
{tf.equal(cmd_id, 3): costn1, tf.equal(cmd_id, 2): costn1, tf.equal(cmd_id, 1): costn1},
default=costn0)
heading_loss_weighted = heading_loss * tf.where(heading_loss > threshold_degree - 90,
tf.ones((amount,)) * 0.3, tf.ones(
(amount,)) * 1000.0) # + 0.3*angular_turn_relative # + 0.1*angular_turn_abs
return_heading = tf.reshape(heading_loss_weighted, (original_batch, amount))
total_return = return_ + return_heading # /90.0*12*4
# Re-fit belief to the best ones.
_, indices = tf.nn.top_k(total_return, topk, sorted=False)
indices += tf.range(original_batch)[:, None] * amount
best_actions = tf.gather(action, indices)
mean, variance = tf.nn.moments(best_actions, 1)
stddev = tf.sqrt(variance + 1e-6)
return mean, stddev
'''COMMAND_ORDINAL = {
"REACH_GOAL": 0,
"GO_STRAIGHT": 1,
"TURN_RIGHT": 2,
"TURN_LEFT": 3,
"LANE_FOLLOW": 4 }
'''
# compute action_bias
f_0 = lambda: tf.constant([1.0, 0.0]) # [throttle, steer(l-,r+)]
    f_left = lambda: tf.constant([1.0, -0.5])
f_right = lambda: tf.constant([1.0, 0.5])
    pred_func = {tf.equal(cmd_id, 3): f_left, tf.equal(cmd_id, 2): f_right}
action_bias = tf.case(pred_func, default=f_0)
    mean_action = tf.broadcast_to(action_bias, (original_batch, horizon) + action_shape)
# # compute angular clue
# angular_f_0 = lambda: tf.constant(0.0) # [throttle, steer(l-,r+)]
# angular_f_1eft = lambda: tf.constant(-3.0)
# angular_f_right = lambda: tf.constant(3.0)
#
# angular_pred_func = { tf.equal(cmd_id,3):angular_f_1eft, tf.equal(cmd_id,2):angular_f_right, tf.equal(cmd_id,1):angular_f_0 }
# angular_clue = tf.case(angular_pred_func, default=angular_f_0)
mean = tf.zeros((original_batch, horizon) + action_shape) # + action_bias
stddev = tf.ones((original_batch, horizon) + action_shape)
mean, stddev = tf.scan(
iteration, tf.range(iterations), (mean, stddev), back_prop=False)
mean, stddev = mean[-1], stddev[-1] # Select belief at last iterations.
    # # some probability for using expert action instead of planned action
# mean = tf.where(1<1,mean,expert_action)
    # mean = mean_action
return mean
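
# Toy NumPy sketch (illustration only) of the same cross-entropy-method loop,
# stripped of the RNN dynamics, the TensorFlow graph, and the CARLA-specific
# rewards: sample action sequences from a Gaussian belief, keep the top-k by
# return, refit the belief, repeat. `objective` maps a sequence to a scalar.
def cem_sketch(objective, horizon=12, amount=1000, topk=100, iterations=10):
    import numpy as np
    mean, stddev = np.zeros(horizon), np.ones(horizon)
    for _ in range(iterations):
        actions = np.clip(np.random.randn(amount, horizon) * stddev + mean, -1, 1)
        returns = np.array([objective(a) for a in actions])
        elite = actions[np.argsort(returns)[-topk:]]  # best `topk` sequences
        mean, stddev = elite.mean(axis=0), elite.std(axis=0) + 1e-6
    return mean
# e.g. cem_sketch(lambda a: -(a ** 2).sum()) converges toward the zero sequence.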
|
from typing import List, Optional, Tuple
from pydantic import BaseModel
class DMInputParams(BaseModel):
scale: float
size: Tuple[int, int]
mean: Optional[Tuple[float, ...]]
swapRB: Optional[bool]
crop: Optional[bool]
class Config:
allow_population_by_field_name = True
fields = {"swapRB": "swap_rb"}
class DetectDefaults(BaseModel):
confThreshold: Optional[float] = 0.5
nmsThreshold: Optional[float] = 0.5
class Config:
allow_population_by_field_name = True
fields = {
"confThreshold": "confidence_threshold",
"nmsThreshold": "nms_threshold",
}
class DrawDefaults(BaseModel):
color: Optional[Tuple[int, int, int]] = (255, 0, 0)
label_format: Optional[str] = "{} | {:.1%}"
offset: Optional[Tuple[int, int]] = (30, 30)
font_const: Optional[int] = 3
thickness: Optional[int] = 2
class DetectionModelConfig(BaseModel):
name: str
description: Optional[str]
weights_path: str
config_path: str
classes_path: str
input_params: DMInputParams
detect_defaults: Optional[DetectDefaults] = DetectDefaults()
draw_defaults: Optional[DrawDefaults] = DrawDefaults()
class_offset: Optional[int] = 0
class YasodConfig(BaseModel):
version: int
detection_models: List[DetectionModelConfig]
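
# Construction sketch (hypothetical names and paths, assuming pydantic v1):
# `allow_population_by_field_name` plus the `fields` alias map lets callers
# pass either `swapRB` or the snake_case alias `swap_rb`.
if __name__ == '__main__':
    cfg = YasodConfig(
        version=1,
        detection_models=[
            DetectionModelConfig(
                name='yolo-demo',
                weights_path='models/demo.weights',
                config_path='models/demo.cfg',
                classes_path='models/demo.names',
                input_params=DMInputParams(scale=1 / 255, size=(416, 416),
                                           mean=(0, 0, 0), swap_rb=True,
                                           crop=False),
            )
        ],
    )
    print(cfg.detection_models[0].input_params.swapRB)  # True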
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_mlmdp
----------------------------------
Tests for `maximum likelihood MDP` module.
"""
import random
import unittest2
from tests.test_learner import TestLearner
from rltools.learners import MLMDP
from rltools.strategies import Strategy
from rltools.domains import randomwalk
class TestMLMDP(TestLearner):
# pylint: disable=protected-access, invalid-name
def setUp(self):
self.cls = MLMDP
def tearDown(self):
pass
def test_000_deterministic(self):
learner = MLMDP(discount_factor=0.75, learning_rate=1, normalize_count=0)
learner.init_episode()
learner.fit((0, 0, 0))
for _ in range(1000):
learner.fit((1, 1, 10))
learner.fit((0, 0, 0))
learner.fit((2, 2, -10))
learner.fit((0, 0, 0))
learner.converge()
self.assertEqual(learner.val(0, 0), 0)
self.assertEqual(learner.val(1, 0), 0)
self.assertEqual(learner.val(2, 0), 0)
self.assertEqual(learner.val(0, 1), -learner.val(0, 2))
self.assertEqual(learner.val(1, 1), -learner.val(1, 2))
self.assertEqual(learner.val(2, 1), -learner.val(2, 2))
def test_001_biased(self):
learner = MLMDP(discount_factor=0.75, learning_rate=1, normalize_count=0)
learner.init_episode()
learner.fit((0, 0, 0))
for _ in range(1000):
learner.fit((1, 1, 10 - random.random()))
learner.fit((0, 0, 0))
learner.fit((2, 2, -10))
learner.fit((0, 0, 0))
learner.converge()
self.assertEqual(learner.val(0, 0), 0)
self.assertEqual(learner.val(1, 0), 0)
self.assertEqual(learner.val(2, 0), 0)
self.assertLess(learner.val(0, 1), -learner.val(0, 2))
self.assertLess(learner.val(1, 1), -learner.val(1, 2))
self.assertLess(learner.val(2, 1), -learner.val(2, 2))
def test_004_play_random_walk(self):
agent = Strategy(MLMDP())
rmse = randomwalk.play(agent, converge=True)
self.assertLess(rmse, 0.1)
if __name__ == '__main__':
import sys
sys.exit(unittest2.main())
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : dimension.py
# Author : Honghua Dong
# Email : [email protected]
# Date : 04/20/2018
#
# This file is part of TextualReasoning-PyTorch.
# Distributed under terms of the MIT license.
import itertools
import torch
import torch.nn as nn
from jactorch.functional import broadcast
from ._utils import exclude_mask, mask_value
__all__ = ['Expander', 'Reducer', 'Permutation']
class Expander(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input, n=None):
if self.dim == 0:
assert n is not None
elif n is None:
n = input.size(self.dim)
dim = self.dim + 1
return broadcast(input.unsqueeze(dim), dim, n)
def get_output_dim(self, input_dim):
return input_dim
class Reducer(nn.Module):
def __init__(self, dim, exclude_self=True, exists=True, min_val=0., max_val=0.):
super().__init__()
self.dim = dim
self.exclude_self = exclude_self
self.exists = exists
self.min_val = min_val
self.max_val = max_val
def forward(self, input, mask=None):
shape = input.size()
inp0, inp1 = input, input
if self.exclude_self:
mask_self = exclude_mask(input, cnt=self.dim, dim=-1 - self.dim)
if mask is not None:
mask = mask.unsqueeze(-1) * mask_self
else:
mask = mask_self
if mask is not None:
inp0 = mask_value(input, mask, self.min_val)
inp1 = mask_value(input, mask, self.max_val)
if self.exists:
shape = shape[:-2] + (shape[-1] * 2, )
exists = torch.max(inp0, dim=-2)[0]
forall = torch.min(inp1, dim=-2)[0]
return torch.stack((exists, forall), dim=-1).view(shape)
shape = shape[:-2] + (shape[-1], )
return torch.max(inp0, dim=-2)[0].view(shape)
def get_output_dim(self, input_dim):
if self.exists:
return input_dim * 2
return input_dim
class Permutation(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input):
if self.dim <= 1:
return input
nr_dims = len(input.size())
# Assume the last dim is channel.
index = tuple(range(nr_dims - 1))
start_dim = nr_dims - 1 - self.dim
assert start_dim > 0
res = []
for i in itertools.permutations(index[start_dim:]):
p = index[:start_dim] + i + (nr_dims - 1,)
res.append(input.permute(p))
return torch.cat(res, dim=-1)
def get_output_dim(self, input_dim):
mul = 1
for i in range(self.dim):
mul *= i + 1
return input_dim * mul
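
# Shape sketch (illustration only): Expander broadcasts a new object axis and
# Permutation concatenates all orderings of the last `dim` object axes along
# the channel axis. The sizes below are arbitrary.
def _example_shapes(batch=2, n=5, c=8):
    x = torch.randn(batch, n, c)
    print(Expander(dim=1)(x).shape)  # (batch, n, n, c)
    y = torch.randn(batch, n, n, c)
    print(Permutation(dim=2)(y).shape)  # (batch, n, n, 2 * c): 2! permutations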
|
import logging
import utils
import constants
logging.basicConfig(
level=logging.DEBUG,
format=constants.logfmt,
handlers=[logging.StreamHandler(), logging.FileHandler('../../data/logs/make_station_tree.log', 'w')],
)
logging.debug("started")
stations = utils.load_stations()
tree, node_station = utils.make_station_tree(stations)
tree.to_csv('../../data/station/tree.csv', index=False)
node_station.to_csv('../../data/station/node_station.csv', index=False)
logging.debug("completed")
|
# Copyright (C) 2020 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from oslo_utils import uuidutils
from oslo_versionedobjects import base as ovoo_base
from tacker._i18n import _
from tacker.common import exceptions
from tacker.db import api as db_api
from tacker.db.db_sqlalchemy import api
from tacker.db.db_sqlalchemy import models
from tacker.objects import base
from tacker.objects import fields
@db_api.context_manager.writer
def _vnf_resource_create(context, values):
vnf_resource = models.VnfResource()
vnf_resource.update(values)
vnf_resource.save(context.session)
return vnf_resource
@db_api.context_manager.reader
def _vnf_resource_get_by_id(context, id):
query = api.model_query(context, models.VnfResource,
read_deleted="no", project_only=True). \
filter_by(id=id)
result = query.first()
if not result:
raise exceptions.VnfResourceNotFound(id=id)
return result
@db_api.context_manager.writer
def _vnf_resource_update(context, id, values):
vnf_resource = _vnf_resource_get_by_id(context, id)
vnf_resource.update(values)
vnf_resource.save(session=context.session)
return vnf_resource
@db_api.context_manager.writer
def _destroy_vnf_resource(context, id):
now = timeutils.utcnow()
updated_values = {'deleted': True,
'deleted_at': now
}
api.model_query(context, models.VnfResource).\
filter_by(id=id). \
update(updated_values, synchronize_session=False)
@db_api.context_manager.reader
def _vnf_resource_list(context, vnf_instance_id):
query = api.model_query(context, models.VnfResource, read_deleted="no",
project_only=True).\
filter_by(vnf_instance_id=vnf_instance_id)
return query.all()
def _make_vnf_resources_list(context, vnf_resource_list, db_vnf_resource_list):
vnf_resource_cls = VnfResource
vnf_resource_list.objects = []
for db_vnf_resource in db_vnf_resource_list:
vnf_resource_obj = vnf_resource_cls._from_db_object(
context, vnf_resource_cls(context), db_vnf_resource)
vnf_resource_list.objects.append(vnf_resource_obj)
vnf_resource_list.obj_reset_changes()
return vnf_resource_list
@base.TackerObjectRegistry.register
class VnfResource(base.TackerObject, base.TackerPersistentObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.UUIDField(nullable=False),
'vnf_instance_id': fields.StringField(nullable=False),
'resource_name': fields.StringField(nullable=True),
'resource_type': fields.StringField(nullable=False),
'resource_identifier': fields.StringField(nullable=False),
'resource_status': fields.StringField(nullable=True, default='status')
}
def __init__(self, context=None, **kwargs):
super(VnfResource, self).__init__(context, **kwargs)
self.obj_set_defaults()
@staticmethod
def _from_db_object(context, vnf_resource, db_vnf_resource):
for key in vnf_resource.fields:
if db_vnf_resource[key]:
setattr(vnf_resource, key, db_vnf_resource[key])
vnf_resource._context = context
vnf_resource.obj_reset_changes()
return vnf_resource
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exceptions.ObjectActionError(action='create',
reason=_('already created'))
updates = self.obj_get_changes()
if 'id' not in updates:
updates['id'] = uuidutils.generate_uuid()
self.id = updates['id']
db_vnf_resource = _vnf_resource_create(self._context, updates)
self._from_db_object(self._context, self, db_vnf_resource)
@base.remotable
def save(self):
updates = self.tacker_obj_get_changes()
db_vnf_resource = _vnf_resource_update(self._context,
self.id, updates)
self._from_db_object(self._context, self, db_vnf_resource)
@base.remotable
def destroy(self, context):
if not self.obj_attr_is_set('id'):
raise exceptions.ObjectActionError(action='destroy',
reason='no uuid')
_destroy_vnf_resource(context, self.id)
@base.remotable_classmethod
def get_by_id(cls, context, id):
        db_vnf_resource = _vnf_resource_get_by_id(context, id)
        return cls._from_db_object(context, cls(), db_vnf_resource)
@base.TackerObjectRegistry.register
class VnfResourceList(ovoo_base.ObjectListBase, base.TackerObject):
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('VnfResource')
}
@base.remotable_classmethod
def get_by_vnf_instance_id(cls, context, vnf_instance_id):
db_vnf_resources = _vnf_resource_list(context, vnf_instance_id)
return _make_vnf_resources_list(context, cls(), db_vnf_resources)
|
"""
This package contains support code to package Salt with PyInstaller.
"""
|
from django import http
from django.shortcuts import render
from django.views import View
from address.models import Area
from utils.response_code import RET
class Areas(View):
# get /api/v1.0/areas/
def get(self, request):
"""展示区域数据"""
try:
areas = Area.objects.all()
except Exception as e:
return http.JsonResponse({
'errno': RET.DBERR,
                'errmsg': 'database query error'
})
area_list = []
for area in areas:
area_dict = {}
area_dict['aid'] = area.id
area_dict['aname'] = area.name
area_list.append(area_dict)
response = {
'errmsg': 'ok',
'errno': RET.OK,
'data': area_list
}
return http.JsonResponse(response) |
import os
import requests
with open('raw.csv') as f:
lis=[line.split(',') for line in f]
for i, person in enumerate(lis):
person[0] = person[0].replace(' ', '_')
person[1] = person[1].strip('\n')
print("Will create dir {0}, and store image from {1}".format(person[0], person[1]))
if not os.path.exists('raw/'+person[0]):
os.makedirs('raw/'+person[0])
with open('raw/{0}/{1}'.format(person[0], person[1].split('/')[-1]), 'wb') as handle:
response = requests.get(person[1], stream=True)
if not response.ok:
print(response)
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
|
from . import BaseHandler, apply_request_schema, apply_response_schema
from .exceptions import NotFoundError, ValidationError
from ..aliases import AliasNotFound, AliasStoreType
from ..aliases.manager import redact, reveal
from ..schemas.aliases import (
AliasResponseSchema,
AliasesResponseSchema,
RedactRequestSchema,
)
STORAGE_TYPE = AliasStoreType.PERSISTENT
class AliasesHandler(BaseHandler):
@apply_request_schema(RedactRequestSchema)
@apply_response_schema(AliasResponseSchema)
def post(self, validated_data: dict):
"""
---
description: Perform redact-operation for given values
requestBody:
content:
application/json:
schema: RedactRequestSchema
responses:
200:
content:
application/json:
schema: AliasResponseSchema
"""
results = []
for item in validated_data['data']:
value, format = item['value'], item['format']
alias = redact(value, format, STORAGE_TYPE)
results.append(
{
'aliases': [{'alias': alias.public_alias, 'format': format}],
'created_at': alias.created_at,
'value': item['value'],
}
)
return {'data': results}
@apply_response_schema(AliasesResponseSchema)
def get(self):
"""
---
description: Perform reveal-operation for given aliases
parameters:
- name: q
in: query
          description: Comma-separated aliases
required: true
schema:
type: string
responses:
200:
content:
application/json:
schema: AliasesResponseSchema
"""
aliases = self.get_query_argument('q', default=None)
if not aliases:
raise ValidationError('Missing required parameter: "q"')
reveal_data = {}
errors = []
for public_alias in set(aliases.split(',')):
try:
reveal_result = _reveal(public_alias)
except AliasNotFound:
errors.append({'message': f'Unknown alias: {public_alias}'})
else:
reveal_data[public_alias] = reveal_result
result = {}
if reveal_data:
result['data'] = reveal_data
if errors:
result['errors'] = errors
return result
class AliasHandler(BaseHandler):
@apply_response_schema(AliasResponseSchema)
def get(self, public_alias: str):
"""
---
description: Perform reveal-operation for a single alias
parameters:
- name: public_alias
in: path
description: Public alias
required: true
schema:
type: string
responses:
200:
content:
application/json:
schema: AliasResponseSchema
404:
content:
application/json:
schema: ErrorResponseSchema
"""
try:
reveal_result = _reveal(public_alias)
except AliasNotFound:
raise NotFoundError(f'Unknown alias: {public_alias}')
return {'data': [reveal_result]}
def _reveal(public_alias: str) -> dict:
alias = reveal(public_alias, STORAGE_TYPE)
return {
'aliases': [
{
'alias': alias.public_alias,
'format': alias.alias_generator,
}
],
'created_at': alias.created_at,
'value': alias.value,
}
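
# Illustrative helper (not part of the original handlers, and it assumes a
# configured persistent alias store): redact a raw value directly through the
# manager layer, then reveal it again. The value and format are placeholders.
def _example_roundtrip(value: str = '4111-1111', format: str = 'generic'):
    alias = redact(value, format, STORAGE_TYPE)
    revealed = reveal(alias.public_alias, STORAGE_TYPE)
    return alias.public_alias, revealed.value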
|
#! /usr/bin/env python
# -*- coding: iso-8859-15 -*-
##############################################################################
# Copyright 2003 & onward LASMEA UMR 6602 CNRS/Univ. Clermont II
# Copyright 2009 & onward LRI UMR 8623 CNRS/Univ Paris Sud XI
#
# Distributed under the Boost Software License, Version 1.0
# See accompanying file LICENSE.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt
##############################################################################
"""Utilities to get toolboxes informations
"""
__author__ = "Lapreste Jean-thierry ([email protected])"
__version__ = "$Revision: 1.0 $"
__date__ = "$Date: 2010 $"
__copyright__ = """ Copyright 2003 & onward LASMEA UMR 6602 CNRS/Univ. Clermont II
Copyright 2009 & onward LRI UMR 8623 CNRS/Univ Paris Sud XI"""
__license__ = "Boost Software License, Version 1.0"
import sys
import os
import re
from files_utils import read, exist
from nt2_fundamentals import Nt2_base_infos
class Toolbox_infos :
def __init__(self, tb_name) :
self.nbi = Nt2_base_infos()
self.__tb_name = tb_name
self.__tb_path = os.path.join(self.nbi.get_nt2_path(),'nt2/toolbox',tb_name)
        self.__tb_style = self.__read_style()
def get_tb_path(self) : return self.__tb_path
def get_tb_name(self) : return self.__tb_name
def get_tb_style( self) : return self.__tb_style
def get_def_path(self) : return os.path.join(self.__tb_path,'function')
def get_bench_path(self) : return os.path.join(self.__tb_path,'bench')
def get_unit_path(self) : return os.path.join(self.__tb_path,'unit')
def get_doc_path(self) : return os.path.join(self.__tb_path,'doc')
def get_fctors_list(self) :
l = []
for name in os.listdir(self.get_def_path()) :
if name[-4:]=='.hpp' :
h = name[:-4]
l.append(h)
return l
def __read_style(self) :
dirname = self.get_tb_path()
filename = dirname+'.hpp'
if exist(filename) :
s = read(filename)
pattern = re.compile("^// This toolbox is of (.*) type")
for l in s :
d1 = re.match(pattern,l)
if d1 : return d1.groups()[0]
filename = os.path.join(dirname,'py_data.py')
if exist(filename) :
if re.search("'usr'",' '.join(read(filename))) :
return 'usr'
else :
return 'sys'
self.__tb_style = 'usr'
return 'usr'
def __str__(self) :
r = "tbi.get_tb_path(): %s" % tbi.get_tb_path ()
r += "\ntbi.get_tb_name(): %s" % tbi.get_tb_name ()
r += "\ntbi.get_tb_style(): %s" % tbi.get_tb_style ()
r += "\ntbi.get_def_path(): %s" % tbi.get_def_path ()
r += "\ntbi.get_bench_path(): %s" % tbi.get_bench_path ()
r += "\ntbi.get_unit_path(): %s" % tbi.get_unit_path ()
r += "\ntbi.get_doc_path(): %s" % tbi.get_doc_path ()
r += "\ntbi.get_fctors_list(): %s" % tbi.get_fctors_list()
r += "\n"
return r
if __name__ == "__main__" :
tbi = Toolbox_infos("arithmetic")
print tbi
tbi = Toolbox_infos("cephes")
print tbi
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2020
from streamsx.topology.schema import StreamSchema
#
# Defines Message types with default attribute names and types.
_SPL_SCHEMA_ACCESS_TOKEN = 'tuple<rstring access_token, rstring refresh_token, rstring scope, int64 expiration, rstring token_type, int64 expires_in>'
_SPL_SCHEMA_STT_RESULT = 'tuple<rstring conversationId, boolean transcriptionCompleted, rstring sttErrorMessage, float64 utteranceStartTime, float64 utteranceEndTime, rstring utterance>'
_SPL_SCHEMA_STT_RESULT_PARTIAL = 'tuple<boolean finalizedUtterance, float64 confidence>'
_SPL_SCHEMA_STT_INPUT = 'tuple<rstring conversationId, blob speech>'
_SPL_SCHEMA_STT_RESULT_KEYWORD_SPOTTING = 'tuple<map<rstring, list<tuple<float64 startTime, float64 endTime, float64 confidence>>> keywordsSpottingResults>'
class GatewaySchema:
"""
Structured stream schemas for :py:meth:`~streamsx.sttgateway.WatsonSTT`
"""
STTResult = StreamSchema (_SPL_SCHEMA_STT_RESULT)
"""
This schema is used as output in :py:meth:`~streamsx.sttgateway.WatsonSTT`
    The schema defines the following attributes:
* conversationId(rstring) - identifier, for example file name
* transcriptionCompleted(boolean) - boolean value to indicate whether the full transcription/conversation is completed
* sttErrorMessage(rstring) - Watson STT error message if any.
* utteranceStartTime(float64) - start time of an utterance relative to the start of the audio
* utteranceEndTime(float64) - end time of an utterance relative to the start of the audio
* utterance(rstring) - the transcription of audio in the form of a single utterance
"""
pass
STTResultPartialExtension = StreamSchema (_SPL_SCHEMA_STT_RESULT_PARTIAL)
"""
This schema is added to STTResult schema when result mode is partial in :py:meth:`~streamsx.sttgateway.WatsonSTT`
    The schema defines the following attributes:
* finalizedUtterance(boolean) - boolean value to indicate if this is an interim partial utterance or a finalized utterance.
* confidence(float64) - confidence value for an interim partial utterance or for a finalized utterance or for the full text.
"""
pass
STTResultKeywordExtension = StreamSchema (_SPL_SCHEMA_STT_RESULT_KEYWORD_SPOTTING)
"""
This schema is added to STTResult schema when keywords_to_be_spotted is set in :py:meth:`~streamsx.sttgateway.WatsonSTT`
    The schema defines the following attributes:
* keywordsSpottingResults(map<rstring, list<tuple<float64 startTime, float64 endTime, float64 confidence>>>) - The keys of the map are the spotted keywords.
"""
pass
STTInput = StreamSchema (_SPL_SCHEMA_STT_INPUT)
"""
Use this schema as input for :py:meth:`~streamsx.sttgateway.WatsonSTT`
    The schema defines the following attributes:
* conversationId(rstring) - identifier, for example file name
* speech(blob) - audio data
"""
pass
AccessToken = StreamSchema (_SPL_SCHEMA_ACCESS_TOKEN)
"""
This schema is used internally in :py:meth:`~streamsx.sttgateway.WatsonSTT` by the access token generator.
"""
pass
|
# Copyright 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from oslo_config import cfg
from oslo_log import log
from oslo_utils import strutils
from ironic_python_agent import agent
from ironic_python_agent import config
CONF = cfg.CONF
def run():
"""Entrypoint for IronicPythonAgent."""
log.register_options(CONF)
CONF(args=sys.argv[1:])
# Debug option comes from oslo.log, allow overriding it via kernel cmdline
ipa_debug = config.APARAMS.get('ipa-debug')
if ipa_debug is not None:
ipa_debug = strutils.bool_from_string(ipa_debug)
CONF.set_override('debug', ipa_debug)
log.setup(CONF, 'ironic-python-agent')
agent.IronicPythonAgent(CONF.api_url,
agent.Host(hostname=CONF.advertise_host,
port=CONF.advertise_port),
agent.Host(hostname=CONF.listen_host,
port=CONF.listen_port),
CONF.ip_lookup_attempts,
CONF.ip_lookup_sleep,
CONF.network_interface,
CONF.lookup_timeout,
CONF.lookup_interval,
CONF.standalone,
CONF.agent_token,
CONF.hardware_initialization_delay,
CONF.advertise_protocol).run()
|
from flask_sqlalchemy import SQLAlchemy
import json
db = SQLAlchemy()
class Interface(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, unique=True)
url = db.Column(db.String)
query_string = db.Column(db.String)
active = db.Column(db.Boolean, index=True)
default = db.Column(db.Boolean, index=True)
body = db.Column(db.Binary)
mock_prefix = db.Column(db.String)
def __init__(self, name, url, active=True, default=True, body=None, mock_prefix=None, query_string=None):
self.name = name
self.url = url
self.active = active
self.default = default
if body:
self.body = body
if mock_prefix:
self.mock_prefix = mock_prefix
self.query_string = query_string
def get_json_body(self):
return json.dumps(json.loads(self.body.decode()), ensure_ascii=False, indent=4)
def to_dict(self):
return {
'name': self.name,
'url': self.url,
'default': self.default,
'active': self.active,
'mock_prefix': self.mock_prefix,
'body': self.body.decode(),
'query_string': self.query_string
}
@classmethod
def from_dict(cls, interface_dict):
return cls(interface_dict['name'],
interface_dict['url'],
default=interface_dict['default'],
active=interface_dict['active'],
body=interface_dict['body'].encode(),
mock_prefix=interface_dict['mock_prefix'],
query_string=interface_dict['query_string'])
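
# Minimal round-trip sketch (not part of the original module): serialize an
# Interface to a dict and rebuild it. Field values are arbitrary illustration
# data; a real app would persist via db.session instead.
def _example_roundtrip():
    src = Interface('users', '/api/users', body=b'{"ok": true}',
                    mock_prefix='/mock', query_string='page=1')
    clone = Interface.from_dict(src.to_dict())
    return clone.name == src.name and clone.url == src.url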
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 28 15:08:20 2014
@author: aitor
"""
import datetime
import json
import gzip
import time
import sys
import tweepy
from tweepy import StreamListener, Stream
credentials = json.load(open('credentials.json', 'r'))
CONSUMER_KEY = credentials['consumer_key']
CONSUMER_SECRET = credentials['consumer_secret']
OAUTH_TOKEN = credentials['oauth_token']
OAUTH_TOKEN_SECRET = credentials['oauth_secret']
tweets = []
initial_time = time.time()
class StdOutListener(StreamListener):
def on_data(self, raw_data):
global tweets, initial_time
        elapsed_time = time.time() - initial_time  # elapsed seconds
#save the status every 30 mins
if elapsed_time >= 60 * 30:
now = datetime.datetime.now()
file_name = './tweets/tweets-%s-%s-%s-%s-%s.txt.gz' % (now.month, now.day, now.hour, now.minute, now.second)
print '(%s-%s %s:%s:%s) %s' % (now.month, now.day, now.hour, now.minute, now.second, 'Saving file')
with gzip.open(file_name, 'w') as f:
for tweet in tweets:
f.write(json.dumps(tweet) + '\n')
tweets = []
initial_time = time.time()
try:
data = json.loads(raw_data)
tweets.append(data)
except:
now = datetime.datetime.now()
print '(%s-%s %s:%s:%s) Invalid JSON Data %s' % (now.month, now.day, now.hour, now.minute, now.second, raw_data)
sys.stdout.flush()
return True
def on_error(self, status_code):
now = datetime.datetime.now()
print '(%s-%s %s:%s:%s)Got an error with status code: %s' % (now.month, now.day, now.hour, now.minute, now.second, status_code)
sys.stdout.flush()
#sleep 5 mins if an error occurs
time.sleep(5 * 60)
return True # To continue listening
def on_timeout(self):
now = datetime.datetime.now()
print '(%s-%s %s:%s:%s) %s' % (now.month, now.day, now.hour, now.minute, now.second, 'Timeout')
sys.stdout.flush()
return True # To continue listening
if __name__ == '__main__':
now = datetime.datetime.now()
print '(%s-%s %s:%s:%s) %s' % (now.month, now.day, now.hour, now.minute, now.second, 'Starting')
sys.stdout.flush()
keywords = json.load(open('keywords.json', 'r'))
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
listener = StdOutListener()
stream = Stream(auth, listener)
while True:
try:
# https://dev.twitter.com/streaming/reference/post/statuses/filter
stream.filter(track=keywords)
# With stream.filter(follow=IDs) to follow accounts
except Exception as e:
print '(%s-%s %s:%s:%s) %s' % (now.month, now.day, now.hour, now.minute, now.second, "Error streaming")
print '(%s-%s %s:%s:%s) %s' % (now.month, now.day, now.hour, now.minute, now.second, e.message)
sys.stdout.flush()
time.sleep(1 * 60)
now = datetime.datetime.now()
print '(%s-%s %s:%s:%s) %s' % (now.month, now.day, now.hour, now.minute, now.second, 'Done') |
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved.
AnalyticalSolver, used to solve the Quadratic
Constrained Optimization Problem for 2 gradients
analytically.
The AnalyticalSolver class contains the implementation of
the analytical QCOP Solver for 2 gradients.
"""
from copsolver.copsolver import COPSolver
class AnalyticalSolver(COPSolver):
"""AnalyticalSolver class. Inherits the COPSolver class.
AnalyticalSolver is used to calculate the alphas for the QCOP for 2
gradients.
"""
def solve(self, gradients):
"""Solves the Constrained Optimization Problem for 2 gradients
        Given the gradients, analytically compute the alphas for the COP and return them in a list of size 2.
Args:
gradients: numpy array of gradients of size 2 from the models
            each gradient is a numpy array of floats of the same size. Gradients
            cannot be the same.
Returns:
A numpy array of floats in [0,1] of size 2 representing
the coefficients associated to the gradients
        Raises:
            ValueError: An error occurred while checking the dimensions of
                the gradients
            TypeError: An error occurred while accessing the argument - one
                of the arguments is NoneType
"""
if gradients is None:
raise TypeError('Argument: gradients type cannot be None')
if (len(gradients) != 2):
raise ValueError('Argument: The number of gradients must be equal to 2')
if (len(gradients[0]) != len(gradients[1])):
raise ValueError('Argument: The gradients must have the same length')
if (gradients[0] == gradients[1]).all():
return [0.5,0.5]
r"""
.. math::
\alpha = \frac{(\nabla_{w}L_{2}(w) - \nabla_{w}L_{1})^{T} \star \nabla_{w}L_{2}(w)}
{\|\nabla_{w}L_{1} - \nabla_{w}L_{2}\|^{2}}
Source: Multi-Gradient Descent For Multi-Objective Recommender Systems
"""
alpha = ((gradients[1] - gradients[0]) @ gradients[1]) \
/ ((gradients[0] - gradients[1]) @ (gradients[0] - gradients[1]))
if alpha < 0:
alpha = 0
if alpha > 1:
alpha = 1
return [alpha, 1-alpha]
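
# Worked check (illustration only, not part of the original module): for the
# toy gradients g1=[1,0] and g2=[0,1] the closed form gives
# alpha = (g2-g1)·g2 / ||g1-g2||^2 = 1/2, i.e. equal weights.
if __name__ == '__main__':
    import numpy as np
    gradients = np.array([[1.0, 0.0], [0.0, 1.0]])
    print(AnalyticalSolver().solve(gradients))  # -> [0.5, 0.5]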
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @file: attenStereoNet_embed_sga_11.py
# @brief:
# @author: Changjiang Cai, [email protected], [email protected]
# @version: 0.0.1
# @creation date: 16-10-2019
# @last modified: Mon 11 May 2020 01:02:03 AM EDT
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..baselines.GANet.libs.GANet.modules.GANet import SGA
#from .embednetwork import embed_net
#from .bilateral import bilateralFilter
from ..baselines.GANet.libs.sync_bn.modules.sync_bn import BatchNorm2d, BatchNorm3d
from ..baselines.GANet.libs.GANet.modules.GANet import DisparityRegression
#from ..baselines.GANet.libs.GANet.modules.GANet import GetCostVolume
from ..baselines.GANet.libs.GANet.modules.GANet import LGA, LGA2, LGA3
############################################
""" adapted from GANet paper code """
############################################
class BasicConv(nn.Module):
def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, bn=True, relu=True, **kwargs):
super(BasicConv, self).__init__()
# print(in_channels, out_channels, deconv, is_3d, bn, relu, kwargs)
self.relu = relu
self.use_bn = bn
if is_3d:
if deconv:
self.conv = nn.ConvTranspose3d(in_channels, out_channels, bias=False, **kwargs)
else:
self.conv = nn.Conv3d(in_channels, out_channels, bias=False, **kwargs)
self.bn = BatchNorm3d(out_channels)
else:
if deconv:
self.conv = nn.ConvTranspose2d(in_channels, out_channels, bias=False, **kwargs)
else:
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.relu:
x = F.relu(x, inplace=True)
return x
class Conv2x(nn.Module):
def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, concat=True, bn=True, relu=True):
super(Conv2x, self).__init__()
self.concat = concat
kernel = 3
if deconv and is_3d:
#kernel = (3, 4, 4)
#updated by CCJ:
kwargs = {
'stride': 2,
'padding': 1,
'output_padding':1
}
elif deconv:
#kernel = 4
kwargs = {
'stride': 2,
'padding': 1,
'output_padding':1
}
else:
kwargs = {
'stride': 2,
'padding': 1,
}
#self.conv1 = BasicConv(in_channels, out_channels, deconv, is_3d, bn=True, relu=True, kernel_size=kernel, stride=2, padding=1)
self.conv1 = BasicConv(in_channels, out_channels, deconv, is_3d, bn=True, relu=True, kernel_size=kernel, **kwargs)
if self.concat:
self.conv2 = BasicConv(out_channels*2, out_channels, False, is_3d, bn, relu, kernel_size=3, stride=1, padding=1)
else:
self.conv2 = BasicConv(out_channels, out_channels, False, is_3d, bn, relu, kernel_size=3, stride=1, padding=1)
def forward(self, x, rem):
x = self.conv1(x)
#print ("[???] x size = ", x.size())
#print ("[???] rem size = ", rem.size())
assert(x.size() == rem.size())
if self.concat:
x = torch.cat((x, rem), 1)
else:
x = x + rem
x = self.conv2(x)
return x
class Feature(nn.Module):
def __init__(self,
#is_quarter_size = True
downsample_scale = 3
):
super(Feature, self).__init__()
#if not is_quarter_size:
assert downsample_scale in [2, 3, 4], "downsample_scale should be 2, 3, or 4!!!"
if downsample_scale == 3:
print ("[***] Feature() to 1/3 image size for original GANet !!!")
self.conv_start = nn.Sequential(
# Added by CCJ:
# Convolution In/Out Size: O = floor{(W - F + 2P)/S + 1}
BasicConv(3, 32, kernel_size=3, padding=1),
BasicConv(32, 32, kernel_size=5, stride=3, padding=2), #in size [H/3, W/3]
BasicConv(32, 32, kernel_size=3, padding=1))
elif downsample_scale == 4:
print ("[***] Feature() to 1/4 image size for PSMNet etc !!!")
self.conv_start = nn.Sequential(
# Added by CCJ:
# Convolution In/Out Size: O = floor{(W - F + 2P)/S + 1}
BasicConv(3, 32, kernel_size=3, padding=1),
BasicConv(32, 32, kernel_size=3, stride=2, padding=1), #in size [H/2, W/2]
BasicConv(32, 32, kernel_size=3, stride=2, padding=1), #in size [H/4, W/4]
BasicConv(32, 32, kernel_size=3, padding=1))
elif downsample_scale == 2:
print ("[***] Feature() to 1/2 image size for GCNet etc !!!")
self.conv_start = nn.Sequential(
# Added by CCJ:
# Convolution In/Out Size: O = floor{(W - F + 2P)/S + 1}
BasicConv(3, 32, kernel_size=3, padding=1),
BasicConv(32, 32, kernel_size=3, stride=2, padding=1), #in size [H/2, W/2]
BasicConv(32, 32, kernel_size=3, padding=1))
#else:
# raise Exception("No suitable downsample_scale value found ...")
self.conv1a = BasicConv(32, 48, kernel_size=3, stride=2, padding=1)
self.conv2a = BasicConv(48, 64, kernel_size=3, stride=2, padding=1)
self.conv3a = BasicConv(64, 96, kernel_size=3, stride=2, padding=1)
self.conv4a = BasicConv(96, 128, kernel_size=3, stride=2, padding=1)
self.deconv4a = Conv2x(128, 96, deconv=True)
self.deconv3a = Conv2x(96, 64, deconv=True)
self.deconv2a = Conv2x(64, 48, deconv=True)
self.deconv1a = Conv2x(48, 32, deconv=True)
self.conv1b = Conv2x(32, 48) # default: k=3,s=2,p=1
self.conv2b = Conv2x(48, 64)
self.conv3b = Conv2x(64, 96)
self.conv4b = Conv2x(96, 128)
self.deconv4b = Conv2x(128, 96, deconv=True)
self.deconv3b = Conv2x(96, 64, deconv=True)
self.deconv2b = Conv2x(64, 48, deconv=True)
self.deconv1b = Conv2x(48, 32, deconv=True)
def forward(self, x):
x = self.conv_start(x)
rem0 = x
x = self.conv1a(x)
rem1 = x
x = self.conv2a(x)
rem2 = x
x = self.conv3a(x)
rem3 = x
x = self.conv4a(x)
rem4 = x
x = self.deconv4a(x, rem3)
rem3 = x
x = self.deconv3a(x, rem2)
rem2 = x
x = self.deconv2a(x, rem1)
rem1 = x
x = self.deconv1a(x, rem0)
rem0 = x
x = self.conv1b(x, rem1)
rem1 = x
x = self.conv2b(x, rem2)
rem2 = x
x = self.conv3b(x, rem3)
rem3 = x
x = self.conv4b(x, rem4)
x = self.deconv4b(x, rem3)
x = self.deconv3b(x, rem2)
x = self.deconv2b(x, rem1)
x = self.deconv1b(x, rem0)
return x
class Guidance_11(nn.Module):
def __init__(
self,
#is_quarter_size = True,
# could be:
# 2: Half size, i.e., [H/2, W/2]
# 3: 1/3 size, i.e., [H/3, W/3]
# 4: quarter size, i.e., [H/4, W/4]
downsample_scale = 3,
is_lga = False):
super(Guidance_11, self).__init__()
#self.is_quarter_size = is_quarter_size
assert downsample_scale in [2, 3, 4], "downsample_scale should be 2, 3, or 4!!!"
self.is_lga = is_lga
self.conv0 = BasicConv(64, 16, kernel_size=3, padding=1)
#if not is_quarter_size:
if downsample_scale == 3:
print ("[***] Guidance_11() module to 1/3 image size for original GANet !!!")
self.conv1 = nn.Sequential(
BasicConv(16, 32, kernel_size=5, stride=3, padding=2),#in size [H/3, W/3]
BasicConv(32, 32, kernel_size=3, padding=1))
elif downsample_scale == 4:
print ("[***] Guidance_11() module to 1/4 image size for PSMNet etc !!!")
self.conv1 = nn.Sequential(
BasicConv(16, 32, kernel_size=3, stride=2, padding=1),#in size [H/2, W/2]
BasicConv(32, 32, kernel_size=3, stride=2, padding=1))#in size [H/4, W/4]
elif downsample_scale == 2:
print ("[***] Guidance_11() module to 1/2 image size for GCNet etc !!!")
self.conv1 = nn.Sequential(
BasicConv(16, 32, kernel_size=3, stride=2, padding=1),#in size [H/2, W/2]
BasicConv(32, 32, kernel_size=3, stride=1, padding=1))#in size [H/2, W/2]
#else:
# raise Exception("No suitable downsample_scale value found ...")
self.conv2 = BasicConv(32, 32, kernel_size=3, padding=1)
self.conv3 = BasicConv(32, 32, kernel_size=3, padding=1)
# self.conv11 = Conv2x(32, 48)
self.conv11 = nn.Sequential(BasicConv(32, 48, kernel_size=3, stride=2, padding=1),
BasicConv(48, 48, kernel_size=3, padding=1))
self.conv12 = BasicConv(48, 48, kernel_size=3, padding=1)
self.conv13 = BasicConv(48, 48, kernel_size=3, padding=1)
self.conv14 = BasicConv(48, 48, kernel_size=3, padding=1)
self.weight_sg1 = nn.Conv2d(32, 640, (3, 3), (1, 1), (1, 1), bias=False)
self.weight_sg2 = nn.Conv2d(32, 640, (3, 3), (1, 1), (1, 1), bias=False)
self.weight_sg3 = nn.Conv2d(32, 640, (3, 3), (1, 1), (1, 1), bias=False)
self.weight_sg11 = nn.Conv2d(48, 960, (3, 3), (1, 1), (1, 1), bias=False)
self.weight_sg12 = nn.Conv2d(48, 960, (3, 3), (1, 1), (1, 1), bias=False)
self.weight_sg13 = nn.Conv2d(48, 960, (3, 3), (1, 1), (1, 1), bias=False)
self.weight_sg14 = nn.Conv2d(48, 960, (3, 3), (1, 1), (1, 1), bias=False)
if self.is_lga:
self.weight_lg1 = nn.Sequential(BasicConv(16, 16, kernel_size=3, padding=1),
nn.Conv2d(16, 75, (3, 3), (1, 1), (1, 1) ,bias=False))
self.weight_lg2 = nn.Sequential(BasicConv(16, 16, kernel_size=3, padding=1),
nn.Conv2d(16, 75, (3, 3), (1, 1), (1, 1) ,bias=False))
def forward(self, x):
x = self.conv0(x)
rem = x
x = self.conv1(x)
sg1 = self.weight_sg1(x)
x = self.conv2(x)
sg2 = self.weight_sg2(x)
x = self.conv3(x)
sg3 = self.weight_sg3(x)
x = self.conv11(x)
sg11 = self.weight_sg11(x)
x = self.conv12(x)
sg12 = self.weight_sg12(x)
x = self.conv13(x)
sg13 = self.weight_sg13(x)
x = self.conv14(x)
sg14 = self.weight_sg14(x)
if self.is_lga:
lg1 = self.weight_lg1(rem)
lg2 = self.weight_lg2(rem)
else:
lg1 = None
lg2 = None
return dict([
('sg1', sg1),
('sg2', sg2),
('sg3', sg3),
('sg11', sg11),
('sg12', sg12),
('sg13', sg13),
('sg14', sg14),
('lg1', lg1),
('lg2', lg2)])
class Disp(nn.Module):
def __init__(self, maxdisp=192):
super(Disp, self).__init__()
self.maxdisp = maxdisp
self.softmax = nn.Softmin(dim=1)
self.disparity = DisparityRegression(maxdisp=self.maxdisp)
# self.conv32x1 = BasicConv(32, 1, kernel_size=3)
self.conv32x1 = nn.Conv3d(32, 1, (3, 3, 3), (1, 1, 1), (1, 1, 1), bias=False)
def forward(self, x):
x = F.interpolate(self.conv32x1(x), [self.maxdisp+1, x.size()[3]*3, x.size()[4]*3],
mode='trilinear', align_corners=False)
x = torch.squeeze(x, 1)
x = self.softmax(x)
return self.disparity(x)
class DispAgg(nn.Module):
def __init__(self, maxdisp=192):
super(DispAgg, self).__init__()
self.maxdisp = maxdisp
self.LGA3 = LGA3(radius=2)
self.LGA2 = LGA2(radius=2)
self.LGA = LGA(radius=2)
self.softmax = nn.Softmin(dim=1)
self.disparity = DisparityRegression(maxdisp=self.maxdisp)
# self.conv32x1 = BasicConv(32, 1, kernel_size=3)
self.conv32x1=nn.Conv3d(32, 1, (3, 3, 3), (1, 1, 1), (1, 1, 1), bias=False)
def lga(self, x, g):
g = F.normalize(g, p=1, dim=1)
x = self.LGA2(x, g)
return x
def forward(self, x, lg1, lg2):
x = F.interpolate(self.conv32x1(x), [self.maxdisp+1, x.size()[3]*3, x.size()[4]*3], mode='trilinear',
align_corners=False)
x = torch.squeeze(x, 1)
assert(lg1.size() == lg2.size())
x = self.lga(x, lg1)
x = self.softmax(x)
x = self.lga(x, lg2)
x = F.normalize(x, p=1, dim=1)
return self.disparity(x)
class SGABlock(nn.Module):
def __init__(self, channels=32, refine=False):
super(SGABlock, self).__init__()
self.refine = refine
if self.refine:
self.bn_relu = nn.Sequential(BatchNorm3d(channels),
nn.ReLU(inplace=True))
self.conv_refine = BasicConv(channels, channels, is_3d=True, kernel_size=3, padding=1, relu=False)
# self.conv_refine1 = BasicConv(8, 8, is_3d=True, kernel_size=1, padding=1)
else:
self.bn = BatchNorm3d(channels)
self.SGA=SGA()
self.relu = nn.ReLU(inplace=True)
def forward(self, x, g):
rem = x
#NOTE:
#Comments added by CCJ:
# split g channel C (e.g., C= 640) to 4 parts, corresponding to four directions (left, right, up and down),
# each with C/4 ( e.g., = 640/4=160) size along channel dim, i.e., dim=1;
# each C/4=160-dim vector is further divided into 32 x 5, where 32 is the same as input x channel,
# and 5 means w0, w1, ..., w4 in Eq (5) in GANet CVPR paper, s.t. w0 + w1 + ... + w4 = 1.0,
        # this is why F.normalize() is applied along dim=2 (the dimension of size 5), i.e., normalizing those five values s.t. w0 + w1 + ... + w4 = 1.0 !!!
k1, k2, k3, k4 = torch.split(g, (x.size()[1]*5, x.size()[1]*5, x.size()[1]*5, x.size()[1]*5), 1)
k1 = F.normalize(k1.view(x.size()[0], x.size()[1], 5, x.size()[3], x.size()[4]), p=1, dim=2)
k2 = F.normalize(k2.view(x.size()[0], x.size()[1], 5, x.size()[3], x.size()[4]), p=1, dim=2)
k3 = F.normalize(k3.view(x.size()[0], x.size()[1], 5, x.size()[3], x.size()[4]), p=1, dim=2)
k4 = F.normalize(k4.view(x.size()[0], x.size()[1], 5, x.size()[3], x.size()[4]), p=1, dim=2)
x = self.SGA(x, k1, k2, k3, k4)
if self.refine:
x = self.bn_relu(x)
x = self.conv_refine(x)
else:
x = self.bn(x)
assert(x.size() == rem.size())
x += rem
return self.relu(x)
# return self.bn_relu(x)
class CostAggregation_11(nn.Module):
def __init__(self,
cost_volume_in_channels = 64, # for DispNetC channels = 1, for PSMNet channels = 64;
):
super(CostAggregation_11, self).__init__()
print ("[***] cost_volume_in_channels = ", cost_volume_in_channels)
self.conv_start = BasicConv(cost_volume_in_channels, 32, is_3d=True, kernel_size=3, padding=1, relu=False)
self.conv_end = BasicConv(32, cost_volume_in_channels, is_3d=True, kernel_size=3, padding=1, relu=True, bn=False)
self.conv1a = BasicConv(32, 48, is_3d=True, kernel_size=3, stride=2, padding=1)
self.conv2a = BasicConv(48, 64, is_3d=True, kernel_size=3, stride=2, padding=1)
self.deconv1a = Conv2x(48, 32, deconv=True, is_3d=True, relu=False)
self.deconv2a = Conv2x(64, 48, deconv=True, is_3d=True)
self.sga1 = SGABlock(refine=True)
self.sga2 = SGABlock(refine=True)
self.sga11 = SGABlock(channels=48, refine=True)
self.sga12 = SGABlock(channels=48, refine=True)
def forward(self, x, g):
"""
args:
x: cost volume, in size [N,C,D,H,W];
g : guidance, in size [N, C2, H, W], where C2 = 20*C=20*32=640;
        return:
            aggregated cost volume, in size [N, C, D, H, W] (same shape as the input x, since conv_end maps back to cost_volume_in_channels);
"""
x = self.conv_start(x) # C=32
x = self.sga1(x, g['sg1'])
rem0 = x
x = self.conv1a(x)
x = self.sga11(x, g['sg11'])
rem1 = x
#print ("[???] rem1 size:", rem1.size())
x = self.conv2a(x)
#print ("[???] after conv2a(x) size:", x.size())
x = self.deconv2a(x, rem1) #???
x = self.sga12(x, g['sg12'])
x = self.deconv1a(x, rem0)
x = self.sga2(x, g['sg2'])
#added by CCJ:
x = self.conv_end(x)
return x
""" generate input signal g, which is fed into the Guidance() block,
in order to generate the weights (in 4 directions in total) for SGA Block;
"""
class GetInput4Guidance(nn.Module):
def __init__(self,
#is_quarter_size = True
downsample_scale = 3
):
super(GetInput4Guidance, self).__init__()
assert downsample_scale in [2, 3, 4], "downsample_scale should be 2, 3, or 4!!!"
self.conv_start = nn.Sequential(BasicConv(3, 16, kernel_size=3, padding=1),
BasicConv(16, 32, kernel_size=3, padding=1))
self.conv_refine = nn.Conv2d(32, 32, (3, 3), (1,1), (1,1), bias=False) #just convolution, no bn and relu
self.feature = Feature(downsample_scale)
#self.inter_C = 4 if is_quarter_size else 3
self.inter_C = downsample_scale
self.bn_relu = nn.Sequential(BatchNorm2d(32),
nn.ReLU(inplace=True))
def forward(self, x):
"""
args:
x: input image, in size [N,3,H,W];
return:
g : signal for Guidance() module, in size [N, C=64, H, W];
"""
g = self.conv_start(x)
x = self.feature(x) # in size [N, C=32, H/3, W/3]
x = self.conv_refine(x)
x = F.interpolate(x, [x.size()[2] * self.inter_C, x.size()[3] * self.inter_C],
mode='bilinear', align_corners=False)
x = self.bn_relu(x)
g = torch.cat((g, x), 1)
return g
"""
SGA module, adapted from GANet_11 code;
"""
class SGA_CostAggregation(nn.Module):
def __init__(self,
is_guide_from_img,
#is_quarter_size, # feature in 1/4 image size (i.e., H/4 x W/4) or 1/3 size (i.e., H/3 x W/3)
                 downsample_scale, # feature in 1/4, 1/3, or 1/2 image size (i.e., H/4 x W/4, H/3 x W/3, or H/2 x W/2)
is_lga, # generate LGA(Local Guided Aggregation) weights or not
cost_volume_in_channels # input cost volume feature channels
):
super(SGA_CostAggregation, self).__init__()
self.is_guide_from_img = is_guide_from_img
if is_guide_from_img:
#self.get_g_from_img = GetInput4Guidance(is_quarter_size)
self.get_g_from_img = GetInput4Guidance(downsample_scale)
else:
self.get_g_from_img = None
self.guidance = Guidance_11(downsample_scale, is_lga)
self.cost_agg = CostAggregation_11(cost_volume_in_channels)
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Conv3d)):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (BatchNorm2d, BatchNorm3d)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, cv, g_in = None, img_for_g = None):
"""
args:
cv: cost volume, in size [N,C1,D,H,W]
g_in : input for guidance module, in size [N, C2=64, H, W]
img_for_g: input image for generating guide input g_in; in size [N,3,H,W]
"""
#--------------
# guidance
#--------------
if g_in is None:
assert self.is_guide_from_img, 'No g provided!!!'
g_in = self.get_g_from_img(img_for_g) # in size [N, 64, H, W]
#print("[???] g_in shape", g_in.size())
g_out = self.guidance(g_in)
#for k,v in g_out.items():
# if v is not None:
#print("[???] g_out[%s] has shape" %k, v.size())
# after the guidance(), g_out in size [N, 4*C3=640, H, W],
# with C3=640/4=160=5*32;
# Note: 640/4=160=5*32, and 32 corresponds the convolved
# cost volume (for changing its C=64 to C=32);
        assert cv.ndim == 5, "Should be a 5D tensor!!!"
return self.cost_agg(cv, g_out).contiguous()
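# ----------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration, not part of the
# original file; it assumes the GANet CUDA extensions are built and a GPU
# is available). Shapes follow the docstrings above: a full-resolution
# guide image and a cost volume at 1/3 resolution for downsample_scale=3.
#
#   sga = SGA_CostAggregation(is_guide_from_img=True, downsample_scale=3,
#                             is_lga=False, cost_volume_in_channels=64).cuda()
#   cv = torch.randn(1, 64, 48, 80, 112).cuda()   # [N, C1, D, H/3, W/3]
#   img = torch.randn(1, 3, 240, 336).cuda()      # [N, 3, H, W]
#   out = sga(cv, g_in=None, img_for_g=img)       # -> [1, 64, 48, 80, 112]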
|
"""
Receives a command with search table results from the search engine and calls the
filer and utils modules to handle the command.
"""
class CommandHandler:
"""
Handles commands from searchengine.
"""
pass
|
from output.models.nist_data.atomic.non_negative_integer.schema_instance.nistschema_sv_iv_atomic_non_negative_integer_min_inclusive_2_xsd.nistschema_sv_iv_atomic_non_negative_integer_min_inclusive_2 import NistschemaSvIvAtomicNonNegativeIntegerMinInclusive2
__all__ = [
"NistschemaSvIvAtomicNonNegativeIntegerMinInclusive2",
]
|
"""
This package implements tomography algorithms and utilities in
Python. It is made of several modules, as follows:
- siddon: The core of the package, with a fast C/OpenMP
implementation of the Siddon algorithm.
See: http://adsabs.harvard.edu/abs/1985MedPh..12..252S
- simu: Implements some utilities to perform simulations.
- solar: A module to load Solar physics data with appropriate
metadata.
- models: Defines linear models linking the data with the parameters
to estimate (emission maps, density maps).
* srt: Solar rotational tomography
* stsrt: Smooth temporal solar rotational tomography
* thomson: srt with Thomson scattering model (e.g. for white light data)
- phantom: To generate phantoms (Shepp-Logan, Modified Shepp Logan,
and Yu Ye Wang phantoms).
Along with these modules, a convenient way to call inversion routines
is provided in the command-line through the srt command. In order to
use srt, you need to have the srt file in your $PATH along with
siddon, lo and fitsarray in your $PYTHONPATH. For usage information,
type "srt -h".
"""
from siddon import *
import simu
import solar
import phantom
import models
import display
import srt as srt_cli
try:
import lo
except ImportError:
pass
if 'lo' in locals():
from lo_wrapper import *
version = "0.3.0"
|
"""Defines various distribution models."""
import numpy as np
import numpy.random as rnd
import scipy.stats as stats
from warnings import warn
import serums.enums as enums
class BaseSingleModel:
"""Generic base class for distribution models.
This defines the required functions and provides their recommended function
signature for inherited classes. It also defines base attributes for the
distribution.
Attributes
----------
location : N x 1 numpy array
location parameter of the distribution
scale : N x N numpy array
scale parameter of the distribution
"""
def __init__(self, loc=None, scale=None):
super().__init__()
self.location = loc
self.scale = scale
def sample(self, rng=None):
"""Draw a sample from the distribution.
This should be implemented by the child class.
Parameters
----------
rng : numpy random generator, optional
random number generator to use. The default is None.
Returns
-------
None.
"""
warn('sample not implemented by class {}'.format(type(self).__name__))
def pdf(self, x):
"""Calculate the PDF value at the given point.
This should be implemented by the child class.
Parameters
----------
x : N x 1 numpy array
Point to evaluate the PDF.
Returns
-------
float
PDF value.
"""
warn('pdf not implemented by class {}'.format(type(self).__name__))
return np.nan
class Gaussian(BaseSingleModel):
"""Represents a Gaussian distribution object."""
def __init__(self, mean=None, covariance=None):
"""Initialize an object.
Parameters
----------
mean : N x 1 numpy array, optional
Mean of the distribution. The default is None.
covariance : N x N numpy array, optional
Covariance of the distribution. The default is None.
Returns
-------
None.
"""
super().__init__(loc=mean, scale=covariance)
@property
def mean(self):
"""Mean of the distribution.
Returns
-------
        N x 1 numpy array.
"""
return self.location
@mean.setter
def mean(self, val):
self.location = val
@property
def covariance(self):
"""Covariance of the distribution.
Returns
-------
        N x N numpy array.
"""
return self.scale
@covariance.setter
def covariance(self, val):
self.scale = val
def sample(self, rng=None):
"""Draw a sample from the current mixture model.
Parameters
----------
rng : numpy random generator, optional
Random number generator to use. If none is given then the numpy
default is used. The default is None.
Returns
-------
numpy array
randomly sampled numpy array of the same shape as the mean.
"""
if rng is None:
rng = rnd.default_rng()
return rng.multivariate_normal(self.mean.flatten(), self.covariance)
def pdf(self, x):
"""Multi-variate probability density function for this distribution.
Returns
-------
float
PDF value of the state `x`.
"""
rv = stats.multivariate_normal
return rv.pdf(x.flatten(), mean=self.mean.flatten(), cov=self.covariance)
class StudentsT(BaseSingleModel):
"""Represents a Student's t-distribution."""
def __init__(self, mean=None, scale=None, dof=None):
super().__init__(loc=mean, scale=scale)
self._dof = dof
@property
def mean(self):
"""Mean of the distribution.
Returns
-------
        N x 1 numpy array.
"""
return self.location
@mean.setter
def mean(self, val):
self.location = val
@property
def degrees_of_freedom(self):
"""Degrees of freedom of the distribution, must be greater than 0."""
return self._dof
@degrees_of_freedom.setter
def degrees_of_freedom(self, value):
self._dof = value
@property
def covariance(self):
"""Read only covariance of the distribution (if defined).
Returns
-------
        N x N numpy array.
"""
if self._dof <= 2:
msg = 'Degrees of freedom is {} and must be > 2'
raise RuntimeError(msg.format(self._dof))
return self._dof / (self._dof - 2) * self.scale
@covariance.setter
def covariance(self, val):
warn('Covariance is read only.')
def pdf(self, x):
"""Multi-variate probability density function for this distribution.
Parameters
----------
x : N x 1 numpy array
Value to evaluate the pdf at.
Returns
-------
float
PDF value of the state `x`.
"""
rv = stats.multivariate_t
return rv.pdf(x.flatten(), loc=self.location.flatten(), shape=self.scale,
df=self.degrees_of_freedom)
def sample(self, rng=None):
"""Multi-variate probability density function for this distribution.
Parameters
----------
rng : numpy random generator, optional
Random number generator to use. If none is given then the numpy
default is used. The default is None.
Returns
-------
        numpy array
            Randomly sampled N x 1 array from the distribution.
"""
if rng is None:
rng = rnd.default_rng()
rv = stats.multivariate_t
rv.random_state = rng
x = rv.rvs(loc=self.location.flatten(),
shape=self.scale, df=self.degrees_of_freedom)
return x.reshape((x.size, 1))
class ChiSquared(BaseSingleModel):
"""Represents a Chi Squared distribution."""
def __init__(self, mean=None, scale=None, dof=None):
super().__init__(loc=mean, scale=scale)
self._dof = dof
@property
def mean(self):
"""Mean of the distribution.
Returns
-------
        N x 1 numpy array.
"""
return self.location
@mean.setter
def mean(self, val):
self.location = val
@property
def degrees_of_freedom(self):
"""Degrees of freedom of the distribution, must be greater than 0."""
return self._dof
@degrees_of_freedom.setter
def degrees_of_freedom(self, value):
self._dof = value
@property
def covariance(self):
"""Read only covariance of the distribution (if defined).
Returns
-------
        N x N numpy array.
"""
        if self._dof <= 0:
msg = 'Degrees of freedom is {} and must be > 0'
raise RuntimeError(msg.format(self._dof))
return (self._dof * 2) * (self.scale**2)
@covariance.setter
def covariance(self, val):
warn('Covariance is read only.')
def pdf(self, x):
"""Multi-variate probability density function for this distribution.
Parameters
----------
x : N x 1 numpy array
Value to evaluate the pdf at.
Returns
-------
float
PDF value of the state `x`.
"""
rv = stats.chi2
        return rv.pdf(x.flatten(), self._dof,
                      loc=self.location.flatten(), scale=self.scale)
def sample(self, rng=None):
"""Multi-variate probability density function for this distribution.
Parameters
----------
rng : numpy random generator, optional
Random number generator to use. If none is given then the numpy
default is used. The default is None.
Returns
-------
        numpy array
            Randomly sampled N x 1 array from the distribution.
"""
if rng is None:
rng = rnd.default_rng()
rv = stats.chi2
rv.random_state = rng
x = rv.rvs(self._dof, loc=self.location.flatten(),
scale=self.scale)
return x.reshape((x.size, 1))
class Cauchy(StudentsT):
"""Represents a Cauchy distribution.
This is a special case of the Student's t-distribution with the degrees of
freedom fixed at 1. However, the mean and covariance do not exist for this
distribution.
"""
def __init__(self, location=None, scale=None):
super().__init__(scale=scale, dof=1)
self.location = location
@property
def mean(self):
"""Mean of the distribution."""
warn('Mean does not exist for a Cauchy')
@mean.setter
def mean(self, val):
warn('Mean does not exist for a Cauchy')
@property
def degrees_of_freedom(self):
"""Degrees of freedom of the distribution, fixed at 1."""
return super().degrees_of_freedom
@degrees_of_freedom.setter
def degrees_of_freedom(self, value):
warn('Degrees of freedom is 1 for a Cauchy')
@property
def covariance(self):
"""Read only covariance of the distribution (if defined)."""
        warn('Covariance does not exist.')
@covariance.setter
def covariance(self, val):
        warn('Covariance does not exist.')
class GaussianScaleMixture(BaseSingleModel):
r"""Helper class for defining Gaussian Scale Mixture objects.
Note
----
This is an alternative method for representing heavy-tailed distributions
by modeling them as a combination of a standard Gaussian, :math:`v`, and
another positive random variable known as the generating variate, :math:`z`
.. math::
x \overset{d}{=} \sqrt{z} v
where :math:`\overset{d}{=}` means equal in distribution and :math:`x`
follows a GSM distribution (in general, a heavy tailed distribution).
This formulation is based on
:cite:`VilaValls2012_NonlinearBayesianFilteringintheGaussianScaleMixtureContext`,
:cite:`Wainwright1999_ScaleMixturesofGaussiansandtheStatisticsofNaturalImages`, and
:cite:`Kuruoglu1998_ApproximationofAStableProbabilityDensitiesUsingFiniteGaussianMixtures`.
Attributes
----------
type : :class:`serums.enums.GSMTypes`
Type of the distribution to represent as a GSM.
location_range : tuple
Minimum and maximum values for the location parameter. Useful if being
fed to a filter for estimating the location parameter. Each element must
match the type of the :attr:`.location` attribute.
scale_range : tuple
Minimum and maximum values for the scale parameter. Useful if being
fed to a filter for estimating the scale parameter. Each element must
match the type of the :attr:`.scale` attribute. The default is None.
df_range : tuple
Minimum and maximum values for the degree of freedom parameter.
Useful if being fed to a filter for estimating the degree of freedom
parameter. Each element must be a float. The default is None.
"""
__df_types = (enums.GSMTypes.STUDENTS_T, enums.GSMTypes.CAUCHY)
def __init__(self, gsm_type, location=None, location_range=None,
scale=None, scale_range=None, degrees_of_freedom=None,
df_range=None):
"""Initialize a GSM Object.
Parameters
----------
gsm_type : :class:`serums.enums.GSMTypes`
Type of the distribution to represent as a GSM.
location : N x 1 numpy array, optional
location parameter of the distribution. The default is None.
location_range : tuple, optional
Minimum and maximum values for the location parameter. Useful if being
fed to a filter for estimating the location parameter. Each element must
match the type of the :attr:`.location` attribute. The default is None
scale : N x N numpy array, optional
Scale parameter of the distribution being represented as a GSM.
The default is None.
scale_range : tuple, optional
Minimum and maximum values for the scale parameter. Useful if being
fed to a filter for estimating the scale parameter. Each element must
match the type of the :attr:`.scale` attribute. The default is None.
degrees_of_freedom : float, optional
Degrees of freedom parameter of the distribution being represented
as a GSM. This is not needed by all types. The default is None.
df_range : tuple, optional
Minimum and maximum values for the degree of freedom parameter.
Useful if being fed to a filter for estimating the degree of freedom
parameter. Each element must be a float. The default is None.
Raises
------
RuntimeError
If a `gsm_type` is given that is of the incorrect data type.
"""
super().__init__(loc=location, scale=scale)
if not isinstance(gsm_type, enums.GSMTypes):
raise RuntimeError('Type ({}) must be a GSMType'.format(gsm_type))
self.type = gsm_type
self._df = None
self.location_range = location_range
self.scale_range = scale_range
self.df_range = df_range
if degrees_of_freedom is not None:
self.degrees_of_freedom = degrees_of_freedom
if self.type is enums.GSMTypes.CAUCHY:
self._df = 1
@property
def degrees_of_freedom(self):
"""Degrees of freedom parameter of the distribution being represented as a GSM.
Returns
-------
float, optional
"""
if self.type in self.__df_types:
return self._df
else:
            msg = 'GSM type {} does not have a degree of freedom.'.format(self.type)
warn(msg)
return None
@degrees_of_freedom.setter
def degrees_of_freedom(self, val):
if self.type in self.__df_types:
if self.type is enums.GSMTypes.CAUCHY:
                warn('GSM type {} requires degree of freedom = 1'.format(self.type))
return
self._df = val
else:
            msg = ('GSM type {} does not have a degree of freedom. '
                   + 'Skipping').format(self.type)
warn(msg)
def sample(self, rng=None):
"""Draw a sample from the specified GSM type.
Parameters
----------
rng : numpy random generator, optional
Random number generator to use. If none is given then the numpy
default is used. The default is None.
Returns
-------
float
randomly sampled value from the GSM.
"""
if rng is None:
rng = rnd.default_rng()
if self.type in [enums.GSMTypes.STUDENTS_T, enums.GSMTypes.CAUCHY]:
return self._sample_student_t(rng)
elif self.type is enums.GSMTypes.SYMMETRIC_A_STABLE:
return self._sample_SaS(rng)
else:
raise RuntimeError('GSM type: {} is not supported'.format(self.type))
def _sample_student_t(self, rng):
return stats.t.rvs(self.degrees_of_freedom, scale=self.scale,
random_state=rng)
def _sample_SaS(self, rng):
raise RuntimeError('sampling SaS distribution not implemented')
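# Hypothetical usage sketch (illustrative only, not part of the original
# module): represent a 1-D Student's t as a GSM and draw from it.
#   gsm = GaussianScaleMixture(enums.GSMTypes.STUDENTS_T,
#                              scale=np.array([[1.0]]),
#                              degrees_of_freedom=3)
#   x = gsm.sample()  # heavy-tailed draw from a t-distribution with 3 dof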
class BaseMixtureModel:
"""Generic base class for mixture distribution models.
This defines the required functions and provides their recommended function
signature for inherited classes. It also defines base attributes for the
mixture model.
Attributes
----------
weights : list
weight of each distribution
"""
def __init__(self, distributions=None, weights=None):
"""Initialize a mixture model object.
Parameters
----------
distributions : list, optional
Each element is a :class:`.BaseSingleModel`. The default is None.
weights : list, optional
Weight of each distribution. The default is None.
Returns
-------
None.
"""
if distributions is None:
distributions = []
if weights is None:
weights = []
self._distributions = distributions
self.weights = weights
def sample(self, rng=None):
"""Draw a sample from the current mixture model.
Parameters
----------
rng : numpy random generator, optional
Random number generator to use. If none is given then the numpy
default is used. The default is None.
Returns
-------
numpy array
randomly sampled numpy array of the same shape as the mean.
"""
if rng is None:
rng = rnd.default_rng()
mix_ind = rng.choice(np.arange(len(self.weights), dtype=int),
p=self.weights)
x = self._distributions[mix_ind].sample(rng=rng)
return x.reshape((x.size, 1))
def pdf(self, x):
"""Multi-variate probability density function for this mixture.
Returns
-------
float
PDF value of the state `x`.
"""
p = 0
for w, dist in zip(self.weights, self._distributions):
p += w * dist.pdf(x)
return p
def remove_components(self, indices):
"""Remove component distributions from the mixture by index.
Parameters
----------
indices : list
indices of distributions to remove.
Returns
-------
None.
"""
if not isinstance(indices, list):
indices = list(indices)
for index in sorted(indices, reverse=True):
del self._distributions[index]
del self.weights[index]
def add_component(self, *args):
"""Add a component distribution to the mixture.
This should be implemented by the child class.
Parameters
----------
*args : tuple
Additional arguments specific to the child distribution.
Returns
-------
None.
"""
warn('add_component not implemented by {}'.format(type(self).__name__))
class _DistListWrapper(list):
"""Helper class for wrapping lists of BaseSingleModel to get a list of a single parameter."""
def __init__(self, dist_lst, attr):
"""Give list of distributions and the attribute to access."""
self.dist_lst = dist_lst
self.attr = attr
def __getitem__(self, index):
"""Get the attribute of the item at the index in the list."""
if isinstance(index, slice):
step = 1
if index.step is not None:
step = index.step
return [getattr(self.dist_lst[ii], self.attr)
for ii in range(index.start, index.stop, step)]
elif isinstance(index, int):
return getattr(self.dist_lst[index], self.attr)
else:
            fmt = 'Index must be an integer or slice, not {}'
raise RuntimeError(fmt.format(type(index)))
def __setitem__(self, index, val):
"""Set the attribute of the item at the index to the value."""
if isinstance(index, slice):
step = 1
if index.step is not None:
step = index.step
for ii in range(index.start, index.stop, step):
setattr(self.dist_lst[ii], self.attr, val)
elif isinstance(index, int):
setattr(self.dist_lst[index], self.attr, val)
else:
            fmt = 'Index must be an integer or slice, not {}'
raise RuntimeError(fmt.format(type(index)))
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < len(self.dist_lst):
self.n += 1
return getattr(self.dist_lst[self.n - 1], self.attr)
else:
raise StopIteration
def __repr__(self):
return str([getattr(d, self.attr) for d in self.dist_lst])
def __len__(self):
return len(self.dist_lst)
def append(self, *args):
raise RuntimeError('Cannot append, use add_component function instead.')
def extend(self, *args):
raise RuntimeError('Cannot extend, use add_component function instead.')
class GaussianMixture(BaseMixtureModel):
"""Gaussian Mixture object."""
def __init__(self, means=None, covariances=None, **kwargs):
"""Initialize an object.
Parameters
----------
means : list, optional
Each element is a N x 1 numpy array. Will be used in place of supplied
distributions but requires covariances to also be given. The default is None.
covariances : list, optional
Each element is an N x N numpy array. Will be used in place of
supplied distributions but requires means to be given. The default is None.
**kwargs : dict, optional
See the base class for details.
Returns
-------
None.
"""
if means is not None and covariances is not None:
kwargs['distributions'] = [Gaussian(mean=m, covariance=c)
for m, c in zip(means, covariances)]
super().__init__(**kwargs)
@property
def means(self):
"""List of Gaussian means, each is a N x 1 numpy array. Recommended to be read only."""
return _DistListWrapper(self._distributions, 'location')
@means.setter
def means(self, val):
if not isinstance(val, list):
warn('Must set means to a list')
return
if len(val) != len(self._distributions):
self.weights = [1 / len(val) for ii in range(len(val))]
self._distributions = [Gaussian() for ii in range(len(val))]
for ii, v in enumerate(val):
self._distributions[ii].mean = v
@property
def covariances(self):
"""List of Gaussian covariances, each is a N x N numpy array. Recommended to be read only."""
return _DistListWrapper(self._distributions, 'scale')
@covariances.setter
def covariances(self, val):
if not isinstance(val, list):
warn('Must set covariances to a list')
return
if len(val) != len(self._distributions):
self.weights = [1 / len(val) for ii in range(len(val))]
self._distributions = [Gaussian() for ii in range(len(val))]
for ii, v in enumerate(val):
self._distributions[ii].covariance = v
def add_components(self, means, covariances, weights):
"""Add Gaussian distributions to the mixture.
Parameters
----------
means : list
Each is a N x 1 numpy array of the mean of the distributions to add.
covariances : list
Each is a N x N numpy array of the covariance of the distributions
to add.
weights : list
Each is a float for the weight of the distributions to add. No
normalization is done.
Returns
-------
None.
"""
if not isinstance(means, list):
means = [means, ]
if not isinstance(covariances, list):
covariances = [covariances, ]
if not isinstance(weights, list):
weights = [weights, ]
self._distributions.extend([Gaussian(mean=m, covariance=c)
for m, c in zip(means, covariances)])
self.weights.extend(weights)
class StudentsTMixture(BaseMixtureModel):
"""Students T mixture object."""
def __init__(self, means=None, scalings=None, dof=None, **kwargs):
if means is not None and scalings is not None and dof is not None:
if isinstance(dof, list):
dists = [StudentsT(mean=m, scale=s, dof=df)
for m, s, df in zip(means, scalings, dof)]
else:
dists = [StudentsT(mean=m, scale=s, dof=dof)
for m, s in zip(means, scalings)]
kwargs['distributions'] = dists
super().__init__(**kwargs)
@property
def means(self):
"""List of Gaussian means, each is a N x 1 numpy array. Recommended to be read only."""
return _DistListWrapper(self._distributions, 'location')
@means.setter
def means(self, val):
if not isinstance(val, list):
warn('Must set means to a list')
return
if len(val) != len(self._distributions):
self.weights = [1 / len(val) for ii in range(len(val))]
self._distributions = [StudentsT() for ii in range(len(val))]
for ii, v in enumerate(val):
self._distributions[ii].mean = v
@property
def covariances(self):
"""Read only list of covariances, each is a N x N numpy array."""
return _DistListWrapper(self._distributions, 'covariance')
@property
def scalings(self):
"""List of scalings, each is a N x N numpy array. Recommended to be read only."""
return _DistListWrapper(self._distributions, 'scale')
@scalings.setter
def scalings(self, val):
if not isinstance(val, list):
warn('Must set scalings to a list')
return
if len(val) != len(self._distributions):
self.weights = [1 / len(val) for ii in range(len(val))]
self._distributions = [StudentsT() for ii in range(len(val))]
for ii, v in enumerate(val):
self._distributions[ii].scale = v
@property
def dof(self):
"""Most common degree of freedom for the mixture. Deprecated but kept for compatability, new code should use degrees_of_freedom."""
vals, counts = np.unique([d.degrees_of_freedom for d in self._distributions],
return_counts=True)
inds = np.argwhere(counts == np.max(counts))
return vals[inds[0]].item()
@dof.setter
def dof(self, val):
for d in self._distributions:
d.degrees_of_freedom = val
@property
def degrees_of_freedom(self):
"""List of degrees of freedom, each is a float. Recommended to be read only."""
return _DistListWrapper(self._distributions, 'degrees_of_freedom')
@degrees_of_freedom.setter
def degrees_of_freedom(self, val):
if not isinstance(val, list):
warn('Must set degrees of freedom to a list')
return
if len(val) != len(self._distributions):
self.weights = [1 / len(val) for ii in range(len(val))]
self._distributions = [StudentsT() for ii in range(len(val))]
for ii, v in enumerate(val):
self._distributions[ii].degrees_of_freedom = v
def add_components(self, means, scalings, dof_lst, weights):
"""Add Student's t-distributions to the mixture.
Parameters
----------
means : list
Each is a N x 1 numpy array of the mean of the distributions to add.
scalings : list
Each is a N x N numpy array of the scale of the distributions
to add.
dof_lst : list
Each is a float representing the degrees of freedom of the distribution
to add.
weights : list
Each is a float for the weight of the distributions to add. No
normalization is done.
Returns
-------
None.
"""
if not isinstance(means, list):
means = [means, ]
if not isinstance(scalings, list):
scalings = [scalings, ]
if not isinstance(dof_lst, list):
dof_lst = [dof_lst, ]
if not isinstance(weights, list):
weights = [weights, ]
self._distributions.extend([StudentsT(mean=m, scale=s, dof=df)
for m, s, df in zip(means, scalings, dof_lst)])
self.weights.extend(weights)
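if __name__ == '__main__':
    # Minimal self-check sketch (added for illustration; the component
    # values below are made up). Builds a two-component Gaussian mixture,
    # draws a sample, and evaluates the mixture pdf at that sample.
    _means = [np.zeros((2, 1)), np.ones((2, 1))]
    _covs = [np.eye(2), 0.5 * np.eye(2)]
    _gm = GaussianMixture(means=_means, covariances=_covs,
                          weights=[0.7, 0.3])
    _x = _gm.sample()
    print('sample:', _x.ravel(), 'pdf:', _gm.pdf(_x))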
|
import time
import usb.core, usb.util
# generateWords
from random import choice
from wordlist import wordlist
# detectHuman
import pygame
def generateWords():
return " ".join([choice(wordlist) for x in range(4)])
def detectHuman():
pygame.init()
pygame.event.set_grab(True)
screen = pygame.display.set_mode((700, 90))
screen.fill((50,50,50))
words = generateWords()
font = pygame.font.SysFont("monospace", 25)
label = font.render(words, 1, (255,255,255))
screen.blit(label, (10, 10))
newText = ""
while True:
pygame.display.flip()
events = pygame.event.get()
for event in events:
if event.type != pygame.KEYDOWN:
pass
elif event.key == pygame.K_RETURN and newText == words:
pygame.quit()
return True
elif event.key == pygame.K_BACKSPACE:
newText = ""
elif event.type == pygame.KEYDOWN and event.key < 256:
newText += chr(event.key)
font = pygame.font.SysFont("monospace", 25)
if words[:len(newText)] != newText:
color = (255,100,100)
else:
color = (100,255,100)
input = font.render(newText + "|",
1, color,
(50,50,50))
screen.blit(input, (10, 50))
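# Watcher setup (comments added for clarity): record a baseline count of
# USB devices whose class is defined at the interface level
# (bDeviceClass == 0, the class a keyboard-emulating Rubber Ducky reports),
# then poll every 250 ms; whenever a new device appears, require a human
# to retype a random passphrase before continuing.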
devices = [x for x in usb.core.find(find_all=True, bDeviceClass=0)]
deviceCount = len(devices)
print("The DuckyKiller is now watching for Rubber Ducks.")
while True:
devices = [x for x in usb.core.find(find_all=True, bDeviceClass=0)]
time.sleep(.25)
if len(devices) > deviceCount:
detectHuman()
deviceCount = len(devices)
|
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import ddt
from rally import exceptions
from rally_openstack.task.scenarios.octavia import loadbalancers
from tests.unit import test
OCTAVIA_LB = "rally_openstack.task.scenarios.octavia.loadbalancers."
@ddt.ddt
class LoadbalancersTestCase(test.ScenarioTestCase):
def setUp(self):
super(LoadbalancersTestCase, self).setUp()
self.context.update({
"user": {"keypair": {"name": "foo_keypair_name"},
"credential": mock.MagicMock()},
"tenant": {"id": "foo_tenant_id",
"networks": [{
"name": "foo_net",
"subnets": ["foo_subnet0"]
}]}
})
cinder_patcher = mock.patch(
"rally_openstack.common.services.storage.block.BlockStorage")
self.cinder = cinder_patcher.start().return_value
self.cinder.create_volume.return_value = mock.Mock(id="foo_volume")
self.addCleanup(cinder_patcher.stop)
def create_env(self, scenario):
self.ip = {"id": "foo_id", "ip": "foo_ip", "is_floating": True}
scenario.generate_random_name = mock.Mock(
return_value="random_name")
scenario.octavia.load_balancer_create = mock.Mock(
return_value={"id": "foo_lb_id", "vip_port_id": 1234}
)
scenario.octavia.wait_for_loadbalancer_prov_status = mock.Mock(
return_value=True
)
scenario.octavia.listener_create = mock.Mock(
return_value={"listener": {"id": "listener_id"}}
)
scenario.octavia.wait_for_listener_prov_status = mock.Mock()
scenario.octavia.pool_create = mock.Mock(
return_value={"id": "pool_id"}
)
scenario.server = mock.Mock(
networks={"foo_net": {"subnets": "foo_subnet"}},
addresses={"foo_net": [{"addr": "foo_ip"}]},
tenant_id="foo_tenant"
)
scenario._boot_server_with_fip = mock.Mock(
return_value=(scenario.server, self.ip))
scenario.octavia.member_create = mock.Mock()
scenario._add_server_secgroups = mock.Mock()
# wait_for_status and get_from_manager are mocked in the base class as
# self.mock_wait_for_status and self.mock_get_from_manager
scenario._run_command = mock.MagicMock(
return_value=(0, "{\"foo\": 42}", "foo_err"))
return scenario
@ddt.data({
"image": "foo_image", "flavor": "foo_flavor", "username": "foo_user",
"password": "foo_password", "floating_net": "foo_floating_net",
"port": "foo_port", "use_floating_ip": "foo_use_floating_ip",
"description": "foo_description", "admin_state": "foo_admin_state",
"listeners": "foo_listeners", "flavor_id": "foo_flavor_id",
"provider": "foo_provider",
"vip_qos_policy_id": "foo_vip_qos_policy_id",
})
@mock.patch(OCTAVIA_LB + "urlopen")
@mock.patch(OCTAVIA_LB + "network_wrapper.wrap")
def test_create_and_balance_http_vms(self, params, mock_wrap,
mock_urlopen):
scenario = self.create_env(
loadbalancers.CreateAndBalanceHttpVms(self.context))
# Mock a successful response from urlopen.
mock_response = mock.MagicMock()
mock_response.getcode.side_effect = [200]
mock_urlopen.return_value = mock_response
# self.clients is mocked in the base class
# Set a return value for self.clients("neutron").create_security_group
sec_grp = {"security_group": {"id": "sec_grp_id"}}
self.clients(
"neutron").create_security_group.return_value = sec_grp
netwrap = mock_wrap.return_value
fip = {"id": "foo_id", "ip": "foo_ip"}
netwrap.create_floating_ip.return_value = fip
scenario._run_command = mock.MagicMock(
return_value=(0, "{\"foo\": 42}", "foo_err"))
scenario.run(**params)
        # Check create_security_group_rule is called with
# expected args.
expected_security_group_rule_args = {
"security_group_id": "sec_grp_id",
"direction": "ingress",
"port_range_max": 80,
"port_range_min": 80,
"protocol": "tcp",
"remote_ip_prefix": "0.0.0.0/0",
}
self.clients(
"neutron").create_security_group_rule.assert_called_once_with(
{"security_group_rule": expected_security_group_rule_args})
# Check load_balancer_create is called with expected args.
expected_lb_create_args = {
"subnet_id": "foo_subnet0",
"description": "foo_description",
"admin_state": "foo_admin_state",
"project_id": "foo_tenant_id",
"listeners": "foo_listeners",
"flavor_id": "foo_flavor_id",
"provider": "foo_provider",
"vip_qos_policy_id": "foo_vip_qos_policy_id",
}
scenario.octavia.load_balancer_create.assert_called_once_with(
**expected_lb_create_args)
# Check listener_create is called with expected args.
expected_listener_create_args = {
"json": {
"listener": {
"protocol": "HTTP",
"protocol_port": 80,
"loadbalancer_id": "foo_lb_id",
}
}
}
scenario.octavia.listener_create.assert_called_once_with(
**expected_listener_create_args)
# Check wait_for_listener_prov_status is called with expected args.
(scenario.octavia.wait_for_loadbalancer_prov_status.
assert_called_once_with({"id": "foo_lb_id", "vip_port_id": 1234}))
# Check pool_create is called with expected args.
expected_pool_create_args = {
"lb_id": "foo_lb_id",
"protocol": "HTTP",
"lb_algorithm": "ROUND_ROBIN",
"listener_id": "listener_id",
"project_id": "foo_tenant_id",
}
scenario.octavia.pool_create.assert_called_once_with(
**expected_pool_create_args)
# Check update_floatingip is called with expected args.
self.clients(
"neutron").update_floatingip.assert_called_once_with(
"foo_id", {"floatingip": {"port_id": 1234}})
# Checks for two servers added as members to the load balancer group.
# Check _boot_server_with_fip is called with expected args.
expected_boot_server_args = [
mock.call("foo_image", "foo_flavor",
use_floating_ip="foo_use_floating_ip",
floating_net="foo_floating_net",
key_name="foo_keypair_name",
userdata="#cloud-config\npackages:\n - apache2"),
mock.call("foo_image", "foo_flavor",
use_floating_ip="foo_use_floating_ip",
floating_net="foo_floating_net",
key_name="foo_keypair_name",
userdata="#cloud-config\npackages:\n - apache2"),
]
self.assertEqual(
scenario._boot_server_with_fip.call_args_list,
expected_boot_server_args)
# Check member_create is called with expected args.
        expected_member_args = {
            "member": {"address": "foo_ip", "protocol_port": 80}
        }
        expected_member_create_calls = [
            mock.call("pool_id", json=expected_member_args),
            mock.call("pool_id", json=expected_member_args),
        ]
        self.assertEqual(scenario.octavia.member_create.call_args_list,
                         expected_member_create_calls)
# Check _add_server_secgroups is called with expected args.
expected_add_server_secgroups_args = [
mock.call(scenario.server, "sec_grp_id"),
mock.call(scenario.server, "sec_grp_id"),
]
self.assertEqual(scenario._add_server_secgroups.call_args_list,
expected_add_server_secgroups_args)
# Check that _run_command is called for both servers with the script
# to wait for cloud-init to complete.
expected_command = {
"script_inline": "cloud-init status -w || exit 1",
"interpreter": "/bin/bash"
}
expected_run_command_call_args = [
mock.call("foo_ip", "foo_port", "foo_user", "foo_password",
command=expected_command),
mock.call("foo_ip", "foo_port", "foo_user", "foo_password",
command=expected_command)]
self.assertEqual(scenario._run_command.call_args_list,
expected_run_command_call_args)
# Check urlopen is called with expected args.
mock_urlopen.assert_called_once_with("http://foo_ip/")
# Check response.getcode was called.
mock_response.getcode.assert_called_once_with()
@ddt.data(
{"image": "some_image",
"flavor": "m1.small", "username": "test_user"}
)
@mock.patch(OCTAVIA_LB + "urlopen")
@mock.patch(OCTAVIA_LB + "network_wrapper.wrap")
def test_create_and_balance_http_vms_raises_ScriptError(
self, params, mock_wrap, mock_urlopen):
scenario = self.create_env(
loadbalancers.CreateAndBalanceHttpVms(self.context))
mock_response = mock.MagicMock()
mock_response.getcode.side_effect = [200]
mock_urlopen.return_value = mock_response
sec_grp = {"security_group": {"id": "sec_grp_id"}}
self.clients(
"neutron").create_security_group.return_value = sec_grp
netwrap = mock_wrap.return_value
fip = {"id": "foo_id", "ip": "foo_ip"}
netwrap.create_floating_ip.return_value = fip
scenario._run_command.return_value = (-1, "out", "err")
self.assertRaises(exceptions.ScriptError,
scenario.run,
**params,
)
@ddt.data(
{"image": "some_image",
"flavor": "m1.small", "username": "test_user"}
)
@mock.patch(OCTAVIA_LB + "urlopen")
@mock.patch(OCTAVIA_LB + "network_wrapper.wrap")
def test_create_and_balance_http_vms_raises_RallyException(
self, params, mock_wrap, mock_urlopen):
scenario = self.create_env(
loadbalancers.CreateAndBalanceHttpVms(self.context))
mock_response = mock.MagicMock()
mock_response.getcode.side_effect = [400]
mock_urlopen.return_value = mock_response
sec_grp = {"security_group": {"id": "sec_grp_id"}}
self.clients(
"neutron").create_security_group.return_value = sec_grp
netwrap = mock_wrap.return_value
fip = {"id": "foo_id", "ip": "foo_ip"}
netwrap.create_floating_ip.return_value = fip
scenario._run_command = mock.MagicMock(
return_value=(0, "{\"foo\": 42}", "foo_err"))
self.assertRaises(exceptions.RallyException,
scenario.run,
**params,
)
|
import sys
import pytest
@pytest.mark.parametrize("mode", ["normal", "xdist"])
class TestFixture:
"""
Tests for ``subtests`` fixture.
"""
@pytest.fixture
def simple_script(self, testdir):
testdir.makepyfile(
"""
def test_foo(subtests):
for i in range(5):
with subtests.test(msg="custom", i=i):
assert i % 2 == 0
"""
)
def test_simple_terminal_normal(self, simple_script, testdir, mode):
if mode == "normal":
result = testdir.runpytest()
expected_lines = ["collected 1 item"]
else:
pytest.importorskip("xdist")
result = testdir.runpytest("-n1")
expected_lines = ["gw0 [1]"]
expected_lines += [
"* test_foo [[]custom[]] (i=1) *",
"* test_foo [[]custom[]] (i=3) *",
"* 2 failed, 1 passed in *",
]
result.stdout.fnmatch_lines(expected_lines)
def test_simple_terminal_verbose(self, simple_script, testdir, mode):
if mode == "normal":
result = testdir.runpytest("-v")
expected_lines = [
"*collected 1 item",
"test_simple_terminal_verbose.py::test_foo PASSED *100%*",
"test_simple_terminal_verbose.py::test_foo FAILED *100%*",
"test_simple_terminal_verbose.py::test_foo PASSED *100%*",
"test_simple_terminal_verbose.py::test_foo FAILED *100%*",
"test_simple_terminal_verbose.py::test_foo PASSED *100%*",
"test_simple_terminal_verbose.py::test_foo PASSED *100%*",
]
else:
pytest.importorskip("xdist")
result = testdir.runpytest("-n1", "-v")
expected_lines = [
"gw0 [1]",
"*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
"*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
"*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
"*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
"*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
"*gw0*100%* test_simple_terminal_verbose.py::test_foo*",
]
expected_lines += [
"* test_foo [[]custom[]] (i=1) *",
"* test_foo [[]custom[]] (i=3) *",
"* 2 failed, 1 passed in *",
]
result.stdout.fnmatch_lines(expected_lines)
def test_skip(self, testdir, mode):
testdir.makepyfile(
"""
import pytest
def test_foo(subtests):
for i in range(5):
with subtests.test(msg="custom", i=i):
if i % 2 == 0:
pytest.skip('even number')
"""
)
if mode == "normal":
result = testdir.runpytest()
expected_lines = ["collected 1 item"]
else:
pytest.importorskip("xdist")
result = testdir.runpytest("-n1")
expected_lines = ["gw0 [1]"]
expected_lines += ["* 1 passed, 3 skipped in *"]
result.stdout.fnmatch_lines(expected_lines)
class TestSubTest:
"""
    Tests for ``TestCase.subTest`` functionality.
"""
@pytest.fixture
def simple_script(self, testdir):
return testdir.makepyfile(
"""
from unittest import TestCase, main
class T(TestCase):
def test_foo(self):
for i in range(5):
with self.subTest(msg="custom", i=i):
self.assertEqual(i % 2, 0)
if __name__ == '__main__':
main()
"""
)
@pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"])
def test_simple_terminal_normal(self, simple_script, testdir, runner):
if runner == "unittest":
result = testdir.run(sys.executable, simple_script)
result.stderr.fnmatch_lines(
[
"FAIL: test_foo (__main__.T) [custom] (i=1)",
"AssertionError: 1 != 0",
"FAIL: test_foo (__main__.T) [custom] (i=3)",
"AssertionError: 1 != 0",
"Ran 1 test in *",
"FAILED (failures=2)",
]
)
else:
if runner == "pytest-normal":
result = testdir.runpytest(simple_script)
expected_lines = ["collected 1 item"]
else:
pytest.importorskip("xdist")
result = testdir.runpytest(simple_script, "-n1")
expected_lines = ["gw0 [1]"]
result.stdout.fnmatch_lines(
expected_lines
+ [
"* T.test_foo [[]custom[]] (i=1) *",
"E * AssertionError: 1 != 0",
"* T.test_foo [[]custom[]] (i=3) *",
"E * AssertionError: 1 != 0",
"* 2 failed, 1 passed in *",
]
)
@pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"])
def test_simple_terminal_verbose(self, simple_script, testdir, runner):
if runner == "unittest":
result = testdir.run(sys.executable, simple_script, "-v")
result.stderr.fnmatch_lines(
[
"test_foo (__main__.T) ... ",
"FAIL: test_foo (__main__.T) [custom] (i=1)",
"AssertionError: 1 != 0",
"FAIL: test_foo (__main__.T) [custom] (i=3)",
"AssertionError: 1 != 0",
"Ran 1 test in *",
"FAILED (failures=2)",
]
)
else:
if runner == "pytest-normal":
result = testdir.runpytest(simple_script, "-v")
expected_lines = [
"*collected 1 item",
"test_simple_terminal_verbose.py::T::test_foo FAILED *100%*",
"test_simple_terminal_verbose.py::T::test_foo FAILED *100%*",
"test_simple_terminal_verbose.py::T::test_foo PASSED *100%*",
]
else:
pytest.importorskip("xdist")
result = testdir.runpytest(simple_script, "-n1", "-v")
expected_lines = [
"gw0 [1]",
"*gw0*100%* FAILED test_simple_terminal_verbose.py::T::test_foo*",
"*gw0*100%* FAILED test_simple_terminal_verbose.py::T::test_foo*",
"*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*",
]
result.stdout.fnmatch_lines(
expected_lines
+ [
"* T.test_foo [[]custom[]] (i=1) *",
"E * AssertionError: 1 != 0",
"* T.test_foo [[]custom[]] (i=3) *",
"E * AssertionError: 1 != 0",
"* 2 failed, 1 passed in *",
]
)
@pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"])
def test_skip(self, testdir, runner):
p = testdir.makepyfile(
"""
from unittest import TestCase, main
class T(TestCase):
def test_foo(self):
for i in range(5):
with self.subTest(msg="custom", i=i):
if i % 2 == 0:
self.skipTest('even number')
if __name__ == '__main__':
main()
"""
)
if runner == "unittest":
result = testdir.runpython(p)
result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"])
else:
pytest.xfail("Not producing the expected results (#5)")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["collected 1 item", "* 3 skipped, 1 passed in *"]
)
class TestCapture:
def create_file(self, testdir):
testdir.makepyfile(
"""
import sys
def test(subtests):
print()
print('start test')
with subtests.test(i='A'):
print("hello stdout A")
print("hello stderr A", file=sys.stderr)
assert 0
with subtests.test(i='B'):
print("hello stdout B")
print("hello stderr B", file=sys.stderr)
assert 0
print('end test')
assert 0
"""
)
def test_capturing(self, testdir):
self.create_file(testdir)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*__ test (i='A') __*",
"*Captured stdout call*",
"hello stdout A",
"*Captured stderr call*",
"hello stderr A",
"*__ test (i='B') __*",
"*Captured stdout call*",
"hello stdout B",
"*Captured stderr call*",
"hello stderr B",
"*__ test __*",
"*Captured stdout call*",
"start test",
"end test",
]
)
def test_no_capture(self, testdir):
self.create_file(testdir)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines(
[
"start test",
"hello stdout A",
"Fhello stdout B",
"Fend test",
"*__ test (i='A') __*",
"*__ test (i='B') __*",
"*__ test __*",
]
)
result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"])
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_capture_with_fixture(self, testdir, fixture):
testdir.makepyfile(
r"""
import sys
def test(subtests, {fixture}):
print('start test')
with subtests.test(i='A'):
print("hello stdout A")
print("hello stderr A", file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'start test\nhello stdout A\n'
assert err == 'hello stderr A\n'
""".format(
fixture=fixture
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["*1 passed*",]
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0084_auto_20151125_2051'),
]
operations = [
migrations.AddField(
model_name='activity',
name='account_link',
field=models.CharField(default='/user/db0/#1', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='activity',
name='account_name',
field=models.CharField(default='Deby JP', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='activity',
name='account_picture',
field=models.CharField(default='http://www.gravatar.com/avatar/8e731f8661ed8a4549e5445ccffe388a?s=100&d=http%3A%2F%2Fschoolido.lu%2Favatar%2Ftwitter%2Fdbschoolidol', max_length=500),
preserve_default=False,
),
]
|
import glob
import os
import random
import re
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from .data_generator import DataGenerator
from .losses import focal_tversky, tversky
from .resunet import resunet
data = pd.read_csv("./lgg-mri-segmentation/kaggle_3m/data.csv")
data_map = []
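# Walk each case directory and collect patient ids and image paths as a flat, alternating list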
for sub_dir_path in glob.glob("./lgg-mri-segmentation/kaggle_3m/" + "*"):
try:
dir_name = sub_dir_path.split("/")[-1]
for filename in os.listdir(sub_dir_path):
image_path = sub_dir_path + "/" + filename
data_map.extend([dir_name, image_path])
except Exception as e:
print(e)
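# Even indices of data_map hold patient ids, odd indices hold the matching file paths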
df = pd.DataFrame({"patient_id": data_map[::2], "path": data_map[1::2]})
df_imgs = df[~df["path"].str.contains("mask")]
df_masks = df[df["path"].str.contains("mask")]
# Data sorting
imgs = sorted(
df_imgs["path"].values, key=lambda x: int(re.search(r"\d+", x[-7:]).group())
)
masks = sorted(
df_masks["path"].values, key=lambda x: int(re.search(r"\d+", x[-12:]).group())
)
# Sorting check
idx = random.randint(0, len(imgs) - 1)
print("Path to the Image:", imgs[idx], "\nPath to the Mask:", masks[idx])
# Final dataframe
brain_df = pd.DataFrame(
{"patient_id": df_imgs.patient_id.values, "image_path": imgs, "mask_path": masks}
)
def pos_neg_diagnosis(mask_path):
value = np.max(cv2.imread(mask_path))
if value > 0:
return 1
else:
return 0
brain_df["mask"] = brain_df["mask_path"].apply(lambda x: pos_neg_diagnosis(x))
brain_df_train = brain_df.drop(columns=["patient_id"])
# Convert the data in mask column to string format, to use categorical mode in flow_from_dataframe
brain_df_train["mask"] = brain_df_train["mask"].apply(lambda x: str(x))
train, test = train_test_split(brain_df_train, test_size=0.15)
brain_df_mask = brain_df[brain_df["mask"] == 1]
print(brain_df_mask.shape)
# creating test, train and val sets
X_train, X_val = train_test_split(brain_df_mask, test_size=0.15)
X_test, X_val = train_test_split(X_val, test_size=0.5)
print(
"Train size is {}, valid size is {} & test size is {}".format(
len(X_train), len(X_val), len(X_test)
)
)
train_ids = list(X_train.image_path)
train_mask = list(X_train.mask_path)
val_ids = list(X_val.image_path)
val_mask = list(X_val.mask_path)
train_data = DataGenerator(train_ids, train_mask)
val_data = DataGenerator(val_ids, val_mask)
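# Build the ResUNet segmentation model for 256x256 RGB inputs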
seg_model = resunet(input_shape=(256, 256, 3))
adam = tf.keras.optimizers.Adam(learning_rate=0.05, epsilon=0.1)
earlystopping = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=20)
# save the model with the lowest validation loss seen so far
checkpointer = ModelCheckpoint(
filepath="./models/ResUNet-segModel-weights.hdf5",
verbose=1,
save_best_only=True,
)
reduce_lr = ReduceLROnPlateau(
monitor="val_loss", mode="min", verbose=1, patience=10, min_delta=0.0001, factor=0.2
)
seg_model.compile(optimizer=adam, loss=focal_tversky, metrics=[tversky])
history = seg_model.fit(
train_data,
epochs=60,
validation_data=val_data,
callbacks=[checkpointer, earlystopping, reduce_lr],
)
|
# -*- coding: utf-8 -*-
import mysql.connector
def store_mysql(filepath):
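    """Create the ShowMeTheCode.code table if it does not exist, then load activation codes from filepath."""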
    conn = mysql.connector.connect(user='root', password='1207', database='ShowMeTheCode')
cursor = conn.cursor()
    # Check whether the table already exists
cursor.execute('show tables in ShowMeTheCode;')
tables = cursor.fetchall()
findtables = False
for table in tables:
if 'code' in table:
findtables = True
if not findtables:
cursor.execute('''
CREATE TABLE `ShowMeTheCode`.`code` (
`id` INT NOT NULL AUTO_INCREMENT,
`code` VARCHAR(10) NOT NULL,
PRIMARY KEY (`id`));
''')
    with open(filepath, 'rb') as f:
        for line in f:
            code = line.strip()
            cursor.execute("insert into ShowMeTheCode.code (code) values(%s);", [code])
conn.commit()
cursor.close()
conn.close()
if __name__ == '__main__':
store_mysql('Activation_code.txt')
|
from api import app
from api import database
from flask import jsonify
from flask import request
@app.route('/companies', methods=['POST'])
def create_company():
company = request.get_json()
db = database()
db.update(company)
return jsonify(company), 201 |
import unittest
from Familytree.individual import Person
from Familytree.relations import Relations
from Familytree import variables
from unittest.mock import patch, Mock
from tests import mock_member_creation
class MyTestCase(unittest.TestCase):
def setUp(self):
self.member = Person(1, "Hubby", "Male")
def test_get_paternal_grandmother(self):
member = Person(7, "alpha", "Male")
father = Person(8, "beta", "Male")
grandmother = Person(9, "charlie", "Female")
# error cases
self.assertEqual(Relations().get_paternal_grandmother(member), None)
member.father = father
self.assertEqual(Relations().get_paternal_grandmother(member), None)
member.father.mother = grandmother
self.assertEqual(Relations().get_paternal_grandmother(member), grandmother)
def test_get_maternal_grandmother(self):
member = Person(7, "alpha", "Male")
mother = Person(8, "beta", "Female")
grandmother = Person(9, "charlie", "Female")
# error cases
        self.assertEqual(Relations().get_maternal_grandmother(member), None)
        member.mother = mother
        self.assertEqual(Relations().get_maternal_grandmother(member), None)
member.mother.mother = grandmother
self.assertEqual(Relations().get_maternal_grandmother(member), grandmother)
def test_get_spouse_mother(self):
member = Person(7, "alpha", "Male")
spouse = Person(8, "alpha_spouse", "Female")
spouse_mother = Person(9, "alpha_spousemother", "Female")
# error cases
self.assertEqual(Relations().get_spouse_mother(member), None)
member.spouse = spouse
self.assertEqual(Relations().get_spouse_mother(member), None)
member.spouse.mother = spouse_mother
self.assertEqual(Relations().get_spouse_mother(member), spouse_mother)
@patch('Familytree.relations.Relations.get_paternal_grandmother', side_effect=[
None,
mock_member_creation(),
mock_member_creation(children=[Person(3, "Father", "Male")]),
mock_member_creation(children=[
Person(3, "Father", "Male"),
Person(4, "Uncle", "Male")
]),
mock_member_creation(children=[
Person(3, "Father", "Male"),
Person(4, "Uncle", "Male"),
Person(5, "Aunt", "Female")
])
])
def test_get_paternal_aunt(self, mock_get_paternal_grandmother):
# check if get_paternal_grandmother has been replaced by a mock
self.assertEqual(
isinstance(Relations.get_paternal_grandmother, Mock),
True
)
self.assertEqual(Relations().get_paternal_aunt(self.member), [])
self.assertEqual(Relations().get_paternal_aunt(self.member), [])
self.assertEqual(Relations().get_paternal_aunt(self.member), [])
self.assertEqual(Relations().get_paternal_aunt(self.member), [])
paternal_aunts = Relations().get_paternal_aunt(self.member)
self.assertEqual(len(paternal_aunts), 1)
self.assertEqual(paternal_aunts[0].name, "Aunt")
self.assertTrue(paternal_aunts[0].gender in variables.Gender[variables.female])
# to check that the mock_get_paternal_grandmother was called instead
# of self.member.get_paternal_grandmother
mock_get_paternal_grandmother.assert_called_with(self.member)
@patch('Familytree.relations.Relations.get_paternal_grandmother', side_effect=[
None,
mock_member_creation(),
mock_member_creation(children=[Person(3, "Father", "Male")]),
mock_member_creation(children=[
Person(3, "Aunt", "Female"),
Person(4, "Father", "Male")
]),
mock_member_creation(children=[
Person(3, "Father", "Male"),
Person(4, "Uncle", "Male"),
Person(5, "Aunt", "Female")
])
])
def test_get_paternal_uncle(self, mock_get_paternal_grandmother):
self.member.father = Person(3, "Father", "Male")
# check if get_paternal_grandmother has been replaced by a mock
self.assertEqual(isinstance(
Relations().get_paternal_grandmother, Mock),
True
)
self.assertEqual(Relations().get_paternal_uncle(self.member), [])
self.assertEqual(Relations().get_paternal_uncle(self.member), [])
self.assertEqual(Relations().get_paternal_uncle(self.member), [])
self.assertEqual(Relations().get_paternal_uncle(self.member), [])
paternal_uncle = Relations().get_paternal_uncle(self.member)
self.assertEqual(len(paternal_uncle), 1)
self.assertEqual(paternal_uncle[0].name, "Uncle")
self.assertTrue(paternal_uncle[0].gender in variables.Gender[variables.male])
# to check that the mock_get_paternal_grandmother was called instead
# of self.member.get_paternal_grandmother
mock_get_paternal_grandmother.assert_called_with(self.member)
@patch('Familytree.relations.Relations.get_maternal_grandmother', side_effect=[
None,
mock_member_creation(),
mock_member_creation(children=[Person(3, "Mother", "Female")]),
mock_member_creation(children=[
Person(3, "Mother", "Female"),
Person(4, "Uncle", "Male")
]),
mock_member_creation(children=[
Person(3, "Mother", "Female"),
Person(4, "Uncle", "Male"),
Person(5, "Aunt", "Female")
])
])
def test_get_maternal_aunt(self, mock_get_maternal_grandmother):
self.member.mother = Person(3, "Mother", "Female")
        # check if get_maternal_grandmother has been replaced by a mock
self.assertEqual(isinstance(
Relations.get_maternal_grandmother, Mock),
True
)
self.assertEqual(Relations().get_maternal_aunt(self.member), [])
self.assertEqual(Relations().get_maternal_aunt(self.member), [])
self.assertEqual(Relations().get_maternal_aunt(self.member), [])
self.assertEqual(Relations().get_maternal_aunt(self.member), [])
maternal_aunts = Relations().get_maternal_aunt(self.member)
self.assertEqual(len(maternal_aunts), 1)
self.assertEqual(maternal_aunts[0].name, "Aunt")
self.assertTrue(maternal_aunts[0].gender in variables.Gender[variables.female])
        # to check that the mock_get_maternal_grandmother was called instead of
        # self.member.get_maternal_grandmother
mock_get_maternal_grandmother.assert_called_with(self.member)
@patch('Familytree.relations.Relations.get_maternal_grandmother', side_effect=[
None,
mock_member_creation(),
mock_member_creation(children=[Person(3, "Mother", "Female")]),
mock_member_creation(children=[
Person(3, "Aunt", "Female"),
Person(4, "Mother", "Female")
]),
mock_member_creation(children=[
Person(3, "Mother", "Female"),
Person(4, "Uncle", "Male"),
Person(5, "Aunt", "Female")
])
])
def test_get_maternal_uncle(self, mock_get_maternal_grandmother):
        # check if get_maternal_grandmother has been replaced by a mock
self.assertEqual(
isinstance(Relations.get_maternal_grandmother, Mock),
True
)
self.assertEqual(Relations().get_maternal_uncle(self.member), [])
self.assertEqual(Relations().get_maternal_uncle(self.member), [])
self.assertEqual(Relations().get_maternal_uncle(self.member), [])
self.assertEqual(Relations().get_maternal_uncle(self.member), [])
maternal_uncle = Relations().get_maternal_uncle(self.member)
self.assertEqual(len(maternal_uncle), 1)
self.assertEqual(maternal_uncle[0].name, "Uncle")
self.assertTrue(maternal_uncle[0].gender in variables.Gender[variables.male])
        # to check that the mock_get_maternal_grandmother was called
        # instead of self.member.get_maternal_grandmother
mock_get_maternal_grandmother.assert_called_with(self.member)
@patch('Familytree.relations.Relations.get_siblings', return_value=[
mock_member_creation(
name="Alpha", gender='Male', spouse=mock_member_creation(
name="Beta", gender='Female', spouse=mock_member_creation(
name="Alpha")
)
),
mock_member_creation(
name="Charlie", gender='Female', spouse=mock_member_creation(
name="Delta", gender='Male', spouse=mock_member_creation(
name="Charlie")
)
),
mock_member_creation(
name="Charlie", gender='Female'
)
])
def test_get_sibling_spouses(self, mock_get_siblings):
self.assertEqual(len(Relations().get_sibling_spouses(self.member)), 2)
def test_get_spouse_siblings(self):
self.assertEqual(len(Relations().get_spouse_siblings(self.member)), 0)
self.member.spouse = mock_member_creation(name="Wife")
# spouse_siblings = Relations().get_siblings(self.member.spouse)
spouse_siblings = [
mock_member_creation(name="Alpha"),
mock_member_creation(name="Beta")
]
self.assertEqual(len(spouse_siblings), 2)
@patch('Familytree.relations.Relations.get_spouse_siblings', return_value=[
mock_member_creation(name="Alpha", gender='Male'),
mock_member_creation(name="Beta", gender='Female')
])
@patch('Familytree.relations.Relations.get_sibling_spouses', return_value=[
mock_member_creation(name="Charlie", gender='Male'),
mock_member_creation(name="Delta", gender='Female')
])
def test_get_brother_in_law(self, mock_get_sibling_spouses,
mock_get_spouse_siblings):
self.assertEqual(len(Relations().get_brother_in_law(self.member)), 2)
@patch('Familytree.relations.Relations.get_spouse_siblings', return_value=[
mock_member_creation(name="Alpha", gender='Male'),
mock_member_creation(name="Beta", gender='Female')
])
@patch('Familytree.relations.Relations.get_sibling_spouses', return_value=[
mock_member_creation(name="Charlie", gender='Male'),
mock_member_creation(name="Delta", gender='Female')
])
def test_get_sister_in_law(self, mock_get_sibling_spouses,
mock_get_spouse_siblings):
self.assertEqual(len(Relations().get_sister_in_law(self.member)), 2)
def test_get_son(self):
member = Person(5, "Dummy", "Male")
son = Person(7, "Son", "Male")
daughter = Person(7, "Daughter", "Female")
self.assertEqual(Relations().get_son(member), [])
member.children.append(daughter)
self.assertEqual(Relations().get_son(member), [])
member.children.append(son)
sons = Relations().get_son(member)
self.assertEqual(len(sons), 1)
self.assertEqual(sons[0].name, "Son")
self.assertTrue(sons[0].gender in variables.Gender[variables.male])
def test_get_daughter(self):
member = Person(5, "Dummy", "Male")
son = Person(7, "Son", "Male")
daughter = Person(7, "Daughter", "Female")
self.assertEqual(Relations().get_daughter(member), [])
member.children.append(son)
self.assertEqual(Relations().get_daughter(member), [])
member.children.append(daughter)
daughters = Relations().get_daughter(member)
self.assertEqual(len(daughters), 1)
self.assertEqual(daughters[0].name, "Daughter")
self.assertTrue(daughters[0].gender in variables.Gender[variables.female])
def test_get_siblings(self):
member = Person(5, "Dummy", "Male")
mother = Person(9, "Mother", "Female")
son = Person(7, "Son", "Male")
daughter = Person(7, "Daughter", "Female")
self.assertEqual(Relations().get_siblings(member), [])
member.mother = mother
self.assertEqual(Relations().get_siblings(member), [])
mother.children.extend([member, son, daughter])
member.mother = mother
siblings = Relations().get_siblings(member)
self.assertEqual(len(siblings), 2)
@patch('Familytree.relations.Relations.get_siblings')
@patch('Familytree.relations.Relations.get_daughter')
@patch('Familytree.relations.Relations.get_son')
@patch('Familytree.relations.Relations.get_sister_in_law')
@patch('Familytree.relations.Relations.get_brother_in_law')
@patch('Familytree.relations.Relations.get_maternal_uncle')
@patch('Familytree.relations.Relations.get_maternal_aunt')
@patch('Familytree.relations.Relations.get_paternal_uncle')
@patch('Familytree.relations.Relations.get_paternal_aunt')
def test_get_relationship(self, mock_get_paternal_aunt,
mock_get_paternal_uncle,
mock_get_maternal_aunt, mock_get_maternal_uncle,
mock_get_brother_in_law, mock_get_sister_in_law,
mock_get_son, mock_get_daughter,
mock_get_siblings):
self.assertEqual(Relations().get_relation(self.member, 'invalid_relation'), None)
Relations().get_relation(self.member, 'Paternal-Aunt')
mock_get_paternal_aunt.assert_called_with(self.member)
Relations().get_relation(self.member, 'Paternal-Uncle')
mock_get_paternal_uncle.assert_called_with(self.member)
Relations().get_relation(self.member, 'Maternal-Aunt')
mock_get_maternal_aunt.assert_called_with(self.member)
Relations().get_relation(self.member, 'Maternal-Uncle')
mock_get_maternal_uncle.assert_called_with(self.member)
Relations().get_relation(self.member, 'Brother-In-Law')
mock_get_brother_in_law.assert_called_with(self.member)
Relations().get_relation(self.member, 'Sister-In-Law')
mock_get_sister_in_law.assert_called_with(self.member)
Relations().get_relation(self.member, 'Son')
mock_get_son.assert_called_with(self.member)
Relations().get_relation(self.member, 'Daughter')
mock_get_daughter.assert_called_with(self.member)
Relations().get_relation(self.member, 'Siblings')
mock_get_siblings.assert_called_with(self.member)
if __name__ == '__main__':
unittest.main()
|
name = 'stemplar'
__version__ = '0.0.1' |
from __future__ import unicode_literals
from redis.exceptions import DataError, ResponseError
from limpyd import fields
from limpyd.exceptions import UniquenessError
from ..base import LimpydBaseTest
from ..model import TestRedisModel
class HMTest(LimpydBaseTest):
"""
Test behavior of hmset and hmget
"""
class HMTestModel(TestRedisModel):
foo = fields.InstanceHashField()
bar = fields.InstanceHashField(indexable=True)
baz = fields.InstanceHashField(unique=True)
def test_hmset_should_set_values(self):
obj = self.HMTestModel()
obj.hmset(foo='FOO', bar='BAR', baz='BAZ')
self.assertEqual(obj.foo.hget(), 'FOO')
self.assertEqual(obj.bar.hget(), 'BAR')
self.assertEqual(obj.baz.hget(), 'BAZ')
def test_hmget_should_get_values(self):
obj = self.HMTestModel()
obj.hmset(foo='FOO', bar='BAR', baz='BAZ')
data = obj.hmget('foo', 'bar', 'baz')
self.assertEqual(data, ['FOO', 'BAR', 'BAZ'])
obj.hmset(baz='QUX')
data = obj.hmget('bar', 'baz')
self.assertEqual(data, ['BAR', 'QUX'])
def test_hdel_should_delete_values(self):
obj = self.HMTestModel()
obj.hmset(foo='FOO', bar='BAR', baz='BAZ')
count = obj.hdel('bar', 'baz')
self.assertEqual(count, 2)
self.assertEqual(obj.hmget('foo', 'bar', 'baz'), ['FOO', None, None])
obj.hmset(baz='QUX')
self.assertEqual(obj.hmget('foo', 'bar', 'baz'), ['FOO', None, 'QUX'])
count = obj.hdel('bar', 'baz')
self.assertEqual(count, 1) # 'bar' was already deleted
self.assertEqual(obj.hmget('foo', 'bar', 'baz'), ['FOO', None, None])
def test_empty_hmset_call_should_fail(self):
obj = self.HMTestModel(foo='FOO', bar='BAR', baz='BAZ')
with self.assertRaises(DataError):
obj.hmset()
# nothing modified...
data = obj.hmget('foo', 'bar', 'baz')
self.assertEqual(data, ['FOO', 'BAR', 'BAZ'])
def test_empty_hmget_call_should_fail(self):
obj = self.HMTestModel()
obj.hmset(foo='FOO', bar='BAR', baz='BAZ')
with self.assertRaises(ResponseError):
obj.hmget()
def test_empty_hdel_call_should_fail(self):
obj = self.HMTestModel()
obj.hmset(foo='FOO', bar='BAR', baz='BAZ')
with self.assertRaises(ResponseError):
obj.hdel()
def test_hmset_should_index_values(self):
obj = self.HMTestModel()
obj.hmset(foo='FOO', bar='BAR', baz='BAZ')
self.assertSetEqual(set(self.HMTestModel.collection(bar='BAR')), {obj._pk})
self.assertSetEqual(set(self.HMTestModel.collection(baz='BAZ')), {obj._pk})
def test_hdel_should_deindex_values(self):
obj = self.HMTestModel()
obj.hmset(foo='FOO', bar='BAR', baz='BAZ')
obj.hdel('foo', 'bar')
self.assertSetEqual(set(self.HMTestModel.collection(bar='BAR')), set([]))
self.assertSetEqual(set(self.HMTestModel.collection(baz='BAZ')), {obj._pk})
def test_hmset_should_not_index_if_an_error_occurs(self):
self.HMTestModel(baz="BAZ")
test_obj = self.HMTestModel()
with self.assertRaises(UniquenessError):
            # The order of parameters below is important. All are passed via
            # the kwargs dict, but the ordering is consistent, and here we must
            # be sure that "bar" is handled first in hmset, so the call is
            # arranged to always use the wanted order.
            # So bar will be indexed, then baz will raise because we already
            # set the "BAZ" value for this field.
            test_obj.hmset(baz='BAZ', foo='FOO', bar='BAR')
        # We must not have an entry in the bar index with the BAR value because
        # hmset must have raised an exception and reverted the index already set.
self.assertSetEqual(set(self.HMTestModel.collection(bar='BAR')), set())
def test_hgetall_should_return_all_set_fields(self):
obj = self.HMTestModel(foo='FOO', bar='BAR')
data = obj.hgetall()
self.assertEqual(data, dict(foo='FOO', bar='BAR'))
obj.foo.hdel()
data = obj.hgetall()
self.assertEqual(data, dict(bar='BAR',))
def test_hkeys_should_return_all_set_fieldnames(self):
obj = self.HMTestModel(foo='FOO', bar='BAR')
data = obj.hkeys()
self.assertSetEqual(set(data), {'foo', 'bar'})
obj.foo.hdel()
data = obj.hkeys()
self.assertSetEqual(set(data), {'bar'})
def test_hvals_should_return_all_set_values(self):
obj = self.HMTestModel(foo='FOO', bar='BAR')
data = obj.hvals()
self.assertSetEqual(set(data), {'FOO', 'BAR'})
obj.foo.hdel()
data = obj.hvals()
self.assertSetEqual(set(data), {'BAR'})
def test_hlen_should_return_number_of_set_fields(self):
obj = self.HMTestModel(foo='FOO', bar='BAR')
self.assertEqual(obj.hlen(), 2)
obj.foo.hdel()
self.assertEqual(obj.hlen(), 1)
def test_delete_is_an_alias_for_hdel(self):
obj = self.HMTestModel(foo='FOO', bar='BAR')
obj.foo.delete()
self.assertEqual(obj.hgetall(), {'bar': 'BAR'})
|
'''
Write a program that asks which shift you study in. Ask the user to type M (matutino/morning), V (vespertino/afternoon) or N (noturno/night). Print the message "Bom Dia!", "Boa Tarde!", "Boa Noite!" or "Valor Invalido!", as appropriate.
'''
print('Em que turno voce estuda:')
turno = str(input('''
M-matutino
V-vespertino
N-noturno
''')).upper()
if turno == 'M':
print('Bom dia!')
elif turno == 'V':
print('Boa tarde!')
elif turno == 'N':
print('Boa noite!')
else:
    print('Valor Invalido!')
|
import socket
import struct
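# Convert a dotted-quad IPv4 address to a 32-bit integer and back.
# "!L" packs/unpacks an unsigned 32-bit value in network (big-endian) byte order.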
def quad2int(ip):
return struct.unpack("!L", socket.inet_aton(ip))[0]
def int2quad(ip):
return socket.inet_ntoa(struct.pack('!L', ip))
|
import logging
from bson.errors import InvalidId
from django.http import JsonResponse, HttpResponseNotFound, HttpResponseBadRequest, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from api.models import db_model
from bson import ObjectId
from api.models.auth import RequireLogin
from api.models import rule_model
import json
from datetime import datetime
logger = logging.getLogger(__name__)
@csrf_exempt
def rest(request, *pargs, **kwargs):
"""
Calls python function corresponding with HTTP METHOD name.
Calls with incomplete arguments will return HTTP 400
"""
if request.method == 'GET':
rest_function = get
elif request.method == 'POST':
rest_function = post
elif request.method == 'PUT':
rest_function = put
elif request.method == 'DELETE':
rest_function = delete
else:
return JsonResponse({"error": "HTTP METHOD UNKNOWN"})
try:
return rest_function(request, *pargs, **kwargs)
except TypeError:
return HttpResponseBadRequest("argument mismatch")
@RequireLogin()
def get(request, rule_id=None):
"""
Retrieve rule based on rule_id.
"""
if rule_id is None:
return get_all_rules()
dbc = db_model.connect()
try:
rule = dbc.rule.find_one({"_id": ObjectId(rule_id)})
except InvalidId:
return HttpResponseNotFound()
if rule is None:
return HttpResponseNotFound()
else:
rule['id'] = str(rule.pop('_id'))
return JsonResponse(rule)
def get_all_rules():
"""
Retrieve all rules.
"""
dbc = db_model.connect()
rules = [r for r in dbc.rule.find()]
for rule in rules:
rule['id'] = str(rule.pop('_id'))
return JsonResponse({"rules": rules})
@RequireLogin()
def post(request):
"""
Create new rule.
"""
try:
new = json.loads(request.body)
except ValueError:
return HttpResponseBadRequest("invalid JSON")
rule = rule_model.validate(new)
if rule is None:
return HttpResponseBadRequest("invalid rule")
else:
dbc = db_model.connect()
rule['createdAt'] = datetime.isoformat(datetime.now())
rule['updatedAt'] = datetime.isoformat(datetime.now())
rule_id = str(dbc.rule.save(rule))
r = JsonResponse({"id": rule_id})
r['location'] = "/api/rule/%s" % rule_id
logger.info("rule '%s' created by '%s'" % (rule_id, request.user['username']))
return r
@RequireLogin()
def put(request, rule_id):
"""
Update existing rule based on rule_id.
"""
try:
in_json = json.loads(request.body)
except ValueError:
return HttpResponseBadRequest("invalid JSON")
dbc = db_model.connect()
try:
rule = dbc.rule.find_one({"_id": ObjectId(rule_id)})
except InvalidId:
return HttpResponseNotFound()
if rule is None:
return HttpResponseNotFound()
else:
in_json['createdAt'] = rule['createdAt']
rule = rule_model.validate(in_json)
if rule is None:
return HttpResponseBadRequest("invalid rule")
else:
rule['_id'] = ObjectId(rule_id)
rule['updatedAt'] = datetime.isoformat(datetime.now())
dbc.rule.save(rule)
r = JsonResponse({"id": rule_id})
r['location'] = "/api/rule/%s" % rule_id
logger.info("rule '%s' updated by '%s'" % (rule_id, request.user['username']))
return r
@RequireLogin()
def delete(request, rule_id):
"""
Delete rule based on rule_id.
"""
dbc = db_model.connect()
try:
rule = dbc.rule.find_one({'_id': ObjectId(rule_id)})
except InvalidId:
return HttpResponseNotFound()
if rule is None:
return HttpResponseNotFound()
else:
dbc.rule.remove({"_id": ObjectId(rule_id)})
logger.info("rule '%s' deleted by '%s'" % (rule_id, request.user['username']))
return HttpResponse() |
from .__about__ import __version__
from .riak_repl import RiakReplCheck
__all__ = ['__version__', 'RiakReplCheck']
|
"""
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
OS: Ubuntu 18.04
IDE: VS Code 1.36
Tooling: python == 3.7.3
"""
"""
Approach:
    Similar to the Fibonacci sequence; dynamic programming.
Result:
    Runtime: 48 ms, faster than 68.02% of Python3 submissions
    Memory: 13.8 MB, better than 5.22% of Python3 submissions
"""
class Solution:
def climbStairs(self, n):
        # Fibonacci-style solution
        # with only one step there is exactly one way to climb
if n == 1:
return 1
        dp = [1, 2]  # base cases: one step has 1 way (1), two steps have 2 ways (1+1 or 2)
        # for higher steps, the count is the sum of the two preceding steps
for i in range(2,n):
dp.append(dp[i-1]+dp[i-2])
return dp[n-1]
if __name__ == "__main__":
n = 3
answer = Solution().climbStairs(n)
print(answer) |
# The MIT License (MIT)
#
# Copyright (c) 2019 Scott Shawcroft for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_ili9341`
====================================================
Display driver for ILI9341
* Author(s): Scott Shawcroft
Implementation Notes
--------------------
**Hardware:**
* 2.2" 18-bit color TFT LCD display with microSD card breakout
<https://www.adafruit.com/product/1480>
* 2.4" TFT LCD with Touchscreen Breakout Board w/MicroSD Socket
<https://www.adafruit.com/product/2478>
* 2.8" TFT LCD with Touchscreen Breakout Board w/MicroSD Socket
<https://www.adafruit.com/product/1770>
* 3.2" TFT LCD with Touchscreen Breakout Board w/MicroSD Socket
<https://www.adafruit.com/product/1743>
* TFT FeatherWing - 2.4" 320x240 Touchscreen For All Feathers
<https://www.adafruit.com/product/3315>
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import displayio
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_ILI9341.git"
_INIT_SEQUENCE = (
b"\x01\x80\x80" # Software reset then delay 0x80 (128ms)
b"\xEF\x03\x03\x80\x02"
b"\xCF\x03\x00\xC1\x30"
b"\xED\x04\x64\x03\x12\x81"
b"\xE8\x03\x85\x00\x78"
b"\xCB\x05\x39\x2C\x00\x34\x02"
b"\xF7\x01\x20"
b"\xEA\x02\x00\x00"
b"\xc0\x01\x23" # Power control VRH[5:0]
b"\xc1\x01\x10" # Power control SAP[2:0];BT[3:0]
b"\xc5\x02\x3e\x28" # VCM control
b"\xc7\x01\x86" # VCM control2
b"\x36\x01\x38" # Memory Access Control
b"\x37\x01\x00" # Vertical scroll zero
b"\x3a\x01\x55" # COLMOD: Pixel Format Set
b"\xb1\x02\x00\x18" # Frame Rate Control (In Normal Mode/Full Colors)
b"\xb6\x03\x08\x82\x27" # Display Function Control
b"\xF2\x01\x00" # 3Gamma Function Disable
b"\x26\x01\x01" # Gamma curve selected
b"\xe0\x0f\x0F\x31\x2B\x0C\x0E\x08\x4E\xF1\x37\x07\x10\x03\x0E\x09\x00" # Set Gamma
b"\xe1\x0f\x00\x0E\x14\x03\x11\x07\x31\xC1\x48\x08\x0F\x0C\x31\x36\x0F" # Set Gamma
b"\x11\x80\x78" # Exit Sleep then delay 0x78 (120ms)
b"\x29\x80\x78" # Display on then delay 0x78 (120ms)
)
# pylint: disable=too-few-public-methods
class ILI9341(displayio.Display):
"""ILI9341 display driver"""
def __init__(self, bus, hardware_rotation=0, **kwargs):
init_seq = _INIT_SEQUENCE
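        # 0x36 is the MADCTL (Memory Access Control) register; each value below
        # selects the row/column/exchange bits for 0/90/180/270 degree rotation.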
        init_seq += (
            b"\x36\x01\x58", b"\x36\x01\x38", b"\x36\x01\xD8", b"\x36\x01\xF8"
        )[hardware_rotation % 4]
        if hardware_rotation % 2:  # if odd
kwargs['width'], kwargs['height'] = kwargs['height'], kwargs['width']
super().__init__(bus, init_seq, **kwargs)
|
import pandas as pd
import pytest
from pyspark.sql import DataFrame
from sparkypandy import Columny, DataFramy
from sparkypandy.testing import assert_series_equal
from tests.conftest import ALL_COLUMN_NAMES, NUMERIC_COLUMN_NAMES
class TestColumny:
@pytest.mark.parametrize("col_name", ALL_COLUMN_NAMES) # type: ignore
def test_under_name(self, df_sparky: DataFramy, col_name: str) -> None:
assert df_sparky[col_name]._name == col_name
@pytest.mark.parametrize("col_name", ALL_COLUMN_NAMES) # type: ignore
def test_from_spark(self, df_spark: DataFrame, df_sparky: DataFramy, col_name: str) -> None:
# TODO: Not sure how to test this class method. Also not sure how to
# test equality for a Column instance. For now, I am simply
# asserting that both instance dicts are equal.
# Maybe compare result from `.to_pandas()` here too?
col_spark = df_spark[col_name]
actual = Columny.from_spark(col=col_spark, df_sparky=df_sparky).__dict__
expected = Columny(jc=col_spark._jc, df_sparky=df_sparky).__dict__
assert actual == expected
@pytest.mark.parametrize("col_name", ALL_COLUMN_NAMES) # type: ignore
def test_to_pandas(self, df_sparky: DataFramy, df_pandas: pd.DataFrame, col_name: str) -> None:
col_sparky = df_sparky[col_name]
col_pandas = df_pandas[col_name]
assert_series_equal(col_sparky.to_pandas(), col_pandas)
# ==================================================================
# test aggregations
# ==================================================================
# test: mean()
@pytest.mark.parametrize("col_name", NUMERIC_COLUMN_NAMES) # type: ignore
def test_mean(self, df_sparky: DataFramy, df_pandas: pd.DataFrame, col_name: str) -> None:
mean_df_sparky = df_sparky[col_name].mean().to_pandas()
mean_pandas = df_pandas[col_name].mean()
assert mean_df_sparky.iloc[0] == mean_pandas
pd.testing.assert_series_equal(mean_df_sparky, pd.Series(mean_pandas, name=f"mean({col_name})"))
@pytest.mark.parametrize("col_name", NUMERIC_COLUMN_NAMES) # type: ignore
def test_mean_with_alias(self, df_sparky: DataFramy, df_pandas: pd.DataFrame, col_name: str) -> None:
target_alias_str = "target_alias_str"
mean_df_sparky = df_sparky[col_name].mean(alias=target_alias_str).to_pandas()
mean_pandas = df_pandas[col_name].mean()
assert mean_df_sparky.iloc[0] == mean_pandas
pd.testing.assert_series_equal(mean_df_sparky, pd.Series(mean_pandas, name=target_alias_str))
@pytest.mark.parametrize("col_name", NUMERIC_COLUMN_NAMES) # type: ignore
def test_mean_with_collect(self, df_sparky: DataFramy, df_pandas: pd.DataFrame, col_name: str) -> None:
mean_sparky = df_sparky[col_name].mean(collect=True)
mean_pandas = df_pandas[col_name].mean()
assert mean_sparky == mean_pandas
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import astor
from paddle.utils import gast
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper
from paddle.fluid.dygraph.dygraph_to_static import utils
class BasicApiTransformer(gast.NodeTransformer):
"""
Class to transform basic API from dygraph to static graph.
"""
def __init__(self, wrapper_root):
assert isinstance(
wrapper_root, AstNodeWrapper
), "Input non-AstNodeWrapper node for the initialization of BasicApiTransformer."
self.wrapper_root = wrapper_root
self.root = wrapper_root.node
self.class_node_dict = {}
def transform(self):
to_tensor_transformer = ToTensorTransformer(self.root)
to_tensor_transformer.transform()
self.visit(self.root)
return self.wrapper_root
def visit_Assign(self, node):
if self._update_class_node_dict(node):
return None
for child_node in gast.walk(node.value):
if isinstance(child_node, gast.Call):
self._visit_Call(child_node)
return node
def visit_Expr(self, node):
value_node = node.value
for child_node in gast.walk(value_node):
if isinstance(child_node, gast.Call):
                # TODO(liym27):
                # Consider a dygraph api which modifies the input or has an output.
if utils.is_dygraph_api(child_node):
return
else:
self._visit_Call(child_node)
return node
def _visit_Call(self, node):
assert isinstance(node, gast.Call)
func_name = astor.to_source(gast.gast_to_ast(node.func))
if self._is_dygraph_forward(func_name):
class_node = self._get_class_node(func_name)
static_node = utils.to_static_ast(node, class_node)
return static_node
else:
return node
def _is_dygraph_forward(self, func_id):
return func_id in self.class_node_dict
def _get_class_node(self, func_id):
return self.class_node_dict[func_id]
def _update_class_node_dict(self, node):
assert isinstance(node, gast.Assign)
node_value = node.value
if isinstance(node_value, gast.Call):
if is_to_variable(node_value):
return False
if utils.is_dygraph_api(node_value):
dygraph_api = node_value.func.attr
if not utils.dygraph_class_to_static_api.get(dygraph_api):
return False
utils.update_args_of_func(node_value, node_value, "__init__")
target_str = astor.to_source(gast.gast_to_ast(node.targets[0]))
self.class_node_dict[target_str] = node_value
return True
# TODO: node.value is not dygraph class
return False
class ToTensorTransformer(gast.NodeTransformer):
"""
Class to transform paddle.to_tensor and paddle.to_variable to paddle.assign
"""
def __init__(self, node):
assert isinstance(
node, gast.AST
), "Input non-gast.AST node for the initialization of ToTensorTransformer."
self.root = node
def transform(self):
self.visit(self.root)
return self.root
def visit_Call(self, node):
assert isinstance(node, gast.Call)
if is_to_variable(node):
node = to_assign_node(node)
self.generic_visit(node)
return node
def is_to_variable(node):
assert isinstance(node, gast.Call)
api_name = utils.ast_to_source_code(node.func).strip()
if utils.is_dygraph_api(node):
return api_name.endswith("to_variable")
if utils.is_paddle_api(node):
return api_name.endswith("to_tensor")
return False
def to_assign_node(node):
# Transform dygraph api `fluid.dygraph.to_variable` alias `paddle.to_tensor` to static api `paddle.assign`.
# NOTE:
# 1. Api `to_variable` supports data type {float16, float32, float64, int16, int32, int64, uint8, uint16},
# but api `assign` only supports {float32, float64, int32, int64, bool};
# 2. If the input of api `assign` is numpy.ndarray, its size cannot be greater than 1024 * 1024.
assert isinstance(node, gast.Call)
assign_api = gast.parse('paddle.assign').body[0].value
node.func = assign_api
if node.args:
node.args = [node.args[0]]
node.keywords = []
else:
for idx, kw in enumerate(node.keywords):
if kw.arg == 'value' or kw.arg == 'data':
node.keywords[idx].arg = 'x'
node.keywords = [node.keywords[idx]]
node.args = []
break
return node
|
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import datetime
import os
import pkg_resources
import sphinx.cmd.quickstart as sphinx_quickstart
from sphinx.util.console import bold
from hieroglyph import version
def ask_user(d):
"""Wrap sphinx.cmd.quickstart.ask_user, and add additional questions."""
# Print welcome message
msg = bold('Welcome to the Hieroglyph %s quickstart utility.') % (
version(),
)
print(msg)
msg = """
This will ask questions for creating a Hieroglyph project, and then ask
some basic Sphinx questions.
"""
print(msg)
# set a few defaults that we don't usually care about for Hieroglyph
d.update({
'version': datetime.date.today().strftime('%Y.%m.%d'),
'release': datetime.date.today().strftime('%Y.%m.%d'),
'make_mode': True,
})
if 'project' not in d:
print('''
The presentation title will be included on the title slide.''')
d['project'] = sphinx_quickstart.do_prompt('Presentation title')
if 'author' not in d:
d['author'] = sphinx_quickstart.do_prompt('Author name(s)')
# slide_theme
theme_entrypoints = pkg_resources.iter_entry_points('hieroglyph.theme')
themes = [
t.load()
for t in theme_entrypoints
]
msg = """
Available themes:
"""
for theme in themes:
msg += '\n'.join([
bold(theme['name']),
theme['desc'],
'', '',
])
msg += """Which theme would you like to use?"""
print(msg)
d['slide_theme'] = sphinx_quickstart.do_prompt(
'Slide Theme',
themes[0]['name'],
sphinx_quickstart.choice(
*[t['name'] for t in themes]
),
)
# Ask original questions
print("")
sphinx_quickstart.ask_user(d)
def quickstart(path=None):
templatedir = os.path.join(os.path.dirname(__file__), 'templates')
d = sphinx_quickstart.DEFAULTS.copy()
d['extensions'] = ['hieroglyph']
d.update(dict(("ext_" + ext, False) for ext in sphinx_quickstart.EXTENSIONS))
if path:
d['path'] = path
ask_user(d)
sphinx_quickstart.generate(d, templatedir=templatedir)
def main():
parser = ArgumentParser(
description='Run hieroglyph -q to start a presentation',
)
parser.add_argument('-v', '--version', action='store_true',
help="Print current version of hieroglyph")
parser.add_argument('-q', '--quickstart', action='store_true',
help="Start a hieroglyph project")
parser.add_argument('path', nargs='?', default=None,
help='Output directory for new presentation.')
args = vars(parser.parse_args())
if (args['version']):
print(version())
elif (args['quickstart']):
quickstart(args['path'])
|
#!/usr/bin/env python3
import os
from setuptools import setup
about = {} # type: ignore
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "url_hostname", "__version__.py")) as f:
exec(f.read(), about)
setup(
name=about["__title__"],
description=about["__description__"],
version=about["__version__"],
author=about["__author__"],
author_email=about["__author_email__"],
url=about["__url__"],
packages=["url_hostname"],
include_package_data=True,
license=about["__license__"],
zip_safe=False,
keywords="",
)
|
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import time
class Worker(QRunnable):
'''
Worker thread
'''
@pyqtSlot()
def run(self):
'''
Your code goes in this function
'''
print("Thread start")
time.sleep(5)
print("Thread complete")
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.counter = 0
layout = QVBoxLayout()
self.l = QLabel("Start")
b = QPushButton("DANGER!")
b.pressed.connect(self.oh_no)
layout.addWidget(self.l)
layout.addWidget(b)
w = QWidget()
w.setLayout(layout)
self.setCentralWidget(w)
self.show()
self.timer = QTimer()
self.timer.setInterval(1000)
self.timer.timeout.connect(self.recurring_timer)
self.timer.start()
self.threadpool = QThreadPool()
print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
def oh_no(self):
worker = Worker()
self.threadpool.start(worker)
def recurring_timer(self):
self.counter += 1
self.l.setText("Counter: %d" % self.counter)
app = QApplication([])
window = MainWindow()
app.exec_() |
from codewars.decode_morse_adv import decode_bits, decode_morse
def test_simple_messages() -> None:
assert decode_bits('111') == '.'
assert decode_bits('111000111') == '..'
assert decode_bits('111000111000111') == '...'
assert decode_bits('10001') == '. .'
assert decode_bits('111000000000111') == '. .'
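    # The time unit is inferred per message: below, one unit is 5 bits wide,
    # so '11111' decodes to a dot and '00000' is the intra-character gap.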
assert decode_bits('111110000011111') == '..'
def test_leading_and_trailing_zeros() -> None:
assert decode_bits('01110') == '.'
assert decode_bits('000000011100000') == '.'
def test_example_input() -> None:
bits = (
'110011001100110000001100000011111100110011111100111111'
'000000000000001100111111001111110011111100000011001100'
'1111110000001111110011001100000011'
)
assert decode_bits(bits) == '.... . -.-- .--- ..- -.. .'
assert decode_morse('.... . -.-- .--- ..- -.. .') == 'HEY JUDE'
|
from django.contrib import admin
from profiles_api import models
# Register the models so they can be managed through the admin site.
admin.site.register(models.UserProfile)
admin.site.register(models.ProfileFeedItem)
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import (
UserRegisterAPIView, UserLoginAPIView, UserLogoutAPIView,
PasswordChangeAPIView, DepartamentViewSet, UserViewSet)
app_name = 'accounts'
router = DefaultRouter()
router.register(r'departaments', DepartamentViewSet)
router.register(r'workers', UserViewSet)
urlpatterns = [
path('workers/change-password/', PasswordChangeAPIView.as_view(), name='password_change'),
path('', include(router.urls)),
path('login/', UserLoginAPIView.as_view(), name='login'),
path('logout/', UserLogoutAPIView.as_view(), name='logout'),
path('register/', UserRegisterAPIView.as_view(), name='register'),
]
|
import twitter
import mysql.connector
import json
import os
def collect_data_about(name, twitter_api, cursor):
MAX_ENTRY_PER_PERSON = 1000
print(f'Quering twitter API for: {name}')
search_results = twitter_api.search.tweets(q=name, count=100, lang='en')
processStatusCounter = 0
while True:
try:
statuses = search_results['statuses']
except KeyError:
return
for status in statuses:
tweet = status['text']
username = status['user']['screen_name']
created_at = status['created_at']
retweet_count = status['retweet_count']
country = status['user']['location']
tweet_id = status['id_str']
query = '''
INSERT INTO tweets (character_id, `name`, tweet, tweet_id, username, created_at, retweet_count, country)
VALUES (
(SELECT id from characters WHERE %s = hero OR %s = alterego OR %s = actor),
%s, %s, %s, %s, %s, %s, %s
)
'''
cursor.execute(query, (name, name, name, name, tweet, tweet_id, username, created_at, retweet_count, country))
processStatusCounter += 1
if processStatusCounter >= MAX_ENTRY_PER_PERSON:
print(f'Received tweets for {name}: {processStatusCounter}')
return
try:
next_results = search_results['search_metadata']['next_results']
except KeyError:
print(f'Received tweets for {name}: {processStatusCounter}')
return
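        # next_results looks like "?max_id=...&q=..."; strip the leading "?" and
        # parse the query string into keyword arguments for the next page request.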
kwargs = dict([ kv.split('=') for kv in next_results[1:].split("&") ])
search_results = twitter_api.search.tweets(**kwargs)
def init_db(cursor):
characters = [
{
'hero': 'Iron Man',
'alterego': 'Tony Stark',
'actor': 'Robert Downey Jr.'
},
{
'hero': 'Hulk',
'alterego': 'Bruce Banner',
'actor': 'Mark Ruffalo'
},
{
'hero': 'Spider-Man',
'alterego': 'Peter Parker',
'actor': 'Tom Holland'
},
{
'hero': 'Thor',
            'actor': 'Chris Hemsworth'
},
{
'hero': 'Loki',
'actor': 'Tom Hiddleston'
},
{
'hero': 'Captain America',
'alterego': 'Steve Rogers',
'actor': 'Chris Evans'
},
{
'hero': 'Deadpool',
'alterego': 'Wade Wilson',
'actor': 'Ryan Reynolds'
},
{
'hero': 'Winter Soldier',
'alterego': 'Bucky Barnes',
'actor': 'Sebastian Stan'
},
{
'hero': 'Doctor Strange',
'alterego': 'Doctor Strange',
'actor': 'Benedict Cumberbatch'
},
{
'hero': 'Black Panther',
'alterego': "T'Challa",
'actor': 'Chadwick Boseman'
},
{
'hero': 'Hawkeye',
'alterego': 'Clint Barton',
'actor': 'Jeremy Renner'
},
{
'hero': 'Captain Marvel',
'alterego': 'Carol Danvers',
'actor': 'Brie Larson'
},
{
'hero': 'Vision',
'alterego': 'Jarvis',
'actor': 'Paul Bettany'
},
{
'hero': 'Ant-Man',
'alterego': 'Scott Lang',
'actor': 'Paul Rudd'
},
{
'hero': 'Thanos',
'actor': 'Josh Brolin'
},
{
'hero': 'Star Lord',
'alterego': 'Peter Quill',
'actor': 'Chris Pratt'
},
{
'hero': 'Groot',
'actor': 'Vin Diesel'
},
{
'hero': 'Rocket Raccoon',
'actor': 'Bradley Cooper'
},
{
'hero': 'Gamora',
'actor': 'Zoe Saldana'
},
{
'hero': 'Nebula',
'actor': 'Karen Gillan'
}
]
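    # Rebuild the schema from scratch; drop the child table (tweets) first so its
    # foreign key constraint does not block dropping characters.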
cursor.execute('DROP TABLE IF EXISTS tweets')
cursor.execute('DROP TABLE IF EXISTS characters')
cursor.execute('''
CREATE TABLE characters (
id INT AUTO_INCREMENT PRIMARY KEY,
hero VARCHAR(255),
alterego VARCHAR(255),
actor VARCHAR(255)
)
''')
for character in characters:
query = 'INSERT INTO characters (hero, alterego, actor) VALUES (%s, %s, %s)'
cursor.execute(query, (character.get('hero', ''), character.get('alterego', ''), character.get('actor', '')))
cursor.execute('''
CREATE TABLE tweets (
id INT AUTO_INCREMENT PRIMARY KEY,
character_id INT NOT NULL,
name VARCHAR(255),
tweet VARCHAR(255),
tweet_id VARCHAR(255),
username VARCHAR(255),
created_at VARCHAR(255),
retweet_count INT,
country VARCHAR(255) CHARACTER SET utf8mb4,
FOREIGN KEY (character_id) REFERENCES characters(id)
)
''')
def fill_db(twitter_api, cursor):
cursor.execute('SELECT hero, alterego, actor FROM characters')
rows = cursor.fetchall()
for row in rows:
for col in row:
if col != '':
collect_data_about(col, twitter_api, cursor)
def get_stats(cursor):
stats = {}
cursor.execute('''
SELECT characters.hero AS hero, COUNT(DISTINCT tweets.tweet_id)
AS tweet_count FROM characters
INNER JOIN tweets ON characters.id = tweets.character_id
GROUP BY hero
''')
rows = cursor.fetchall()
for row in rows:
stats[row[0]] = row[1]
return stats
if __name__ == "__main__":
marvelDB = mysql.connector.connect(
host = "localhost",
user = "marietamarvel",
passwd = "4516",
database = "marvelDB"
)
cursor = marvelDB.cursor()
CONSUMER_KEY = 'aaaaaaaaaa'
CONSUMER_SECRET = 'bbbbbbbbbbbbb'
OAUTH_TOKEN = 'ccccccccccccccc'
OAUTH_TOKEN_SECRET = 'dddddddddddddd'
twitter_auth = twitter.oauth.OAuth(
OAUTH_TOKEN,
OAUTH_TOKEN_SECRET,
CONSUMER_KEY,
CONSUMER_SECRET
)
twitter_api = twitter.Twitter(auth = twitter_auth)
init_db(cursor)
marvelDB.commit()
fill_db(twitter_api, cursor)
marvelDB.commit()
stats = get_stats(cursor)
    print('Total unique tweets for each hero (and their alter ego or actor) in the last week:')
for name in stats:
print(f'{name}: {stats[name]}')
|
from argparse import ArgumentParser
class ModelTrainingArgs(ArgumentParser):
def __init__(self):
super().__init__()
self.add_argument("-d", "--dataset", help="Dataset name")
self.add_argument("-c", "--config", help="Config file path")
self.add_argument(
"--debug", action="store_true", help="Enable debugging information."
)
self.add_argument(
"-p", "--pretrained-model", help="Path to the pretrained model."
)
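# Hypothetical usage sketch (not part of the original module):
#     args = ModelTrainingArgs().parse_args()
#     if args.debug:
#         print(args.dataset, args.config, args.pretrained_model)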
|
from sklearn import tree
from dataset import TransDataset
import numpy as np
trans_dataset = TransDataset()
X, y = trans_dataset.get_x_y()
print(np.array(X).shape)
print(np.array(y).shape)
regressor = tree.DecisionTreeRegressor()
regressor = regressor.fit(X, y)
test_dataset = TransDataset("test")
test_x, test_y = test_dataset.get_x_y()
total_count, count, total_loss = 0, 0, 0
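# Evaluate on the test split: count predictions that miss the target by more than 50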
for x, y in zip(test_x, test_y):
loss = abs(regressor.predict([x]) - y[0])
total_loss += loss
total_count += 1
if loss > 50:
count += 1
print("total count", total_count)
print("cross the boundary count", count)
print("cross rate", count/total_count)
print("average lss", total_loss/total_count)
|
URL = "https://www.omdbapi.com/"
|