<reponame>galv/server
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import numpy as np
import tensorrt as trt
import test_util as tu
# Referenced below for TYPE_STRING tensors; numpy string tensors use the object dtype
np_dtype_string = np.dtype(object)
def np_to_model_dtype(np_dtype):
if np_dtype == bool:
return "TYPE_BOOL"
elif np_dtype == np.int8:
return "TYPE_INT8"
elif np_dtype == np.int16:
return "TYPE_INT16"
elif np_dtype == np.int32:
return "TYPE_INT32"
elif np_dtype == np.int64:
return "TYPE_INT64"
elif np_dtype == np.uint8:
return "TYPE_UINT8"
elif np_dtype == np.uint16:
return "TYPE_UINT16"
elif np_dtype == np.float16:
return "TYPE_FP16"
elif np_dtype == np.float32:
return "TYPE_FP32"
elif np_dtype == np.float64:
return "TYPE_FP64"
elif np_dtype == np_dtype_string:
return "TYPE_STRING"
return None
def np_to_trt_dtype(np_dtype):
if np_dtype == bool:
return trt.bool
elif np_dtype == np.int8:
return trt.int8
elif np_dtype == np.int32:
return trt.int32
elif np_dtype == np.float16:
return trt.float16
elif np_dtype == np.float32:
return trt.float32
return None
def trt_format_to_string(trt_format):
    # FIXME: uncomment the following formats once the TensorRT version used is up-to-date
# if trt_format == trt.TensorFormat.CDHW32:
# return "CDHW32"
# if trt_format == trt.TensorFormat.DHWC8:
# return "DHWC8"
# if trt_format == trt.TensorFormat.HWC:
# return "HWC"
if trt_format == trt.TensorFormat.CHW2:
return "CHW2"
if trt_format == trt.TensorFormat.CHW32:
return "CHW32"
if trt_format == trt.TensorFormat.LINEAR:
return "LINEAR"
if trt_format == trt.TensorFormat.CHW4:
return "CHW4"
if trt_format == trt.TensorFormat.HWC8:
return "HWC8"
if trt_format == trt.TensorFormat.CHW16:
return "CHW16"
return "INVALID"
def create_plan_dynamic_modelfile(models_dir,
max_batch,
model_version,
input_shape,
output0_shape,
output1_shape,
input_dtype,
output0_dtype,
output1_dtype,
input_memory_format,
output_memory_format,
min_dim=1,
max_dim=64):
trt_input_dtype = np_to_trt_dtype(input_dtype)
trt_output0_dtype = np_to_trt_dtype(output0_dtype)
trt_output1_dtype = np_to_trt_dtype(output1_dtype)
trt_input_memory_format = input_memory_format
trt_output_memory_format = output_memory_format
# Create the model
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network(
1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
if max_batch == 0:
input_with_batchsize = [i for i in input_shape]
else:
input_with_batchsize = [-1] + [i for i in input_shape]
in0 = network.add_input("INPUT0", trt_input_dtype, input_with_batchsize)
in1 = network.add_input("INPUT1", trt_input_dtype, input_with_batchsize)
add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM)
sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB)
out0 = network.add_identity(add.get_output(0))
out1 = network.add_identity(sub.get_output(0))
out0.get_output(0).name = "OUTPUT0"
out1.get_output(0).name = "OUTPUT1"
network.mark_output(out0.get_output(0))
network.mark_output(out1.get_output(0))
out0.get_output(0).dtype = trt_output0_dtype
out1.get_output(0).dtype = trt_output1_dtype
in0.allowed_formats = 1 << int(trt_input_memory_format)
in1.allowed_formats = 1 << int(trt_input_memory_format)
out0.get_output(0).allowed_formats = 1 << int(trt_output_memory_format)
out1.get_output(0).allowed_formats = 1 << int(trt_output_memory_format)
if (trt_input_dtype == trt.int8):
in0.dynamic_range = (-128.0, 127.0)
in1.dynamic_range = (-128.0, 127.0)
if (trt_output0_dtype == trt.int8):
out0.get_output(0).dynamic_range = (-128.0, 127.0)
if (trt_output1_dtype == trt.int8):
out1.get_output(0).dynamic_range = (-128.0, 127.0)
min_shape = []
opt_shape = []
max_shape = []
if max_batch != 0:
min_shape = min_shape + [1]
opt_shape = opt_shape + [max(1, max_batch)]
max_shape = max_shape + [max(1, max_batch)]
for i in input_shape:
if i == -1:
min_shape = min_shape + [min_dim]
opt_shape = opt_shape + [int((max_dim + min_dim) / 2)]
max_shape = max_shape + [max_dim]
else:
min_shape = min_shape + [i]
opt_shape = opt_shape + [i]
max_shape = max_shape + [i]
profile = builder.create_optimization_profile()
profile.set_shape("INPUT0", min_shape, opt_shape, max_shape)
profile.set_shape("INPUT1", min_shape, opt_shape, max_shape)
flags = 1 << int(trt.BuilderFlag.STRICT_TYPES)
datatype_set = set([trt_input_dtype, trt_output0_dtype, trt_output1_dtype])
for dt in datatype_set:
if (dt == trt.int8):
flags |= 1 << int(trt.BuilderFlag.INT8)
elif (dt == trt.float16):
flags |= 1 << int(trt.BuilderFlag.FP16)
config = builder.create_builder_config()
config.flags = flags
config.add_optimization_profile(profile)
config.max_workspace_size = 1 << 20
try:
engine_bytes = builder.build_serialized_network(network, config)
except AttributeError:
engine = builder.build_engine(network, config)
engine_bytes = engine.serialize()
del engine
# Use a different model name for different kinds of models
base_name = "plan_nobatch" if max_batch == 0 else "plan"
base_name += "_" + trt_format_to_string(
input_memory_format) + "_" + trt_format_to_string(output_memory_format)
model_name = tu.get_model_name(base_name, input_dtype, output0_dtype,
output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
with open(model_version_dir + "/model.plan", "wb") as f:
f.write(engine_bytes)
del builder
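# Hedged usage sketch (not part of the original script; the path, shapes and
# formats below are illustrative): build a dynamic-shape FP32 engine whose
# inputs use LINEAR format and whose outputs use CHW32.
#
#   create_plan_dynamic_modelfile("/tmp/models", 8, 1, (-1,), (-1,), (-1,),
#                                 np.float32, np.float32, np.float32,
#                                 trt.TensorFormat.LINEAR,
#                                 trt.TensorFormat.CHW32)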
def create_plan_fixed_modelfile(models_dir, max_batch, model_version,
input_shape, output0_shape, output1_shape,
input_dtype, output0_dtype, output1_dtype,
input_memory_format, output_memory_format):
trt_input_dtype = np_to_trt_dtype(input_dtype)
trt_output0_dtype = np_to_trt_dtype(output0_dtype)
trt_output1_dtype = np_to_trt_dtype(output1_dtype)
trt_input_memory_format = input_memory_format
trt_output_memory_format = output_memory_format
# Create the model
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network()
in0 = network.add_input("INPUT0", trt_input_dtype, input_shape)
in1 = network.add_input("INPUT1", trt_input_dtype, input_shape)
add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM)
sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB)
out0 = network.add_identity(add.get_output(0))
out1 = network.add_identity(sub.get_output(0))
out0.get_output(0).name = "OUTPUT0"
out1.get_output(0).name = "OUTPUT1"
network.mark_output(out0.get_output(0))
network.mark_output(out1.get_output(0))
out0.get_output(0).dtype = trt_output0_dtype
out1.get_output(0).dtype = trt_output1_dtype
in0.allowed_formats = 1 << int(trt_input_memory_format)
in1.allowed_formats = 1 << int(trt_input_memory_format)
out0.get_output(0).allowed_formats = 1 << int(trt_output_memory_format)
out1.get_output(0).allowed_formats = 1 << int(trt_output_memory_format)
if (trt_input_dtype == trt.int8):
in0.dynamic_range = (-128.0, 127.0)
in1.dynamic_range = (-128.0, 127.0)
if (trt_output0_dtype == trt.int8):
out0.get_output(0).dynamic_range = (-128.0, 127.0)
if (trt_output1_dtype == trt.int8):
out1.get_output(0).dynamic_range = (-128.0, 127.0)
flags = 1 << int(trt.BuilderFlag.STRICT_TYPES)
datatype_set = set([trt_input_dtype, trt_output0_dtype, trt_output1_dtype])
for dt in datatype_set:
if (dt == trt.int8):
flags |= 1 << int(trt.BuilderFlag.INT8)
elif (dt == trt.float16):
flags |= 1 << int(trt.BuilderFlag.FP16)
config = builder.create_builder_config()
config.flags = flags
config.max_workspace_size = 1 << 20
builder.max_batch_size = max(1, max_batch)
try:
engine_bytes = builder.build_serialized_network(network, config)
except AttributeError:
engine = builder.build_engine(network, config)
engine_bytes = engine.serialize()
del engine
base_name = "plan_nobatch" if max_batch == 0 else "plan"
base_name += "_" + trt_format_to_string(
input_memory_format) + "_" + trt_format_to_string(output_memory_format)
model_name = tu.get_model_name(base_name, input_dtype, output0_dtype,
output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
with open(model_version_dir + "/model.plan", "wb") as f:
f.write(engine_bytes)
del builder
def create_plan_modelconfig(models_dir, max_batch, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype, input_memory_format,
output_memory_format, version_policy):
if not tu.validate_for_trt_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
return
# Unpack version policy
version_policy_str = "{ latest { num_versions: 1 }}"
if version_policy is not None:
type, val = version_policy
if type == 'latest':
version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(
val)
elif type == 'specific':
version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
else:
version_policy_str = "{ all { }}"
# Use a different model name for different kinds of models
base_name = "plan_nobatch" if max_batch == 0 else "plan"
base_name += "_" + trt_format_to_string(
input_memory_format) + "_" + trt_format_to_string(output_memory_format)
model_name = tu.get_model_name(base_name, input_dtype, output0_dtype,
output1_dtype)
config_dir = models_dir + "/" + model_name
if -1 in input_shape:
profile_index = 0
config = '''
name: "{}"
platform: "tensorrt_plan"
max_batch_size: {}
version_policy: {}
input [
{{
name: "INPUT0"
data_type: {}
dims: [ {} ]
}},
{{
name: "INPUT1"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT0"
data_type: {}
dims: [ {} ]
}},
{{
name: "OUTPUT1"
data_type: {}
dims: [ {} ]
}}
]
instance_group [
{{
profile:"{}"
}}
]
'''.format(model_name, max_batch, version_policy_str,
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(output0_dtype),
tu.shape_to_dims_str(output0_shape),
np_to_model_dtype(output1_dtype),
tu.shape_to_dims_str(output1_shape), profile_index)
else:
config = '''
name: "{}"
platform: "tensorrt_plan"
max_batch_size: {}
version_policy: {}
input [
{{
name: "INPUT0"
data_type: {}
dims: [ {} ]
}},
{{
name: "INPUT1"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT0"
data_type: {}
dims: [ {} ]
}},
{{
name: "OUTPUT1"
data_type: {}
dims: [ {} ]
}}
]
'''.format(model_name, max_batch, version_policy_str,
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(output0_dtype),
tu.shape_to_dims_str(output0_shape),
np_to_model_dtype(output1_dtype),
tu.shape_to_dims_str(output1_shape))
try:
os.makedirs(config_dir)
except OSError as ex:
pass # ignore existing dir
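# Hedged usage sketch (illustrative values, not from the original script): the
# config generator is meant to be paired with one of the engine builders so
# that config.pbtxt and model.plan end up under the same model directory, e.g.
#
#   create_plan_modelconfig("/tmp/models", 8, 1, (16,), (16,), (16,),
#                           np.float32, np.float32, np.float32,
#                           trt.TensorFormat.LINEAR, trt.TensorFormat.LINEAR,
#                           None)
#   create_plan_fixed_modelfile("/tmp/models", 8, 1, (16,), (16,), (16,),
#                               np.float32, np.float32, np.float32,
#                               trt.TensorFormat.LINEAR,
#                               trt.TensorFormat.LINEAR)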
<reponame>Severian-desu-ga/SDG<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#Author: <NAME>
import random, os, glob, time, sys
import pygame
pygame.init()
pygame.font.init()
pygame.mixer.init()
HEIGHT = 650
WIDTH = 1200
radius = 10
running = False
paused = False
rt_change = False
x = 10
y = 10
maze = []
streamers_x = []
streamers_y = []
skill = 8
runs = 0
current = 0
volume = 0.15
cursor = 0
blank = False
silent = False
schoice = 0
cx1 = 35
cx2 = 20
cy1 = 110
cy2 = 95
FONT1 = pygame.font.SysFont("comicsansms", 35)
FONT2 = pygame.font.SysFont("arial", 25)
FONT3 = pygame.font.SysFont("timesnewroman", 30)
try:
folder = os.path.abspath(os.path.join(__file__, "../"))
except:
folder = os.path.join(os.path.dirname(sys.argv[0]), "")
gsfold = os.path.join(folder, 'Game_Stats/')
win = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption("Roll Along!")
def get_platform():
platforms = {
'linux' : 'Linux',
'linux1' : 'Linux',
'linux2' : 'Linux',
'darwin' : 'OS X',
'win32' : 'Windows',
        'msys' : 'Windows/MSYS2',
'cygwin' : 'Windows/Cygwin',
'os2' : 'OS/2',
'os2emx' : 'OS/2 EMX',
'riscos' : 'RiscOS',
'atheos' : 'AtheOS',
'freebsd7' : 'FreeBSD 7',
'freebsd8' : 'FreeBSD 8',
'freebsdN' : 'FreeBSD N',
'openbsd6' : 'OpenBSD 6'
}
if sys.platform not in platforms:
return sys.platform
return platforms[sys.platform]
platform = get_platform()
if (platform=='Linux') or (platform=='OS X'):
rm_music = True
import webbrowser, subprocess
from subprocess import PIPE
try:
import psutil
except:
print("Make sure python3-dev is installed with 'sudo apt-get install python3-dev'. If it has been, then psutil will now install automatically")
try:
subprocess.call([sys.executable, "-m", "pip3", "install", "--user", 'psutil'])
import psutil
except:
subprocess.call([sys.executable, "-m", "pip", "install", "--user", 'psutil'])
import psutil
if (platform=='Linux'):
try:
import gi
gi.require_version('Wnck','3.0')
from gi.repository import Wnck
except:
print("Please install software using 'sudo apt-get install python3-gi gir1.2-wnck-3.0' or other install method for these packages")
else:
print("Manually minimize default music player if opened with f-key")
else:
rm_music = False
def alt_player(p,wait,songs,loaded,load):
tic = time.time()
load_new = p.io_counters()
if load_new[1]>=(load+2):
r = 1
else:
r = 0
load = load_new[1]
loaded.append(r)
if (len(loaded)>10):
loaded.pop(0)
if (sum(loaded)<1):
songs = random.sample(songs,len(songs))
try:
p.terminate()
except:
pass
try:
fl_type = subprocess.run(["xdg-mime", "query", "filetype", str(songs[0])], stdout=PIPE, stderr=PIPE, universal_newlines=True)
fl_type = (fl_type.stdout).split('\n')
default = subprocess.run(["xdg-mime", "query", "default", str(fl_type[0])], stdout=PIPE, stderr=PIPE, universal_newlines=True)
default = (default.stdout).split(".")
except:
pass
for i in range(len(songs)):
webbrowser.open(str(songs[i]))
finder = []
loaded.append(1)
loaded.append(1)
time.sleep(1.0)
for proc in psutil.process_iter(['pid', 'name']):
finder.append(proc.info)
open_ls = finder[-(len(songs)-1):]
try:
screen=Wnck.Screen.get_default()
screen.force_update()
windows=screen.get_windows()
for w in windows:
if str(w.get_name()).endswith(('.mp3', 'wav', 'ogg')):
pid = w.get_pid()
w.minimize()
p = psutil.Process(pid)
except:
for i in range(len(open_ls)):
d = open_ls[i]
p = psutil.Process(d['pid'])
usage = p.cpu_percent(interval=0.01)
try:
if (usage>=1) & (str(d['name']).lower()==str(default[-2]).lower()):
break
elif (str(d['name']).lower()==str(default[-2]).lower()):
print("If " + str(default[-2]) + " is open in another instance, player may not have been linked to game")
break
elif usage>=2:
print("Your default player's PID may not have been found")
print("This is the PID found: ", d['name'])
break
except:
if usage>=2:
print("Issue identifying default player for audio file type")
break
toc = time.time()
rest = toc-tic
wait_new = int(wait - rest*2000)
if wait_new<0:
wait_new = 0
return p,wait_new,loaded,load,songs
def speed_test():
if not os.path.isdir(gsfold):
os.makedirs(gsfold)
if os.path.isfile(gsfold + 'Speed.txt'):
speed = open(gsfold + "Speed.txt","r")
else:
speed = open(gsfold + "Speed.txt","w+")
tic = time.time()
test1 = []
for i in range(5000000):
test1.append('x')
toc = time.time()
        gauge = toc-tic
        speed.write(str(gauge))
latency = speed.read()
try:
wait = int(-10*float(latency)+35)
except:
        latency = gauge
wait = int(-10*float(latency)+35)
return wait
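# Worked example of the calibration above (illustrative latency values): a
# measured latency of 1.2 s gives wait = int(-10 * 1.2 + 35) = 23, while a
# slower 2.5 s run gives wait = 10, so slower machines get a shorter
# artificial delay per frame.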
wait = speed_test()
wait_new = wait
def background():
global bground
bground = []
bfold = os.path.join(folder, 'Background/')
if not os.path.isdir(bfold):
os.makedirs(bfold)
extensions = [ "jpg", "jpeg", "png", "bmp", "gif" ]
try:
for extension in extensions:
for backg in glob.glob(bfold + "*." + extension):
bground.append(backg)
back = random.randint(0,len(bground)-1)
bg = pygame.image.load(str(bground[back]))
bg = pygame.transform.scale(bg, (WIDTH, HEIGHT))
return bg
except:
pass
try:
bg = background()
if bg == None:
wait = int(wait*1.4 + 4.8)
except:
wait = int(wait*1.4 + 4.8)
def music(schoice):
global songs
if (schoice == 0) or (schoice==len(songs)):
songs = []
mfold = os.path.join(folder, 'Music/')
if not os.path.isdir(mfold):
os.makedirs(mfold)
extensions = [ "mp3", "wav", "ogg"]
for extension in extensions:
for tune in glob.glob(mfold + "*." + extension):
songs.append(tune)
songs = random.sample(songs,len(songs))
pygame.mixer.music.load(str(songs[schoice]))
pygame.mixer.music.play()
pygame.mixer.music.set_volume(volume)
try:
music(schoice)
except:
pass
def HScore():
if skill==6:
file = 'HScore_E.txt'
elif skill==8:
file = 'HScore_M.txt'
else:
file = 'HScore_D.txt'
if os.path.isfile(gsfold + file):
Hi_score = open(gsfold + file,"r")
current = Hi_score.read()
Hi_score.close()
else:
Hi_score = open(gsfold + file,"w+")
Hi_score.write(str(runs))
current = str(runs)
Hi_score.close()
return current, file
current, file = HScore()
def HFile(current, skill):
if skill==6:
file = 'HScore_E.txt'
elif skill==8:
file = 'HScore_M.txt'
else:
file = 'HScore_D.txt'
if runs>=int(current):
Hi_score = open(gsfold + file,"w+")
Hi_score.write(str(runs))
current = str(runs)
Hi_score.close()
return current
def Start_Maze():
for rows in range(random.randint(20,30)):
t = (random.randint(20,WIDTH-30)) #position of columns
        n = (random.randint(10,HEIGHT-10)) #center of column positions
v = random.randint(20,150) #size of columns
for stacks in range(25):
maze.append(t)
maze.append(random.randint(n-v,n+v))
def Draw_Maze():
for i in range(len(maze)-1):
if (i % 2) == 0:
pygame.draw.rect(win, (80,30,30), (maze[i], maze[i+1], radius, radius))
def Draw_circle(x,y):
pygame.draw.circle(win, (255,0,0), (int(x), int(y)), radius)
def move(items):
for item in items:
item[0] += item[2]
item[1] += item[3]
def removeUseless_x(items):
for item in items:
if item[1] > HEIGHT:
items.remove(item)
def removeUseless_y(items):
for item in items:
if item[0] < 25:
items.remove(item)
def Draw_streamers():
num_s = 1
xvals = set()
yvals = set()
ticker = random.randint(4,skill)
attack = random.randint(0,3)
if (ticker>=(random.randint(5,10))) & (attack>0):
while len(xvals) < num_s:
pos = random.randint(40, WIDTH-15)
xvals.add(pos)
DY = random.randint(6,11)
for val in xvals:
streamers_x.append([val,0,0,DY])
for item in streamers_x:
pygame.draw.circle(win, (50, 30, 150),(item[0], item[1]), 4)
if (ticker>=(random.randint(5,10))) & (attack==0):
while len(yvals) < num_s:
pos = random.randint(10, HEIGHT)
yvals.add(pos)
DX = random.randint(6,11)
for val in yvals:
streamers_y.append([WIDTH,val,-DX,0])
for item in streamers_y:
pygame.draw.circle(win, (50, 30, 150),(item[0], item[1]), 4)
move(streamers_x)
move(streamers_y)
removeUseless_x(streamers_x)
removeUseless_y(streamers_y)
def Lose():
for itemx in streamers_x:
s = abs(x-itemx[0])
t = abs(y-itemx[1])
if (s<=13) & (t<=13):
running = False
return running
for itemy in streamers_y:
s = abs(x-itemy[0])
t = abs(y-itemy[1])
if (s<=13) & (t<=13):
running = False
return running
else:
running = True
return running
def winning():
pygame.draw.rect(win, (0, 128, 0), (WIDTH-40 , 12, 3.5*radius, 2.5*radius),1)
nr_wins = FONT2.render(str(runs), True, (0, 128, 0))
if runs<10:
win.blit(nr_wins, (WIDTH-22 , 10))
elif runs<100:
win.blit(nr_wins, (WIDTH-30 , 10))
else:
win.blit(nr_wins, (WIDTH-40 , 10))
def redrawGameWindow():
try:
if not blank:
win.blit(bg, [0, 0])
else:
win.fill((0,0,0))
except:
win.fill((0,0,0))
Draw_circle(x,y)
Draw_Maze()
Draw_streamers()
winning()
pygame.display.flip()
run = True
while run:
if running:
for event in pygame.event.get():
pass
if event.type == pygame.QUIT:
try:
p.terminate()
except:
pass
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if (event.key == pygame.K_SPACE):
paused = not paused
while paused:
try:
try:
pygame.mixer.music.pause()
except:
p.suspend()
except:
pass
pygame.time.delay(300)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if (event.key == pygame.K_SPACE):
try:
try:
pygame.mixer.music.unpause()
except:
p.resume()
except:
pass
paused = not paused
            if ((event.key == pygame.K_w) or (event.key == pygame.K_UP)) and (y>=15):
yes = True
for i in range(len(maze)-1):
if (i % 2) == 0:
s = abs(x-maze[i])
t = abs((y-14)-maze[i+1])
if (s<=10) & (t<=10):
yes = False
break
if yes:
y = y - 14
            if ((event.key == pygame.K_s) or (event.key == pygame.K_DOWN)) and (y<=HEIGHT-15):
yes = True
for i in range(len(maze)-1):
if (i % 2) == 0:
s = abs(x-maze[i])
t = abs((y+14)-maze[i+1])
if (s<=10) & (t<=10):
yes = False
break
if yes:
y = y + 14
            if ((event.key == pygame.K_a) or (event.key == pygame.K_LEFT)) and (x>=15):
yes = True
for i in range(len(maze)-1):
if (i % 2) == 0:
s = abs((x-14)-maze[i])
t = abs(y-maze[i+1])
if (s<=10) & (t<=10):
yes = False
break
if yes:
x = x - 14
if (event.key == pygame.K_d) or (event.key == pygame.K_RIGHT):
if x>=(WIDTH-7):
x = 10
y = 10
maze = []
streamers_x = []
streamers_y = []
runs += 1
pygame.time.delay(200)
Start_Maze()
current = HFile(current, skill)
yes = True
for i in range(len(maze)-1):
if (i % 2) == 0:
s = abs((x+14)-maze[i])
t = abs(y-maze[i+1])
                        if (s<=10)
<filename>src/actuator/config_tasks.py
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Configuration tasks modeled after Ansible modules
'''
from actuator.config import _ConfigTask, ConfigException
from actuator.exec_agents.core import ExecutionException
class PingTask(_ConfigTask):
"""
Checks to see if a remote machine is alive by ssh'ing into it.
"""
pass
class ScriptTask(_ConfigTask):
"""
Transfers a script *as is* to the remote host and executes it. The script
is run in a shell environment. This task will process Var replacement
patterns out of the arguments, but does not touch the contents of the
script.
See http://docs.ansible.com/script_module.html for full details.
"""
def __init__(self, name, free_form, creates=None, removes=None, **kwargs):
"""
@param name: logical name for the task
@param free_form: A string with the path to the locally available
script followed by optional arguments to the script. This may
contain Var replacement patterns that will be processed through
the Vars for the task_role.
@keyword creates: String; the name of a file on the remote system that
the script will create. If already present, the script will not
be run. If not supplied no test for a file to be created will be
done.
@keyword removes: String; the name of a file on the remote system that
the script will remove. If it isn't there, then the script will
not be run. If not supplied then no removal test will be performed.
@keyword **kwargs: the other available keyword arguments for
L{_ConfigTask}
"""
super(ScriptTask, self).__init__(name, **kwargs)
self.free_form = None
self._free_form = free_form
self.creates = None
self._creates = creates
self.removes = None
self._removes = removes
def get_init_args(self):
__doc__ = _ConfigTask.get_init_args.__doc__
args, kwargs = super(ScriptTask, self).get_init_args()
args = args + (self._free_form,)
kwargs["creates"] = self._creates
kwargs["removes"] = self._removes
return args, kwargs
def _fix_arguments(self):
super(ScriptTask, self)._fix_arguments()
self.free_form = self._get_arg_value(self._free_form)
self.creates = self._get_arg_value(self._creates)
self.removes = self._get_arg_value(self._removes)
class CommandTask(ScriptTask):
"""
Runs a command on the remote system. Nothing is transferred to the remote
system; the command is expected to exist already.
Arguments besides the name can contain Var replacement patterns; these
will be processed through the task_role's view of its Vars in the
namespace.
    If your command needs to use shell metacharacters, use L{ShellTask}
instead.
See http://docs.ansible.com/command_module.html for full details.
"""
def __init__(self, name, free_form, chdir=None, creates=None,
executable=None, removes=None, warn=None, **kwargs):
"""
@param name: logical name for the task
@param free_form: A string containing the remote command to run, along
with any arguments the command needs
@keyword chdir: Directory path to cd to before running the command.
@keyword executable: Full path to an alternative shell to run the
            command in
@keyword warn: whether or not to warn about this specific command
@keyword creates: String; the name of a file on the remote system that
the script will create. If already present, the script will not
be run. If not supplied no test for a file to be created will be
done.
@keyword removes: String; the name of a file on the remote system that
the script will remove. If it isn't there, then the script will
not be run. If not supplied then no removal test will be performed.
@keyword **kwargs: the other available keyword arguments for
L{_ConfigTask}
"""
super(CommandTask, self).__init__(name, free_form, creates=creates,
removes=removes, **kwargs)
self.chdir = None
self._chdir = chdir
self.executable = None
self._executable = executable
self.warn = None
self._warn = warn
def get_init_args(self):
args, kwargs = super(CommandTask, self).get_init_args()
kwargs["chdir"] = self._chdir
kwargs["executable"] = self._executable
kwargs["warn"] = self._warn
return args, kwargs
def _fix_arguments(self):
super(CommandTask, self)._fix_arguments()
self.chdir = self._get_arg_value(self._chdir)
self.executable = self._get_arg_value(self._executable)
self.warn = self._get_arg_value(self._warn)
class ShellTask(CommandTask):
"""
Almost the same as the L{CommandTask}, except that the task is run within
    a shell, and so shell meta-characters (redirection, etc.) can be used.
The arguments for ShellTask are the same as those for L{CommandTask}.
See http://docs.ansible.com/shell_module.html for full details
"""
pass
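# Hedged usage sketch (illustrative; `some_role` stands for whatever task role
# the enclosing config model supplies):
#
#   dump_env = ShellTask("dump-env", "env | sort > /tmp/env.txt",
#                        task_role=some_role)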
class CopyFileTask(_ConfigTask):
"""
Copies a file from the local system to the remote system.
The file is copied without impacting its contents. If you want to modify
    a file using Var replacement patterns for the Vars in the task_role's
namespace, use L{ProcessCopyFileTask} instead.
    Copy can work on a single file or a directory hierarchy of files.
For full details see http://docs.ansible.com/copy_module.html
"""
def __init__(self, name, dest, backup=False, content=None,
directory_mode=None, follow=False, force=True, group=None,
mode=None, owner=None, selevel="s0", serole=None, setype=None,
seuser=None, src=None, validate=None,
**kwargs):
"""
@param name: logical name for the task
@param dest: The full path of where to copy the file. If src is a
directory this must be a directory as well
@keyword backup: boolean; if True, create a backup of any existing
file with the same name
@keyword content: Content of the file to copy to the remote. If this is
used instead of src, then dest must be the path to a file
@keyword directory_mode: If the copy is recursive, set the directories
to this mode, but only if the directory doesn't already exist.
@keyword follow: boolean; flag to indicate that if there are filesystem
links, they should be followed. Default no.
@keyword force: boolean; default is True. If True, replace the remote
file if it already exists. If False, do not replace.
@keyword group: name of the group that should own the file/directory,
as would be given to chown
@keyword mode: string mode that the file/directory should be. Symbolic
modes are supported.
@keyword owner: name of the user who should own the file/directory,
as will be supplied to chown
@keyword selevel: Default is 's0'. Level part of the SELinux file
context. This is the MLS/MCS attribute, sometimes known as the
range. _default feature works as for seuser.
@keyword serole: role part of SELinux file context, _default feature
works as for seuser.
@keyword setype: type part of SELinux file context, _default feature
works as for seuser.
@keyword seuser: user part of SELinux file context. Will default to
system policy, if applicable. If set to _default, it will use the
user portion of the policy if available
@keyword src: Local path to copy to the remote server; may be absolute
or relative. If the path ends in a directory, the directory will
be copied recursively. In this case, if the path ends in '/', only
the directory content will be copied recursively. If there is no
'/' on the end, then the directory and its contents are copied
recursively.
@keyword validate: The validation command to run before copying into
place. The path to the file to validate is passed in via '%s' which
must be present as in the visudo example below (or, if in a Var,
it can be represented with a replacement pattern). The command is
passed securely so shell features like expansion and pipes won't
work.
@keyword **kwargs: the other available keyword arguments for
L{_ConfigTask}
"""
super(CopyFileTask, self).__init__(name, **kwargs)
if content is None and src is None:
raise ConfigException("Either 'content' or 'src' must be provided")
self.dest = None
<reponame>medismailben/llvm-project
#!/usr/bin/env python
"""A tool for extracting a list of symbols to export
When exporting symbols from a dll or exe we either need to mark the symbols in
the source code as __declspec(dllexport) or supply a list of symbols to the
linker. This program automates the latter by inspecting the symbol tables of a
list of link inputs and deciding which of those symbols need to be exported.
We can't just export all the defined symbols, as there's a limit of 65535
exported symbols and in clang we go way over that, particularly in a debug
build. Therefore a large part of the work is pruning symbols which either can't
be imported, or which we think are things that have definitions in public header
files (i.e. template instantiations) and would get defined in the thing
importing these symbols anyway.
"""
from __future__ import print_function
import sys
import re
import os
import subprocess
import multiprocessing
import argparse
# Define functions which extract a list of symbols from a library using several
# different tools. We use subprocess.Popen and yield a symbol at a time instead
# of using subprocess.check_output and returning a list as, especially on
# Windows, waiting for the entire output to be ready can take a significant
# amount of time.
def dumpbin_get_symbols(lib):
process = subprocess.Popen(['dumpbin','/symbols',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
for line in process.stdout:
# Look for external symbols that are defined in some section
match = re.match("^.+SECT.+External\s+\|\s+(\S+).*$", line)
if match:
yield match.group(1)
process.wait()
def nm_get_symbols(lib):
process = subprocess.Popen(['nm','-P',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
for line in process.stdout:
# Look for external symbols that are defined in some section
match = re.match("^(\S+)\s+[BDGRSTVW]\s+\S+\s+\S+$", line)
if match:
yield match.group(1)
process.wait()
def readobj_get_symbols(lib):
process = subprocess.Popen(['llvm-readobj','-symbols',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
for line in process.stdout:
# When looking through the output of llvm-readobj we expect to see Name,
# Section, then StorageClass, so record Name and Section when we see
# them and decide if this is a defined external symbol when we see
# StorageClass.
match = re.search('Name: (\S+)', line)
if match:
name = match.group(1)
match = re.search('Section: (\S+)', line)
if match:
section = match.group(1)
match = re.search('StorageClass: (\S+)', line)
if match:
storageclass = match.group(1)
if section != 'IMAGE_SYM_ABSOLUTE' and \
section != 'IMAGE_SYM_UNDEFINED' and \
storageclass == 'External':
yield name
process.wait()
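# Hedged illustration (the archive name is hypothetical): because the
# extractors above are generators, symbols can be filtered as they stream out
# of the child process rather than after collecting the full list, e.g.
#
#   for sym in nm_get_symbols('libExample.a'):
#       if sym.startswith('_ZN4llvm'):
#           print(sym)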
# Define functions which determine if the target is 32-bit Windows (as that's
# where calling convention name decoration happens).
def dumpbin_is_32bit_windows(lib):
# dumpbin /headers can output a huge amount of data (>100MB in a debug
# build) so we read only up to the 'machine' line then close the output.
process = subprocess.Popen(['dumpbin','/headers',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
retval = False
for line in process.stdout:
match = re.match('.+machine \((\S+)\)', line)
if match:
retval = (match.group(1) == 'x86')
break
process.stdout.close()
process.wait()
return retval
def objdump_is_32bit_windows(lib):
output = subprocess.check_output(['objdump','-f',lib],
universal_newlines=True)
    for line in output.splitlines():
match = re.match('.+file format (\S+)', line)
if match:
return (match.group(1) == 'pe-i386')
return False
def readobj_is_32bit_windows(lib):
output = subprocess.check_output(['llvm-readobj','-file-headers',lib],
universal_newlines=True)
    for line in output.splitlines():
match = re.match('Format: (\S+)', line)
if match:
return (match.group(1) == 'COFF-i386')
return False
# MSVC mangles names to ?<identifier_mangling>@<type_mangling>. By examining the
# identifier/type mangling we can decide which symbols could possibly be
# required and which we can discard.
def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
# Keep unmangled (i.e. extern "C") names
if not '?' in symbol:
if calling_convention_decoration:
# Remove calling convention decoration from names
match = re.match('[_@]([^@]+)', symbol)
if match:
return match.group(1)
return symbol
# Function template instantiations start with ?$; keep the instantiations of
    # clang::Type::getAs, as some of them are explicit specializations that are
# defined in clang's lib/AST/Type.cpp; discard the rest as it's assumed that
# the definition is public
elif re.match('\?\?\$getAs@.+@Type@clang@@', symbol):
return symbol
elif symbol.startswith('??$'):
return None
# Deleting destructors start with ?_G or ?_E and can be discarded because
# link.exe gives you a warning telling you they can't be exported if you
# don't
elif symbol.startswith('??_G') or symbol.startswith('??_E'):
return None
# Constructors (?0) and destructors (?1) of templates (?$) are assumed to be
# defined in headers and not required to be kept
elif symbol.startswith('??0?$') or symbol.startswith('??1?$'):
return None
# An anonymous namespace is mangled as ?A(maybe hex number)@. Any symbol
# that mentions an anonymous namespace can be discarded, as the anonymous
# namespace doesn't exist outside of that translation unit.
elif re.search('\?A(0x\w+)?@', symbol):
return None
# Keep mangled llvm:: and clang:: function symbols. How we detect these is a
# bit of a mess and imprecise, but that avoids having to completely demangle
# the symbol name. The outermost namespace is at the end of the identifier
# mangling, and the identifier mangling is followed by the type mangling, so
# we look for (llvm|clang)@@ followed by something that looks like a
# function type mangling. To spot a function type we use (this is derived
# from clang/lib/AST/MicrosoftMangle.cpp):
# <function-type> ::= <function-class> <this-cvr-qualifiers>
# <calling-convention> <return-type>
# <argument-list> <throw-spec>
# <function-class> ::= [A-Z]
# <this-cvr-qualifiers> ::= [A-Z0-9_]*
# <calling-convention> ::= [A-JQ]
# <return-type> ::= .+
# <argument-list> ::= X (void)
# ::= .+@ (list of types)
# ::= .*Z (list of types, varargs)
# <throw-spec> ::= exceptions are not allowed
elif re.search('(llvm|clang)@@[A-Z][A-Z0-9_]*[A-JQ].+(X|.+@|.*Z)$', symbol):
return symbol
return None
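# Hedged illustration (made-up symbol names, not from the original tool): an
# undecorated C symbol only has its calling-convention decoration stripped,
# while a deleting destructor is discarded.
assert should_keep_microsoft_symbol('_AddSymbol@4', True) == 'AddSymbol'
assert should_keep_microsoft_symbol('??_GWidget@@UAEPAXI@Z', False) is None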
# Itanium manglings are of the form _Z<identifier_mangling><type_mangling>. We
# demangle the identifier mangling to identify symbols that can be safely
# discarded.
def should_keep_itanium_symbol(symbol, calling_convention_decoration):
# Start by removing any calling convention decoration (which we expect to
# see on all symbols, even mangled C++ symbols)
if calling_convention_decoration and symbol.startswith('_'):
symbol = symbol[1:]
# Keep unmangled names
if not symbol.startswith('_') and not symbol.startswith('.'):
return symbol
# Discard manglings that aren't nested names
match = re.match('_Z(T[VTIS])?(N.+)', symbol)
if not match:
return None
# Demangle the name. If the name is too complex then we don't need to keep
    # it, but if the demangling fails then keep the symbol just in case.
try:
names, _ = parse_itanium_nested_name(match.group(2))
except TooComplexName:
return None
if not names:
return symbol
# Constructors and destructors of templates classes are assumed to be
# defined in headers and not required to be kept
if re.match('[CD][123]', names[-1][0]) and names[-2][1]:
return None
# Keep the instantiations of clang::Type::getAs, as some of them are
    # explicit specializations that are defined in clang's lib/AST/Type.cpp;
# discard any other function template instantiations as it's assumed that
# the definition is public
elif symbol.startswith('_ZNK5clang4Type5getAs'):
return symbol
elif names[-1][1]:
return None
# Keep llvm:: and clang:: names
elif names[0][0] == '4llvm' or names[0][0] == '5clang':
return symbol
# Discard everything else
else:
return None
# Certain kinds of complex manglings we assume cannot be part of a public
# interface, and we handle them by raising an exception.
class TooComplexName(Exception):
pass
# Parse an itanium mangled name from the start of a string and return a
# (name, rest of string) pair.
def parse_itanium_name(arg):
# Check for a normal name
match = re.match('(\d+)(.+)', arg)
if match:
n = int(match.group(1))
name = match.group(1)+match.group(2)[:n]
rest = match.group(2)[n:]
return name, rest
# Check for constructor/destructor names
match = re.match('([CD][123])(.+)', arg)
if match:
return match.group(1), match.group(2)
# Assume that a sequence of characters that doesn't end a nesting is an
# operator (this is very imprecise, but appears to be good enough)
match = re.match('([^E]+)(.+)', arg)
if match:
return match.group(1), match.group(2)
# Anything else: we can't handle it
return None, arg
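# Hedged illustration (made-up manglings): a length-prefixed identifier and a
# complete-object constructor, as this parser splits them.
assert parse_itanium_name('4llvmI3fooE') == ('4llvm', 'I3fooE')
assert parse_itanium_name('C1Ev') == ('C1', 'Ev')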
# Parse an itanium mangled template argument list from the start of a string
# and throw it away, returning the rest of the string.
def skip_itanium_template(arg):
# A template argument list starts with I
assert arg.startswith('I'), arg
tmp = arg[1:]
while tmp:
# Check for names
match = re.match('(\d+)(.+)', tmp)
if match:
n = int(match.group(1))
tmp = match.group(2)[n:]
continue
# Check for substitutions
match = re.match('S[A-Z0-9]*_(.+)', tmp)
if match:
tmp = match.group(1)
# Start of a template
elif tmp.startswith('I'):
tmp = skip_itanium_template(tmp)
# Start of a nested name
elif tmp.startswith('N'):
_, tmp = parse_itanium_nested_name(tmp)
# Start of an expression: assume that it's too complicated
        elif tmp.startswith('L')
the message, 1-4096 characters after entities parsing
- `chat_id` :`Union[int,str,]` Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `message_id` :`int` Required if inline_message_id is not specified. Identifier of the message to edit
- `inline_message_id` :`str` Required if chat_id and message_id are not specified. Identifier of the inline message
- `reply_markup` :`types.InlineKeyboardMarkup` A JSON-serialized object for an inline keyboard.
- `parse_mode` :`str` Mode for parsing entities in the message text. See formatting options for more details.
- `entities` :`list` A JSON-serialized list of special entities that appear in message text, which can be specified instead of parse_mode
- `disable_web_page_preview` :`bool` Disables link previews for links in this message
**Returns:**
- A `tuple`, on success a `None` as first member and a botApiResponse object as second member
"""
if parse_mode is None:
parse_mode = self.default_parse_mode
if disable_web_page_preview is None:
disable_web_page_preview = self.default_disable_web_preview
data = {
"chat_id": chat_id,
"message_id": message_id,
"inline_message_id": inline_message_id,
"text": text,
"parse_mode": parse_mode,
"entities": entities,
"disable_web_page_preview": disable_web_page_preview,
"reply_markup": helper.toDict(reply_markup, True),
}
return self.response(self.sendRequest("editMessageText", data), None)
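    # Hedged usage sketch (illustrative; `bot` is an instance of this class and
    # the ids are made up):
    #
    #   _, resp = bot.editMessageText(text="updated text",
    #                                 chat_id=12345, message_id=678)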
def editMessageCaption(self, chat_id: Union[int, str, ] = None, message_id: int = None, caption: str = None, inline_message_id: str = None, reply_markup: types.InlineKeyboardMarkup = None, parse_mode: str = None, caption_entities: list = None):
"""Use this method to edit captions of messages. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. [See Telegram API](https://core.telegram.org/bots/api#editmessagecaption)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `message_id` :`int` Required if inline_message_id is not specified. Identifier of the message to edit
- `caption` :`str` New caption of the message, 0-1024 characters after entities parsing
- `inline_message_id` :`str` Required if chat_id and message_id are not specified. Identifier of the inline message
- `reply_markup` :`types.InlineKeyboardMarkup` A JSON-serialized object for an inline keyboard.
- `parse_mode` :`str` Mode for parsing entities in the message caption. See formatting options for more details.
- `caption_entities` :`list` A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode
**Returns:**
- A `tuple`, on success a `None` as first member and a botApiResponse object as second member
"""
if parse_mode is None:
parse_mode = self.default_parse_mode
data = {
"chat_id": chat_id,
"message_id": message_id,
"inline_message_id": inline_message_id,
"caption": caption,
"parse_mode": parse_mode,
"caption_entities": caption_entities,
"reply_markup": helper.toDict(reply_markup, True),
}
return self.response(self.sendRequest("editMessageCaption", data), None)
def editMessageMedia(self, media: types.InputMedia, chat_id: Union[int, str, ] = None, message_id: int = None, inline_message_id: str = None, reply_markup: types.InlineKeyboardMarkup = None):
"""Use this method to edit animation, audio, document, photo, or video messages. If a message is part of a message album, then it can be edited only to an audio for audio albums, only to a document for document albums and to a photo or a video otherwise. When an inline message is edited, a new file can't be uploaded; use a previously uploaded file via its file_id or specify a URL. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. [See Telegram API](https://core.telegram.org/bots/api#editmessagemedia)
- - - - -
**Args**:
- `media` :`types.InputMedia` A JSON-serialized object for a new media content of the message
- `chat_id` :`Union[int,str,]` Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `message_id` :`int` Required if inline_message_id is not specified. Identifier of the message to edit
- `inline_message_id` :`str` Required if chat_id and message_id are not specified. Identifier of the inline message
- `reply_markup` :`types.InlineKeyboardMarkup` A JSON-serialized object for a new inline keyboard.
**Returns:**
- A `tuple`, on success a `None` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"message_id": message_id,
"inline_message_id": inline_message_id,
"media": helper.toDict(media, True),
"reply_markup": helper.toDict(reply_markup, True),
}
return self.response(self.sendRequest("editMessageMedia", data), None)
def editMessageReplyMarkup(self, chat_id: Union[int, str, ] = None, message_id: int = None, inline_message_id: str = None, reply_markup: types.InlineKeyboardMarkup = None):
"""Use this method to edit only the reply markup of messages. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. [See Telegram API](https://core.telegram.org/bots/api#editmessagereplymarkup)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `message_id` :`int` Required if inline_message_id is not specified. Identifier of the message to edit
- `inline_message_id` :`str` Required if chat_id and message_id are not specified. Identifier of the inline message
- `reply_markup` :`types.InlineKeyboardMarkup` A JSON-serialized object for an inline keyboard.
**Returns:**
- A `tuple`, on success a `None` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"message_id": message_id,
"inline_message_id": inline_message_id,
"reply_markup": helper.toDict(reply_markup, True),
}
return self.response(self.sendRequest("editMessageReplyMarkup", data), None)
def stopPoll(self, chat_id: Union[int, str, ], message_id: int, reply_markup: types.InlineKeyboardMarkup = None):
"""Use this method to stop a poll which was sent by the bot. On success, the stopped Poll is returned. [See Telegram API](https://core.telegram.org/bots/api#stoppoll)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `message_id` :`int` Identifier of the original message with the poll
- `reply_markup` :`types.InlineKeyboardMarkup` A JSON-serialized object for a new message inline keyboard.
**Returns:**
- A `tuple`, on success a `types.Poll` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"message_id": message_id,
"reply_markup": helper.toDict(reply_markup, True),
}
return self.response(self.sendRequest("stopPoll", data), types.Poll)
def deleteMessage(self, chat_id: Union[int, str, ], message_id: int):
"""Use this method to delete a message, including service messages, with the following limitations:- A message can only be deleted if it was sent less than 48 hours ago.- A dice message in a private chat can only be deleted if it was sent more than 24 hours ago.- Bots can delete outgoing messages in private chats, groups, and supergroups.- Bots can delete incoming messages in private chats.- Bots granted can_post_messages permissions can delete outgoing messages in channels.- If the bot is an administrator of a group, it can delete any message there.- If the bot has can_delete_messages permission in a supergroup or a channel, it can delete any message there.Returns True on success. [See Telegram API](https://core.telegram.org/bots/api#deletemessage)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `message_id` :`int` Identifier of the message to delete
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"message_id": message_id,
}
return self.response(self.sendRequest("deleteMessage", data), bool)
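    # Hedged usage sketch (illustrative ids):
    #
    #   ok, resp = bot.deleteMessage(chat_id=12345, message_id=678)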
def sendSticker(self, chat_id: Union[int, str, ], sticker: Union[types.InputFile, str, ], reply_markup: Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, ] = None, disable_notification: bool = None, reply_to_message_id: int = None, allow_sending_without_reply: bool = None):
"""Use this method to send static .WEBP or animated .TGS stickers. On success, the sent Message is returned. [See Telegram API](https://core.telegram.org/bots/api#sendsticker)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `sticker` :`Union[types.InputFile,str,]` Sticker to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a .WEBP file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files »
        - `reply_markup` :`Union[types.InlineKeyboardMarkup,types.ReplyKeyboardMarkup,types.ReplyKeyboardRemove,types.ForceReply,]` Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a
from contextlib import contextmanager
from importlib import import_module
from time import time, sleep
import logging
import os
import pygit2
from gitmodel import conf
from gitmodel import exceptions
from gitmodel import models
from gitmodel import utils
class Workspace(object):
"""
A workspace acts as an encapsulation within which any model work is done.
It is analogous to a git working directory. It also acts as a "porcelain"
layer to pygit2's "plumbing".
In contrast to a working directory, this class does not make use of the
    repository's INDEX and HEAD files, and instead keeps track of these in
memory.
Passing initial_branch will set the default head for the workspace.
"""
def __init__(self, repo_path, initial_branch='refs/heads/master'):
self.config = conf.Config()
# set up a model registry
class ModelRegistry(dict):
"""This class acts like a so-called AttrDict"""
def __init__(self):
self.__dict__ = self
self.models = ModelRegistry()
try:
self.repo = pygit2.Repository(repo_path)
except KeyError:
msg = "Git repository not found at {}".format(repo_path)
raise exceptions.RepositoryNotFound(msg)
self.index = None
# set default head
self.head = initial_branch
        # Set branch to head. If the branch (head commit) doesn't exist, set
# index to a new empty tree.
try:
self.repo.lookup_reference(self.head)
except KeyError:
oid = self.repo.TreeBuilder().write()
self.index = self.repo[oid]
else:
self.update_index(self.head)
# add a base GitModel which can be extended if needed
self.register_model(models.GitModel, 'GitModel')
self.log = logging.getLogger(__name__)
def register_model(self, cls, name=None):
"""
Register a GitModel class with this workspace. A GitModel cannot be
used until it is registered with a workspace. This does not alter the
        original class, but rather creates a "clone" which is bound to this
workspace. If a model attribute requires special handling for the
cloning, that object should define a "contribute_to_class" method.
Note that when a GitModel with any RelatedFields is registered, its
related models will be automatically registered with the same workspace
if they have not already been registered with a workspace.
"""
if not issubclass(cls, models.GitModel):
raise TypeError("{0!r} is not a GitModel.".format(cls))
if not name:
name = cls.__name__
if self.models.get(name):
return self.models[name]
if hasattr(cls, '_meta'):
if cls._meta.workspace != self:
msg = "{0} is already registered with a different workspace"
raise ValueError(msg.format(cls.__name__))
# class has already been created with _meta, so we just register
# and return it.
self.models[name] = cls
return cls
metaclass = models.DeclarativeMetaclass
attrs = dict(cls.__dict__, **{
'__workspace__': self,
})
if not attrs.get('__module__'):
            attrs['__module__'] = __name__
if attrs.get('__dict__'):
del attrs['__dict__']
# the cloned model must subclass the original so as not to break
# type-checking operations
bases = [cls]
# parents must also be registered with the workspace
for base in cls.__bases__:
if issubclass(base, models.GitModel) and \
not hasattr(base, '_meta'):
base = self.models.get(name) or self.register_model(base)
bases.append(base)
# create the new class and attach it to the workspace
new_model = metaclass(name, tuple(bases), attrs)
self.models[name] = new_model
return new_model
def import_models(self, path_or_module):
"""
Register all models declared within a given python module
"""
if isinstance(path_or_module, basestring):
mod = import_module(path_or_module)
else:
mod = path_or_module
for name in dir(mod):
item = getattr(mod, name)
if isinstance(item, type) and \
issubclass(item, models.GitModel):
self.register_model(item, name)
return self.models
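    # Hedged usage sketch (the repository path and module name are
    # hypothetical):
    #
    #   ws = Workspace('/srv/data/content.git')
    #   ws.import_models('myapp.gitmodels')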
def create_blob(self, content):
return self.repo.create_blob(content)
def create_branch(self, name, start_point=None):
"""
Creates a head reference with the given name. The start_point argument
is the head to which the new branch will point -- it may be a branch
name, commit id, or tag name (defaults to current branch).
"""
if not start_point:
start_point = self.head
start_point_ref = self.repo.lookup_reference(start_point)
if start_point_ref.type != pygit2.GIT_OBJ_COMMIT:
raise ValueError('Given reference must point to a commit')
branch_ref = 'refs/heads/{}'.format(name)
self.repo.create_reference(branch_ref, start_point_ref.target)
def get_branch(self, ref_name):
return Branch(self.repo, ref_name)
def set_branch(self, name):
"""
Sets the current head ref to the given branch name
"""
# make sure the branch is a valid head ref
ref = 'refs/heads/{}'.format(name)
self.repo.lookup_reference(ref)
self.update_index(ref)
@property
def branch(self):
try:
return self.get_branch(self.head)
except exceptions.RepositoryError:
return None
def update_index(self, treeish):
"""Sets the index to the current branch or to the given treeish"""
# Don't change the index if there are pending changes.
if self.index and self.has_changes():
msg = "Cannot checkout a different branch with pending changes"
raise exceptions.RepositoryError(msg)
tree = utils.treeish_to_tree(self.repo, treeish)
if treeish.startswith('refs/heads'):
# if treeish is a head ref, update head
self.head = treeish
else:
# otherwise, we're in "detached head" mode
self.head = None
self.index = tree
def add(self, path, entries):
"""
Updates the current index given a path and a list of entries
"""
oid = utils.path.build_path(self.repo, path, entries, self.index)
self.index = self.repo[oid]
def remove(self, path):
"""
Removes an item from the index
"""
parent, name = os.path.split(path)
parent_tree = parent and self.index[parent] or self.index
tb = self.repo.TreeBuilder(parent_tree.oid)
tb.remove(name)
oid = tb.write()
if parent:
path, parent_name = os.path.split(parent)
entry = (parent_name, oid, pygit2.GIT_FILEMODE_TREE)
oid = utils.path.build_path(self.repo, path, [entry], self.index)
self.index = self.repo[oid]
def add_blob(self, path, content, mode=pygit2.GIT_FILEMODE_BLOB):
"""
Creates a blob object and adds it to the current index
"""
path, name = os.path.split(path)
blob = self.repo.create_blob(content)
entry = (name, blob, mode)
self.add(path, [entry])
return blob
@contextmanager
def commit_on_success(self, message='', author=None, committer=None):
"""
A context manager that allows you to wrap a block of changes and commit
those changes if no exceptions occur. This also ensures that the
repository is in a clean state (i.e., no changes) before allowing any
further changes.
"""
# ensure a clean state
if self.has_changes():
msg = "Repository has pending changes. Cannot auto-commit until "\
"pending changes have been comitted."
raise exceptions.RepositoryError(msg)
yield
self.commit(message, author, committer)
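# Illustrative usage of commit_on_success (a hedged sketch, not part of the
# original module; assumes an initialized Workspace `ws` and hypothetical paths):
#
#   with ws.commit_on_success("Add an index page"):
#       ws.add_blob("pages/index.md", "# Hello")
#
# If the block raises, no commit is created; otherwise the pending index
# changes are committed to the current branch.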
def diff(self):
"""
Returns a pygit2.Diff object representing a diff between the current
index and the current branch.
"""
if self.branch:
tree = self.branch.tree
else:
empty_tree = self.repo.TreeBuilder().write()
tree = self.repo[empty_tree]
return tree.diff_to_tree(self.index)
def has_changes(self):
"""Returns True if the current tree differs from the current branch"""
# As of pygit2 0.19, Diff.patch seems to raise a non-descript GitError
# if there are no changes, so we check the iterable length instead.
return len(tuple(self.diff())) > 0
def commit(self, message='', author=None, committer=None):
"""Commits the current tree to the current branch."""
if not self.has_changes():
return None
parents = []
if self.branch:
parents = [self.branch.commit.oid]
return self.create_commit(self.head, self.index, message, author,
committer, parents)
def create_commit(self, ref, tree, message='', author=None,
committer=None, parents=None):
"""
Create a commit with the given ref, tree, and message. If parent
commits are not given, the commit pointed to by the given ref is used
as the parent. If author and commitor are not given, the defaults in
the config are used.
"""
if not author:
author = self.config.DEFAULT_GIT_USER
if not committer:
committer = author
default_offset = self.config.get('DEFAULT_TZ_OFFSET', None)
author = utils.make_signature(*author, default_offset=default_offset)
committer = utils.make_signature(*committer,
default_offset=default_offset)
if parents is None:
try:
parent_ref = self.repo.lookup_reference(ref)
except KeyError:
parents = [] # initial commit
else:
parents = [parent_ref.oid]
# FIXME: create_commit updates the HEAD ref. HEAD isn't used in
# gitmodel, however it would be prudent to make sure it doesn't
# get changed. Possibly need to just restore it after the commit
return self.repo.create_commit(ref, author, committer, message,
tree.oid, parents)
def walk(self, sort=pygit2.GIT_SORT_TIME):
"""Iterate through commits on the current branch"""
#NEEDS-TEST
for commit in self.repo.walk(self.branch.oid, sort):
yield commit
@contextmanager
def lock(self, id):
"""
Acquires a lock with the given id. Uses an empty reference to store the
lock state, eg: refs/locks/my-lock
"""
start_time = time.time()
while self.locked(id):
if time.time() - start_time > self.config.LOCK_WAIT_TIMEOUT:
msg = ("Lock wait timeout exceeded while trying to acquire "
"lock '{}' on {}").format(id, self.path)
raise exceptions.LockWaitTimeoutExceeded(msg)
time.sleep(self.config.LOCK_WAIT_INTERVAL)
# The blob itself is not important, just the fact that the ref exists
emptyblob = self.create_blob('')
ref = self.create_reference('refs/locks/{}'.format(id), emptyblob)
yield
ref.delete()
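# Illustrative usage of the lock (sketch; assumes a Workspace `ws` whose config
# defines LOCK_WAIT_TIMEOUT and LOCK_WAIT_INTERVAL):
#
#   with ws.lock('INDEX'):
#       ws.add_blob('notes.txt', 'guarded write')
#       ws.commit('update notes')
#
# While the block runs, the lock exists as the reference refs/locks/INDEX and
# is deleted on exit, so concurrent workspaces will wait in the loop above.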
def locked(self, id):
try:
self.repo.lookup_reference('refs/locks/{}'.format(id))
except KeyError:
return False
return True
def sync_repo_index(self, checkout=True):
"""
Updates the git repository's index with the current workspace index.
If ``checkout`` is ``True``, the filesystem will be updated with the
contents of the index.
This is useful if you want to utilize the git repository using standard
git tools.
This function acquires a workspace-level INDEX lock.
"""
with self.lock('INDEX'):
self.repo.index.read_tree(self.index.oid)
if checkout:
self.repo.checkout()
class Branch(object):
"""
A representation of a git branch
"""
import functools
import math
import warnings
import numpy as np
import cupy
from cupy.cuda import cufft
from cupy.fft import config
from cupy.fft._cache import get_plan_cache
_reduce = functools.reduce
_prod = cupy._core.internal.prod
@cupy._util.memoize()
def _output_dtype(dtype, value_type):
if value_type != 'R2C':
if dtype in [np.float16, np.float32]:
return np.complex64
elif dtype not in [np.complex64, np.complex128]:
return np.complex128
else:
if dtype in [np.complex64, np.complex128]:
return np.dtype(dtype.char.lower())
elif dtype == np.float16:
return np.float32
elif dtype not in [np.float32, np.float64]:
return np.float64
return dtype
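# Illustrative promotions performed above (derived from the branches, not an
# exhaustive table):
#   _output_dtype(np.dtype(np.float32), 'C2C')    -> np.complex64
#   _output_dtype(np.dtype(np.int64), 'C2C')      -> np.complex128
#   _output_dtype(np.dtype(np.complex128), 'R2C') -> np.float64 (dtype.char.lower())
#   _output_dtype(np.dtype(np.float16), 'R2C')    -> np.float32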
def _convert_dtype(a, value_type):
out_dtype = _output_dtype(a.dtype, value_type)
if out_dtype != a.dtype:
a = a.astype(out_dtype)
return a
def _cook_shape(a, s, axes, value_type, order='C'):
if s is None or s == a.shape:
return a
if (value_type == 'C2R') and (s[-1] is not None):
s = list(s)
s[-1] = s[-1] // 2 + 1
for sz, axis in zip(s, axes):
if (sz is not None) and (sz != a.shape[axis]):
shape = list(a.shape)
if shape[axis] > sz:
index = [slice(None)] * a.ndim
index[axis] = slice(0, sz)
a = a[tuple(index)]
else:
index = [slice(None)] * a.ndim
index[axis] = slice(0, shape[axis])
shape[axis] = sz
z = cupy.zeros(shape, a.dtype.char, order=order)
z[tuple(index)] = a
a = z
return a
def _convert_fft_type(dtype, value_type):
if value_type == 'C2C' and dtype == np.complex64:
return cufft.CUFFT_C2C
elif value_type == 'R2C' and dtype == np.float32:
return cufft.CUFFT_R2C
elif value_type == 'C2R' and dtype == np.complex64:
return cufft.CUFFT_C2R
elif value_type == 'C2C' and dtype == np.complex128:
return cufft.CUFFT_Z2Z
elif value_type == 'R2C' and dtype == np.float64:
return cufft.CUFFT_D2Z
elif value_type == 'C2R' and dtype == np.complex128:
return cufft.CUFFT_Z2D
else:
raise ValueError
def _exec_fft(a, direction, value_type, norm, axis, overwrite_x,
out_size=None, out=None, plan=None):
fft_type = _convert_fft_type(a.dtype, value_type)
if axis % a.ndim != a.ndim - 1:
a = a.swapaxes(axis, -1)
if a.base is not None or not a.flags.c_contiguous:
a = a.copy()
elif (value_type == 'C2R' and not overwrite_x and
10010 <= cupy.cuda.runtime.runtimeGetVersion()):
# The input array may be modified in CUDA 10.1 and above.
# See #3763 for the discussion.
a = a.copy()
elif cupy.cuda.runtime.is_hip and value_type != 'C2C':
# hipFFT's R2C would overwrite input
# hipFFT's C2R needs a workaround (see below)
a = a.copy()
n = a.shape[-1]
if n < 1:
raise ValueError(
'Invalid number of FFT data points (%d) specified.' % n)
# Workaround for hipFFT/rocFFT:
# Both cuFFT and hipFFT/rocFFT have this requirement that 0-th and
# N/2-th element must be real, but cuFFT internally simply ignores it
# while hipFFT handles it badly in both Plan1d and PlanNd, so we must
# do the correction ourselves to ensure the condition is met.
if cupy.cuda.runtime.is_hip and value_type == 'C2R':
a[..., 0].imag = 0
if out_size is None:
a[..., -1].imag = 0
elif out_size % 2 == 0:
a[..., out_size // 2].imag = 0
if out_size is None:
out_size = n
batch = a.size // n
# plan search precedence:
# 1. plan passed in as an argument
# 2. plan as context manager
# 3. cached plan
# 4. create a new one
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
if plan is None:
plan = curr_plan
else:
raise RuntimeError('Use the cuFFT plan either as a context manager'
' or as an argument.')
if plan is None:
devices = None if not config.use_multi_gpus else config._devices
# TODO(leofang): do we need to add the current stream to keys?
keys = (out_size, fft_type, batch, devices)
mgr = config.get_current_callback_manager()
if mgr is not None:
# to avoid a weird segfault, we generate and cache distinct plans
# for every possible (load_aux, store_aux) pairs; the plans are
# still generated from the same external Python module
load_aux = mgr.cb_load_aux_arr
store_aux = mgr.cb_store_aux_arr
keys += (mgr.cb_load, mgr.cb_store,
0 if load_aux is None else load_aux.data.ptr,
0 if store_aux is None else store_aux.data.ptr)
cache = get_plan_cache()
cached_plan = cache.get(keys)
if cached_plan is not None:
plan = cached_plan
elif mgr is None:
plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
cache[keys] = plan
else: # has callback
# TODO(leofang): support multi-GPU callback (devices is ignored)
if devices:
raise NotImplementedError('multi-GPU cuFFT callbacks are not '
'yet supported')
plan = mgr.create_plan(('Plan1d', keys[:-5]))
mgr.set_callbacks(plan)
cache[keys] = plan
else:
# check plan validity
if not isinstance(plan, cufft.Plan1d):
raise ValueError('expected plan to have type cufft.Plan1d')
if fft_type != plan.fft_type:
raise ValueError('cuFFT plan dtype mismatch.')
if out_size != plan.nx:
raise ValueError('Target array size does not match the plan.',
out_size, plan.nx)
if batch != plan.batch:
raise ValueError('Batch size does not match the plan.')
if config.use_multi_gpus != (plan.gpus is not None):
raise ValueError('Unclear if multiple GPUs are to be used or not.')
if overwrite_x and value_type == 'C2C':
out = a
elif out is not None:
# verify that out has the expected shape and dtype
plan.check_output_array(a, out)
else:
out = plan.get_output_array(a)
if batch != 0:
plan.fft(a, out, direction)
sz = out.shape[-1]
if fft_type == cufft.CUFFT_R2C or fft_type == cufft.CUFFT_D2Z:
sz = n
if norm == 'backward' and direction == cufft.CUFFT_INVERSE:
out /= sz
elif norm == 'ortho':
out /= math.sqrt(sz)
elif norm == 'forward' and direction == cufft.CUFFT_FORWARD:
out /= sz
if axis % a.ndim != a.ndim - 1:
out = out.swapaxes(axis, -1)
return out
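# Plan-reuse sketch for the precedence rules above (hedged; `x` is a
# hypothetical 1-D complex64 array and get_fft_plan comes from
# cupyx.scipy.fftpack):
#
#   import cupy
#   from cupyx.scipy.fftpack import get_fft_plan
#   x = cupy.random.random(1024).astype(cupy.complex64)
#   plan = get_fft_plan(x)          # explicit Plan1d for this shape/dtype
#   with plan:                      # precedence 2: plan as context manager
#       y = cupy.fft.fft(x)
#
# Passing plan= explicitly covers precedence 1, and repeated calls without a
# plan hit the per-device plan cache (precedence 3) before a new plan is built.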
def _fft_c2c(a, direction, norm, axes, overwrite_x, plan=None):
for axis in axes:
a = _exec_fft(a, direction, 'C2C', norm, axis, overwrite_x, plan=plan)
return a
def _fft(a, s, axes, norm, direction, value_type='C2C', overwrite_x=False,
plan=None):
if not isinstance(a, cupy.ndarray):
raise TypeError('The input array a must be a cupy.ndarray')
if (s is not None) and (axes is not None) and len(s) != len(axes):
raise ValueError('Shape and axes have different lengths.')
if axes is None:
if s is None:
dim = a.ndim
else:
dim = len(s)
axes = [i for i in range(-dim, 0)]
else:
axes = tuple(axes)
if not axes:
if value_type == 'C2C':
return a
else:
raise IndexError('list index out of range')
if norm is None: # for backward compatibility
norm = 'backward'
# it is important that we check norm after validating axes for NumPy
# compatibility: if axes=(), early return is triggered and norm is not
# checked...
if norm not in ('backward', 'ortho', 'forward'):
raise ValueError('Invalid norm value %s, should be "backward", '
'"ortho", or "forward".' % norm)
a = _convert_dtype(a, value_type)
a = _cook_shape(a, s, axes, value_type)
if value_type == 'C2C':
a = _fft_c2c(a, direction, norm, axes, overwrite_x, plan=plan)
elif value_type == 'R2C':
a = _exec_fft(a, direction, value_type, norm, axes[-1], overwrite_x)
a = _fft_c2c(a, direction, norm, axes[:-1], overwrite_x)
else: # C2R
a = _fft_c2c(a, direction, norm, axes[:-1], overwrite_x)
# _cook_shape tells us input shape only, and no output shape
out_size = _get_fftn_out_size(a.shape, s, axes[-1], value_type)
a = _exec_fft(a, direction, value_type, norm, axes[-1], overwrite_x,
out_size)
return a
def _prep_fftn_axes(ndim, s=None, axes=None, value_type='C2C'):
"""Configure axes argument for an n-dimensional FFT.
The axes to be transformed are returned in ascending order.
"""
# compatibility checks for cupy.cuda.cufft.PlanNd
if (s is not None) and (axes is not None) and len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if axes is None:
if s is None:
dim = ndim
else:
dim = len(s)
axes = tuple([i + ndim for i in range(-dim, 0)])
axes_sorted = axes
else:
axes = tuple(axes)
if not axes:
return (), ()
if _reduce(min, axes) < -ndim or _reduce(max, axes) > ndim - 1:
raise ValueError("The specified axes exceed the array dimensions.")
if value_type == 'C2C':
axes_sorted = tuple(sorted([ax % ndim for ax in axes]))
else: # C2R or R2C
# The last axis is special, need to isolate it and append
# to the rest of (sorted) axes
axes_sorted = sorted([ax % ndim for ax in axes[:-1]])
axes_sorted.append(axes[-1] % ndim)
axes_sorted = tuple(axes_sorted)
# unsorted axes for _cook_shape, sorted ones are otherwise used
return axes, axes_sorted
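# Illustrative ordering (derived from the branches above):
#   _prep_fftn_axes(3, axes=(2, 0), value_type='C2C') -> ((2, 0), (0, 2))
#   _prep_fftn_axes(3, axes=(2, 0), value_type='R2C') -> ((2, 0), (2, 0))
# i.e. for R2C/C2R the last requested axis stays last in axes_sorted.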
def _nd_plan_is_possible(axes_sorted, ndim):
# PlanNd supports 1D, 2D and 3D batch transforms over contiguous axes
# Axes must be contiguous and the first or last axis must be in the axes.
return (0 < len(axes_sorted) <= 3
and (0 in axes_sorted or (ndim - 1) in axes_sorted)
and | |
import astropy.units as u
import numpy as np
from lofti_gaia.loftitools import *
from lofti_gaia.cFunctions import calcOFTI_C
#from loftitools import *
import pickle
import time
import matplotlib.pyplot as plt
# Astroquery throws some warnings we can ignore:
import warnings
warnings.filterwarnings("ignore")
'''This module obtains measurements from Gaia EDR3 (Gaia DR2 is also available as a secondary option) and runs through the LOFTI Gaia/OFTI
wide stellar binary orbit fitting technique.
'''
class Fitter(object):
'''Initialize the Fitter object for the binary system, and compute observational constraints
to be used in the orbit fit. User must provide Gaia source ids, tuples of mass estimates for
both objects, specify the number of desired orbits in posterior sample. Fit will be
for object 2 relative to object 1.
Attributes are tuples of (value,uncertainty) unless otherwise indicated. Attributes
with astropy units are retrieved from Gaia archive, attributes without units are
computed from Gaia values. All relative values are for object 2 relative to object 1.
Args:
sourceid1, sourceid2 (int): Gaia source ids for the two objects, fit will be for motion of \
object 2 relative to object 1
mass1, mass2 (tuple, flt): tuple of mass estimates for objects 1 and 2, of the form (value, uncertainty)
Norbits (int): Number of desired orbits in posterior sample. Default = 100000
results_filename (str): Filename for fit results files. If none, results will be written to files \
named FitResults.yr.mo.day.hr.min.s
astrometry (dict): User-supplied astrometric measurements. Must be dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates". May be same as the rv table. \
Sep, deltaRA, and deltaDEC must be in arcseconds, PA in degrees, dates in decimal years. \
Default = None
user_rv (dict): User-supplied radial velocity measurements. Must be dictionary or table or pandas dataframe with\
column names "rv,rverr,rv_dates". May be same as the astrometry table. Default = None.
catalog (str): name of Gaia catalog to query. Default = 'gaiaedr3.gaia_source'
ruwe1, ruwe2 (flt): RUWE value from Gaia archive
ref_epoch (flt): reference epoch in decimal years. For Gaia DR2 this is 2015.5, for Gaia EDR3 it is 2016.0
plx1, plx2 (flt): parallax from Gaia in mas
RA1, RA2 (flt): right ascension from Gaia; RA in deg, uncertainty in mas
Dec1, Dec2 (flt): declination from Gaia; Dec in deg, uncertainty in mas
pmRA1, pmRA2 (flt): proper motion in RA in mas yr^-1 from Gaia
pmDec1, pmDec2 (flt): proper motion in DEC in mas yr^-1 from Gaia
rv1, rv2 (flt, optional): radial velocity in km s^-1 from Gaia
rv (flt, optional): relative RV of 2 relative to 1, if both are present in Gaia
plx (flt): weighted mean parallax for the binary system in mas
distance (flt): distance of system in pc, computed from Gaia parallax using method \
of Bailer-Jones et al. 2018.
deltaRA, deltaDec (flt): relative separation in RA and Dec directions, in mas
pmRA, pmDec (flt): relative proper motion in RA/Dec directions in km s^-1
sep (flt): total separation vector in mas
pa (flt): position angle of separation vector in degrees from North
sep_au (flt): separation in AU
sep_km (flt): separation in km
total_vel (flt): total velocity vector in km s^-1. If RV is available for both, \
this is the 3d velocity vector; if not it is just the plane of sky velocity.
total_planeofsky_vel (flt): total velocity in the plane of sky in km s^-1. \
In the absence of RV this is equivalent to the total velocity vector.
deltaGmag (flt): relative contrast in Gaia G magnitude. Does not include uncertainty.
inflateProperMotionError (flt): an optional factor to multiply the default Gaia proper motion error by.
Written by <NAME>, 2020
'''
def __init__(self, sourceid1, sourceid2, mass1, mass2, Norbits = 100000, \
results_filename = None,
astrometry = None,
user_rv = None,
catalog = 'gaiaedr3.gaia_source',
inflateProperMotionError=1
):
self.sourceid1 = sourceid1
self.sourceid2 = sourceid2
try:
self.mass1 = mass1[0]
self.mass1err = mass1[1]
self.mass2 = mass2[0]
self.mass2err = mass2[1]
self.mtot = [self.mass1 + self.mass2, np.sqrt((self.mass1err**2) + (self.mass2err**2))]
except:
raise ValueError('Masses must be tuples of (value,error), ex: mass1 = (1.0,0.05)')
self.Norbits = Norbits
if not results_filename:
self.results_filename = 'FitResults.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt'
self.stats_filename = 'FitResults.Stats.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt'
else:
self.results_filename = results_filename
self.stats_filename = results_filename+'.Stats.txt'
self.astrometry = False
# check if user supplied astrometry:
if astrometry is not None:
# if so, set astrometric flag to True:
self.astrometry = True
# store observation dates:
self.astrometric_dates = astrometry['dates']
# if in sep/pa, convert to ra/dec:
if 'sep' in astrometry:
try:
astr_ra = [MonteCarloIt([astrometry['sep'][i],astrometry['seperr'][i]]) * \
np.sin(np.radians(MonteCarloIt([astrometry['pa'][i],astrometry['paerr'][i]]))) \
for i in range(len(astrometry['sep']))]
astr_dec = [MonteCarloIt([astrometry['sep'][i],astrometry['seperr'][i]]) * \
np.cos(np.radians(MonteCarloIt([astrometry['pa'][i],astrometry['paerr'][i]]))) \
for i in range(len(astrometry['sep']))]
self.astrometric_ra = np.array([
[np.mean(astr_ra[i]) for i in range(len(astrometry['sep']))],
[np.std(astr_ra[i]) for i in range(len(astrometry['sep']))]
])
self.astrometric_dec = np.array([
[np.mean(astr_dec[i]) for i in range(len(astrometry['sep']))],
[np.std(astr_dec[i]) for i in range(len(astrometry['sep']))]
])
except:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
elif 'ra' in astrometry:
# else store the ra/dec as attributes:
try:
self.astrometric_ra = np.array([astrometry['ra'], astrometry['raerr']])
self.astrometric_dec = np.array([astrometry['dec'], astrometry['decerr']])
except:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
else:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
# Check if user supplied rv:
self.use_user_rv = False
if user_rv is not None:
# set user rv flag to true:
self.use_user_rv = True
try:
# set attributes; multiply rv by -1 due to difference in coordinate systems:
self.user_rv = np.array([user_rv['rv']*-1,user_rv['rverr']])
self.user_rv_dates = np.array(user_rv['rv_dates'])
except:
raise ValueError('RV keys not recognized. Please use column names "rv,rverr,rv_dates"')
self.catalog = catalog
# Get Gaia measurements, compute needed constraints, and add to object:
self.PrepareConstraints(catalog=self.catalog,inflateFactor=inflateProperMotionError)
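# Illustrative construction (a hedged sketch; the source ids and masses below
# are hypothetical placeholders, not real Gaia sources):
#
#   fitter = Fitter(
#       sourceid1=2133868387448538624,
#       sourceid2=2133868387448538625,
#       mass1=(1.0, 0.05),
#       mass2=(0.5, 0.05),
#       Norbits=50000,
#   )
#
# The Gaia archive is queried during construction and the derived relative
# astrometry is attached via PrepareConstraints().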
def edr3ToICRF(self,pmra,pmdec,ra,dec,G):
''' Corrects for biases in proper motion. The function is from https://arxiv.org/pdf/2103.07432.pdf
Args:
pmra,pmdec (float): proper motion
ra, dec (float): right ascension and declination
G (float): G magnitude
Written by <NAME>, 2021
'''
if G>=13:
return pmra , pmdec
import numpy as np
def sind(x):
return np.sin(np.radians(x))
def cosd(x):
return np.cos(np.radians(x))
table1="""
0.0 9.0 9.0 9.5 9.5 10.0 10.0 10.5 10.5 11.0 11.0 11.5 11.5 11.75 11.75 12.0 12.0 12.25 12.25 12.5 12.5 12.75 12.75 13.0
18.4 33.8 -11.3 14.0 30.7 -19.4 12.8 31.4 -11.8 13.6 35.7 -10.5 16.2 50.0 2.1 19.4 59.9 0.2 21.8 64.2 1.0 17.7 65.6 -1.9 21.3 74.8 2.1 25.7 73.6 1.0 27.3 76.6 0.5
34.9 68.9 -2.9 """
table1 = np.fromstring(table1,sep=" ").reshape((12,5)).T
Gmin = table1[0]
Gmax = table1[1]
#pick the appropriate omegaXYZ for the source’s magnitude:
omegaX = table1[2][(Gmin<=G)&(Gmax>G)][0]
omegaY = table1[3][(Gmin<=G)&(Gmax>G)][0]
omegaZ = table1[4][(Gmin<=G)&(Gmax>G)][0]
pmraCorr = -1*sind(dec)*cosd(ra)*omegaX -sind(dec)*sind(ra)*omegaY + cosd(dec)*omegaZ
pmdecCorr = sind(ra)*omegaX -cosd(ra)*omegaY
return pmra-pmraCorr/1000., pmdec-pmdecCorr/1000.
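# Illustrative call (sketch; the numbers are made up):
#   pmra_c, pmdec_c = self.edr3ToICRF(pmra=5.0, pmdec=-3.0, ra=120.0, dec=-45.0, G=10.2)
# Sources with G >= 13 are returned unchanged; brighter sources have the
# magnitude-dependent spin correction (omegaX/Y/Z, divided by 1000 to convert
# to mas/yr) subtracted from their proper motion.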
def PrepareConstraints(self, rv=False, catalog='gaiaedr3.gaia_source', inflateFactor=1.):
'''Retrieves parameters for both objects from Gaia EDR3 archive and computes system attributes,
and assigns them to the Fitter object class.
Args:
rv (bool): flag for handling the presence or absence of RV measurements for both objects \
in EDR3. Gets set to True if both objects have Gaia RV measurements. Default = False
catalog (str): name of Gaia catalog to query. Default = 'gaiaedr3.gaia_source'
inflateFactor (flt): Factor by which to inflate the errors on Gaia proper motions to \
account for improper uncertainty estimates. Default = 1.0
Written by <NAME>, 2020
'''
from astroquery.gaia import Gaia
deg_to_mas = 3600000.
mas_to_deg = 1./3600000.
# Retrieve astrometric solution from Gaia EDR3
job = Gaia.launch_job("SELECT * FROM "+catalog+" WHERE source_id = "+str(self.sourceid1))
j = job.get_results()
job = Gaia.launch_job("SELECT * FROM "+catalog+" WHERE source_id = "+str(self.sourceid2))
k = job.get_results()
if catalog == 'gaiadr2.gaia_source':
# Retrieve RUWE from RUWE catalog for both sources and add to object state:
job = Gaia.launch_job("SELECT * FROM gaiadr2.ruwe WHERE source_id = "+str(self.sourceid1))
jruwe = job.get_results()
job = Gaia.launch_job("SELECT * FROM gaiadr2.ruwe WHERE source_id = "+str(self.sourceid2))
kruwe = job.get_results()
self.ruwe1 = jruwe['ruwe'][0]
self.ruwe2 = kruwe['ruwe'][0]
else:
# EDR3 contains ruwe in the main catalog:
self.ruwe1 = j['ruwe'][0]
self.ruwe2 = k['ruwe'][0]
# | |
var_by_name['/'.join(split_name)]
post_init_ops.append(v.assign(copy_from.read_value()))
return post_init_ops
def savable_variables(self):
"""Return the set of variables used for saving/loading the model."""
params = []
for v in tf.global_variables():
split_name = v.name.split('/')
if split_name[0] == 'v0' or not v.name.startswith('v'):
params.append(v)
return params
def get_devices(self):
return self.benchmark_cnn.raw_devices
class VariableMgrDistributedAllReduce(VariableMgr):
"""VariableMgr that implements the --distributed_all_reduce mode.
Each GPU has its own copy of the variables. To apply gradients,
the specified all-reduce algorithm is used to reduce the gradients
and replicate the final value to all GPUs.
"""
def __init__(self, benchmark_cnn, all_reduce_spec, job_name,
num_workers):
super(VariableMgrDistributedAllReduce, self).__init__(benchmark_cnn)
self._all_reduce_spec = parse_all_reduce_spec(all_reduce_spec)
self._all_reduce_device_prefixes = build_all_reduce_device_prefixes(
job_name, num_workers)
self._num_workers = num_workers
if not self._all_reduce_spec:
raise ValueError('all_reduce_spec must be specified')
def each_tower_has_variables(self):
return True
def create_outer_variable_scope(self, device_num):
"""Create a scope for the named device.
Args:
device_num: index of device for variable scope. (Note that
device_num spans all processes in cluster since a single global
graph is used.)
Returns:
the requested variable_scope
"""
return tf.variable_scope('v%s' % device_num)
def preprocess_device_grads(self, device_grads):
remaining_grads = device_grads
aggregated_grads = []
for spec_tuple in self._all_reduce_spec:
if spec_tuple.limit < 0:
this_grads = remaining_grads
remaining_grads = []
else:
(this_grads, remaining_grads) = split_grads_by_size(
spec_tuple.limit, remaining_grads)
if this_grads:
range_agg_grads = sum_gradients_all_reduce(
self._all_reduce_device_prefixes, this_grads, self._num_workers,
spec_tuple.alg, spec_tuple.shards, self.benchmark_cnn.gpu_indices)
if not aggregated_grads:
aggregated_grads = range_agg_grads
else:
assert len(aggregated_grads) == len(range_agg_grads)
for i in range(len(aggregated_grads)):
aggregated_grads[i] += range_agg_grads[i]
assert not remaining_grads
full_device_set = []
for grads in device_grads:
g, v = grads[0]
del v
full_device_set.append(g.device)
return (full_device_set, aggregated_grads)
def get_gradients_to_apply(self, device_num, gradient_state):
device_grads = gradient_state
if device_num >= len(device_grads):
raise ValueError('device_num %d exceeds length of device_grads (%d)' %
(device_num, len(device_grads)))
return device_grads[device_num]
def get_post_init_ops(self):
"""Copy initialized values for variables to other devices."""
global_vars = tf.global_variables()
var_by_name = dict([(v.name, v) for v in global_vars])
post_init_ops = []
for v in global_vars:
split_name = v.name.split('/')
# TODO(b/62630508): use more specific prefix than v or v0.
if split_name[0] == 'v0' or not v.name.startswith('v'):
continue
split_name[0] = 'v0'
copy_from = var_by_name['/'.join(split_name)]
post_init_ops.append(v.assign(copy_from.read_value()))
return post_init_ops
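# Naming sketch for the copy above (variable names are illustrative): a tower
# variable such as 'v3/cg/conv0/weights:0' is initialized from
# 'v0/cg/conv0/weights:0' -- the leading tower prefix is rewritten to 'v0' to
# locate the source of truth on device 0.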
def savable_variables(self):
"""Return the set of variables used for saving/loading the model."""
params = []
for v in tf.global_variables():
split_name = v.name.split('/')
if split_name[0] == 'v0' or not v.name.startswith('v'):
params.append(v)
return params
def get_devices(self):
return self.benchmark_cnn.raw_devices
class VariableMgrDistributedFetchFromPS(VariableMgr):
"""Implements --variable_update=parameter_server mode for distributed jobs.
Variables are stored on a parameter server. For each step, each tower gets
a copy of the variables from the parameter server, and sends its gradients
to the param server.
"""
def each_tower_has_variables(self):
return False
def create_outer_variable_scope(self, device_num):
if self.benchmark_cnn.local_parameter_device_flag == 'gpu':
caching_devices = self.benchmark_cnn.raw_devices
else:
caching_devices = [self.benchmark_cnn.cpu_device]
custom_getter = OverrideCachingDevice(
caching_devices, self.benchmark_cnn.cpu_device, 1024*64)
return tf.variable_scope('v', reuse=bool(device_num),
custom_getter=custom_getter)
def preprocess_device_grads(self, device_grads):
# Returns (gradient_devices, gradient_state)
return ([self.benchmark_cnn.param_server_device], device_grads)
def get_gradients_to_apply(self, device_num, gradient_state):
assert device_num == 0
return aggregate_gradients_using_copy(gradient_state, use_mean=True)
def get_devices(self):
ps_strategy = tf.contrib.training.GreedyLoadBalancingStrategy(
len(self.benchmark_cnn.ps_hosts), tf.contrib.training.byte_size_load_fn)
return [tf.train.replica_device_setter(
worker_device=d, cluster=self.benchmark_cnn.cluster,
ps_strategy=ps_strategy)
for d in self.benchmark_cnn.raw_devices]
class VariableMgrDistributedFetchFromStagedPS(
VariableMgrDistributedFetchFromPS):
"""Extends VariableMgrDistributedFetchFromPS for --staged_vars."""
def __init__(self, benchmark_cnn):
super(VariableMgrDistributedFetchFromStagedPS, self).__init__(benchmark_cnn)
self.staging_vars_on_devices = [dict() for _ in
self.benchmark_cnn.raw_devices]
self.staged_vars_on_cpu = {}
def create_outer_variable_scope(self, device_num):
self._custom_getter = StagedVariableGetter(
device_num, self.benchmark_cnn.raw_devices,
self.benchmark_cnn.cpu_device, self)
return tf.variable_scope(
'v', reuse=bool(device_num), custom_getter=self._custom_getter)
def supports_staged_vars(self):
return True
def trainable_variables_on_device(self, rel_device_num, abs_device_num,
writable=False):
return self._custom_getter.trainable_variables_on_device(
rel_device_num, abs_device_num, writable=writable)
class VariableMgrDistributedReplicated(VariableMgr):
"""VariableMgr that implements the --distributed_replicated mode.
Each GPU has a copy of the variables, and updates its copy after the
parameter servers are all updated with the gradients from all servers. Only
works with cross_replica_sync=true. Unlike 'replicated', does not use nccl
all-reduce for replicating within a server.
"""
def each_tower_has_variables(self):
return True
def create_outer_variable_scope(self, device_num):
return tf.variable_scope(
'v%s' % device_num,
custom_getter=OverrideToLocalVariableIfNotPsVar())
def preprocess_device_grads(self, device_grads):
return ([self.benchmark_cnn.param_server_device], device_grads)
def get_gradients_to_apply(self, device_num, gradient_state):
device_grads = gradient_state # From 2nd result of preprocess_device_grads.
avg_grads = aggregate_gradients_using_copy_with_device_selection(
self.benchmark_cnn, device_grads, use_mean=True)
# Make shadow variable on a parameter server for each original trainable
# variable.
for i, (g, v) in enumerate(avg_grads):
my_name = PS_SHADOW_VAR_PREFIX + '/' + v.name
if my_name.endswith(':0'): my_name = my_name[:-2]
new_v = tf.get_variable(my_name, dtype=v.dtype.base_dtype,
initializer=v.initial_value,
trainable=True)
avg_grads[i] = (g, new_v)
return avg_grads
def append_apply_gradients_ops(self, gradient_state, opt,
grads, training_ops):
device_grads = gradient_state # From 2nd result of preprocess_device_grads.
# For each variable, apply the combined gradients for this server on
# the parameter server, and then wait for all other servers to do
# this.
for i, (g, v) in enumerate(grads):
apply_gradient_op = opt.apply_gradients([(g, v)])
barrier = self.benchmark_cnn.add_sync_queues_and_barrier(
'replicate_variable_%s' % i, [apply_gradient_op])
with tf.control_dependencies([barrier]):
with tf.device(self.benchmark_cnn.cpu_device):
updated_value = v.read_value()
for my_d in range(len(self.benchmark_cnn.devices)):
training_ops.append(
device_grads[my_d][i][1].assign(updated_value))
def _strip_port(self, s):
if s.endswith(':0'):
return s[:-2]
return s
def get_post_init_ops(self):
# Copy initialized variables for variables on the parameter server
# to the local copy of the variable.
local_vars = tf.local_variables()
local_var_by_name = dict(
[(self._strip_port(v.name), v) for v in local_vars])
post_init_ops = []
for v in tf.global_variables():
if v.name.startswith(PS_SHADOW_VAR_PREFIX + '/v0/'):
prefix = self._strip_port(
v.name[len(PS_SHADOW_VAR_PREFIX + '/v0'):])
for i in range(self.benchmark_cnn.num_gpus):
name = 'v%s%s' % (i, prefix)
if name in local_var_by_name:
copy_to = local_var_by_name[name]
post_init_ops.append(copy_to.assign(v.read_value()))
return post_init_ops
def _remove_shadow_var_prefix_if_present(self, var_name):
if var_name.startswith(PS_SHADOW_VAR_PREFIX + '/'):
return var_name[len(PS_SHADOW_VAR_PREFIX + '/'):]
else:
return var_name
def var_dict_name(self, v):
return self._strip_port(self._remove_shadow_var_prefix_if_present(v.name))
def savable_variables(self):
"""Returns a list/dict of savable variables to pass to tf.train.Saver."""
params = {}
for v in tf.global_variables():
assert (v.name.startswith(PS_SHADOW_VAR_PREFIX + '/v0/') or
v.name == 'global_step:0')
# We store variables in the checkpoint with the shadow variable prefix
# removed so we can evaluate checkpoints in non-distributed replicated
# mode. The checkpoints can also be loaded for training in
# distributed_replicated mode.
name = self._strip_port(self._remove_shadow_var_prefix_if_present(v.name))
params[name] = v
for v in tf.local_variables():
# Non-trainable variables, such as batch norm moving averages, do not have
# corresponding global shadow variables, so we add them here. Trainable
# local variables have corresponding global shadow variables, which were
# added in the global variable loop above.
if v.name.startswith('v0/') and v not in tf.trainable_variables():
params[self._strip_port(v.name)] = v
return params
def get_devices(self):
return self.benchmark_cnn.raw_devices
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices, the inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
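# Illustrative behaviour (shapes and names are hypothetical):
#   device_grads = [[(g_bias, v_bias), (g_kernel, v_kernel)]]   # one device
#   small, large = split_grads_by_size(1024, device_grads)
#   # gradients with <= 1024 elements land in `small`, the rest in `large`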
def sum_grad_and_var_all_reduce(grad_and_vars, num_workers, alg, gpu_indices,
aux_devices=None, num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, tf.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
tf.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(scaled_grads, tf.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, tf.add, tf.add_n)
elif alg == 'pscpu/pscpu':
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices,
# TODO(tucker): devise a way of better specifying the device set
# for the second level.
[aux_devices[0]],
tf.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, tf.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def contains_any(haystack, needles):
"""Tests if any needle is a substring of haystack.
Args:
haystack: a string
needles: list of strings
Returns:
True if any element of needles is a substring of haystack,
False otherwise.
"""
for n in needles:
if n in haystack:
return True
return False
def sum_gradients_all_reduce(dev_prefixes, tower_grads, num_workers,
alg, num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
tower_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs | |
is an image, otherwise False"""
return self._info["type"] == _PHOTO_TYPE
@property
def incloud(self):
"""Returns True if photo is cloud asset and is synched to cloud
False if photo is cloud asset and not yet synched to cloud
None if photo is not cloud asset
"""
return self._info["incloud"]
@property
def iscloudasset(self):
"""Returns True if photo is a cloud asset (in an iCloud library),
otherwise False
"""
if self._db._db_version <= _PHOTOS_4_VERSION:
return (
True
if self._info["cloudLibraryState"] is not None
and self._info["cloudLibraryState"] != 0
else False
)
else:
return True if self._info["cloudAssetGUID"] is not None else False
@property
def isreference(self):
"""Returns True if photo is a reference (not copied to the Photos library), otherwise False"""
return self._info["isreference"]
@property
def burst(self):
"""Returns True if photo is part of a Burst photo set, otherwise False"""
return self._info["burst"]
@property
def burst_selected(self):
"""Returns True if photo is a burst photo and has been selected from the burst set by the user, otherwise False"""
return bool(self._info["burstPickType"] & BURST_SELECTED)
@property
def burst_key(self):
"""Returns True if photo is a burst photo and is the key image for the burst set (the image that Photos shows on top of the burst stack), otherwise False"""
return bool(self._info["burstPickType"] & BURST_KEY)
@property
def burst_default_pick(self):
"""Returns True if photo is a burst image and is the photo that Photos selected as the default image for the burst set, otherwise False"""
return bool(self._info["burstPickType"] & BURST_DEFAULT_PICK)
@property
def burst_photos(self):
"""If photo is a burst photo, returns list of PhotoInfo objects
that are part of the same burst photo set; otherwise returns empty list.
self is not included in the returned list"""
if self._info["burst"]:
burst_uuid = self._info["burstUUID"]
return [
PhotoInfo(db=self._db, uuid=u, info=self._db._dbphotos[u])
for u in self._db._dbphotos_burst[burst_uuid]
if u != self._uuid
]
else:
return []
@property
def live_photo(self):
"""Returns True if photo is a live photo, otherwise False"""
return self._info["live_photo"]
@property
def path_live_photo(self):
"""Returns path to the associated video file for a live photo
If photo is not a live photo, returns None
If photo is missing, returns None"""
photopath = None
if self._db._db_version <= _PHOTOS_4_VERSION:
if self.live_photo and not self.ismissing:
live_model_id = self._info["live_model_id"]
if live_model_id is None:
logging.debug(f"missing live_model_id: {self._uuid}")
photopath = None
else:
folder_id, file_id = _get_resource_loc(live_model_id)
library_path = self._db.library_path
photopath = os.path.join(
library_path,
"resources",
"media",
"master",
folder_id,
"00",
f"jpegvideocomplement_{file_id}.mov",
)
if not os.path.isfile(photopath):
# In testing, I've seen occasional missing movie for live photo
# These appear to be valid -- e.g. live component hasn't been downloaded from iCloud
# photos 4 has "isOnDisk" column we could check
# or could do the actual check with "isfile"
# TODO: should this be a warning or debug?
photopath = None
else:
photopath = None
elif self.live_photo and self.path and not self.ismissing:
filename = pathlib.Path(self.path)
photopath = filename.parent.joinpath(f"{filename.stem}_3.mov")
photopath = str(photopath)
if not os.path.isfile(photopath):
# In testing, I've seen occasional missing movie for live photo
# these appear to be valid -- e.g. video component not yet downloaded from iCloud
# TODO: should this be a warning or debug?
photopath = None
else:
photopath = None
return photopath
@cached_property
def path_derivatives(self):
"""Return any derivative (preview) images associated with the photo as a list of paths, sorted by file size (largest first)"""
if self._db._db_version <= _PHOTOS_4_VERSION:
return self._path_derivatives_4()
if self.shared:
return self._path_derivatives_5_shared()
directory = self._uuid[0] # first char of uuid
derivative_path = (
pathlib.Path(self._db._library_path) / f"resources/derivatives/{directory}"
)
files = list(derivative_path.glob(f"{self.uuid}*.*"))
# previews may be missing from derivatives path
# there are what appear to be low res thumbnails in the "masters" subfolder
thumb_path = (
pathlib.Path(self._db._library_path)
/ f"resources/derivatives/masters/{directory}/{self.uuid}_4_5005_c.jpeg"
)
if thumb_path.exists():
files.append(thumb_path)
files = sorted(files, reverse=True, key=lambda f: f.stat().st_size)
# return list of filename but skip .THM files (these are actually low-res thumbnails in JPEG format but with .THM extension)
derivatives = [str(filename) for filename in files if filename.suffix != ".THM"]
if self.isphoto and len(derivatives) > 1 and derivatives[0].endswith(".mov"):
derivatives[1], derivatives[0] = derivatives[0], derivatives[1]
return derivatives
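# Illustrative on-disk layout consulted above for Photos 5+ (paths shown are
# examples, not guaranteed for every library):
#   <library>/resources/derivatives/<first char of UUID>/<UUID>*.jpeg
#   <library>/resources/derivatives/masters/<first char of UUID>/<UUID>_4_5005_c.jpeg
# The returned list is sorted largest file first and .THM thumbnails are skipped.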
def _path_derivatives_4(self):
"""Return paths to all derivative (preview) files for Photos <= 4"""
modelid = self._info["modelID"]
if modelid is None:
return []
folder_id, file_id = _get_resource_loc(modelid)
derivatives_root = (
pathlib.Path(self._db._library_path)
/ f"resources/proxies/derivatives/{folder_id}"
)
# photos appears to usually be in "00" subfolder but
# could be elsewhere--I haven't figured out this logic yet
# first see if it's in 00
derivatives_path = derivatives_root / "00" / file_id
if derivatives_path.is_dir():
files = derivatives_path.glob("*")
files = sorted(files, reverse=True, key=lambda f: f.stat().st_size)
return [str(filename) for filename in files]
# didn't find derivatives path
for subdir in derivatives_root.glob("*"):
if subdir.is_dir():
derivatives_path = derivatives_root / subdir / file_id
if derivatives_path.is_dir():
files = derivatives_path.glob("*")
files = sorted(files, reverse=True, key=lambda f: f.stat().st_size)
return [str(filename) for filename in files]
# didn't find a derivatives path
return []
def _path_derivatives_5_shared(self):
"""Return paths to all derivative (preview) files for shared iCloud photos in Photos >= 5"""
directory = self._uuid[0] # first char of uuid
# only 1 derivative for shared photos and it's called 'UUID_4_5005_c.jpeg'
derivative_path = (
pathlib.Path(self._db._library_path)
/ "resources/cloudsharing/resources/derivatives/masters"
/ f"{directory}/{self.uuid}_4_5005_c.jpeg"
)
if derivative_path.exists():
return [str(derivative_path)]
return []
@property
def panorama(self):
"""Returns True if photo is a panorama, otherwise False"""
return self._info["panorama"]
@property
def slow_mo(self):
"""Returns True if photo is a slow motion video, otherwise False"""
return self._info["slow_mo"]
@property
def time_lapse(self):
"""Returns True if photo is a time lapse video, otherwise False"""
return self._info["time_lapse"]
@property
def hdr(self):
"""Returns True if photo is an HDR photo, otherwise False"""
return self._info["hdr"]
@property
def screenshot(self):
"""Returns True if photo is an HDR photo, otherwise False"""
return self._info["screenshot"]
@property
def portrait(self):
"""Returns True if photo is a portrait, otherwise False"""
return self._info["portrait"]
@property
def selfie(self):
"""Returns True if photo is a selfie (front facing camera), otherwise False"""
return self._info["selfie"]
@property
def place(self):
"""Returns PlaceInfo object containing reverse geolocation info"""
# implementation note: doesn't create the PlaceInfo object until requested
# then memoizes the object in self._place to avoid recreating the object
if self._db._db_version <= _PHOTOS_4_VERSION:
try:
return self._place # pylint: disable=access-member-before-definition
except AttributeError:
if self._info["placeNames"]:
self._place = PlaceInfo4(
self._info["placeNames"], self._info["countryCode"]
)
else:
self._place = None
return self._place
else:
try:
return self._place # pylint: disable=access-member-before-definition
except AttributeError:
if self._info["reverse_geolocation"]:
self._place = PlaceInfo5(self._info["reverse_geolocation"])
else:
self._place = None
return self._place
@property
def has_raw(self):
"""returns True if photo has an associated raw image (that is, it's a RAW+JPEG pair), otherwise False"""
return self._info["has_raw"]
@property
def israw(self):
"""returns True if photo is a raw image. For images with an associated RAW+JPEG pair, see has_raw"""
return "raw-image" in self.uti_original if self.uti_original else False
@property
def raw_original(self):
"""returns True if associated raw image and the raw image is selected in Photos
via "Use RAW as Original "
otherwise returns False"""
return self._info["raw_is_original"]
@property
def height(self):
"""returns height of the current photo version in pixels"""
return self._info["height"]
@property
def width(self):
"""returns width of the current photo version in pixels"""
return self._info["width"]
@property
def orientation(self):
"""returns EXIF orientation of the current photo version as int or 0 if current orientation cannot be determined"""
if self._db._db_version <= _PHOTOS_4_VERSION:
return self._info["orientation"]
# For Photos 5+, try to get the adjusted orientation
if not self.hasadjustments:
return self._info["orientation"]
if self.adjustments:
return self.adjustments.adj_orientation
else:
# can't reliably determine orientation for edited photo if adjustmentinfo not available
return 0
@property
def original_height(self):
"""returns height of the original photo version in pixels"""
return self._info["original_height"]
@property
def original_width(self):
"""returns width of the original photo version in pixels"""
return self._info["original_width"]
@property
def original_orientation(self):
"""returns EXIF orientation of the original photo version as int"""
return self._info["original_orientation"]
@property
def original_filesize(self):
"""returns filesize of original photo in bytes as int"""
return self._info["original_filesize"]
@property
def duplicates(self):
"""return list of PhotoInfo objects for possible duplicates (matching signature of original size, date, height, width) or empty list if no matching duplicates"""
signature = self._db._duplicate_signature(self.uuid)
duplicates = []
try:
for uuid in self._db._db_signatures[signature]:
if uuid != self.uuid:
# found a possible duplicate
duplicates.append(self._db.get_photo(uuid))
except KeyError:
# don't expect this to happen | |
import datetime
import os
import sys
import signal
import time
import warnings
import numpy as np
from pandas import DataFrame
from ..utils import (
logger,
check_directory_exists_and_if_not_mkdir,
reflect,
safe_file_dump,
latex_plot_format,
)
from .base_sampler import Sampler, NestedSampler
from ..result import rejection_sample
_likelihood = None
_priors = None
_search_parameter_keys = None
_use_ratio = False
def _initialize_global_variables(
likelihood, priors, search_parameter_keys, use_ratio
):
"""
Store a global copy of the likelihood, priors, and search keys for
multiprocessing.
"""
global _likelihood
global _priors
global _search_parameter_keys
global _use_ratio
_likelihood = likelihood
_priors = priors
_search_parameter_keys = search_parameter_keys
_use_ratio = use_ratio
def _prior_transform_wrapper(theta):
"""Wrapper to the prior transformation. Needed for multiprocessing."""
return _priors.rescale(_search_parameter_keys, theta)
def _log_likelihood_wrapper(theta):
"""Wrapper to the log likelihood. Needed for multiprocessing."""
if _priors.evaluate_constraints({
key: theta[ii] for ii, key in enumerate(_search_parameter_keys)
}):
params = {key: t for key, t in zip(_search_parameter_keys, theta)}
_likelihood.parameters.update(params)
if _use_ratio:
return _likelihood.log_likelihood_ratio()
else:
return _likelihood.log_likelihood()
else:
return np.nan_to_num(-np.inf)
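# The wrappers above read module-level globals set by
# _initialize_global_variables, so a worker pool is built along these lines
# (sketch mirroring _setup_pool below; the pool size is arbitrary):
#
#   import multiprocessing
#   pool = multiprocessing.Pool(
#       processes=4,
#       initializer=_initialize_global_variables,
#       initargs=(likelihood, priors, search_parameter_keys, use_ratio),
#   )
#
# Each worker can then evaluate _log_likelihood_wrapper(theta) without
# re-pickling the likelihood for every call.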
class Dynesty(NestedSampler):
"""
bilby wrapper of `dynesty.NestedSampler`
(https://dynesty.readthedocs.io/en/latest/)
All positional and keyword arguments (i.e., the args and kwargs) passed to
`run_sampler` will be propagated to `dynesty.NestedSampler`, see
documentation for that class for further help. Under Other Parameters below,
we list commonly used kwargs and the bilby defaults.
Parameters
==========
likelihood: likelihood.Likelihood
A object with a log_l method
priors: bilby.core.prior.PriorDict, dict
Priors to be used in the search.
This has attributes for each parameter to be sampled.
outdir: str, optional
Name of the output directory
label: str, optional
Naming scheme of the output files
use_ratio: bool, optional
Switch to set whether or not you want to use the log-likelihood ratio
or just the log-likelihood
plot: bool, optional
Switch to set whether or not you want to create traceplots
skip_import_verification: bool
Skips the check if the sampler is installed if true. This is
only advisable for testing environments
Other Parameters
================
npoints: int, (1000)
The number of live points, note this can also equivalently be given as
one of [nlive, nlives, n_live_points]
bound: {'none', 'single', 'multi', 'balls', 'cubes'}, ('multi')
Method used to select new points
sample: {'unif', 'rwalk', 'slice', 'rslice', 'hslice'}, ('rwalk')
Method used to sample uniformly within the likelihood constraints,
conditioned on the provided bounds
walks: int
Number of walks taken if using `sample='rwalk'`, defaults to `100`.
Note that the default `walks` in dynesty itself is 25, although using
`ndim * 10` can be a reasonable rule of thumb for new problems.
dlogz: float, (0.1)
Stopping criteria
print_progress: Bool
If true, print information about the convergence during sampling.
`verbose` has the same effect.
check_point: bool,
If true, use check pointing.
check_point_plot: bool,
If true, generate a trace plot along with the check-point
check_point_delta_t: float (600)
The minimum checkpoint period (in seconds). Should the run be
interrupted, it can be resumed from the last checkpoint.
n_check_point: int, optional (None)
The number of steps to take before checking whether to check_point.
resume: bool
If true, resume run from checkpoint (if available)
exit_code: int
The code which the sampler exits on if it hasn't finished sampling
print_method: str ('tqdm')
The method to use for printing. The options are:
- 'tqdm': use a `tqdm` `pbar`, this is the default.
- 'interval-$TIME': print to `stdout` every `$TIME` seconds,
e.g., 'interval-10' prints every ten seconds, this does not print every iteration
- else: print to `stdout` at every iteration
"""
default_kwargs = dict(bound='multi', sample='rwalk',
periodic=None, reflective=None,
check_point_delta_t=1800, nlive=1000,
first_update=None, walks=100,
npdim=None, rstate=None, queue_size=1, pool=None,
use_pool=None, live_points=None,
logl_args=None, logl_kwargs=None,
ptform_args=None, ptform_kwargs=None,
enlarge=1.5, bootstrap=None, vol_dec=0.5, vol_check=8.0,
facc=0.2, slices=5,
update_interval=None, print_func=None,
dlogz=0.1, maxiter=None, maxcall=None,
logl_max=np.inf, add_live=True, print_progress=True,
save_bounds=False, n_effective=None,
maxmcmc=5000, nact=5, print_method="tqdm")
def __init__(self, likelihood, priors, outdir='outdir', label='label',
use_ratio=False, plot=False, skip_import_verification=False,
check_point=True, check_point_plot=True, n_check_point=None,
check_point_delta_t=600, resume=True, nestcheck=False, exit_code=130, **kwargs):
super(Dynesty, self).__init__(likelihood=likelihood, priors=priors,
outdir=outdir, label=label, use_ratio=use_ratio,
plot=plot, skip_import_verification=skip_import_verification,
exit_code=exit_code,
**kwargs)
self.n_check_point = n_check_point
self.check_point = check_point
self.check_point_plot = check_point_plot
self.resume = resume
self._periodic = list()
self._reflective = list()
self._apply_dynesty_boundaries()
self.nestcheck = nestcheck
if self.n_check_point is None:
self.n_check_point = 1000
self.check_point_delta_t = check_point_delta_t
logger.info("Checkpoint every check_point_delta_t = {}s"
.format(check_point_delta_t))
self.resume_file = '{}/{}_resume.pickle'.format(self.outdir, self.label)
self.sampling_time = datetime.timedelta()
try:
signal.signal(signal.SIGTERM, self.write_current_state_and_exit)
signal.signal(signal.SIGINT, self.write_current_state_and_exit)
signal.signal(signal.SIGALRM, self.write_current_state_and_exit)
except AttributeError:
logger.debug(
"Setting signal attributes unavailable on this system. "
"This is likely the case if you are running on a Windows machine"
" and is no further concern.")
def __getstate__(self):
""" For pickle: remove external_sampler, which can be an unpicklable "module" """
state = self.__dict__.copy()
if "external_sampler" in state:
del state['external_sampler']
return state
@property
def sampler_function_kwargs(self):
keys = ['dlogz', 'print_progress', 'print_func', 'maxiter',
'maxcall', 'logl_max', 'add_live', 'save_bounds',
'n_effective']
return {key: self.kwargs[key] for key in keys}
@property
def sampler_init_kwargs(self):
return {key: value
for key, value in self.kwargs.items()
if key not in self.sampler_function_kwargs}
def _translate_kwargs(self, kwargs):
if 'nlive' not in kwargs:
for equiv in self.npoints_equiv_kwargs:
if equiv in kwargs:
kwargs['nlive'] = kwargs.pop(equiv)
if 'print_progress' not in kwargs:
if 'verbose' in kwargs:
kwargs['print_progress'] = kwargs.pop('verbose')
if 'walks' not in kwargs:
for equiv in self.walks_equiv_kwargs:
if equiv in kwargs:
kwargs['walks'] = kwargs.pop(equiv)
if "queue_size" not in kwargs:
for equiv in self.npool_equiv_kwargs:
if equiv in kwargs:
kwargs['queue_size'] = kwargs.pop(equiv)
def _verify_kwargs_against_default_kwargs(self):
from tqdm.auto import tqdm
if not self.kwargs['walks']:
self.kwargs['walks'] = 100
if not self.kwargs['update_interval']:
self.kwargs['update_interval'] = int(0.6 * self.kwargs['nlive'])
if self.kwargs['print_func'] is None:
self.kwargs['print_func'] = self._print_func
print_method = self.kwargs["print_method"]
if print_method == "tqdm" and self.kwargs["print_progress"]:
self.pbar = tqdm(file=sys.stdout)
elif "interval" in print_method:
self._last_print_time = datetime.datetime.now()
self._print_interval = datetime.timedelta(seconds=float(print_method.split("-")[1]))
Sampler._verify_kwargs_against_default_kwargs(self)
def _print_func(self, results, niter, ncall=None, dlogz=None, *args, **kwargs):
""" Replacing status update for dynesty.result.print_func """
if "interval" in self.kwargs["print_method"]:
_time = datetime.datetime.now()
if _time - self._last_print_time < self._print_interval:
return
else:
self._last_print_time = _time
# Add time in current run to overall sampling time
total_time = self.sampling_time + _time - self.start_time
# Remove fractional seconds
total_time_str = str(total_time).split('.')[0]
# Extract results at the current iteration.
(worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
eff, delta_logz) = results
# Adjusting outputs for printing.
if delta_logz > 1e6:
delta_logz = np.inf
if 0. <= logzvar <= 1e6:
logzerr = np.sqrt(logzvar)
else:
logzerr = np.nan
if logz <= -1e6:
logz = -np.inf
if loglstar <= -1e6:
loglstar = -np.inf
if self.use_ratio:
key = 'logz-ratio'
else:
key = 'logz'
# Constructing output.
string = list()
string.append("bound:{:d}".format(bounditer))
string.append("nc:{:3d}".format(nc))
string.append("ncall:{:.1e}".format(ncall))
string.append("eff:{:0.1f}%".format(eff))
string.append("{}={:0.2f}+/-{:0.2f}".format(key, logz, logzerr))
string.append("dlogz:{:0.3f}>{:0.2g}".format(delta_logz, dlogz))
if self.kwargs["print_method"] == "tqdm":
self.pbar.set_postfix_str(" ".join(string), refresh=False)
self.pbar.update(niter - self.pbar.n)
elif "interval" in self.kwargs["print_method"]:
formatted = " ".join([total_time_str] + string)
print("{}it [{}]".format(niter, formatted), file=sys.stdout, flush=True)
else:
formatted = " ".join([total_time_str] + string)
print("{}it [{}]".format(niter, formatted), file=sys.stdout, flush=True)
def _apply_dynesty_boundaries(self):
self._periodic = list()
self._reflective = list()
for ii, key in enumerate(self.search_parameter_keys):
if self.priors[key].boundary == 'periodic':
logger.debug("Setting periodic boundary for {}".format(key))
self._periodic.append(ii)
elif self.priors[key].boundary == 'reflective':
logger.debug("Setting reflective boundary for {}".format(key))
self._reflective.append(ii)
# The periodic kwargs passed into dynesty allows the parameters to
# wander out of the bounds, this includes both periodic and reflective.
# these are then handled in the prior_transform
self.kwargs["periodic"] = self._periodic
self.kwargs["reflective"] = self._reflective
def nestcheck_data(self, out_file):
import nestcheck.data_processing
import pickle
ns_run = nestcheck.data_processing.process_dynesty_run(out_file)
nestcheck_result = "{}/{}_nestcheck.pickle".format(self.outdir, self.label)
with open(nestcheck_result, 'wb') as file_nest:
pickle.dump(ns_run, file_nest)
def _setup_pool(self):
if self.kwargs["pool"] is not None:
logger.info("Using user defined pool.")
self.pool = self.kwargs["pool"]
elif self.kwargs["queue_size"] > 1:
logger.info(
"Setting up multiproccesing pool with {} processes.".format(
self.kwargs["queue_size"]
)
)
import multiprocessing
self.pool = multiprocessing.Pool(
processes=self.kwargs["queue_size"],
initializer=_initialize_global_variables,
initargs=(
self.likelihood,
self.priors,
self._search_parameter_keys,
self.use_ratio
)
)
else:
_initialize_global_variables(
likelihood=self.likelihood,
priors=self.priors,
search_parameter_keys=self._search_parameter_keys,
use_ratio=self.use_ratio
)
self.pool = None
self.kwargs["pool"] = self.pool
def _close_pool(self):
if getattr(self, "pool", None) is not None:
logger.info("Starting to close worker pool.")
self.pool.close()
self.pool.join()
self.pool = None
self.kwargs["pool"] = self.pool
logger.info("Finished closing worker pool.")
def run_sampler(self):
import dynesty
import dill
logger.info("Using dynesty version {}".format(dynesty.__version__))
if self.kwargs.get("sample", "rwalk") == "rwalk":
logger.info(
"Using the bilby-implemented rwalk sample method with ACT estimated walks")
dynesty.dynesty._SAMPLING["rwalk"] = sample_rwalk_bilby
dynesty.nestedsamplers._SAMPLING["rwalk"] = sample_rwalk_bilby
if self.kwargs.get("walks") > self.kwargs.get("maxmcmc"):
raise DynestySetupError("You have maxmcmc > walks (minimum mcmc)")
if self.kwargs.get("nact", 5) < 1:
raise DynestySetupError("Unable to run with nact < 1")
elif self.kwargs.get("sample") == "rwalk_dynesty":
self._kwargs["sample"] = "rwalk"
logger.info(
"Using the dynesty-implemented rwalk sample method")
elif self.kwargs.get("sample") == "rstagger_dynesty":
self._kwargs["sample"] = "rstagger"
logger.info(
"Using the dynesty-implemented rstagger | |
# Source repository: therooler/pennylane
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a function for generating generalized parameter shift rules and
helper methods for processing shift rules as well as for creating tapes with
shifted parameters."""
import functools
import itertools
import warnings
import numpy as np
import pennylane as qml
def process_shifts(rule, tol=1e-10, batch_duplicates=True):
"""Utility function to process gradient rules.
Args:
rule (array): a ``(M, N)`` array corresponding to ``M`` terms
with parameter shifts. ``N`` has to be either ``2`` or ``3``.
The first column corresponds to the linear combination coefficients;
the last column contains the shift values.
If ``N=3``, the middle column contains the multipliers.
tol (float): floating point tolerance used when comparing shifts/coefficients
Terms with coefficients below ``tol`` will be removed.
batch_duplicates (bool): whether to check the input ``rule`` for duplicate
shift values in its second column.
Returns:
array: The processed shift rule with small entries rounded to 0, sorted
with respect to the absolute value of the shifts, and groups of shift
terms with identical (multiplier and) shift fused into one term each,
if ``batch_duplicates=True``.
This utility function accepts coefficients and shift values as well as optionally
multipliers, and performs the following processing:
- Set all small (within absolute tolerance ``tol``) coefficients and shifts to 0
- Remove terms where the coefficients are 0 (including the ones set to 0 in the previous step)
- Terms with the same shift value (and multiplier) are combined into a single term.
- Finally, the terms are sorted according to the absolute value of ``shift``.
This ensures that a zero-shift term, if it exists, is returned first.
"""
# set all small coefficients, multipliers if present, and shifts to zero.
rule[np.abs(rule) < tol] = 0
# remove terms (rows) whose coefficient is 0
rule = rule[~(rule[:, 0] == 0)]
if batch_duplicates:
round_decimals = int(-np.log10(tol))
rounded_rule = np.round(rule[:, 1:], round_decimals)
# determine unique shifts or (multiplier, shift) combinations
unique_mods = np.unique(rounded_rule, axis=0)
if rule.shape[0] != unique_mods.shape[0]:
matches = np.all(rounded_rule[:, np.newaxis] == unique_mods[np.newaxis, :], axis=-1)
# TODO: The following line probably can be done in numpy
coeffs = [np.sum(rule[slc, 0]) for slc in matches.T]
rule = np.hstack([np.stack(coeffs)[:, np.newaxis], unique_mods])
# sort columns according to abs(shift)
return rule[np.argsort(np.abs(rule[:, -1]))]
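# --- Editorial example (not part of the original module) ----------------------
# A minimal sketch of ``process_shifts``: the two duplicate +pi/2 terms below are
# fused into a single term and the result is sorted by |shift| (row order for
# equal |shift| and the exact float formatting may differ):
# >>> rule = np.array([[0.25, np.pi / 2], [0.25, np.pi / 2], [-0.5, -np.pi / 2]])
# >>> process_shifts(rule)
# array([[-0.5       , -1.57079633],
#        [ 0.5       ,  1.57079633]])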
@functools.lru_cache(maxsize=None)
def eigvals_to_frequencies(eigvals):
r"""Convert an eigenvalue spectrum to frequency values, defined
as the set of positive, unique differences of the eigenvalues in the spectrum.
Args:
eigvals (tuple[int, float]): eigenvalue spectra
Returns:
tuple[int, float]: frequencies
**Example**
>>> eigvals = (-0.5, 0, 0, 0.5)
>>> eigvals_to_frequencies(eigvals)
(0.5, 1.0)
"""
unique_eigvals = sorted(set(eigvals))
return tuple({j - i for i, j in itertools.combinations(unique_eigvals, 2)})
@functools.lru_cache(maxsize=None)
def frequencies_to_period(frequencies, decimals=5):
r"""Returns the period of a Fourier series as defined
by a set of frequencies.
The period is simply :math:`2\pi/gcd(frequencies)`,
where :math:`\text{gcd}` is the greatest common divisor.
Args:
frequencies (tuple[int, float]): frequency spectrum
decimals (int): Number of decimal places to round to
if there are non-integral frequencies.
Returns:
float: the period of the Fourier series
**Example**
>>> frequencies = (0.5, 1.0)
>>> frequencies_to_period(frequencies)
12.566370614359172
"""
try:
gcd = np.gcd.reduce(frequencies)
except TypeError:
# np.gcd only supports integer frequencies
exponent = 10**decimals
frequencies = np.round(frequencies, decimals) * exponent
gcd = np.gcd.reduce(np.int64(frequencies)) / exponent
return 2 * np.pi / gcd
@functools.lru_cache(maxsize=None)
def _get_shift_rule(frequencies, shifts=None):
n_freqs = len(frequencies)
frequencies = qml.math.sort(qml.math.stack(frequencies))
freq_min = frequencies[0]
if len(set(frequencies)) != n_freqs or freq_min <= 0:
raise ValueError(
f"Expected frequencies to be a list of unique positive values, instead got {frequencies}."
)
mu = np.arange(1, n_freqs + 1)
if shifts is None: # assume equidistant shifts
shifts = (2 * mu - 1) * np.pi / (2 * n_freqs * freq_min)
equ_shifts = True
else:
shifts = qml.math.sort(qml.math.stack(shifts))
if len(shifts) != n_freqs:
raise ValueError(
f"Expected number of shifts to equal the number of frequencies ({n_freqs}), instead got {shifts}."
)
if len(set(shifts)) != n_freqs:
raise ValueError(f"Shift values must be unique, instead got {shifts}")
equ_shifts = np.allclose(shifts, (2 * mu - 1) * np.pi / (2 * n_freqs * freq_min))
if len(set(np.round(np.diff(frequencies), 10))) <= 1 and equ_shifts: # equidistant case
coeffs = (
freq_min
* (-1) ** (mu - 1)
/ (4 * n_freqs * np.sin(np.pi * (2 * mu - 1) / (4 * n_freqs)) ** 2)
)
else: # non-equidistant case
sin_matrix = -4 * np.sin(np.outer(shifts, frequencies))
det_sin_matrix = np.linalg.det(sin_matrix)
if abs(det_sin_matrix) < 1e-6:
warnings.warn(
f"Solving linear problem with near zero determinant ({det_sin_matrix}) "
"may give unstable results for the parameter shift rules."
)
coeffs = -2 * np.linalg.solve(sin_matrix.T, frequencies)
coeffs = np.concatenate((coeffs, -coeffs))
shifts = np.concatenate((shifts, -shifts)) # pylint: disable=invalid-unary-operand-type
return np.stack([coeffs, shifts]).T
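# --- Editorial example (not part of the original module) ----------------------
# For a single unit frequency this recovers the standard two-term parameter-shift
# rule, coefficients +-1/2 at shifts +-pi/2 (illustrative; exact float formatting
# may differ):
# >>> _get_shift_rule((1,))
# array([[ 0.5       ,  1.57079633],
#        [-0.5       , -1.57079633]])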
def _iterate_shift_rule_with_multipliers(rule, order, period):
r"""Helper method to repeat a shift rule that includes multipliers multiple
times along the same parameter axis for higher-order derivatives."""
combined_rules = []
for partial_rules in itertools.product(rule, repeat=order):
c, m, s = np.stack(partial_rules).T
cumul_shift = 0.0
for _m, _s in zip(m, s):
cumul_shift *= _m
cumul_shift += _s
if period is not None:
cumul_shift = np.mod(cumul_shift + 0.5 * period, period) - 0.5 * period
combined_rules.append(np.stack([np.prod(c), np.prod(m), cumul_shift]))
# combine all terms in the linear combination into a single
# array, with column order (coefficients, multipliers, shifts)
return qml.math.stack(combined_rules)
def _iterate_shift_rule(rule, order, period=None):
r"""Helper method to repeat a shift rule multiple times along the same
parameter axis for higher-order derivatives."""
if len(rule[0]) == 3:
return _iterate_shift_rule_with_multipliers(rule, order, period)
# TODO: optimization: Without multipliers, the order of shifts does not matter,
# so that we can only iterate over the symmetric part of the combined_rules tensor.
# This requires the corresponding multinomial prefactors to be included in the coeffs.
combined_rules = np.array(list(itertools.product(rule, repeat=order)))
# multiply the coefficients of each rule
coeffs = np.prod(combined_rules[..., 0], axis=1)
# sum the shifts of each rule
shifts = np.sum(combined_rules[..., 1], axis=1)
if period is not None:
# if a period is provided, make sure the shift value is within [-period/2, period/2)
shifts = np.mod(shifts + 0.5 * period, period) - 0.5 * period
return qml.math.stack([coeffs, shifts]).T
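# --- Editorial example (not part of the original module) ----------------------
# Repeating the standard first-order rule twice yields a second-derivative rule;
# fusing the four product terms with ``process_shifts`` gives the familiar form
# 0.5*f(phi - pi) - 0.5*f(phi) for a 2*pi-periodic cost (illustrative; exact
# formatting may differ):
# >>> rule = np.array([[0.5, np.pi / 2], [-0.5, -np.pi / 2]])
# >>> process_shifts(_iterate_shift_rule(rule, 2, period=2 * np.pi))
# array([[-0.5       ,  0.        ],
#        [ 0.5       , -3.14159265]])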
def _combine_shift_rules(rules):
r"""Helper method to combine shift rules for multiple parameters into
simultaneous multivariate shift rules."""
combined_rules = []
for partial_rules in itertools.product(*rules):
c, *m, s = np.stack(partial_rules).T
combined = np.concatenate([[np.prod(c)], *m, s])
combined_rules.append(np.stack(combined))
return np.stack(combined_rules)
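# --- Editorial example (not part of the original module) ----------------------
# Combining the standard single-parameter rule with itself gives the bivariate
# rule for a mixed second derivative: four terms, each row holding
# (coefficient, shift for parameter 1, shift for parameter 2), i.e. coefficients
# +-0.25 at shifts (+-pi/2, +-pi/2):
# >>> rule = np.array([[0.5, np.pi / 2], [-0.5, -np.pi / 2]])
# >>> _combine_shift_rules([rule, rule]).shape
# (4, 3)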
@functools.lru_cache()
def generate_shift_rule(frequencies, shifts=None, order=1):
r"""Computes the parameter shift rule for a unitary based on its generator's eigenvalue
frequency spectrum.
To compute gradients of circuit parameters in variational quantum algorithms, expressions for
cost function first derivatives with respect to the variational parameters can be cast into
linear combinations of expectation values at shifted parameter values. The coefficients and
shifts defining the linear combination can be obtained from the unitary generator's eigenvalue
frequency spectrum. Details can be found in
`Wierichs et al. (2022) <https://doi.org/10.22331/q-2022-03-30-677>`__.
Args:
frequencies (tuple[int or float]): The tuple of eigenvalue frequencies. Eigenvalue
frequencies are defined as the unique positive differences obtained from a set of
eigenvalues.
shifts (tuple[int or float]): the tuple of shift values. If unspecified,
equidistant shifts are assumed. If supplied, the length of this tuple should match the
number of given frequencies.
order (int): the order of differentiation to compute the shift rule for
Returns:
tuple: a tuple of coefficients and shifts describing the gradient rule for the
parameter-shift method. For parameter :math:`\phi`, the coefficients :math:`c_i` and the
shifts :math:`s_i` combine to give a gradient rule of the following form:
.. math:: \frac{\partial}{\partial\phi}f = \sum_{i} c_i f(\phi + s_i).
where :math:`f(\phi) = \langle 0|U(\phi)^\dagger \hat{O} U(\phi)|0\rangle`
for some observable :math:`\hat{O}` and the unitary :math:`U(\phi)=e^{iH\phi}`.
Raises:
ValueError: if ``frequencies`` is not a list of unique positive values, or if
# -*- coding: utf-8 -*-
"""
Things to know:
* raw subprocess calls like .communicate expect bytes
* the Command wrappers encapsulate the bytes and expose unicode
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import json
import os
import re
import tempfile
import uuid
import mock
import pytest
from builtins import str
from django.test import TestCase
from django_dynamic_fixture import get
from docker.errors import APIError as DockerAPIError
from docker.errors import DockerException
from mock import Mock, PropertyMock, mock_open, patch
from readthedocs.builds.constants import BUILD_STATE_CLONING
from readthedocs.builds.models import Version
from readthedocs.doc_builder.config import load_yaml_config
from readthedocs.doc_builder.environments import (
BuildCommand,
DockerBuildCommand,
DockerBuildEnvironment,
LocalBuildEnvironment,
)
from readthedocs.doc_builder.exceptions import BuildEnvironmentError
from readthedocs.doc_builder.python_environments import Conda, Virtualenv
from readthedocs.projects.models import Project
from readthedocs.rtd_tests.mocks.environment import EnvironmentMockGroup
from readthedocs.rtd_tests.mocks.paths import fake_paths_lookup
from readthedocs.rtd_tests.tests.test_config_integration import create_load
DUMMY_BUILD_ID = 123
SAMPLE_UNICODE = u'HérÉ îß sömê ünïçó∂é'
SAMPLE_UTF8_BYTES = SAMPLE_UNICODE.encode('utf-8')
class TestLocalBuildEnvironment(TestCase):
"""Test execution and exception handling in environment."""
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version, bulk=False)
self.mocks = EnvironmentMockGroup()
self.mocks.start()
def tearDown(self):
self.mocks.stop()
def test_normal_execution(self):
"""Normal build in passing state."""
self.mocks.configure_mock('process', {
'communicate.return_value': (b'This is okay', '')
})
type(self.mocks.process).returncode = PropertyMock(return_value=0)
build_env = LocalBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
with build_env:
build_env.run('echo', 'test')
self.assertTrue(self.mocks.process.communicate.called)
self.assertTrue(build_env.done)
self.assertTrue(build_env.successful)
self.assertEqual(len(build_env.commands), 1)
self.assertEqual(build_env.commands[0].output, u'This is okay')
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# The command was saved
command = build_env.commands[0]
self.mocks.mocks['api_v2.command'].post.assert_called_once_with({
'build': DUMMY_BUILD_ID,
'command': command.get_command(),
'description': command.description,
'output': command.output,
'exit_code': 0,
'start_time': command.start_time,
'end_time': command.end_time,
})
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': True,
'project': self.project.pk,
'setup_error': u'',
'length': mock.ANY,
'error': '',
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
'exit_code': 0,
})
def test_command_not_recorded(self):
"""Normal build in passing state with no command recorded."""
self.mocks.configure_mock('process', {
'communicate.return_value': (b'This is okay', '')
})
type(self.mocks.process).returncode = PropertyMock(return_value=0)
build_env = LocalBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
with build_env:
build_env.run('echo', 'test', record=False)
self.assertTrue(self.mocks.process.communicate.called)
self.assertTrue(build_env.done)
self.assertTrue(build_env.successful)
self.assertEqual(len(build_env.commands), 0)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# The command was not saved
self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': True,
'project': self.project.pk,
'setup_error': '',
'length': mock.ANY,
'error': '',
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
})
def test_record_command_as_success(self):
self.mocks.configure_mock('process', {
'communicate.return_value': (b'This is okay', '')
})
type(self.mocks.process).returncode = PropertyMock(return_value=1)
build_env = LocalBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
with build_env:
build_env.run('echo', 'test', record_as_success=True)
self.assertTrue(self.mocks.process.communicate.called)
self.assertTrue(build_env.done)
self.assertTrue(build_env.successful)
self.assertEqual(len(build_env.commands), 1)
self.assertEqual(build_env.commands[0].output, u'This is okay')
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# The command was saved
command = build_env.commands[0]
self.mocks.mocks['api_v2.command'].post.assert_called_once_with({
'build': DUMMY_BUILD_ID,
'command': command.get_command(),
'description': command.description,
'output': command.output,
'exit_code': 0,
'start_time': command.start_time,
'end_time': command.end_time,
})
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': True,
'project': self.project.pk,
'setup_error': u'',
'length': mock.ANY,
'error': '',
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
'exit_code': 0,
})
def test_incremental_state_update_with_no_update(self):
"""Build updates to a non-finished state when update_on_success=True."""
build_envs = [
LocalBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
),
LocalBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
update_on_success=False,
),
]
for build_env in build_envs:
with build_env:
build_env.update_build(BUILD_STATE_CLONING)
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'project': self.project.pk,
'setup_error': '',
'length': mock.ANY,
'error': '',
'setup': '',
'output': '',
'state': BUILD_STATE_CLONING,
'builder': mock.ANY,
})
self.assertIsNone(build_env.failure)
# The build failed before executing any command
self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
def test_failing_execution(self):
"""Build in failing state."""
self.mocks.configure_mock('process', {
'communicate.return_value': (b'This is not okay', '')
})
type(self.mocks.process).returncode = PropertyMock(return_value=1)
build_env = LocalBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
with build_env:
build_env.run('echo', 'test')
self.fail('This should be unreachable')
self.assertTrue(self.mocks.process.communicate.called)
self.assertTrue(build_env.done)
self.assertTrue(build_env.failed)
self.assertEqual(len(build_env.commands), 1)
self.assertEqual(build_env.commands[0].output, u'This is not okay')
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# The command was saved
command = build_env.commands[0]
self.mocks.mocks['api_v2.command'].post.assert_called_once_with({
'build': DUMMY_BUILD_ID,
'command': command.get_command(),
'description': command.description,
'output': command.output,
'exit_code': 1,
'start_time': command.start_time,
'end_time': command.end_time,
})
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': False,
'project': self.project.pk,
'setup_error': u'',
'length': mock.ANY,
'error': '',
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
'exit_code': 1,
})
def test_failing_execution_with_caught_exception(self):
"""Build in failing state with BuildEnvironmentError exception."""
build_env = LocalBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
with build_env:
raise BuildEnvironmentError('Foobar')
self.assertFalse(self.mocks.process.communicate.called)
self.assertEqual(len(build_env.commands), 0)
self.assertTrue(build_env.done)
self.assertTrue(build_env.failed)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# The build failed before executing any command
self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': False,
'project': self.project.pk,
'setup_error': '',
'length': mock.ANY,
'error': 'Foobar',
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
'exit_code': 1,
})
def test_failing_execution_with_unexpected_exception(self):
"""Build in failing state with exception from code."""
build_env = LocalBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
with build_env:
raise ValueError('uncaught')
self.assertFalse(self.mocks.process.communicate.called)
self.assertTrue(build_env.done)
self.assertTrue(build_env.failed)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# The build failed before executing any command
self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': False,
'project': self.project.pk,
'setup_error': '',
'length': mock.ANY,
'error': (
'There was a problem with Read the Docs while building your '
'documentation. Please try again later. However, if this '
'problem persists, please report this to us with your '
'build id (123).'
),
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
})
class TestDockerBuildEnvironment(TestCase):
"""Test docker build environment."""
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version, bulk=False)
self.mocks = EnvironmentMockGroup()
self.mocks.start()
def tearDown(self):
self.mocks.stop()
def test_container_id(self):
"""Test docker build command."""
docker = DockerBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
self.assertEqual(docker.container_id, 'build-123-project-6-pip')
def test_environment_successful_build(self):
"""A successful build exits cleanly and reports the build output."""
build_env = DockerBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
with build_env:
pass
self.assertTrue(build_env.successful)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# No commands were executed
self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': True,
'project': self.project.pk,
'setup_error': '',
'length': 0,
'error': '',
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
})
def test_environment_successful_build_without_update(self):
"""A successful build exits cleanly and doesn't update build."""
build_env = DockerBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
update_on_success=False,
)
with build_env:
pass
self.assertTrue(build_env.successful)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# No commands were executed
self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
self.assertFalse(self.mocks.mocks['api_v2.build']().put.called)
def test_environment_failed_build_without_update_but_with_error(self):
"""A failed build exits cleanly and doesn't update build."""
build_env = DockerBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
update_on_success=False,
)
with build_env:
raise BuildEnvironmentError('Test')
self.assertFalse(build_env.successful)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# No commands were executed
self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': False,
'project': self.project.pk,
'setup_error': '',
'exit_code': 1,
'length': 0,
'error': 'Test',
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
})
def test_connection_failure(self):
"""Connection failure on to docker socket should raise exception."""
self.mocks.configure_mock('docker', {'side_effect': DockerException})
build_env = DockerBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
def _inner():
with build_env:
self.fail('Should not hit this')
self.assertRaises(BuildEnvironmentError, _inner)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# No commands were executed
self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': False,
'project': self.project.pk,
'setup_error': '',
'exit_code': 1,
'length': 0,
'error': (
'There was a problem with Read the Docs while building your '
'documentation. Please try again later. However, if this '
'problem persists, please report this to us with your '
'build id (123).'
),
'setup': '',
'output': '',
'state': 'finished',
'builder': mock.ANY,
})
def test_api_failure(self):
"""Failing API error response from docker should raise exception."""
response = Mock(status_code=500, reason='Because')
self.mocks.configure_mock(
'docker_client', {
'create_container.side_effect': DockerAPIError(
'Failure creating container', response,
'Failure creating container')
})
build_env = DockerBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
def _inner():
with build_env:
self.fail('Should not hit this')
self.assertRaises(BuildEnvironmentError, _inner)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# No commands were executed
self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': False,
'project': self.project.pk,
'setup_error': u'',
'exit_code': 1,
'length': mock.ANY,
'error': 'Build environment creation failed',
'setup': u'',
'output': u'',
'state': u'finished',
'builder': mock.ANY,
})
def test_api_failure_on_docker_memory_limit(self):
"""Docker exec_create raised memory issue on `exec`"""
response = Mock(status_code=500, reason='Internal Server Error')
self.mocks.configure_mock(
'docker_client', {
'exec_create.side_effect': DockerAPIError(
'Failure creating container', response,
'Failure creating container'),
})
build_env = DockerBuildEnvironment(
version=self.version,
project=self.project,
build={'id': DUMMY_BUILD_ID},
)
with build_env:
build_env.run('echo test', cwd='/tmp')
self.assertEqual(build_env.commands[0].exit_code, -1)
self.assertEqual(build_env.commands[0].error, None)
self.assertTrue(build_env.failed)
# api() is not called anymore, we use api_v2 instead
self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
# The command was saved
command = build_env.commands[0]
self.mocks.mocks['api_v2.command'].post.assert_called_once_with({
'build': DUMMY_BUILD_ID,
'command': command.get_command(),
'description': command.description,
'output': command.output,
'exit_code': -1,
'start_time': command.start_time,
'end_time': command.end_time,
})
self.mocks.mocks['api_v2.build']().put.assert_called_with({
'id': DUMMY_BUILD_ID,
'version': self.version.pk,
'success': False,
'project': self.project.pk,
'setup_error': u'',
'exit_code': -1,
'length': mock.ANY,
'error': '',
'setup': u'',
'output': u'',
'state': 'finished',
# param_sm_pdf.py (source repository: maoyab/OWUS)
import numpy as np
from scipy.interpolate import interp1d
from scipy.stats import ks_2samp, percentileofscore
from sswm import SM_C_H
class Inverse_bayesian_fitting(object):
def __init__(self, s_obs, unknown_params, save_params, p_ranges, epsi=None, stress_type='dynamic', model_type='A', nbr_sim=20000, burnin=1. / 2., nbins=100):
self.nbins = nbins
self.epsi = epsi
self.stress_type = stress_type
self.s_obs = s_obs
self.burnin = burnin
np.random.seed()
self.nbr_sim = nbr_sim
self.model_type = model_type
self.SM_PDF = SM_C_H
self.full_param_name_list = ['T_GS', 'rf_alpha', 'rf_lambda', 'Eo', 'Td',
'LAI', 'RAI', 'hc', 'Zm',
'Ps0', 'b', 'Ks', 'n', 's_h', 's_fc',
'k_xl_max', 'Px50', 'Pg50']
self.unknown_params = unknown_params
self.save_params = save_params
self.p_ranges = p_ranges
def mcmc_mh(self, theta0_dict):
accepted = 0.
(li, theta_i) = self.init_random_model(theta0_dict)
if (li, theta_i) != (np.nan, np.nan):
result = [None] * (int(self.nbr_sim * (1 - self.burnin)))
for it in range(self.nbr_sim):
(acc, l_acc, theta_acc) = self.mcmc_mh_criteria(li, theta_i)
if it >= self.nbr_sim * self.burnin:
theta_list = [getattr(theta_acc, vv) for vv in self.save_params]
result[int(it - self.nbr_sim * self.burnin)] = [l_acc, theta_list]
accepted = accepted + acc
li, theta_i = l_acc, theta_acc
return result, accepted / (self.nbr_sim * (1 - self.burnin)) * 100.
else:
return [np.nan], 'max init random'
def init_random_model(self, params0_dict, maxcount=1000):
params = self.make_random_model(params0_dict)
while_count = 0
while self.test_model_consistency(params) < 1 and while_count < maxcount:
params = self.make_random_model(params0_dict)
while_count = while_count + 1
if while_count < maxcount:
if self.epsi is None:
smpdf = self.SM_PDF(params, nbins=self.nbins, stress_type=self.stress_type)
l = self.eval_loglikelihood(smpdf.p0, self.s_obs, params)
else:
smpdf = self.SM_PDF(params, nbins=self.nbins, q=self.epsi, stress_type=self.stress_type)
l = smpdf.epsilon
if l == 0 or np.isnan(l):
l = -np.inf
while_count = 0
while l == - np.inf and while_count < maxcount:
while_count = while_count + 1
params = self.make_random_model(params0_dict)
while self.test_model_consistency(params) < 1:
params = self.make_random_model(params0_dict)
if self.epsi is None:
smpdf = self.SM_PDF(params, nbins=self.nbins, stress_type=self.stress_type)
l = self.eval_loglikelihood(smpdf.p0, self.s_obs, params)
else:
smpdf = self.SM_PDF(params, nbins=self.nbins, q=self.epsi, stress_type=self.stress_type)
l = smpdf.epsilon
if while_count < maxcount:
return l, smpdf
else:
print('x', params)
return np.nan, np.nan
else:
print('x', params)
return np.nan, np.nan
def test_model_consistency(self, params):
[T_GS, rf_alpha, rf_lambda, Eo, Td,
LAI, RAI, hc, Zm,
Ps0, b, Ks, n, s_h, s_fc,
k_xl_max, Px50, Pg50,
] = params
params_ = [T_GS, rf_alpha, rf_lambda, Eo, Td,
LAI, RAI, hc, Zm,
-Ps0, b, Ks, n, s_h, s_fc,
k_xl_max, -Px50, -Pg50]
pi_R = Pg50 / Px50
K_p_max = k_xl_max * LAI / hc * Td / 1000
pi_F = -Eo / (K_p_max * Pg50)
x = (pi_F / 2 + 1) ** 2 - 2 * pi_F * pi_R
beta_ww = 1 - 1 / (2 * pi_R) * (1 + pi_F / 2 - x ** 0.5)
lnan = len([k for k in params if np.isnan(k)])
lneg = len([k for k in params_ if k < 0])
if Px50 > self.p_ranges['Px50'][1] or \
Px50 < self.p_ranges['Px50'][0] or \
Pg50 > self.p_ranges['Pg50'][1] or \
Pg50 < self.p_ranges['Pg50'][0] or \
k_xl_max > self.p_ranges['k_xl_max'][1] or \
k_xl_max < self.p_ranges['k_xl_max'][0] or \
RAI > self.p_ranges['RAI'][1] or \
RAI < self.p_ranges['RAI'][0] or \
beta_ww > self.p_ranges['beta_ww'][1] or \
beta_ww < self.p_ranges['beta_ww'][0] or \
pi_R > self.p_ranges['pi_R'][1] or \
pi_R < self.p_ranges['pi_R'][0] or \
pi_F > self.p_ranges['pi_F'][1] or \
pi_F < self.p_ranges['pi_F'][0] or \
pi_R > self.p_ranges['pi_R'][1] or \
pi_R < self.p_ranges['pi_R'][0] or \
lneg > 0 or \
lnan > 0:
test = 0
else:
test = 1
return test
def eval_logps(self, p0, s_eval, params):
def __ev(s):
p = p0[int(np.rint(s * l))]
return np.log(p)
l = (len(p0) - 1)
if s_eval != []:
return [__ev(s) for s in s_eval]
else:
return [-np.inf]
def eval_loglikelihood(self, p0, s_eval, params):
p = self.eval_logps(p0, s_eval, params)
return np.sum(p)
def mcmc_mh_criteria(self, li, theta_i):
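# Editorial note: Metropolis-Hastings acceptance -- the proposal is accepted
# outright when its log-likelihood lii exceeds the current li, otherwise it is
# accepted with probability exp(lii - li); rejected proposals keep the current
# state (li, theta_i).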
lii, theta_ii = self.eval_mh_model(theta_i)
if theta_ii == []:
return [0, li, theta_i]
elif lii > li:
return [1, lii, theta_ii]
elif np.random.uniform(0.0, 1.0) < np.exp(lii - li):
return [1, lii, theta_ii]
else:
return [0, li, theta_i]
def eval_mh_model(self, theta0):
if self.epsi is None:
w = 0.02
else:
w = 0.2
params = self.make_mh_model(theta0, w=w)
if self.test_model_consistency(params) == 1:
if self.epsi is None:
smpdf = self.SM_PDF(params, nbins=self.nbins, stress_type=self.stress_type)
if (smpdf.et_ok == 1) and (np.isnan(smpdf.et_ok) == 0):
l = self.eval_loglikelihood(smpdf.p0, self.s_obs, params)
else:
l = np.nan
else:
smpdf = self.SM_PDF(params, nbins=self.nbins, q=self.epsi, stress_type=self.stress_type)
if (smpdf.et_ok == 1) and (np.isnan(smpdf.et_ok) == 0):
l = smpdf.epsilon
else:
l = np.nan
if np.isnan(l) or l == 0:
l = -np.inf
smpdf = []
else:
l = -np.inf
smpdf = []
return l, smpdf
def make_mh_model(self, theta0, w=0.02):
params = []
for vi in self.full_param_name_list:
if vi in self.unknown_params:
params.append(np.random.normal(getattr(theta0, vi), w * (self.p_ranges[vi][1] - self.p_ranges[vi][0])))
else:
params.append(getattr(theta0, vi))
return params
def make_random_model(self, params0):
params = []
for vi in self.full_param_name_list:
if vi in self.unknown_params:
params.append(np.random.uniform(self.p_ranges[vi][0], self.p_ranges[vi][1]))
else:
params.append(params0[vi])
return params
class Processor(object):
def __init__(self, model_params_estimate, save_params, epsi=None, model_type='A', nbins=100, stress_type='dynamic'):
self.model_type = model_type
self.nbins = nbins
self.epsi = epsi
self.model_params_estimate = model_params_estimate
self.save_params = save_params
self.stress_type = stress_type
self.full_param_name_list = ['T_GS', 'rf_alpha', 'rf_lambda', 'Eo', 'Td',
'LAI', 'RAI', 'hc', 'Zm',
'Ps0', 'b', 'Ks', 'n', 's_h', 's_fc',
'k_xl_max', 'Px50', 'Pg50']
def get_mcmc_mh_results(self, s_obs, params_dict0, p_ranges,
nbr_sim=20000, num_pl=3, burnin=0.5,
efficiency_lim=[0.05, 90]):
max_num_pl = 3 * num_pl
pl_results = []
fail_conv_count = 0
fail_eff_count = 0
it_count = 0
while (len(pl_results) < num_pl) and (it_count < max_num_pl):
it_count = it_count + 1
bf = Inverse_bayesian_fitting(s_obs,
self.model_params_estimate, self.save_params,
p_ranges,
epsi=self.epsi,
stress_type=self.stress_type,
nbr_sim=nbr_sim,
burnin=burnin,
model_type=self.model_type,
nbins=self.nbins)
x = bf.mcmc_mh(params_dict0)
if x[1] != 'max init random':
result, efficiency = x
if (efficiency >= efficiency_lim[0]) and (efficiency < efficiency_lim[1]):
pl_results.append(x)
else:
fail_eff_count = fail_eff_count + 1
else:
print('max init random')
print('it', it_count, len(pl_results), efficiency, fail_eff_count, num_pl)
return pl_results, it_count, fail_conv_count, fail_eff_count
def check_int_convergeance(self, pl_results0, it, gr_th=1.1, max_num_pl=10):
pl_results, efficiency = zip(*pl_results0)
loglikelihood = [list(zip(*r))[0] for r in pl_results]
estimated_params = [zip(*list(zip(*r))[1]) for r in pl_results]
estimated_params = zip(*estimated_params)
gr_lk = self.gelman_rubin_diagnostic([x for x in loglikelihood])
lk_mean = [np.mean(x) for x in loglikelihood]
gr_list = []
for p, est_ in zip(self.model_params_estimate, estimated_params):
gr = self.gelman_rubin_diagnostic([x for x in est_])
gr_list.append(gr)
pl_results_r = pl_results0
it_conv = 0
return pl_results_r, it_conv
def gelman_rubin_diagnostic(self, results):
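# Editorial note: this computes the Gelman-Rubin convergence diagnostic for k
# chains of length n, R_hat = sqrt(((n - 1)/n * w + b/n) / w), where b is the
# (scaled) between-chain variance and w is the within-chain variance.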
k = float(len(results))
n = float(len(results[0]))
means = [np.mean(r) for r in results]
all_mean = np.mean(means)
b = n / (k - 1) * \
np.sum([(mi - all_mean) ** 2 for mi in means])
w = 1. / (k * (n-1)) * \
np.sum([(ri - mi) ** 2 for (result, mi) in zip(results, means) for ri in result])
return ((w * (n - 1) / n + b / n) / w) ** 0.5
def process_raw_results(self, result_dict, pl_results, p_ranges, outfile_format='short'):
def __nse(obs, mod):
mo = np.mean(obs)
a = np.sum([(mi - oi) ** 2 for mi, oi in zip(mod, obs)])
b = np.sum([(oi - mo) ** 2 for oi in obs])
return 1 - a / b
SM_PDF = SM_C_H
pl_results, efficiency = zip(*pl_results)
result_dict['efficiency_estimates'] = efficiency
result_dict['efficiency'] = np.nanmean(efficiency)
loglikelihood = [list(zip(*r))[0] for r in pl_results]
estimated_params = [zip(*list(zip(*r))[1]) for r in pl_results]
estimated_params = zip(*estimated_params)
result_dict['loglikelihood_estimates'] = loglikelihood
result_dict['loglikelihood'] = np.median([np.median(llk) for llk in loglikelihood])
gr = self.gelman_rubin_diagnostic([x for x in loglikelihood])
result_dict['loglikelihood_grd'] = gr
for p, est_ in zip(self.save_params, estimated_params):
gr = self.gelman_rubin_diagnostic([x for x in est_])
result_dict['%s_grd' % p] = gr
eflat = np.array(est_).flatten()
lflat = np.array(loglikelihood).flatten()
e_maxl = eflat[list(lflat).index(np.nanmax(lflat))]
result_dict['%s_maxmaxlike' % p] = e_maxl
result_dict['%s_median' % p] = np.median(eflat)
result_dict['%s_mean' % p] = np.mean(eflat)
result_dict['%s_std' % p] = np.std(eflat)
result_dict['%s_estimates' % p] = est_
if self.epsi is None:
result_dict['%s' % p] = np.median(eflat)
else:
result_dict['%s' % p] = e_maxl
theta = [result_dict[vi] for vi in self.full_param_name_list]
smpdf = SM_PDF(theta, nbins=self.nbins, stress_type=self.stress_type)
p_fitted_norm = smpdf.p0
cdf = np.cumsum(p_fitted_norm)
cdf_m_n = cdf / np.max(cdf)
f = interp1d(cdf, np.linspace(0, 1, len(p_fitted_norm)))
random_p = [np.random.uniform(0, 1) for r in range(365)]
fit_s = np.array(f(random_p))
(kstat, kstatp) = ks_2samp(result_dict['s_obs'], fit_s)
q_obs = [percentileofscore(result_dict['s_obs'], s_obs_i, 'weak') / 100. for s_obs_i in result_dict['s_obs']]
s_mod = [(np.abs(cdf_m_n - qi)).argmin() / float(len(p_fitted_norm) - 1) for qi in q_obs]
result_dict['NSE_O'] = __nse(result_dict['s_obs'], s_mod)
s_mod_2 = [(np.abs(cdf_m_n - qi
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 13:37:29 2018
Class for implementing the scores for the composition UI and also the display image
with all the scores.
@author: <NAME>
"""
import cv2
import numpy as np
import itertools
from scipy.spatial import distance as dist
from skimage.measure import compare_ssim as ssim
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
import pandas as pd
from SaliencyMap import Saliency
class AutoScoreML ():
def __init__(self, extractedFeatures ):
self.df = pd.DataFrame(np.array(extractedFeatures))
def autoScoreML(self):
filepath_01 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoringApril30.csv'
filepath_02 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoringApril30_B.csv'
# =============================================================================
# filepath_03 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring20apr2018_c.csv'
# filepath_04 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring20apr2018_d.csv'
# filepath_05 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring20apr2018_e.csv'
# filepath_06 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring21apr2018_a.csv'
# filepath_07 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring21apr2018_b.csv'
# filepath_08 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring21apr2018_c.csv'
# filepath_09 = 'D:\\google drive\A PhD Project at Godlsmiths\ArtistSupervisionMaya\csv_file\scoring22apr2018_a.csv'
#
# =============================================================================
df_01 = pd.read_csv(filepath_01)
df_02 = pd.read_csv(filepath_02)
# =============================================================================
# df_03 = pd.read_csv(filepath_03)
# df_04 = pd.read_csv(filepath_04)
# df_05 = pd.read_csv(filepath_05)
# df_06 = pd.read_csv(filepath_06)
# df_07 = pd.read_csv(filepath_07)
# df_08 = pd.read_csv(filepath_08)
# df_09 = pd.read_csv(filepath_09)
# =============================================================================
frames= [df_01, df_02
#,df_03, df_04, df_05, df_06, df_07, df_08, df_09
]
df = pd.concat(frames)
df.reset_index(drop = True, inplace = True)
# drop the Null Value
df.dropna(inplace=True)
# select the features to use:
df.drop(['file', 'CompositionUserChoice'], axis=1, inplace=True)
X_train = df.drop('judge', axis = 1)
#y = df['judge']
X_test = self.df
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# construct the ANN
# import the Keras Library and the required packages
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import os
# load json and create model
json_file = open("D:\\google drive\\A PhD Project at Godlsmiths\\ArtistSupervisionProject\\code\\classifier.json", 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("D:\\google drive\\A PhD Project at Godlsmiths\\ArtistSupervisionProject\\code\\classifier.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# =============================================================================
# score = loaded_model.evaluate(X_test, y_test, verbose=0)
# print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
# =============================================================================
# predict the test set results
# =============================================================================
y_pred = loaded_model.predict(X_test)
for y in y_pred:
res = np.argmax(y)
return res
class CompositionAnalysis ():
def __init__ (self, image = None, imagepath = None, mask = None):
if imagepath:
self.image = cv2.imread(imagepath)
self.imagepath = imagepath
else:
self.image = image
self.totalPixels = self.image.size
self.gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
# =============================================================================
#
# def _borderCut(self, borderCutted):
#
#
# borderCutted[0:2, :] = 0
# borderCutted[-2:self.image.shape[0], :] = 0
# borderCutted[:, 0:2] = 0
# borderCutted[:, -2:self.image.shape[1]] = 0
#
# return borderCutted
# =============================================================================
def synthesisScores (self):
# return the display image for the UI
rows, cols, depth = self.image.shape
scoreSynthesisImg = np.zeros(self.image.shape, dtype="uint8")
# make solid color for the background
scoreSynthesisImg[:] = (218,218,218)
cv2.line(scoreSynthesisImg, ( int(self.image.shape[1] * 0.6), 20), ( int(self.image.shape[1] * 0.6),self.image.shape[0]), (50,50,140), 1)
cv2.line(scoreSynthesisImg, ( int(self.image.shape[1] * 0.75), 20), ( int(self.image.shape[1] * 0.75),self.image.shape[0]), (60,140,90), 1)
# collect the balance scores:
VisualBalanceScore = ( self.scoreVisualBalance + self.scoreHullBalance ) / 2
# corner balance and line
lineandcornerBalance = (self.cornersBalance + self.verticalandHorizBalanceMean ) / 2
# collect the rhythm scores:
#asymmetry = (self.scoreFourier + self.verticalandHorizBalanceMean + self.ssimAsymmetry) / 3
asymmetry = (self.ssimAsymmetry +self.diagonalAsymmetry) / 2
scoreFourier = self.scoreFourier
# collect the gold proportion scores:
goldScore = self.scoreProportionAreaVsGoldenRatio
#score composition
scoreCompMax = max(self.diagonalasymmetryBalance, self.ScoreFourTriangleAdapted,self.ScoreBigTriangle)
ruleOfThird = self.ScoreRuleOfThird
# diagonal balance composition
#diagonalasymmetryBalance = self.diagonalasymmetryBalance
# spiral
spiralScore = self.scoreSpiralGoldenRatio
# fractal
fractalScoreFromTarget = self.fractalScoreFromTarget
cv2.putText(scoreSynthesisImg, "Balance", (20, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[20:24, 10:int(VisualBalanceScore*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Rule of Third", (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[35:39, 10:int(ruleOfThird*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Composition Max", (20, 45), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[50:54, 10:int(scoreCompMax*cols*0.9)] = (120,60,120)
#cv2.putText(scoreSynthesisImg, "Diagonal Comp", (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
#scoreSynthesisImg[65:70, 10:int(diagonalasymmetryBalance*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Spiral ", (20, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[80:84, 10:int(spiralScore*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Asymmetry ", (20, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[95:99, 10:int(asymmetry*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Fourier ", (20, 105), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[110:114, 10:int(scoreFourier*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "CornerLinesBalance ", (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[125:129, 10:int(lineandcornerBalance*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Proportion ", (20, 135), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[140:144, 10:int(goldScore*cols*0.9)] = (120,60,120)
cv2.putText(scoreSynthesisImg, "Fractal ", (20, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
scoreSynthesisImg[155:159, 10:int(fractalScoreFromTarget*cols*0.9)] = (120,60,120)
#cv2.putText(scoreSynthesisImg, "Balance, asymmetry, Proportion, corner, spiral ", (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
#cv2.putText(scoreSynthesisImg, "Possible Comp: {} ".format(selectedComp), (10, 110), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (50, 50, 50), 1)
return scoreSynthesisImg
def fourierOnEdgesDisplay (self):
ImgImpRegionA, contours, keypoints = self._orbSegmentation ( maxKeypoints = 10000, edged = False, edgesdilateOpen = True, method = cv2.RETR_EXTERNAL)
cropped_img_lf = ImgImpRegionA[0:int(ImgImpRegionA.shape[0]), 0: int(ImgImpRegionA.shape[1] / 2) ]
cropped_img_rt = ImgImpRegionA[0:int(ImgImpRegionA.shape[0]), int(ImgImpRegionA.shape[1] / 2): ImgImpRegionA.shape[1] ]
#imgDftGray = self._returnDFT(ImgImpRegionA)
imgDftGraylf = self._returnDFT(cropped_img_lf)
imgDftGrayRt = self._returnDFT(cropped_img_rt)
# number of pixels in left and number of pixels in right
numberOfWhite_lf = (imgDftGraylf>0).sum()
numberOfWhite_Rt = (imgDftGrayRt > 0).sum()
# create the stitched picture
stichedDft = self.image.copy()
stichedDft = np.concatenate((imgDftGraylf,imgDftGrayRt ), axis = 1)
score = (abs(numberOfWhite_lf - numberOfWhite_Rt)) / (numberOfWhite_lf + numberOfWhite_Rt)
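# Editorial note: `score` is the normalised absolute difference in white-pixel
# counts between the left- and right-half DFT magnitude images (0 for identical
# halves, approaching 1 for maximal left/right asymmetry).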
# to penalise the change in rhythm
scoreFourier = np.exp(-score * self.image.shape[0]/2)
#cv2.putText(stichedDft, "diff: {:.3f}".format(score), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 1)
self.scoreFourier = scoreFourier
return stichedDft, scoreFourier
def _returnDFT (self, imageForDft):
ImgImpRegionA = imageForDft
ImgImpRegionA = cv2.cvtColor(ImgImpRegionA, cv2.COLOR_BGR2GRAY)
#dft = cv2.dft(np.float32(self.gray),flags = cv2.DFT_COMPLEX_OUTPUT)
dft = cv2.dft(np.float32(ImgImpRegionA),flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
cv2.normalize( magnitude_spectrum, magnitude_spectrum, alpha = 0 , beta = 1 , norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
imgDftGray = np.array(magnitude_spectrum * 255, dtype = np.uint8)
meanThres = np.mean(imgDftGray)
_, imgDftGray = cv2.threshold(imgDftGray,meanThres, 255, cv2.THRESH_BINARY)
imgDftGray = cv2.cvtColor(imgDftGray, cv2.COLOR_GRAY2BGR)
return imgDftGray
def HOGcompute (self):
gray = self.gray.copy()
# h x w in pixels
cell_size = (8, 8)
# h x w in cells
block_size = (2, 2)
# number of orientation bins
nbins = 9
# Using OpenCV's HOG Descriptor
# winSize is the size of the image cropped to a multiple of the cell size
hog = cv2.HOGDescriptor(_winSize=(gray.shape[1] // cell_size[1] * cell_size[1],
gray.shape[0] // cell_size[0] * cell_size[0]),
_blockSize=(block_size[1] * cell_size[1],
block_size[0] * cell_size[0]),
_blockStride=(cell_size[1], cell_size[0]),
_cellSize=(cell_size[1], cell_size[0]),
_nbins=nbins)
# Create numpy array shape which we use to create hog_feats
n_cells = (gray.shape[0] // cell_size[0], gray.shape[1] // cell_size[1])
# We index blocks by rows first.
# hog_feats now contains the gradient amplitudes for each direction,
# for each cell of its group for each group. Indexing is by rows then columns.
hog_feats = hog.compute(gray).reshape(n_cells[1] - block_size[1] + 1,
n_cells[0] - block_size[0] + 1,
block_size[0], block_size[1], nbins).transpose((1, 0, 2, 3, 4))
# Create our gradients array with nbin dimensions to store gradient orientations
gradients = np.zeros((n_cells[0], n_cells[1], nbins))
# Create array of dimensions
cell_count = np.full((n_cells[0], n_cells[1], 1), 0, dtype=int)
# Block Normalization
for off_y in range(block_size[0]):
for off_x in range(block_size[1]):
gradients[off_y:n_cells[0] - block_size[0] + off_y + 1,
off_x:n_cells[1] - block_size[1] + off_x + 1] += \
hog_feats[:, :, off_y, off_x, :]
cell_count[off_y:n_cells[0] - block_size[0] + off_y + 1,
off_x:n_cells[1] - block_size[1] + off_x + 1] += 1
# Average gradients
gradients /= cell_count
# =============================================================================
# # Plot HOGs using Matplotlib
# # angle is 360 / nbins * direction
# print (gradients.shape)
#
# color_bins = 5
# plt.pcolor(gradients[:, :, color_bins])
# plt.gca().invert_yaxis()
# plt.gca().set_aspect('equal', adjustable='box')
# plt.colorbar()
# plt.show()
# cv2.destroyAllWindows()
# =============================================================================
return
def goldenProportionOnCnts(self, numberOfCnts = 25, method = cv2.RETR_CCOMP, minArea = 2):
edgedForProp = self._edgeDetection( scalarFactor = 1, meanShift = 0, edgesdilateOpen = True, kernel = 3)
goldenPropImg = self.image.copy()
# create the contours from the segmented image
------
InvalidOutcome
If `outcome` does not exist in the sample space.
"""
if not self.has_outcome(outcome, null=True):
raise InvalidOutcome(outcome)
idx = self._outcomes_index.get(outcome, None)
if idx is None:
p = self.ops.zero
else:
p = self.pmf[idx]
return p
def __setitem__(self, outcome, value):
"""
Sets the probability associated with `outcome`.
Parameters
----------
outcome : outcome
Any hashable and equality comparable object in the sample space.
If `outcome` does not exist in the sample space, then an
InvalidOutcome exception is raised.
value : float
The probability or log probability of the outcome.
Returns
-------
p : float
The probability (or log probability) of the outcome.
Raises
------
InvalidOutcome
If `outcome` does not exist in the sample space.
Notes
-----
Setting the value of the outcome never deletes the outcome, even if the
value is equal to the null probability. After a setting operation,
the outcome will always exist in `outcomes` and `pmf`.
Setting a new outcome in a sparse distribution is costly. It is better
to know the non-null outcomes before creating the distribution.
"""
if not self.has_outcome(outcome, null=True):
raise InvalidOutcome(outcome)
idx = self._outcomes_index.get(outcome, None)
new_outcome = idx is None
if not new_outcome:
# If the distribution is dense, we will be here.
# We *could* delete if the value was zero, but we will make
# setting always set, and deleting always deleting (when sparse).
self.pmf[idx] = value
else:
# A new outcome in a sparse distribution.
# We add the outcome and its value, regardless if the value is zero.
# 1. Add the new outcome and probability
self.outcomes = self.outcomes + (outcome,)
self._outcomes_index[outcome] = len(self.outcomes) - 1
pmf = [p for p in self.pmf] + [value]
# 2. Reorder
outcomes, pmf, index = reorder(self.outcomes, pmf,
self._sample_space,
index=self._outcomes_index)
# 3. Store
self.outcomes = tuple(outcomes)
self._outcomes_index = index
self.pmf = np.array(pmf, dtype=float)
def copy(self, base=None):
"""
Returns a (deep) copy of the distribution.
Parameters
----------
base : 'linear', 'e', or float
Optionally, copy and change the base of the copied distribution.
If `None`, then the copy will keep the same base.
"""
# For some reason, we can't just return a deepcopy of self.
# It works for linear distributions but not for log distributions.
from copy import deepcopy
# Make an exact copy of the PRNG.
prng = np.random.RandomState()
prng.set_state(self.prng.get_state())
d = _make_distribution(outcomes=deepcopy(self.outcomes),
pmf=np.array(self.pmf, copy=True),
sample_space=deepcopy(self._sample_space),
base=self.ops.base,
prng=prng,
sparse=self._meta['is_sparse'])
if base is not None:
d.set_base(base)
return d
def sample_space(self):
"""
Returns an iterator over the ordered outcome space.
"""
return iter(self._sample_space)
def has_outcome(self, outcome, null=True):
"""
Returns `True` if `outcome` exists in the sample space.
Parameters
----------
outcome : outcome
The outcome to be tested.
null : bool
Specifies if null outcomes are acceptable. If `True`, then null
outcomes are acceptable. Thus, the only requirement on `outcome`
is that it exist in the distribution's sample space. If `False`,
then null outcomes are not acceptable. Thus, `outcome` must exist
in the distribution's sample space and also have a nonnull
probability in order to return `True`.
Notes
-----
This is an O(1) operation.
"""
if null:
# Make sure the outcome exists in the sample space, which equals
# the alphabet for distributions.
z = outcome in self._sample_space
else:
# Must be valid and have positive probability.
try:
z = self[outcome] > self.ops.zero
except InvalidOutcome:
z = False
return z
def is_approx_equal(self, other, rtol=None, atol=None):
"""
Returns `True` if `other` is approximately equal to this distribution.
For two distributions to be equal, they must have the same sample space
and must also agree on the probabilities of each outcome.
Parameters
----------
other : distribution
The distribution to compare against.
rtol : float
The relative tolerance to use when comparing probabilities.
See :func:`dit.math.close` for more information.
atol : float
The absolute tolerance to use when comparing probabilities.
See :func:`dit.math.close` for more information.
Notes
-----
The distributions need not have the same length, but they must have the
same base.
"""
if rtol is None:
rtol = ditParams['rtol']
if atol is None:
atol = ditParams['atol']
# We assume the distributions are properly normalized.
# Potentially nonzero probabilities from self must equal those from
# others. No need to check the other way around since we will verify
# that the sample spaces are equal.
for outcome in self.outcomes:
if not close(self[outcome], other[outcome], rtol=rtol, atol=atol):
return False
# Outcome spaces must be equal.
if self._sample_space != other._sample_space:
return False
return True
def is_dense(self):
"""
Returns `True` if the distribution is dense and `False` otherwise.
"""
return not self.is_sparse()
def is_sparse(self):
"""
Returns `True` if the distribution is sparse and `False` otherwise.
"""
return self._meta['is_sparse']
def normalize(self):
"""
Normalize the distribution, in-place.
Returns
-------
z : float
The previous normalization constant. This will be negative if
the distribution represents log probabilities.
"""
ops = self.ops
pmf = self.pmf
z = ops.add_reduce(pmf)
ops.mult_inplace(pmf, ops.invert(z))
return z
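# --- Editorial example (illustrative only) -------------------------------------
# Assuming a linear-base distribution ``d`` whose pmf is array([0.2, 0.2]),
# ``d.normalize()`` returns the previous normalization constant 0.4 and rescales
# the pmf in place to array([0.5, 0.5]).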
def set_base(self, base):
"""
Changes the base of the distribution.
If the distribution is a linear distribution and the base is changed,
then the distribution will subsequently represent log probabilities.
If the distribution is a log distribution, then it can be converted to
another base by passing in some other base. Alternatively, one can
convert to a linear distribution by passing 'linear'.
Generally, it is dangerous to change the base in-place, as numerical
errors can be introduced (especially when converting from very negative
log probabilities to linear probabilities). Additionally, functions or
classes that hold references to the distribution may not expect a
change in base. For this reason, one should prefer to use self.copy()
along with the `base` parameter.
Parameters
----------
base : float or string
The desired base for the distribution. If 'linear', then the
distribution's pmf will represent linear probabilities. If any
positive float (other than 1) or 'e', then the pmf will represent
log probabilities with the specified base.
See Also
--------
copy
"""
from .math import LinearOperations, LogOperations
from .params import validate_base
# Sanitize inputs
base = validate_base(base)
# Determine the conversion targets.
from_log = self.is_log()
if base == 'linear':
to_log = False
new_ops = LinearOperations()
else:
to_log = True
new_ops = LogOperations(base)
# If self.ops is None, then we are initializing the distribution.
# self.pmf will be set by the __init__ function.
if self.ops is not None:
# Then we are converting.
old_ops = self.ops
# In order to do conversions, we need a numerical value for base.
old_base = old_ops.get_base(numerical=True)
# Caution: The in-place multiplication ( *= ) below will work only
# if pmf has a float dtype. If not (e.g., dtype=int), then the
# multiplication gives incorrect results due to coercion. The
# __init__ function is responsible for guaranteeing the dtype.
# So we proceed assuming that in-place multiplication works for us.
if from_log and to_log:
# Convert from one log base to another.
## log_b(x) = log_b(a) * log_a(x)
self.pmf *= new_ops.log(old_base)
elif not from_log and not to_log:
# No conversion: from linear to linear.
pass
elif from_log and not to_log:
# Convert from log to linear.
## x = b**log_b(x)
self.pmf = old_base**self.pmf
else:
# Convert from linear to log.
## x = log_b(x)
self.pmf = new_ops.log(self.pmf)
self.ops = new_ops
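# Usage sketch (assumes `d` currently holds the linear probabilities [0.25, 0.75];
# only the conversion behaviour of set_base is illustrated here):
#     d.set_base(2)          # pmf becomes log2 values: [-2.0, log2(0.75) ~ -0.415]
#     d.set_base('linear')   # converts back, possibly reintroducing rounding error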
def make_dense(self):
"""
Make pmf contain all outcomes in the sample space.
This does not change the sample space.
Returns
-------
n : int
The number of null outcomes added.
"""
L = len(self)
# Recall, __getitem__ is a view to the dense distribution.
outcomes = tuple(self.sample_space())
pmf = [self[o] for o in outcomes]
self.pmf = np.array(pmf, dtype=float)
self.outcomes = outcomes
self._outcomes_index = dict(zip(outcomes, range(len(outcomes))))
self._meta['is_sparse'] = False
n = len(self) - L
return n
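# Usage sketch (assumes a sparse distribution whose sample space is {'0', '1', '2'}
# but whose pmf currently stores only '0' and '1'):
#     n = d.make_dense()   # n == 1; the null outcome '2' is added with probability 0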
def make_sparse(self, trim=True):
"""
Allow the pmf to omit null outcomes.
This does not change the sample space.
Parameters
----------
trim : bool
If `True`, then remove all null outcomes from the pmf.
import re
import os
import json
from IxNetRestApi import IxNetRestApiException
from ixnetwork_restpy.files import Files
import datetime
class FileMgmt(object):
def __init__(self, ixnObj=None):
"""
Description
Initialize default attributes.
Parameter
ixnObj: (Object): The parent object.
"""
self.ixnObj = ixnObj
self.ixNetwork = ixnObj.ixNetwork
def setMainObject(self, mainObject):
"""
Description
For Robot support only. Setting the parent object.
Parameter
mainObject: (Object): The parent object.
"""
self.ixnObj = mainObject
def loadConfigFile(self, configFile, localFile=True):
"""
Description
Load a saved config file.
Parameters
configFile: (str): The full path including the saved config filename.
If the config file is in a Windows filesystem, the format is
c:\\path\\bgp.ixncfg
If you are executing the script from Linux and the config file is in
local Linux filesystem, the format is /path/bgp.ixncfg and
localFile=True.
localFile: (bool): For Windows API server and Connection Mgr running on a Windows
server only. Set to False if the config file is in the Windows API server filesystem.
"""
self.ixnObj.logInfo("Loading Config File {}".format(configFile))
try:
self.ixNetwork.LoadConfig(Files(configFile, local_file=localFile))
except Exception as err:
self.ixnObj.logInfo("Error with Load config {}".format(err))
raise Exception("Failed to load config file {} ".format(configFile))
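# Usage sketch (object names and paths are illustrative, not from a real setup):
#     fileMgmtObj = FileMgmt(ixnObj=mainObj)
#     fileMgmtObj.loadConfigFile('/home/user/configs/bgp.ixncfg', localFile=True)
#     # or, for a config stored on the Windows API server filesystem:
#     fileMgmtObj.loadConfigFile('c:\\configs\\bgp.ixncfg', localFile=False)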
def copyFileWindowsToRemoteWindows(self, windowsPathAndFileName, localPath,
renameDestinationFile=None, includeTimestamp=False):
"""
Description
Copy files from the IxNetwork API Server c: drive to a remote Windows filesystem.
The filename to be copied will remain the same filename unless you set
renameDestinationFile to something you otherwise preferred. You could also include a
timestamp for the destination file.
Parameters
windowsPathAndFileName: (str): The full path and filename to retrieve from Windows API
server.
localPath: (str): The remote Windows destination path to put the file to.
renameDestinationFile: (str): You could rename the destination file.
includeTimestamp: (bool): If False, each time you copy the same file will be
overwritten.
"""
self.ixnObj.logInfo('\n copyFileWindowsToRemoteWindows: From: %s to %s\n' %
(windowsPathAndFileName, localPath))
fileName = windowsPathAndFileName.split('\\')[-1]
fileName = fileName.replace(' ', '_')
if renameDestinationFile:
fileName = renameDestinationFile
if includeTimestamp:
fileName = self._addTimestampToFile(fileName)
destinationPath = localPath + '\\' + fileName
try:
self.ixNetwork.CopyFile(windowsPathAndFileName, destinationPath)
except Exception as err:
self.ixnObj.logInfo("Error with file transfer {}".format(err))
raise Exception("Copy File from {} to {} Failed".format(windowsPathAndFileName,
destinationPath))
def copyFileWindowsToLocalLinux(self, windowsPathAndFileName, localPath,
renameDestinationFile=None, includeTimestamp=False):
"""
Description
Copy files from the IxNetwork API Server c: drive to local Linux filesystem.
The filename to be copied will remain the same filename unless you set
renameDestinationFile to something you otherwise preferred. You could also include a
timestamp for the destination file.
Parameters
windowsPathAndFileName: (str): The full path and filename to retrieve from Windows
client.
localPath: (str): The Linux destination path to put the file to.
renameDestinationFile: (str): You could rename the destination file.
includeTimestamp: (bool): If False, each time you copy the same file will be
overwritten.
"""
self.ixnObj.logInfo('\n copyFileWindowsToLocalLinux: From: %s to %s\n' %
(windowsPathAndFileName, localPath))
fileName = windowsPathAndFileName.split('\\')[-1]
fileName = fileName.replace(' ', '_')
if renameDestinationFile:
fileName = renameDestinationFile
if includeTimestamp:
fileName = self._addTimestampToFile(fileName)
destinationPath = localPath + '/' + fileName
try:
self.ixNetwork.CopyFile(windowsPathAndFileName, destinationPath)
except Exception as err:
self.ixnObj.logInfo("Error with file transfer {}".format(err))
raise Exception("\n copyFileWindowsToLocalLinux Error: Failed to download file from "
"IxNetwork API Server ")
def copyFileWindowsToLocalWindows(self, windowsPathAndFileName, localPath,
renameDestinationFile=None, includeTimestamp=False):
"""
Description
Copy files from the Windows IxNetwork API Server to a local c: drive destination.
The filename to be copied will remain the same filename unless you set
renameDestinationFile to something you otherwise preferred. You could include a
timestamp for the destination file.
Parameters
windowsPathAndFileName: (str): The full path and filename to retrieve from Windows
client.
localPath: (str): The Windows local filesystem. Ex: C:\\Results.
renameDestinationFile: (str): You could rename the destination file.
includeTimestamp: (bool): If False, each time you copy the same file will be
overwritten.
Example:
WindowsPathAndFileName = 'C:\\Users\\hgee\\AppData\\Local\\Ixia\\IxNetwork\\data\\result
\\DP.Rfc2544Tput\\9e1a1f04-fca5-42a8-b3f3-74e5d165e68c\\Run0001\\TestReport.pdf'
localPath = 'C:\\Results'
"""
self.ixnObj.logInfo('\n copyFileWindowsToLocalWindows: From: %s to %s\n\n' %
(windowsPathAndFileName, localPath))
fileName = windowsPathAndFileName.split('\\')[-1]
fileName = fileName.replace(' ', '_')
if renameDestinationFile:
fileName = renameDestinationFile
if includeTimestamp:
fileName = self._addTimestampToFile(fileName)
destinationPath = localPath + '\\' + fileName
self.ixnObj.logInfo('Copying from {} -> {}'.format(windowsPathAndFileName, destinationPath))
self.ixNetwork.CopyFile(windowsPathAndFileName, destinationPath)
def _addTimestampToFile(self, filename):
"""
Function used internally by API rfc2544_quicktest
:param filename: filename for which timestamp to be added
"""
currentTimestamp = datetime.datetime.now().strftime('%H%M%S')
if '\\' in filename:
filename = filename.split('\\')[-1]
if '/' in filename:
filename = filename.split('/')[-1]
newFilename = filename.split('.')[0]
newFileExtension = filename.split('.')[1]
newFileWithTimestamp = '{}_{}.{}'.format(newFilename, currentTimestamp, newFileExtension)
return newFileWithTimestamp
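# Example of the renaming behaviour (the timestamp value is illustrative):
#     self._addTimestampToFile('stats.csv')         -> 'stats_142305.csv'
#     self._addTimestampToFile('c:\\temp\\run.pdf') -> 'run_142305.pdf'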
def copyFileLinuxToLocalLinux(self, linuxApiServerPathAndFileName, localPath,
renameDestinationFile=None, includeTimestamp=False,
linuxApiServerPathExtension=None):
"""
Description
Copy files from Linux API Server to local Linux filesystem. The filename to be copied
will remain the same filename unless you set renameDestinationFile to something you
otherwise preferred.
You could also include a timestamp for the destination file.
Parameters
linuxApiServerPathAndFileName: (str): The full path and filename to retrieve.
linuxApiServerPathExtension: (str): Not used in Restpy
localPath: (str): The Linux destination path to put the file to.
renameDestinationFile: (str): You could rename the destination file.
includeTimestamp: (bool): If False, each time you copy the same file will be
overwritten.
"""
self.ixnObj.logInfo('\n copyFileLinuxToLocalLinux: From: %s to %s\n' %
(linuxApiServerPathAndFileName, localPath))
fileName = linuxApiServerPathAndFileName.split('/')[-1]
fileName = fileName.replace(' ', '_')
if renameDestinationFile:
fileName = renameDestinationFile
if includeTimestamp:
fileName = self._addTimestampToFile(fileName)
destinationPath = localPath + '/' + fileName
try:
self.ixNetwork.CopyFile(linuxApiServerPathAndFileName, destinationPath)
except Exception as err:
self.ixnObj.logInfo("Error with file transfer {}".format(err))
raise Exception("\n copyFileLinuxToLocalLinux Error: Failed to download file from "
"IxNetwork API Server ")
def convertIxncfgToJson(self, ixncfgFile, destinationPath):
"""
Description
This function takes the input .ixncfg config file to be loaded and then convert it
to json format. The filename will be the same as the input .ixncfg filename, but the
extension will be .json. The converted .json file will be saved in the path
variable destinationPath.
Parameters
ixncfgFile: (str): The binary IxNetwork .ixncfg file.
destinationPath: (str): The destination path to save the .json config file.
"""
self.ixnObj.logInfo("convertIxncfgToJson")
self.loadConfigFile(ixncfgFile)
filename = re.findall(r'[^\/|\\]+(?=\.)', ixncfgFile)[0]
if self.ixnObj.serverOs in ['windows', 'windowsConnectionMgr']:
jsonFilename = destinationPath + '\\' + filename + '.json'
destinationPath = jsonFilename.replace('\\', '\\\\')
if self.ixnObj.serverOs == 'linux':
destinationPath = destinationPath+'/'+filename + '.json'
self.exportJsonConfigFile(destinationPath)
def importJsonConfigObj(self, dataObj, option='modify', silentMode=False, timeout=90):
"""
Description
For newConfig:
This is equivalent to loading a saved .ixncfg file.
To use this API, your script should have read a JSON config into an object variable.
Then pass in the json object to the data parameter.
For modify:
Import the modified JSON data object to make a configuration modification
on the API server.
Supports one xpath at a time.
Example: {"xpath": "/traffic/trafficItem[1]",
"enabled": True,
"name": "Topo-BGP"}
Parameters
dataObj: (json object): The JSON config object.
option: (str): newConfig|modify
silentMode: (bool): Not required in Restpy
timeout: (int): Not required in Restpy
Note
arg2 value must be a string of JSON data: '{"xpath": "/traffic/trafficItem[1]",
"enabled": false}'
"""
if option == 'modify':
arg3 = False
if option == 'newConfig':
arg3 = True
try:
self.ixNetwork.ResourceManager.ImportConfig(Arg2=json.dumps(dataObj), Arg3=arg3)
except Exception as e:
print(e)
raise Exception('\nimportJsonConfigObj Error')
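# Usage sketch, following the xpath format described in the docstring above
# (the object name fileMgmtObj is illustrative):
#     dataObj = {"xpath": "/traffic/trafficItem[1]", "enabled": False, "name": "Topo-BGP"}
#     fileMgmtObj.importJsonConfigObj(dataObj, option='modify')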
def importJsonConfigFile(self, jsonFileName, option='modify'):
"""
Description
To import a JSON config file to IxNetwork.
You could state it to import as a modified config or creating a new config.
The benefit of importing an actual JSON config file is so you could manually use
IxNetwork Resource Manager to edit any part of the JSON config and add to the
current configuration
Parameters
jsonFileName: (str): The JSON config file. Could include absolute path also.
option: (str): newConfig|modify
"""
if option == 'modify':
arg3 = False
if option == 'newConfig':
arg3 = True
try:
self.ixNetwork.ResourceManager.ImportConfigFile(Arg2=Files(jsonFileName), Arg3=arg3)
except Exception as err:
self.ixnObj.logInfo("Error with importJsonConfig {}".format(err))
raise Exception('\nimportJsonConfigObj Error')
def exportJsonConfigFile(self, jsonFileName, xpathList=None):
"""
Description
Export the current configuration to a JSON format config file and copy it to local
filesystem.
Parameters
jsonFileName: (str): The JSON config file name to create. Could include absolute path
also.
xpathList: <list>
To get entire configuration = ['/descendant-or-self::*']
To get code fragments such as /vport = ['/vport/descendant-or-self::*']
Requirements
self.ixnObj.waitForComplete()
self.copyFileLinuxToLocalLinux()
self.copyFileWindowsToLocalLinux()
self.jsonReadConfig()
self.jsonWriteToFile()
Example
restObj.exportJsonConfigFile(jsonFileName='/path/exportedJsonConfig.json')
"""
if xpathList is None:
xpathList = ['/descendant-or-self::*']
self.ixnObj.logInfo('Storing the exported file to: %s' % jsonFileName)
try:
ret = self.ixNetwork.ResourceManager.ExportConfig(Arg2=xpathList, Arg3=True,
Arg4='json')
convStrToDict = json.loads(ret)
with open(jsonFileName, 'w') as fp:
json.dump(convStrToDict, fp)
except Exception as err:
# error handling below is assumed, mirroring the other methods in this class
self.ixnObj.logInfo("Error with exportJsonConfigFile {}".format(err))
raise Exception("\n exportJsonConfigFile Error: Failed to export the JSON config file")
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
import miscIO
import sys
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
NA_VALUE = -999999
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def lookAtHeaderTokens(aTokens):
patientList = []
typeDict = {}
for a in aTokens:
if (a.upper().startswith("TCGA-")):
patientID = a[8:12].upper()
if (patientID not in patientList):
patientList += [patientID]
if (len(a) >= 15):
typeID = a[13:15]
if (typeID not in typeDict.keys()):
typeDict[typeID] = 0
typeDict[typeID] += 1
else:
print " WARNING : no typeID ??? <%s> " % a
if (len(patientList) > 0):
print " "
print " # of unique patients : ", len(patientList)
print " sample type counts : ", typeDict
print " "
print " "
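# Example of how the slices above map onto a TCGA barcode such as
# "TCGA-02-0001-01C-01D-0182-01" (sample-type meanings per the TCGA code tables):
#     a[8:12]  -> "0001"  patient ID within the tissue source site
#     a[13:15] -> "01"    sample type (e.g. 01 = primary tumor, 11 = solid tissue normal)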
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def lookAtLine(aLine):
if (1):
print len(aLine)
numTab = 0
numLF = 0
numCR = 0
numSpace = 0
numDigit = 0
numLetter = 0
numLinesOut = 0
i1 = 0
for ii in range(len(aLine)):
ordVal = ord(aLine[ii])
if (1):
if (ordVal == 9):
# this is a tab ...
numTab += 1
elif (ordVal == 10):
numLF += 1
elif (ordVal == 13):
numCR += 1
elif (ordVal == 32):
numSpace += 1
elif ((ordVal >= 48 and ordVal <= 57) or (ordVal == 46)):
numDigit += 1
elif ((ordVal >= 65 and ordVal <= 90) or (ordVal >= 97 and ordVal <= 122)):
numLetter += 1
elif (ordVal < 32 or ordVal > 126):
print " %6d %3d " % (ii, ordVal)
else:
# print " %6d <%s> %3d " % ( ii, aLine[ii], ord ( aLine[ii]
# ) )
doNothing = 1
if (ordVal == 13):
i2 = ii
# print " --> writing out from %d to %d " % ( i1, i2 )
# print " <%s> " % aLine[i1:i2]
numLinesOut += 1
## if ( numLinesOut == 5 ): sys.exit(-1)
## fhOut.write ( "%s\n" % aLine[i1:i2] )
i1 = i2 + 1
print numTab, numLF, numCR, numSpace, numDigit, numLetter
print numLinesOut
sys.exit(-1)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == "__main__":
if (len(sys.argv) != 2 and len(sys.argv) != 3):
print ' Usage : %s <filename> [hist-file] ' % sys.argv[0]
print " ERROR -- bad command line arguments "
sys.exit(-1)
inFilename = sys.argv[1]
if (len(sys.argv) == 3):
histFilename = sys.argv[2]
noHist = 0
else:
noHist = 1
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fh = file(inFilename)
numLines = 10
for iLine in range(numLines):
aLine = fh.readline()
# look for carriage return / line-feed ?
## lookAtLine ( aLine )
## aLine = aLine.strip()
aTokens = aLine.split('\t')
if (len(aTokens) > 15):
print len(aTokens), aTokens[:5], aTokens[-5:]
else:
print len(aTokens), aTokens
numLines = miscIO.num_lines(fh)
print "\n\n total # of lines in file : %d " % numLines
fh.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fh = file(inFilename)
aLine = fh.readline()
aTokens = aLine.split('\t')
numA = len(aTokens)
print " number of header tokens : ", numA
lookAtHeaderTokens(aTokens)
done = 0
iLine = 0
while not done:
bLine = fh.readline()
iLine += 1
# print bLine
bTokens = bLine.split('\t')
# print len(bTokens), bTokens
numB = len(bTokens)
if (numB < 2):
done = 1
continue
if (numA != numB):
print " wrong number of tokens ??? ", numB, numA, iLine
print bTokens
print bLine
sys.exit(-1)
for ii in range(numA):
if (bTokens[ii] == ''):
print " WARNING ... blank token ", ii
print bTokens
print bLine
## sys.exit(-1)
fh.close()
# sys.exit(-1)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fh = file(inFilename)
aLine = fh.readline()
bLine = fh.readline()
fh.close()
try:
if (aLine[-1] == '\n'):
aLine = aLine[:-1]
if (bLine[-1] == '\n'):
bLine = bLine[:-1]
except:
print " ERROR ??? bad data file ??? ", inFilename
sys.exit(-1)
aTokens = aLine.split('\t')
bTokens = bLine.split('\t')
numA = len(aTokens)
numB = len(bTokens)
print numA, numB
if (numA != numB):
print " ERROR ??? first two lines do not have the same numbers of tokens ??? "
sys.exit(-1)
if (numA < 50):
for ii in range(numA):
print ii, "\t", aTokens[ii], "\t:\t", bTokens[ii]
print " "
print " "
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
print " opening file <%s> " % inFilename
fh = file(inFilename)
aLine = fh.readline()
# hdrTokens has the list of column ids (patients presumably)
hdrTokens = aLine.split('\t')
numCol = len(hdrTokens)
if (numCol > 15):
print numCol, hdrTokens[:5], hdrTokens[-5:]
else:
print numCol, hdrTokens
# now we make a data matrix, the first dimension will be the column #
print " --> first dimension of dataMatrix is %d " % numCol
dataMatrix = [0] * numCol
for iCol in range(numCol):
dataMatrix[iCol] = []
done = 0
isBinary = 0
numBinary = 0
numNotB = 0
while not done:
bLine = fh.readline()
try:
if ( bLine[-1] == '\n' ): bLine = bLine[:-1]
except:
doNothing = 1
## bLine = bLine.strip()
# each bTokens will have a feature name, followed by a list of feature
# values
bTokens = bLine.split('\t')
if (len(bTokens) != numCol):
done = 1
print " DONE ", numCol, len(bTokens)
print bTokens
print " "
else:
# dataMatrix[0]
for iCol in range(numCol):
if ( bTokens[iCol] == "" ):
dataMatrix[iCol] += ["NA"]
else:
dataMatrix[iCol] += [bTokens[iCol]]
if (iCol > 0):
if (bTokens[iCol]!="NA" and bTokens[iCol]!=""):
if (bTokens[iCol] == "0" or bTokens[iCol] == "1"):
numBinary += 1
else:
numNotB += 1
## print " dataMatrix[%d] has %d values " % ( iCol, len(dataMatrix[iCol]) )
# print numBinary, numNotB
if (numBinary > numNotB * 1000):
isBinary = 1
fh.close()
print " "
print len(dataMatrix), len(dataMatrix[0])
# print dataMatrix[:5]
print dataMatrix[0][:5] # this is all of the feature IDs
print dataMatrix[1][:5] # this is data for the first patient
print dataMatrix[-1][:5] # this is data for the last patient
print " "
numRow = len(dataMatrix[0])
numNA = 0
notNA = 0
numNAbyRow = [0] * numRow
maxNA = 0
# if this looks like a purely binary feature matrix, then
# count up the number of ones and 0s ...
if (isBinary):
bitD = {}
bitD["0"] = 0
bitD["1"] = 1
for iCol in range(1, numCol):
for iRow in range(numRow):
curVal = dataMatrix[iCol][iRow]
if (curVal in bitD.keys()):
bitD[curVal] += 1
print " "
print " binary counts : ", bitD, 10000. * (float(bitD["1"]) / float(bitD["0"] + bitD["1"])), (numRow - 1), (numCol - 1)
maxOn = 0
maxCol = -1
for iCol in range(1, numCol):
numOn = 0
featName = hdrTokens[iCol]
if (featName.lower().find("unknown") >= 0):
continue
for iRow in range(numRow):
if (dataMatrix[iCol][iRow] == "1"):
numOn += 1
if (numOn > maxOn):
maxCol = iCol
maxOn = numOn
print " most mutated patient : ", maxCol, hdrTokens[maxCol], maxOn
print " "
# if this file looks like a feature matrix with "data types",
# then lets count up NAs by data type ...
haveDataTypes = 0
if (dataMatrix[0][0][1] == ':'):
if (dataMatrix[0][0][6] == ':'):
haveDataTypes = 1
NAbyDataType = {}
AVbyDataType = {}
for iRow in range(numRow):
dataType = dataMatrix[0][iRow][:6]
if (dataType not in NAbyDataType.keys()):
NAbyDataType[dataType] = 0
AVbyDataType[dataType] = 0
for iCol in range(1, numCol):
for iRow in range(numRow):
if (dataMatrix[iCol][iRow] == ""):
print " ERROR ??? blank entry ??? ", iCol, iRow
print dataMatrix[iCol - 5:iCol + 5][iRow]
print dataMatrix[iCol][iRow - 5:iRow + 5]
sys.exit(-1)
if (haveDataTypes):
dataType = dataMatrix[0][iRow][:6]
if ((dataMatrix[iCol][iRow] == "NA") or (dataMatrix[iCol][iRow] == "na") or (dataMatrix[iCol][iRow] ==
"""Test the Basic ICN Layer implementation"""
import multiprocessing
import time
import unittest
from PiCN.Layers.ICNLayer import BasicICNLayer
from PiCN.Layers.ICNLayer.ContentStore import ContentStoreMemoryExact
from PiCN.Layers.ICNLayer.ForwardingInformationBase import ForwardingInformationBaseMemoryPrefix
from PiCN.Layers.ICNLayer.PendingInterestTable import PendingInterstTableMemoryExact
from PiCN.Packets import Name, Interest, Content, Nack, NackReason
from PiCN.Processes import PiCNSyncDataStructFactory
class test_BasicICNLayer(unittest.TestCase):
"""Test the Basic ICN Layer implementation"""
def setUp(self):
#setup icn_layer
self.icn_layer = BasicICNLayer(log_level=255)
synced_data_struct_factory = PiCNSyncDataStructFactory()
synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
synced_data_struct_factory.register("fib", ForwardingInformationBaseMemoryPrefix)
synced_data_struct_factory.register("pit", PendingInterstTableMemoryExact)
synced_data_struct_factory.create_manager()
cs = synced_data_struct_factory.manager.cs()
fib = synced_data_struct_factory.manager.fib()
pit = synced_data_struct_factory.manager.pit()
cs.set_cs_timeout(2)
pit.set_pit_timeout(2)
pit.set_pit_retransmits(2)
self.icn_layer.cs = cs
self.icn_layer.fib = fib
self.icn_layer.pit = pit
#setup queues icn_routing layer
self.queue1_icn_routing_up = multiprocessing.Queue()
self.queue1_icn_routing_down = multiprocessing.Queue()
#add queues to ICN layer
self.icn_layer.queue_from_lower = self.queue1_icn_routing_up
self.icn_layer.queue_to_lower = self.queue1_icn_routing_down
def tearDown(self):
self.icn_layer.stop_process()
def test_ICNLayer_interest_forward_basic(self):
"""Test ICN layer with no CS and no PIT entry"""
self.icn_layer.start_process()
to_faceid = 1
from_faceid = 2
#Add entry to the fib
name = Name("/test/data")
interest = Interest("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_faceid], static=True)
#forward entry
self.queue1_icn_routing_up.put([from_faceid, interest])
try:
faceid, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
#check output
self.assertEqual(faceid, to_faceid)
self.assertEqual(data, interest)
#check data structures
self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_faceid])
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
self.assertEqual(self.icn_layer.pit.find_pit_entry(name).faceids[0], from_faceid)
self.assertEqual(self.icn_layer.pit.find_pit_entry(name).name, name)
def test_ICNLayer_interest_forward_longest_match(self):
"""Test ICN layer with no CS and no PIT entry and longest match"""
self.icn_layer.start_process()
to_face_id = 1
from_face_id = 2
#Add entry to the fib
name = Name("/test")
interest = Interest("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_face_id], static=True)
#forward entry
self.queue1_icn_routing_up.put([from_face_id, interest])
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
#check output
self.assertEqual(face_id, to_face_id)
self.assertEqual(data, interest)
#check data structures
self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_face_id])
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).faceids[0], from_face_id)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).name, interest.name)
def test_ICNLayer_interest_forward_deduplication(self):
"""Test ICN layer with no CS and no PIT entry and deduplication"""
self.icn_layer.start_process()
to_face_id = 1
from_face_id_1 = 2
from_face_id_2 = 3
# Add entry to the fib
name = Name("/test")
interest1 = Interest("/test/data")
interest2 = Interest("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_face_id])
# forward entry
self.queue1_icn_routing_up.put([from_face_id_1, interest1])
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
self.queue1_icn_routing_up.put([from_face_id_2, interest2], block=True)
self.assertTrue(self.queue1_icn_routing_down.empty())
time.sleep(3)
# check output
self.assertEqual(face_id, to_face_id)
self.assertEqual(data, interest1)
time.sleep(0.3) # sleep required, since there is no blocking get before the checks
# check data structures
self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_face_id])
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
self.assertEqual(len(self.icn_layer.pit.find_pit_entry(interest1.name).faceids), 2)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest1.name).faceids, [from_face_id_1, from_face_id_2])
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest1.name).name, interest1.name)
def test_ICNLayer_interest_forward_content_match(self):
"""Test ICN layer with CS entry matching"""
self.icn_layer.start_process()
from_face_id = 2
interest = Interest("/test/data")
#add content
content = Content("/test/data")
self.icn_layer.cs.add_content_object(content)
#request content
self.queue1_icn_routing_up.put([from_face_id, interest])
#get content
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data, content)
self.assertEqual(face_id, from_face_id)
def test_ICNLayer_interest_forward_content_no_match(self):
"""Test ICN layer with CS entry no match"""
self.icn_layer.start_process()
to_face_id = 1
from_face_id = 2
interest = Interest("/test/data/bla")
name = Name("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_face_id], static=True)
#add content
content = Content("/test/data")
self.icn_layer.cs.add_content_object(content)
#request content
self.queue1_icn_routing_up.put([from_face_id, interest])
#get data from fib
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
self.assertTrue(data, interest)
self.assertTrue(face_id, to_face_id)
self.assertTrue(self.queue1_icn_routing_up.empty())
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).name, interest.name)
def test_ICNLayer_content_no_pit(self):
"""Test receiving a content object with no PIT entry"""
self.icn_layer.start_process()
from_face_id = 2
content = Content("/test/data")
self.queue1_icn_routing_up.put([from_face_id, content])
self.assertTrue(self.queue1_icn_routing_down.empty())
def test_ICNLayer_content_pit(self):
"""Test receiving a content object with PIT entry"""
self.icn_layer.start_process()
content_in_face_id = 1
from_face_id = 2
name = Name("/test/data")
content = Content("/test/data")
self.icn_layer.pit.add_pit_entry(name, from_face_id, content_in_face_id, None, None)
self.queue1_icn_routing_up.put([content_in_face_id, content])
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
self.assertEqual(face_id, from_face_id)
self.assertEqual(data, content)
def test_ICNLayer_content_two_pit_entries(self):
"""Test receiving a content object with two PIT entries"""
self.icn_layer.start_process()
content_in_face_id = 1
from_face_id_1 = 2
from_face_id_2 = 3
name = Name("/test/data")
content = Content("/test/data")
self.icn_layer.pit.add_pit_entry(name, from_face_id_1, content_in_face_id, None, False)
self.icn_layer.pit.add_pit_entry(name, from_face_id_2, content_in_face_id, None, False)
self.queue1_icn_routing_up.put([content_in_face_id, content])
try:
face_id_1, data1 = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
self.assertEqual(face_id_1, from_face_id_1)
self.assertEqual(data1, content)
face_id_2, data2 = self.queue1_icn_routing_down.get()
self.assertEqual(face_id_2, from_face_id_2)
self.assertEqual(data2, content)
def test_ICNLayer_ageing_pit(self):
"""Test PIT ageing"""
self.icn_layer.start_process()
from_face_id_1 = 1
to_face_id = 2
name = Name("/test/data")
interest = Interest(name)
self.icn_layer.fib.add_fib_entry(name, [to_face_id])
self.icn_layer.pit.add_pit_entry(name, from_face_id_1, to_face_id, interest, False)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.find_pit_entry(name).name, name)
# test retransmit 1
self.icn_layer.ageing()
time.sleep(0.1)
self.assertFalse(self.icn_layer.queue_to_lower.empty())
try:
rface_id, rinterest = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(rface_id, to_face_id)
self.assertEqual(rinterest, interest)
# test retransmit 2
self.icn_layer.ageing()
time.sleep(0.1)
self.assertFalse(self.icn_layer.queue_to_lower.empty())
try:
rface_id, rinterest = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(rface_id, to_face_id)
self.assertEqual(rinterest, interest)
# Wait for timeout
time.sleep(2)
# test retransmit 3 to get number of retransmit
self.icn_layer.ageing()
time.sleep(0.1)
self.assertFalse(self.icn_layer.queue_to_lower.empty())
try:
rface_id, rinterest = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(rface_id, to_face_id)
self.assertEqual(rinterest, interest)
# test remove pit entry
self.icn_layer.ageing()
# nack = self.icn_layer.queue_to_lower.get(timeout=8.0) # invalid, no PIT Timeout Nack anymore
# self.assertEqual(nack, [1, Nack(rinterest.name, NackReason.PIT_TIMEOUT, rinterest)])
self.assertTrue(self.icn_layer.queue_to_lower.empty())
self.assertEqual(self.icn_layer.pit.get_container_size(), 0)
def test_ICNLayer_ageing_cs(self):
"""Test CS ageing and static entries"""
self.icn_layer.start_process()
name1 = Name("/test/data")
content1 = Content(name1, "HelloWorld")
name2 = Name("/data/test")
content2 = Content(name2, "Goodbye")
self.icn_layer.cs.add_content_object(content1)
self.icn_layer.cs.add_content_object(content2, static=True)
self.assertEqual(self.icn_layer.cs.get_container_size(), 2)
self.assertEqual(self.icn_layer.cs.find_content_object(name1).content, content1)
self.assertEqual(self.icn_layer.cs.find_content_object(name2).content, content2)
#Test aging 1
self.icn_layer.ageing()
self.assertEqual(self.icn_layer.cs.get_container_size(), 2)
self.assertEqual(self.icn_layer.cs.find_content_object(name1).content, content1)
self.assertEqual(self.icn_layer.cs.find_content_object(name2).content, content2)
time.sleep(2)
# Test aging 2
self.icn_layer.ageing()
self.assertEqual(self.icn_layer.cs.get_container_size(), 1)
self.assertEqual(self.icn_layer.cs.find_content_object(name2).content, content2)
def test_ICNLayer_content_from_app_layer_no_pit(self):
"""get content from app layer when there is no pit entry available"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer.start_process()
n = Name("/test/data")
c = Content(n, "HelloWorld")
self.icn_layer.queue_from_higher.put([0, c])
time.sleep(1)
self.assertTrue(self.queue1_icn_routing_down.empty())
def test_ICNLayer_content_from_app_layer(self):
"""get content from app layer when there is a pit entry available"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer.start_process()
face_id = 1
n = Name("/test/data")
self.icn_layer.pit.add_pit_entry(n, face_id, -1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
c = Content(n, "HelloWorld")
self.icn_layer.queue_from_higher.put([0, c])
try:
data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data, [face_id, c])
def test_ICNLayer_content_to_app_layer_no_pit(self):
"""get content to app layer no pit"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer.start_process()
from_face_id = 1
n = Name("/test/data")
c = Content(n, "HelloWorld")
self.icn_layer.queue_from_lower.put([from_face_id, c])
time.sleep(1)
self.assertTrue(self.icn_layer.queue_to_higher.empty())
def test_ICNLayer_content_to_app_layer(self):
"""get content to app layer"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer.start_process()
face_id = -1
from_face_id = 1
n = Name("/test/data")
self.icn_layer.pit.add_pit_entry(n, face_id, -1, interest=None, local_app=True)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
c = Content(n, "HelloWorld")
self.icn_layer.queue_from_lower.put([from_face_id, c])
try:
data = self.icn_layer.queue_to_higher.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data, [1, c])
def test_ICNLayer_interest_from_app_layer_no_pit(self):
"""Test sending and interest message from APP with no PIT entry"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer._interest_to_app=True
self.icn_layer.start_process()
face_id = 1
n = Name("/test/data")
i = Interest(n)
self.icn_layer.fib.add_fib_entry(n, [face_id], True)
self.icn_layer.queue_from_higher.put([0, i])
try:
to_faceid, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(to_faceid, face_id)
self.assertEqual(i, data)
self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
self.assertTrue(self.icn_layer.pit.find_pit_entry(n).local_app[0])
def test_ICNLayer_interest_from_app_layer_pit(self):
"""Test sending and interest message from APP with a PIT entry --> interest not for higher layer"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer._interest_to_app=True
self.icn_layer.start_process()
face_id = 1
from_face_id = 2
n = Name("/test/data")
i = Interest(n)
self.icn_layer.fib.add_fib_entry(n, [face_id], True)
self.icn_layer.pit.add_pit_entry(n, from_face_id, face_id, i, local_app=False)
self.assertFalse(self.icn_layer.pit.find_pit_entry(n).local_app[0])
self.icn_layer.queue_from_higher.put([0, i])
try:
to_face_id, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(to_face_id, face_id)
self.assertEqual(i, data)
self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
self.assertFalse(self.icn_layer.pit.find_pit_entry(n).local_app[0]) #Just forward, not from local app
def test_ICNLayer_interest_to_app_layer_no_pit(self):
"""Test sending and interest message from APP with no PIT entry"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer._interest_to_app = True
self.icn_layer.start_process()
face_id = 1
from_face_id = 2
n = Name("/test/data")
i = Interest(n)
self.icn_layer.fib.add_fib_entry(n, [face_id], True)
self.icn_layer.queue_from_lower.put([from_face_id, i])
try:
data = self.icn_layer.queue_to_higher.get(timeout=2.0)
except:
self.fail()
self.assertEqual(data[1], i)
self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
def test_ICNLayer_interest_to_app_layer_pit(self):
"""Test sending and interest message from APP with a PIT entry"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer._interest_to_app = True
self.icn_layer.start_process()
face_id = [1]
from_face_id = 2
n = Name("/test/data")
i = Interest(n)
self.icn_layer.fib.add_fib_entry(n, face_id, True)
self.icn_layer.pit.add_pit_entry(n, from_face_id, face_id[0], i, local_app=False)
self.icn_layer.queue_from_lower.put([from_face_id, i])
time.sleep(1)
self.assertTrue(self.icn_layer.queue_to_higher.empty()) #--> deduplication by pit entry
def test_ICNLayer_interest_to_app_layer_cs(self):
"""Test sending and interest message from APP with a CS entry"""
queue_to_higher = multiprocessing.Queue()
queue_from_higher = multiprocessing.Queue()
self.icn_layer.queue_to_higher = queue_to_higher
self.icn_layer.queue_from_higher = queue_from_higher
self.icn_layer._interest_to_app = True
self.icn_layer.start_process()
face_id = 1
from_face_id = 2
n = Name("/test/data")
i = Interest(n)
c = Content(n, "Hello World")
self.icn_layer.fib.add_fib_entry(n, [face_id], True)
self.icn_layer.cs.add_content_object(c)
self.icn_layer.queue_from_lower.put([from_face_id, i])
try:
to_face_id, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
self.assertEqual(to_face_id, from_face_id)
self.assertEqual(data, c)
self.assertTrue(self.icn_layer.queue_to_higher.empty()) # --> was answered by using Content from cache
def test_ICNLayer_issue_nack_no_content_no_fib_from_lower(self):
"""Test if ICN Layer issues Nack if no content and no fib entry is available from lower"""
self.icn_layer.start_process()
interest = Interest("/test/data")
nack = Nack(interest.name, NackReason.NO_ROUTE, interest=interest)
self.icn_layer.queue_from_lower.put([1, interest])
try:
data = self.icn_layer.queue_to_lower.get(timeout=2.0)
except:
self.fail()
fid = data[0]
packet = data[1]
self.assertEqual(fid, 1)
self.assertEqual(packet, nack)
def test_ICNLayer_issue_nack_no_content_no_fib_from_higher(self):
"""Test if ICN Layer issues Nack if no content and no fib entry is available from higher"""
offset = minimum_range(pe.chars, pe.ranges)
bitset = unique_range(pe.chars, pe.ranges) >> offset
def match_bitset(px):
if px.pos < px.epos:
shift = ord(px.inputs[px.pos]) - offset
if shift >= 0 and (bitset & (1 << shift)) != 0:
px.pos += 1
return True
return False
return match_bitset
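# Sketch of the bitset encoding assumed here (minimum_range / unique_range are
# helpers defined elsewhere in this package): for a character class like [a-c],
# offset would be ord('a') == 97 and bitset 0b111, so a character ch matches when
#     (bitset & (1 << (ord(ch) - offset))) != 0
# e.g. 'b' -> shift 1 -> bit set -> match; 'z' -> shift 25 -> bit clear -> no match.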
def ManyRange(self, pe, step):
bitset = unique_range(pe.chars, pe.ranges) # >> offset
def match_manybitset(px):
while px.pos < px.epos:
shift = ord(px.inputs[px.pos]) # - offset
if shift >= 0 and (bitset & (1 << shift)) != 0:
px.pos += 1
continue
return False
return match_manybitset
def AndRange(self, pe, step):
bitset = unique_range(pe.chars, pe.ranges) # >> offset
def match_andbitset(px):
if px.pos < px.epos:
shift = ord(px.inputs[px.pos]) # - offset
if shift >= 0 and (bitset & (1 << shift)) != 0:
return True
return False
return match_andbitset
def NotRange(self, pe, step):
bitset = unique_range(pe.chars, pe.ranges) # >> offset
def match_notbitset(px):
if px.pos < px.epos:
shift = ord(px.inputs[px.pos]) # - offset
if shift >= 0 and (bitset & (1 << shift)) != 0:
return False
return True
return match_notbitset
def PAnd(self, pe, step):
pf = self.emit(pe.e, step)
def match_and(px):
pos = px.pos
if pf(px):
# backtracking
px.headpos = max(px.pos, px.headpos)
px.pos = pos
return True
return False
return match_and
def PNot(self, pe, step):
pf = self.emit(pe.e, step)
def match_not(px):
pos = px.pos
ast = px.ast
if not pf(px):
# backtracking
px.headpos = max(px.pos, px.headpos)
px.pos = pos
px.ast = ast
return True
return False
return match_not
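# Both lookaheads above are zero-width: they run the inner parser and then restore
# px.pos (and, for the negative case, px.ast), so in PEG notation a fragment like
# !'}' . consumes one character only when it is not a closing brace.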
def PMany(self, pe, step):
pf = self.emit(pe.e, step)
def match_many(px):
pos = px.pos
ast = px.ast
while pf(px) and pos < px.pos:
pos = px.pos
ast = px.ast
px.headpos = max(px.pos, px.headpos)
px.pos = pos
px.ast = ast
return True
return match_many
def PMany1(self, pe, step):
pf = self.emit(pe.e, step)
def match_many1(px):
if pf(px):
pos = px.pos
ast = px.ast
while pf(px) and pos < px.pos:
pos = px.pos
ast = px.ast
px.headpos = max(px.pos, px.headpos)
px.pos = pos
px.ast = ast
return True
return False
return match_many1
def POption(self, pe, step):
pf = self.emit(pe.e, step)
def match_option(px):
pos = px.pos
ast = px.ast
if not pf(px):
px.headpos = max(px.pos, px.headpos)
px.pos = pos
px.ast = ast
return True
return match_option
# Seq
def PSeq(self, pe, step):
# if len(pe) == 2:
# return self.Seq2(pe)
# if len(pe) == 3:
# return self.Seq3(pe)
#
pfs = []
for e in pe:
pfs.append(self.emit(e, step))
step += e.minLen()
pfs = tuple(pfs)
def match_seq(px):
for pf in pfs:
if not pf(px):
return False
return True
return match_seq
# Ore
def POre(self, pe: POre, step):
# pe2 = Ore.expand(pe)
# if not isinstance(pe2, Ore):
# return self.emit(pe2)
# pe = pe2
if pe.isDict():
dic = pe.trieDict()
DEBUG('DIC', dic)
return lambda px: match_trie(px, dic)
pfs = tuple(map(lambda e: self.emit(e, step), pe))
def match_ore(px):
pos = px.pos
ast = px.ast
for pf in pfs:
if pf(px):
return True
px.headpos = max(px.pos, px.headpos)
px.pos = pos
px.ast = ast
return False
return match_ore
def PRef(self, pe, step):
uname = pe.uname()
generated = self.generated
if uname not in generated:
generated[uname] = lambda px: generated[uname](px)
return generated[uname]
# Tree Construction
def PNode(self, pe, step):
pf = self.emit(pe.e, step)
node = pe.tag
def make_tree(px):
pos = px.pos
prev = px.ast
px.ast = None
if pf(px):
px.ast = PTree(prev, node, pos, px.pos, px.ast)
return True
#px.ast = prev
return False
return make_tree
def PEdge(self, pe, step):
pf = self.emit(pe.e, step)
edge = pe.edge
# if edge == '': return pf
def match_edge(px):
pos = px.pos
prev = px.ast
px.ast = None
if pf(px):
px.ast = PTree(prev, edge, pos, -px.pos, px.ast)
return True
#px.ast = prev
return False
return match_edge
def PFold(self, pe, step):
pf = self.emit(pe.e, step)
node = pe.tag
edge = pe.edge
def match_fold(px):
pos = px.pos
#pprev = px.ast
prev, pt = splitPTree(px.ast)
px.ast = pt if edge == '' else PTree(None, edge, 0, -pos, pt)
if pf(px):
px.ast = PTree(prev, node, pos, px.pos, px.ast)
return True
#px.ast = pprev
return False
return match_fold
def PAbs(self, pe, step):
pf = self.emit(pe.e, step)
def match_abs(px):
ast = px.ast
if pf(px):
px.ast = ast
return True
return False
return match_abs
# StateTable
# def adddict(px, s):
# if len(s) == 0:
# return
# key = s[0]
# if key in px.memo:
# l = px.memo[key]
# slen = len(s)
# for i in range(len(l)):
# if slen > len(l[i]):
# l.insert(i, s)
# return
# l.append(s)
# else:
# px.memo[key] = [s]
# def Lazy(self, pe, step): # @lazy(A)
# name = pe.e.name
# peg = self.peg
# return peg.newRef(name).gen(**option) if name in peg else pe.e.gen(**option)
def Skip(self, pe, step): # @skip()
def skip(px):
px.pos = min(px.headpos, px.epos)
return True
return skip
def Symbol(self, pe, step): # @symbol(A)
params = pe.params
sid = self.getsid(str(params[0]))
pf = self.emit(pe.e, step)
def match_symbol(px):
pos = px.pos
if pf(px):
px.state = State(sid, px.inputs[pos:px.pos], px.state)
return True
return False
return match_symbol
def Scope(self, pe, step):
pf = self.emit(pe.e, step)
def scope(px):
state = px.state
res = pf(px)
px.state = state
return res
return scope
def Exists(self, pe, step): # @Match(A)
params = pe.params
sid = self.getsid(str(params[0]))
return lambda px: px.getstate(px.state, sid) != None
def Match(self, pe, step): # @Match(A)
params = pe.params
sid = self.getsid(str(params[0]))
#pf = self.emit(pe.e)
def match(px):
state = px.getstate(px.state, sid)
if state is not None and px.inputs.startswith(state.val, px.pos):
px.pos += len(state.val)
return True
return False
return match
def Def(self, pe, step):
params = pe.params
name = str(params[0])
pf = self.emit(pe.e, step)
def define_dict(px):
pos = px.pos
if pf(px):
s = px.inputs[pos:px.pos]
if len(s) == 0:
return True
if name in px.memo:
d = px.memo[name]
else:
d = {}
px.memo[name] = d
key = s[0]
if not key in d:
d[key] = [s]
return True
l = d[key]
slen = len(s)
for i in range(len(l)):
if slen > len(l[i]):
l.insert(i, s)
break
return True
return False
return define_dict
def In(self, pe, step): # @in(NAME)
params = pe.params
name = str(params[0])
def refdict(px):
if name in px.memo and px.pos < px.epos:
d = px.memo[name]
key = px.inputs[px.pos]
if key in d:
for s in d[key]:
if px.inputs.startswith(s, px.pos):
px.pos += len(s)
return True
return False
return refdict
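# Sketch of how the Def/In pair cooperates (names are illustrative): once Def has
# recorded "for" and then "foreach" under the name 'KEYWORD', px.memo['KEYWORD']
# holds {'f': ['foreach', 'for']} -- candidates keyed by first character, longer
# strings first -- and In('KEYWORD') tries them in that order, which gives
# longest-match behaviour against the remaining input.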
'''
if fname == 'on': # @on(!A, e)
name = str(params[0])
pf = pe.e.gen(**option)
if name.startswith('!'):
sid = getsid(name[1:])
def off(px):
state = px.state
px.state = State(sid, False, px.state)
res = pf(px)
px.state = state
return res
return off
else:
sid = getsid(name[1:])
def on(px):
state = px.state
px.state = State(sid, False, px.state)
res = pf(px)
px.state = state
return res
return on
if fname == 'if': # @if(A)
sid = getsid(str(params[0]))
def cond(px):
state = getstate(px.state, sid)
return state != None and state.val
return cond
'''
generator = Generator()
def generate(peg, **options):
return generator.generate(peg, **options)
# ParseTree
UNKNOWN_URN = '(unknown source)'
def rowcol(urn, inputs, spos):
inputs = inputs[:spos + (1 if len(inputs) > spos else 0)]
rows = inputs.split(b'\n' if isinstance(inputs, bytes) else '\n')
return urn, spos, len(rows), len(rows[-1])-1
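# For instance, rowcol('file.txt', 'ab\ncd', 4) returns ('file.txt', 4, 2, 1):
# position 4 is the 'd' on line 2, at column index 1.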
def nop(s): return s
class ParseTree(list):
def __init__(self, tag, inputs, spos=0, epos=None, urn=UNKNOWN_URN):
self.tag_ = tag
self.inputs_ = inputs
self.spos_ = spos
self.epos_ = epos if epos is not None else len(inputs)
self.urn_ = urn
def gettag(self):
return self.tag_
def start(self):
return rowcol(self.urn_, self.inputs_, self.spos_)
def end(self):
return rowcol(self.urn_, self.inputs_, self.epos_)
def decode(self):
inputs, spos, epos = self.inputs_, self.spos_, self.epos_
LF = b'\n' if isinstance(inputs, bytes) else '\n'
rows = inputs[:spos + (1 if len(inputs) > spos else 0)]
rows = rows.split(LF)
linenum, column = len(rows), len(rows[-1])-1
begin = inputs.rfind(LF, 0, spos) + 1
#print('@', spos, begin, inputs)
end = inputs.find(LF, spos)
#print('@', spos, begin, inputs)
if end == -1:
end = len(inputs)
#print('@[', begin, spos, end, ']', epos)
line = inputs[begin:end] # .replace('\t', ' ')
mark = []
endcolumn = column + (epos - spos)
for i, c in enumerate(line):
if column <= i and i <= endcolumn:
mark.append('^')
from django.contrib.auth.models import User
from django.contrib.messages import get_messages
from django.test import TestCase
from django.utils import timezone
from dfirtrack.settings import BASE_DIR
from dfirtrack_main.importer.file.csv import system_cron
from dfirtrack_main.tests.system_importer.config_functions import set_csv_import_username, set_csv_import_filename, set_csv_import_path
import os
import urllib.parse
def create_file_no_read_permission(csv_import_path, csv_import_filename):
""" create a file and remove all permissions """
# build csv file path
csv_path = f'{csv_import_path}/{csv_import_filename}'
# create file
csv_file = open(csv_path, 'w')
# write content to file
csv_file.write('This is no valid CSV file but that does not matter at the moment.')
# close file
csv_file.close()
# remove all permissions
os.chmod(csv_path, 0000)
# return to test function
return
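# Usage sketch (the file name is illustrative): the permission tests below rely on
# this helper, e.g. create_file_no_read_permission('/tmp', 'example_no_read.csv').
# Note that chmod 0000 does not restrict the root user, so these checks implicitly
# assume the test suite is not run as root.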
class SystemImporterFileCsvCheckConfigContentFileSystemViewTestCase(TestCase):
""" system importer file CSV view tests """
@classmethod
def setUpTestData(cls):
""" one-time setup """
# create users
test_user = User.objects.create_user(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
User.objects.create_user(username='message_user', password='<PASSWORD>')
# change config
set_csv_import_username(test_user)
""" path not existing """
def test_system_importer_file_csv_check_content_file_system_create_cron_path_not_existing(self):
""" test importer view """
# change config
set_csv_import_path('/path_not_existing')
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/cron/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'CSV import path does not exist. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_check_content_file_system_cron_path_not_existing(self):
""" test importer view """
# change config
set_csv_import_path('/path_not_existing')
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_check_content_file_system')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] CSV import path does not exist. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] CSV import path does not exist. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_check_content_file_system_instant_path_not_existing(self):
""" test importer view """
# change config
set_csv_import_path('/path_not_existing')
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'CSV import path does not exist. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
""" path no read permission """
def test_system_importer_file_csv_check_content_file_system_create_cron_path_no_read_permission(self):
""" test importer view """
# change config
set_csv_import_path('/root')
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/cron/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'No read permission for CSV import path. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_check_content_file_system_cron_path_no_read_permission(self):
""" test importer view """
# change config
set_csv_import_path('/root')
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_check_content_file_system')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] No read permission for CSV import path. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] No read permission for CSV import path. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_check_content_file_system_instant_path_no_read_permission(self):
""" test importer view """
# change config
set_csv_import_path('/root')
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'No read permission for CSV import path. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
""" file not existing """
def test_system_importer_file_csv_check_content_file_system_create_cron_file_not_existing(self):
""" test importer view """
# change config
set_csv_import_path('/tmp')
# change config
set_csv_import_filename('filename_not_existing.abc')
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/cron/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'CSV import file does not exist. Check config or provide file!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_check_content_file_system_cron_file_not_existing(self):
""" test importer view """
# change config
set_csv_import_path('/tmp')
# change config
set_csv_import_filename('filename_not_existing.abc')
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_check_content_file_system')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] CSV import file does not exist. Check config or provide file!')
self.assertEqual(messages[0].level_tag, 'error')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] CSV import file does not exist. Check config or provide file!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_check_content_file_system_instant_file_not_existing(self):
""" test importer view """
# change config
set_csv_import_path('/tmp')
# change config
set_csv_import_filename('filename_not_existing.abc')
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'CSV import file does not exist. Check config or provide file!')
self.assertEqual(messages[0].level_tag, 'error')
""" file no read permission """
def test_system_importer_file_csv_check_content_file_system_create_cron_file_no_read_permission(self):
""" test importer view """
# get timestamp string
t1 = timezone.now().strftime('%Y%m%d_%H%M%S')
# set file system attributes
csv_import_path = '/tmp'
csv_import_filename = f'{t1}_create_cron_no_read_permission.csv'
# create file
create_file_no_read_permission(csv_import_path, csv_import_filename)
# change config
set_csv_import_path(csv_import_path)
# change config
set_csv_import_filename(csv_import_filename)
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/cron/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'No read permission for CSV import file. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_check_content_file_system_cron_file_no_read_permission(self):
""" test importer view """
# get timestamp string
t1 = timezone.now().strftime('%Y%m%d_%H%M%S')
# set file system attributes
csv_import_path = '/tmp'
csv_import_filename = f'{t1}_cron_no_read_permission.csv'
# create file
create_file_no_read_permission(csv_import_path, csv_import_filename)
# change config
set_csv_import_path(csv_import_path)
# change config
set_csv_import_filename(csv_import_filename)
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_check_content_file_system')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] No read permission for CSV import file. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] No read permission for CSV import file. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_check_content_file_system_instant_file_no_read_permission(self):
""" test importer view """
# get timestamp string
t1 = timezone.now().strftime('%Y%m%d_%H%M%S')
# set file system attributes
csv_import_path = '/tmp'
csv_import_filename = f'{t1}_instant_no_read_permission.csv'
# create file
create_file_no_read_permission(csv_import_path, csv_import_filename)
# change config
set_csv_import_path(csv_import_path)
# change config
set_csv_import_filename(csv_import_filename)
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'No read permission for CSV import file. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
""" file empty """
def test_system_importer_file_csv_check_content_file_system_create_cron_file_empty(self):
""" test importer view """
# change config
set_csv_import_path(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/'))
# change config
set_csv_import_filename('system_importer_file_csv_testfile_06_empty.csv')
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/cron/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'CSV import file is empty. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_check_content_file_system_cron_file_empty(self):
""" test importer view """
# change config
set_csv_import_path(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/'))
# change config
set_csv_import_filename('system_importer_file_csv_testfile_06_empty.csv')
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_check_content_file_system')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] CSV import file is empty. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] CSV import file is empty. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_check_content_file_system_instant_file_empty(self):
""" test importer view """
# change config
set_csv_import_path(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/'))
# change config
set_csv_import_filename('system_importer_file_csv_testfile_06_empty.csv')
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_system', password='<PASSWORD>')
self.label10.grid(row=12, column=0, padx=40, pady=2, sticky='w')
self.entry10 = tk.Entry(master, width=13, textvariable=self.var10)
self.entry10.grid(row=12, column=1, padx=5, pady=2, sticky='w',
columnspan = 2)
self.errtx10 = tk.Label(master, text='', fg='red' )
self.errtx10.grid(row=12, column=4, padx=5, pady=2, sticky='w')
# Maintenance Tolerance Band (km)
self.label11 = tk.Label(master, text=self.txt11 )
self.label11.grid(row=13, column=0, padx=40, pady=2, sticky='w')
self.entry11 = tk.Entry(master, width=8, textvariable=self.var11)
self.entry11.grid(row=13, column=1, padx=5, pady=2, sticky='w')
self.errtx11 = tk.Label(master, text='', fg='red' )
self.errtx11.grid(row=13, column=4, padx=5, pady=2, sticky='w')
# Toggle for normal or FRO Computation
self.label12 = tk.Label(master, text=self.txt12 )
self.label12.grid(row=14, column=0, padx=40, pady=2, sticky='w')
self.entry12 = tk.Checkbutton(master, text='True/False',
variable=self.var12)
self.entry12.grid(row=14, column=1, padx=5, pady=2, sticky='w')
self.errtx12 = tk.Label(master, text='', fg='red' )
self.errtx12.grid(row=14, column=4, padx=5, pady=2, sticky='w')
# Spacecraft wet mass (kg)
self.label13 = tk.Label(master, text=self.txt13 )
self.label13.grid(row=15, column=0, padx=40, pady=2, sticky='w')
self.entry13 = tk.Entry(master, width=8, textvariable=self.var13)
self.entry13.grid(row=15, column=1, padx=5, pady=2, sticky='w')
self.errtx13 = tk.Label(master, text='', fg='red' )
self.errtx13.grid(row=15, column=4, padx=5, pady=2, sticky='w')
# Specific impulse Isp minimum for x-axis (s)
self.label14 = tk.Label(master, text=self.txt14 )
self.label14.grid(row=16, column=0, padx=40, pady=2, sticky='w')
self.entry14 = tk.Entry(master, width=8, textvariable=self.var14)
self.entry14.grid(row=16, column=1, padx=5, pady=2, sticky='w')
self.errtx14 = tk.Label(master, text='', fg='red' )
self.errtx14.grid(row=16, column=4, padx=5, pady=2, sticky='w')
# Specific impulse Isp maximum for x-axis (s)
self.label15 = tk.Label(master, text=self.txt15 )
self.label15.grid(row=17, column=0, padx=40, pady=2, sticky='w')
self.entry15 = tk.Entry(master, width=8, textvariable=self.var15)
self.entry15.grid(row=17, column=1, padx=5, pady=2, sticky='w')
self.errtx15 = tk.Label(master, text='', fg='red' )
self.errtx15.grid(row=17, column=4, padx=5, pady=2, sticky='w')
#####################################################################
#####################################################################
### ###
### Configure the plot area in the GUI ###
### ###
#####################################################################
#####################################################################
        # Now, we add a sub-frame in the tkinter GUI so that we can embed
        # the output plots for orbit maintenance and decay
self.toolbarFrame = tk.Frame(master)
self.toolbarFrame.grid(row=2, column=5, padx=20, pady=10,
columnspan=4, rowspan=18, sticky='s')
# Create the 2D matplotlib figure, with three subplots.
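        # Note: master.winfo_fpixels('2.0c') returns the number of screen pixels
        # spanned by 2 cm, so the figure DPI scales with the display resolution.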
self.Fig = Figure(figsize=(7,5), dpi = master.winfo_fpixels('2.0c'),
linewidth=8, edgecolor="#DDDDDD")
self.Axis211 = self.Fig.add_subplot(211) # Plot propulsion sizing
self.Axis223 = self.Fig.add_subplot(223) # Plot altitude profile
self.Axis224 = self.Fig.add_subplot(224) # Plot SMA profile
self.Fig.set_tight_layout(True)
self.FigPlot = FigureCanvasTkAgg(self.Fig, self.toolbarFrame)
self.FigPlot.get_tk_widget().pack(expand=True)
# Plotting of altitudes (titles and axis only)
self.Axis223.set_ylabel('Altitude (km)')
self.Axis223.set_xlabel('Date-Time')
# Plotting of Kozai-Izsak mean semi-major axes (titles and axis only)
self.Axis224.set_ylabel('Mean Semimajor Axis (km)')
self.Axis224.set_xlabel('Date-Time')
# Thruster sizing profile of Isp Against Mass (titles and axis only)
self.Axis211.set_ylabel('Mass of Fuel Required (kg)')
self.Axis211.set_xlabel('Specific Impulse (s)')
        # At this point, you can insert plots if you want. For example,
        # self.Axis223.scatter([1,2,3],[1,2,3])
self.FigPlot.draw()
# Add the matplotlib navigation toolbar.
self.toolbar = NavigationToolbar2Tk(self.FigPlot, self.toolbarFrame)
self.toolbar.update()
#####################################################################
#####################################################################
### ###
### Finally, define string containers for error and warning ###
### messages to inform the user if input conditions violate ###
### formatting or physical principles. If the length of this ###
### string variable > 0, then it triggers an error message. ###
### ###
#####################################################################
#####################################################################
self.error_msgprint = '' # Error message to print.
#########################################################################
#########################################################################
### ###
### Method to load default values from config.txt ###
### ###
#########################################################################
#########################################################################
def cfg_R(self):
# First, ask the user if he/she wishes to proceed.
cfg_R_msg = 'Load parameters from the "config.txt" file? \n'
cfg_R_msg += '(This will overwrite existing inputs in the GUI!)'
cfg_R_ask = tk.messagebox.askyesno('Load Config', cfg_R_msg)
if cfg_R_ask == False:
return None
# Else, continue with loading the configuration file.
cwd = dirname(dirname(abspath(__file__))) # Current working directory
        iwd = cwd + '\\config\\config.txt' # Path to the config.txt input file
        inputfile = open(iwd,'r') # Open the config.txt file
        inps = {} # Create a dictionary to store all the inputs
integers = [ 'orbsim' ]
floats = ['sc_Cd','sc_Ad','orb_a','orb_e','orb_i','orb_R','orb_w',
'orb_m','orbm_tolr','sc_mass','isp_min','isp_max']
#####################################################################
#####################################################################
### ###
### Parsing through the config.txt file to extract parameters ###
### ###
#####################################################################
#####################################################################
# Now we parse through the config.txt file.
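        # Based on the slicing below, each input entry appears to start with 'I'
        # followed by a key/value pair from the fourth character onward, e.g.
        # (assumed format, not verified against an actual config.txt):
        # I: orbsim 1
        # I: sc_Cd 2.2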
for line in inputfile:
# Check for input entry with an 'I', then split and format.
if line[0] == 'I':
line_inp = line[3:].split()
# Now, let's try to parse parameters meant to be integers.
if line_inp[0] in integers:
try:
inps[ line_inp[0] ] = int(line_inp[1])
except ValueError:
errmsg = 'Error, expected an integer when reading '
errmsg = errmsg + line_inp[0] + ' in config.txt! \n'
print(errmsg)
self.error_msgprint += errmsg
inps[ line_inp[0] ] = 'Invalid'
                # Then, we parse parameters meant to be floats.
elif line_inp[0] in floats:
try:
inps[ line_inp[0] ] = float(line_inp[1])
except ValueError:
errmsg = 'Error, expected a float when reading '
errmsg = errmsg + line_inp[0] + ' in config.txt! \n'
print(errmsg)
self.error_msgprint += errmsg
inps[ line_inp[0] ] = 'Invalid'
# For all other parameters, just log them down as strings.
else:
inps[ line_inp[0] ] = line_inp[1]
# Close the file when done
inputfile.close()
# Prepare a dictionary to convert month strings into integers.
months_dict = {'Jan':1, 'Feb':2, 'Mar':3, 'Apr':4,
'May':5, 'Jun':6, 'Jul':7, 'Aug':8,
'Sep':9, 'Oct':10,'Nov':11,'Dec':12}
#####################################################################
#####################################################################
### ###
### Parsing through the inputs dictionary to verify parameters ###
### ###
#####################################################################
#####################################################################
# 0. First, check for the orbit simulation program (no `errtx00`).
if inps['orbsim'] == 1:
errmsg = ''
self.entry00a.select()
self.entry00b.deselect()
self.entry00c.deselect()
self.errtx00.configure(text='')
elif inps['orbsim'] == 2:
errmsg = ''
self.entry00a.deselect()
self.entry00b.select()
self.entry00c.deselect()
self.errtx00.configure(text='')
elif inps['orbsim'] == 3:
errmsg = ''
self.entry00a.deselect()
self.entry00b.deselect()
self.entry00c.select()
self.errtx00.configure(text='')
else:
errmsg = 'Invalid simulation option! Check config.txt! \n'
self.entry00a.deselect()
self.entry00b.deselect()
self.entry00c.deselect()
self.errtx00.configure(text='!')
self.error_msgprint += errmsg
#####################################################################
#####################################################################
# 1. Check for the first epoch string.
self.var01.set(inps['tstart'])
if inps['tstart'].count('-') == 3:
inp_v01s = inps['tstart'].split('-')
# Check if it can be converted into a datetime object.
try:
time_str1 = inp_v01s[3].split(':')
inp_v01d = datetime.datetime(int(inp_v01s[2]),
int(months_dict[inp_v01s[1]]),
int(inp_v01s[0]),
int(time_str1[0]),
int(time_str1[1]),
int(float(time_str1[2])))
errmsg = ''
self.errtx01.configure(text='')
# If not, throw an exception and add it to the error log.
except:
errmsg = 'Error! Invalid date and time parameters! \n'
self.errtx01.configure(text='!')
# Else, throw a formatting error.
else:
errmsg = 'Error! Invalid date time format in config.txt! \n'
self.errtx01.configure(text='!')
self.error_msgprint += errmsg
#####################################################################
#####################################################################
# 2. Check for the final epoch string
self.var02.set(inps['tfinal'])
if inps['tfinal'].count('-') == 3:
inp_v02s = inps['tfinal'].split('-')
# Check if it can be converted into a datetime object.
try:
time_str2 = inp_v02s[3].split(':')
inp_v02d = datetime.datetime(int(inp_v02s[2]),
int(months_dict[inp_v02s[1]]),
int(inp_v02s[0]),
int(time_str2[0]),
int(time_str2[1]),
int(float(time_str2[2])))
# Check if the final epoch is after the initial epoch.
if inp_v02d <= inp_v01d:
                    errmsg = 'Error! The final epoch is before the start epoch! \n'
self.errtx02.configure(text='!')
else:
errmsg = ''
self.errtx02.configure(text='')
# If not, throw an exception and add it to the error log.
except:
errmsg = 'Error! Invalid date and time parameters! \n'
self.errtx02.configure(text='!')
# Else, throw a formatting error.
else:
errmsg = 'Error! Invalid date time format in config.txt! \n'
self.errtx02.configure(text='!')
self.error_msgprint += errmsg
#####################################################################
#####################################################################
# 3. Check for Spacecraft Atmospheric Drag Coefficient (Cd)
self.var03.set(inps['sc_Cd'])
if type(inps['sc_Cd']) == float and inps['sc_Cd'] > 0.0:
errmsg = ''
self.errtx03.configure(text='')
else:
errmsg = 'Error! Drag coefficient must be a positive float! \n'
self.errtx03.configure(text='!')
self.error_msgprint += errmsg
#####################################################################
#####################################################################
# 4. Check for Spacecraft Atmospheric Drag Surface Area (m^2)
self.var04.set(inps['sc_Ad'])
if type(inps['sc_Ad']) == float and inps['sc_Ad'] > 0.0:
errmsg = ''
self.errtx04.configure(text='')
else:
errmsg = 'Error! Drag surface area must be a positive float! \n'
self.errtx04.configure(text='!')
self.error_msgprint += errmsg
#####################################################################
#####################################################################
# 5. Check for Orbit Semi-Major Axis (km)
self.var05.set(inps['orb_a'])
if type(inps['orb_a']) != float:
errmsg = 'Error! Semi-major axis must be a float! \n'
self.errtx05.configure(text='!')
elif inps['orb_a'] < 6378.14:
errmsg = 'Error! Semi-major axis below Earth surface! \n'
self.errtx05.configure(text='!')
elif inps['orb_a'] > 385000.0:
errmsg = 'Error! Semi-major axis beyond Earth orbit! \n'
self.errtx05.configure(text='!')
else:
errmsg = ''
self.errtx05.configure(text='')
self.error_msgprint += errmsg
#####################################################################
#####################################################################
# 6. Check for Orbit Eccentricity (no units)
self.var06.set(inps['orb_e'])
if type(inps['orb_e']) != float:
errmsg = 'Error! Eccentricity must be a float! \n'
self.errtx06.configure(text='!')
elif inps['orb_e'] < 0:
errmsg = 'Error! Eccentricity cannot be < 0! \n'
import os
import random
from tqdm import tqdm
from sklearn.metrics import accuracy_score, matthews_corrcoef, f1_score
from scipy.stats import pearsonr
import numpy as np
import torch
import torch.nn as nn
from torch_geometric.data import DataLoader as GraphDataLoader
from transformers import AutoTokenizer, AdamW, get_linear_schedule_with_warmup
from tensorboardX import SummaryWriter
import config
import utils
from data_loader import WrapperDataset, WiCLMDataset, WiCAuxDataset, RTELMDataset, RTEAuxDataset, STS_BLMDataset, STS_BAuxDataset, MRPCLMDataset, MRPCAuxDataset, SST_2LMDataset, SST_2AuxDataset, WNLI_TranslatedLMDataset, WNLI_TranslatedAuxDataset, IITP_Product_ReviewsLMDataset, IITP_Product_ReviewsAuxDataset, MIDAS_DiscourseLMDataset, MIDAS_DiscourseAuxDataset, DPIL_Subtask_1LMDataset, DPIL_Subtask_1AuxDataset, DPIL_Subtask_2LMDataset, DPIL_Subtask_2AuxDataset, CoLA_LMDataset, CoLAAuxDataset, KhondokerIslam_BengaliLMDataset, KhondokerIslam_BengaliAuxDataset, Rezacsedu_SentimentLMDataset, Rezacsedu_SentimentAuxDataset, BEmoCLMDataset, BEmoCLMDatasetAuxDataset, Seid_Amharic_SentimentLMDataset, Seid_Amharic_SentimentAuxDataset, Seid_Amharic_Cleaned_SentimentAuxDataset, Germeval2018LMDataset, Germeval2018_Subtask_1AuxDataset, Germeval2018_Subtask_2AuxDataset
from model import WordLevelNet, SentenceLevelNet
seed_dict = {1: 42, 2: 98, 3: 3, 4: 9, 5: 7}
seed = seed_dict[config.exp_no]
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
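# Disabling cudnn benchmarking and forcing deterministic kernels trades some
# speed for run-to-run reproducibility of the seeded experiments configured above.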
def train(model, train_loader, loss_fn, optimizer, scheduler, dev_loader, epochs=config.epochs):
writer = SummaryWriter(config.model_folder)
if(config.scoring == 'loss'):
best_val_score = np.inf
else:
best_val_score = 0
for epoch in range(epochs):
model.train()
total_loss = 0
y_actual_list = list()
y_pred_list = list()
print(f'Epoch: {epoch}')
for batch in tqdm(train_loader):
if(len(batch) == 3):
[input_ids, token_type_ids, attention_mask], batch_aux_1, batch_aux_2 = batch
else:
[input_ids, token_type_ids, attention_mask], batch_aux_1 = batch
batch_aux_2 = None
optimizer.zero_grad()
if(batch_aux_2 is not None):
assert(torch.equal(batch_aux_1.y, batch_aux_2.y))
if(config.is_classification_task):
y_actual = torch.argmax(batch_aux_1.y, dim=1)
y_actual_list += list(y_actual.cpu().data.numpy())
if(config.debug == True):
latent, y_out = model(input_ids, token_type_ids, attention_mask, batch_aux_1, batch_aux_2)
elif(config.debug == False):
y_out = model(input_ids, token_type_ids, attention_mask, batch_aux_1, batch_aux_2)
# assert(torch.isnan(y_out).any() == False)
output_probs = y_out
y_pred = torch.argmax(output_probs, dim=1)
y_pred_list += list(y_pred.cpu().data.numpy())
else:
y_actual = batch_aux_1.y
y_actual_list += list(y_actual.cpu().data.numpy().flatten())
if(config.debug == True):
latent, y_out = model(input_ids, token_type_ids, attention_mask, batch_aux_1, batch_aux_2)
elif(config.debug == False):
y_out = model(input_ids, token_type_ids, attention_mask, batch_aux_1, batch_aux_2)
y_pred_list += list(y_out.cpu().data.numpy().flatten())
loss = loss_fn(y_out, y_actual)
loss.backward()
if(config.lm_model_name.startswith('bert') or config.lm_model_name.startswith('albert')):
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_norm)
total_loss += loss.item()
optimizer.step()
scheduler.step()
if(config.is_classification_task):
accuracy = accuracy_score(y_actual_list, y_pred_list) * 100
mcc = matthews_corrcoef(y_actual_list, y_pred_list) * 100
macro_f1 = f1_score(y_actual_list, y_pred_list, average='macro') * 100
else:
pearson, _ = pearsonr(y_actual_list, y_pred_list)
pearson = pearson * 100
writer.add_scalar('Loss/train', total_loss/len(train_loader), epoch)
if(config.is_classification_task):
writer.add_scalar('Accuracy/train', accuracy, epoch)
writer.add_scalar('Matthews/train', mcc, epoch)
writer.add_scalar('Macro_F1/train', macro_f1, epoch)
else:
writer.add_scalar('Pearson/train', pearson, epoch)
print(f'Training scores')
if(config.is_classification_task):
print(f'Loss: {total_loss/len(train_loader)}, Accuracy: {accuracy}, Matthews: {mcc}, Macro_F1: {macro_f1}')
else:
print(f'Loss: {total_loss/len(train_loader)}, Pearson: {pearson}')
val_scores = dict()
if(config.is_classification_task):
val_scores['loss'], val_scores['accuracy'], val_scores['matthews'], val_scores['macro_f1'], predictions, actuals = test(model=model, test_loader=dev_loader, loss_fn=loss_fn, writer=writer, epoch=epoch)
else:
val_scores['loss'], val_scores['pearson'] = test(model=model, test_loader=dev_loader, loss_fn=loss_fn, writer=writer, epoch=epoch)
if(config.scoring == 'loss'):
if(val_scores[config.scoring] <= best_val_score):
best_model = model
best_val_loss = val_scores['loss']
if(config.is_classification_task):
best_val_accuracy = val_scores['accuracy']
best_val_mcc = val_scores['matthews']
best_val_macro_f1 = val_scores['macro_f1']
else:
best_val_pearson = val_scores['pearson']
best_val_score = best_val_loss
torch.save(model.state_dict(), config.model_name)
else:
if(val_scores[config.scoring] >= best_val_score):
best_model = model
best_val_loss = val_scores['loss']
if(config.is_classification_task):
best_val_accuracy = val_scores['accuracy']
best_val_mcc = val_scores['matthews']
best_val_macro_f1 = val_scores['macro_f1']
else:
best_val_pearson = val_scores['pearson']
if(config.scoring == 'accuracy'):
best_val_score = best_val_accuracy
elif(config.scoring == 'matthews'):
best_val_score = best_val_mcc
elif(config.scoring == 'pearson'):
best_val_score = best_val_pearson
elif(config.scoring == 'macro_f1'):
best_val_score = best_val_macro_f1
torch.save(model.state_dict(), config.model_name)
writer.close()
if(config.is_classification_task):
print(f'Scoring: {config.scoring}, Validation Loss: {best_val_loss}, Validation Accuracy: {best_val_accuracy}, Validation Matthews: {best_val_mcc}, Validation Macro_F1: {best_val_macro_f1}')
else:
print(f'Scoring: {config.scoring}, Validation Loss: {best_val_loss}, Validation Pearson: {best_val_pearson}')
return best_model
def test(model, test_loader, loss_fn, writer=None, epoch=None):
model.eval()
total_loss = 0
y_actual_list = list()
y_pred_list = list()
for batch in tqdm(test_loader):
if(len(batch) == 3):
[input_ids, token_type_ids, attention_mask], batch_aux_1, batch_aux_2 = batch
else:
[input_ids, token_type_ids, attention_mask], batch_aux_1 = batch
batch_aux_2 = None
if(batch_aux_2 is not None):
assert(torch.equal(batch_aux_1.y, batch_aux_2.y))
if(config.is_classification_task):
y_actual = torch.argmax(batch_aux_1.y, dim=1)
y_actual_list += list(y_actual.cpu().data.numpy())
if(config.debug == True):
latent, y_out = model(input_ids, token_type_ids, attention_mask, batch_aux_1, batch_aux_2)
elif(config.debug == False):
y_out = model(input_ids, token_type_ids, attention_mask, batch_aux_1, batch_aux_2)
output_probs = y_out
y_pred = torch.argmax(output_probs, dim=1)
y_pred_list += list(y_pred.cpu().data.numpy())
else:
y_actual = batch_aux_1.y
y_actual_list += list(y_actual.cpu().data.numpy().flatten())
if(config.debug == True):
latent, y_out = model(input_ids, token_type_ids, attention_mask, batch_aux_1, batch_aux_2)
elif(config.debug == False):
y_out = model(input_ids, token_type_ids, attention_mask, batch_aux_1, batch_aux_2)
y_pred_list += list(y_out.cpu().data.numpy().flatten())
loss = loss_fn(y_out, y_actual)
total_loss += loss.item()
if(config.is_classification_task):
accuracy = accuracy_score(y_actual_list, y_pred_list) * 100
mcc = matthews_corrcoef(y_actual_list, y_pred_list) * 100
macro_f1 = f1_score(y_actual_list, y_pred_list, average='macro') * 100
else:
pearson, _ = pearsonr(y_actual_list, y_pred_list)
pearson = pearson * 100
    # Explicit None checks so that metrics are still logged for epoch 0.
    if(writer is not None and epoch is not None):
writer.add_scalar('Loss/test', total_loss/len(test_loader), epoch)
if(config.is_classification_task):
writer.add_scalar('Accuracy/test', accuracy, epoch)
writer.add_scalar('Matthews/test', mcc, epoch)
writer.add_scalar('Macro_F1/test', macro_f1, epoch)
else:
writer.add_scalar('Pearson/test', pearson, epoch)
print(f'Testing scores')
if(config.is_classification_task):
print(f'Loss: {total_loss/len(test_loader)}, Accuracy: {accuracy}, Matthews: {mcc}, Macro_F1: {macro_f1}')
return total_loss/len(test_loader), accuracy, mcc, macro_f1, y_pred_list, y_actual_list
else:
print(f'Loss: {total_loss/len(test_loader)}, Pearson: {pearson}')
return total_loss/len(test_loader), pearson
def WiC_main():
assert(config.experiment == 'WiC')
tokenizer = AutoTokenizer.from_pretrained(config.lm_model_name, do_lower_case=True)
DT_G = utils.load_DT()
model = WordLevelNet(tokenizer=tokenizer, num_output_classes=len(config.WiC_labels))
model.to(config.device)
print(f'Total number of parameters: {sum(p.numel() for p in model.parameters())}')
print(f'Total number of trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}')
if(config.train):
WiC_train_data_frame = utils.get_WiC_data_frame(config.WiC_train_data_path, config.WiC_train_gold_path)
train_LM_dataset = WiCLMDataset(WiC_train_data_frame, tokenizer)
train_dataset_aux_1 = WiCAuxDataset(root='../data/WiC/train/', data_frame=WiC_train_data_frame, DT_G=DT_G, is_sentence_1=True)
train_dataset_aux_2 = WiCAuxDataset(root='../data/WiC/train/', data_frame=WiC_train_data_frame, DT_G=DT_G, is_sentence_2=True)
train_loader = GraphDataLoader(WrapperDataset(train_LM_dataset, train_dataset_aux_1, train_dataset_aux_2), batch_size=config.batch_size)
WiC_dev_data_frame = utils.get_WiC_data_frame(config.WiC_dev_data_path, config.WiC_dev_gold_path)
dev_LM_dataset = WiCLMDataset(WiC_dev_data_frame, tokenizer)
dev_dataset_aux_1 = WiCAuxDataset(root='../data/WiC/dev/', data_frame=WiC_dev_data_frame, DT_G=DT_G, is_sentence_1=True)
dev_dataset_aux_2 = WiCAuxDataset(root='../data/WiC/dev/', data_frame=WiC_dev_data_frame, DT_G=DT_G, is_sentence_2=True)
dev_loader = GraphDataLoader(WrapperDataset(dev_LM_dataset, dev_dataset_aux_1, dev_dataset_aux_2), batch_size=config.batch_size)
no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
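        # Exclude bias and LayerNorm/layer_norm weights from weight decay, as is
        # standard practice when fine-tuning transformer encoders.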
optimizer_grouped_parameters = [{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0,}, {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],},]
optimizer = AdamW(optimizer_grouped_parameters, lr=config.learning_rate, betas=(config.beta_1, config.beta_2), weight_decay=config.weight_decay)
t_total = (len(train_loader.dataset) // config.batch_size) * float(config.epochs)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config.warmup_ratio*t_total, num_training_steps=t_total)
loss_fn = nn.CrossEntropyLoss()
print(f'Training: {config.experiment}')
train(model=model, train_loader=train_loader, loss_fn=loss_fn, optimizer=optimizer, scheduler=scheduler, dev_loader=dev_loader)
else:
WiC_dev_data_frame = utils.get_WiC_data_frame(config.WiC_dev_data_path, config.WiC_dev_gold_path)
dev_LM_dataset = WiCLMDataset(WiC_dev_data_frame, tokenizer)
dev_dataset_aux_1 = WiCAuxDataset(root='../data/WiC/dev/', data_frame=WiC_dev_data_frame, DT_G=DT_G, is_sentence_1=True)
dev_dataset_aux_2 = WiCAuxDataset(root='../data/WiC/dev/', data_frame=WiC_dev_data_frame, DT_G=DT_G, is_sentence_2=True)
dev_loader = GraphDataLoader(WrapperDataset(dev_LM_dataset, dev_dataset_aux_1, dev_dataset_aux_2), batch_size=config.batch_size)
loss_fn = nn.CrossEntropyLoss()
model.load_state_dict(torch.load(config.model_name))
print(f'Testing')
return test(model=model, test_loader=dev_loader, loss_fn=loss_fn)
def RTE_main():
assert(config.experiment == 'RTE')
tokenizer = AutoTokenizer.from_pretrained(config.lm_model_name, do_lower_case=True)
DT_G = utils.load_DT()
model = SentenceLevelNet(tokenizer=tokenizer, num_output_classes=len(config.RTE_labels))
model.to(config.device)
print(f'Total number of parameters: {sum(p.numel() for p in model.parameters())}')
print(f'Total number of trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}')
if(config.train):
RTE_train_data_frame = utils.get_RTE_data_frame(config.RTE_train_data_path)
train_LM_dataset = RTELMDataset(RTE_train_data_frame, tokenizer)
train_dataset_aux_1 = RTEAuxDataset(root='../data/glue_data/RTE/train/', data_frame=RTE_train_data_frame, DT_G=DT_G, is_sentence_1=True)
train_dataset_aux_2 = RTEAuxDataset(root='../data/glue_data/RTE/train/', data_frame=RTE_train_data_frame, DT_G=DT_G, is_sentence_2=True)
train_loader = GraphDataLoader(WrapperDataset(train_LM_dataset, train_dataset_aux_1, train_dataset_aux_2), batch_size=config.batch_size)
RTE_dev_data_frame = utils.get_RTE_data_frame(config.RTE_dev_data_path)
dev_LM_dataset = RTELMDataset(RTE_dev_data_frame, tokenizer)
dev_dataset_aux_1 = RTEAuxDataset(root='../data/glue_data/RTE/dev/', data_frame=RTE_dev_data_frame, DT_G=DT_G, is_sentence_1=True)
dev_dataset_aux_2 = RTEAuxDataset(root='../data/glue_data/RTE/dev/', data_frame=RTE_dev_data_frame, DT_G=DT_G, is_sentence_2=True)
dev_loader = GraphDataLoader(WrapperDataset(dev_LM_dataset, dev_dataset_aux_1, dev_dataset_aux_2), batch_size=config.batch_size)
no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
optimizer_grouped_parameters = [{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0,}, {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],},]
optimizer = AdamW(optimizer_grouped_parameters, lr=config.learning_rate, betas=(config.beta_1, config.beta_2), weight_decay=config.weight_decay)
t_total = (len(train_loader.dataset) // config.batch_size) * float(config.epochs)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config.warmup_ratio*t_total, num_training_steps=t_total)
loss_fn = nn.CrossEntropyLoss()
print(f'Training: {config.experiment}')
train(model=model, train_loader=train_loader, loss_fn=loss_fn, optimizer=optimizer, scheduler=scheduler, dev_loader=dev_loader)
else:
RTE_dev_data_frame = utils.get_RTE_data_frame(config.RTE_dev_data_path)
dev_LM_dataset = RTELMDataset(RTE_dev_data_frame, tokenizer)
dev_dataset_aux_1 = RTEAuxDataset(root='../data/glue_data/RTE/dev/', data_frame=RTE_dev_data_frame, DT_G=DT_G, is_sentence_1=True)
dev_dataset_aux_2 = RTEAuxDataset(root='../data/glue_data/RTE/dev/', data_frame=RTE_dev_data_frame, DT_G=DT_G, is_sentence_2=True)
dev_loader = GraphDataLoader(WrapperDataset(dev_LM_dataset, dev_dataset_aux_1, dev_dataset_aux_2), batch_size=config.batch_size)
loss_fn = nn.CrossEntropyLoss()
model.load_state_dict(torch.load(config.model_name))
print(f'Testing')
return test(model=model, test_loader=dev_loader, loss_fn=loss_fn)
def STS_B_main():
assert(config.experiment == 'STS_B')
tokenizer = AutoTokenizer.from_pretrained(config.lm_model_name, do_lower_case=True)
DT_G = utils.load_DT()
model = SentenceLevelNet(tokenizer=tokenizer, num_output_classes=1)
model.to(config.device)
print(f'Total number of parameters: {sum(p.numel() for p in model.parameters())}')
print(f'Total number of trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}')
if(config.train):
STS_B_train_data_frame = utils.get_STS_B_data_frame(config.STS_B_train_data_path)
train_LM_dataset = STS_BLMDataset(STS_B_train_data_frame, tokenizer)
train_dataset_aux_1 = STS_BAuxDataset('../data/glue_data/STS-B/train/', data_frame=STS_B_train_data_frame, DT_G=DT_G, is_sentence_1=True)
train_dataset_aux_2 = STS_BAuxDataset('../data/glue_data/STS-B/train/', data_frame=STS_B_train_data_frame, DT_G=DT_G, is_sentence_2=True)
train_loader = GraphDataLoader(WrapperDataset(train_LM_dataset, train_dataset_aux_1, train_dataset_aux_2), batch_size=config.batch_size)
STS_B_dev_data_frame = utils.get_STS_B_data_frame(config.STS_B_dev_data_path)
dev_LM_dataset = STS_BLMDataset(STS_B_dev_data_frame, tokenizer)
dev_dataset_aux_1 = STS_BAuxDataset('../data/glue_data/STS-B/dev/', data_frame=STS_B_dev_data_frame, DT_G=DT_G, is_sentence_1=True)
dev_dataset_aux_2 = STS_BAuxDataset('../data/glue_data/STS-B/dev/', data_frame=STS_B_dev_data_frame, DT_G=DT_G, is_sentence_2=True)
dev_loader = GraphDataLoader(WrapperDataset(dev_LM_dataset, dev_dataset_aux_1, dev_dataset_aux_2), batch_size=config.batch_size)
no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
optimizer_grouped_parameters = [{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0,}, {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],},]
optimizer = AdamW(optimizer_grouped_parameters, lr=config.learning_rate, betas=(config.beta_1, config.beta_2), weight_decay=config.weight_decay)
t_total = (len(train_loader.dataset) // config.batch_size) * float(config.epochs)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config.warmup_ratio*t_total, num_training_steps=t_total)
loss_fn = nn.MSELoss()
print(f'Training: {config.experiment}')
train(model=model, train_loader=train_loader, loss_fn=loss_fn, optimizer=optimizer, scheduler=scheduler, dev_loader=dev_loader)
else:
STS_B_dev_data_frame = utils.get_STS_B_data_frame(config.STS_B_dev_data_path)
dev_LM_dataset = STS_BLMDataset(STS_B_dev_data_frame, tokenizer)
dev_dataset_aux_1 = STS_BAuxDataset('../data/glue_data/STS-B/dev/', data_frame=STS_B_dev_data_frame, DT_G=DT_G, is_sentence_1=True)
dev_dataset_aux_2 = STS_BAuxDataset('../data/glue_data/STS-B/dev/', data_frame=STS_B_dev_data_frame, DT_G=DT_G, is_sentence_2=True)
dev_loader = GraphDataLoader(WrapperDataset(dev_LM_dataset, dev_dataset_aux_1, dev_dataset_aux_2), batch_size=config.batch_size)
loss_fn = nn.MSELoss()
model.load_state_dict(torch.load(config.model_name))
print(f'Testing')
return test(model=model, test_loader=dev_loader, loss_fn=loss_fn)
def MRPC_main():
assert(config.experiment == 'MRPC')
tokenizer = AutoTokenizer.from_pretrained(config.lm_model_name, do_lower_case=True)
DT_G = utils.load_DT()
model = SentenceLevelNet(tokenizer=tokenizer, num_output_classes=len(config.MRPC_labels))
model.to(config.device)
print(f'Total number of parameters: {sum(p.numel() for p in model.parameters())}')
print(f'Total number of trainable parameters: {sum(p.numel() for | |
if not self.items:
return Type.Array(Type.Any())
item_type = Type.unify(
[item.type for item in self.items], check_quant=self._check_quant, force_string=True
)
if isinstance(item_type, Type.Any):
raise Error.IndeterminateType(self, "unable to unify array item types")
return Type.Array(item_type, optional=False, nonempty=True)
def typecheck(self, expected: Optional[Type.Base]) -> Base:
""
if not self.items and isinstance(expected, Type.Array):
# the literal empty array satisfies any array type
return self
return super().typecheck(expected) # pyre-ignore
def _eval(
self, env: Env.Bindings[Value.Base], stdlib: "Optional[StdLib.Base]" = None
) -> Value.Array:
""
assert isinstance(self.type, Type.Array)
return Value.Array(
self.type.item_type,
[item.eval(env, stdlib).coerce(self.type.item_type) for item in self.items],
)
class Pair(Base):
"""
Pair literal
"""
left: Base
"""
:type: WDL.Expr.Base
Left-hand expression in the pair literal
"""
right: Base
"""
:type: WDL.Expr.Base
Right-hand expression in the pair literal
"""
def __init__(self, pos: SourcePosition, left: Base, right: Base) -> None:
super().__init__(pos)
self.left = left
self.right = right
def __str__(self):
return "({}, {})".format(str(self.left), str(self.right))
@property
def children(self) -> Iterable[SourceNode]:
yield self.left
yield self.right
def _infer_type(self, type_env: Env.Bindings[Type.Base]) -> Type.Base:
return Type.Pair(self.left.type, self.right.type)
def _eval(
self, env: Env.Bindings[Value.Base], stdlib: "Optional[StdLib.Base]" = None
) -> Value.Base:
""
assert isinstance(self.type, Type.Pair)
lv = self.left.eval(env, stdlib)
rv = self.right.eval(env, stdlib)
return Value.Pair(self.left.type, self.right.type, (lv, rv))
class Map(Base):
"""
Map literal
"""
items: List[Tuple[Base, Base]]
"""
:type: List[Tuple[WDL.Expr.Base,WDL.Expr.Base]]
Expressions for the map literal keys and values
"""
def __init__(self, pos: SourcePosition, items: List[Tuple[Base, Base]]) -> None:
super().__init__(pos)
self.items = items
def __str__(self):
items = []
for item in self.items:
items.append("{}: {}".format(str(item[0]), str(item[1])))
return "{{{}}}".format(", ".join(items))
@property
def children(self) -> Iterable[SourceNode]:
for k, v in self.items:
yield k
yield v
def _infer_type(self, type_env: Env.Bindings[Type.Base]) -> Type.Base:
if not self.items:
return Type.Map((Type.Any(), Type.Any()), literal_keys=set())
kty = Type.unify([k.type for (k, _) in self.items], check_quant=self._check_quant)
if isinstance(kty, Type.Any):
raise Error.IndeterminateType(self, "unable to unify map key types")
vty = Type.unify(
[v.type for (_, v) in self.items], check_quant=self._check_quant, force_string=True
)
if isinstance(vty, Type.Any):
raise Error.IndeterminateType(self, "unable to unify map value types")
literal_keys = None
if kty == Type.String():
# If the keys are string constants, record them in the Type object
# for potential later use in struct coercion. (Normally the Type
# encodes the common type of the keys, but not the keys themselves)
literal_keys = set()
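            # The check below treats a String parsed into exactly three parts,
            # with a plain str in the middle, as a constant key that contains
            # no placeholder expressions.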
for k, _ in self.items:
if (
literal_keys is not None
and isinstance(k, String)
and len(k.parts) == 3
and isinstance(k.parts[1], str)
):
literal_keys.add(k.parts[1])
else:
literal_keys = None
return Type.Map((kty, vty), literal_keys=literal_keys)
def _eval(
self, env: Env.Bindings[Value.Base], stdlib: "Optional[StdLib.Base]" = None
) -> Value.Base:
""
assert isinstance(self.type, Type.Map)
eitems = []
for k, v in self.items:
eitems.append((k.eval(env, stdlib), v.eval(env, stdlib)))
# TODO: complain of duplicate keys
return Value.Map(self.type.item_type, eitems)
class Struct(Base):
"""
Struct literal
"""
members: Dict[str, Base]
"""
:type: Dict[str,WDL.Expr.Base]
The struct literal is modelled initially as a bag of keys and values, which
can be coerced to a specific struct type during typechecking.
"""
def __init__(self, pos: SourcePosition, members: List[Tuple[str, Base]]):
super().__init__(pos)
self.members = {}
for (k, v) in members:
if k in self.members:
raise Error.MultipleDefinitions(self.pos, "duplicate keys " + k)
self.members[k] = v
def __str__(self):
members = []
for member in self.members:
members.append('"{}": {}'.format(member, str(self.members[member])))
        # Returns a Map literal instead of a struct literal, as struct literal syntax is version dependent
return "{{{}}}".format(", ".join(members))
@property
def children(self) -> Iterable[SourceNode]:
return self.members.values()
def _infer_type(self, type_env: Env.Bindings[Type.Base]) -> Type.Base:
member_types = {}
for k, v in self.members.items():
member_types[k] = v.type
return Type.Object(member_types)
def _eval(
self, env: Env.Bindings[Value.Base], stdlib: "Optional[StdLib.Base]" = None
) -> Value.Base:
ans = {}
for k, v in self.members.items():
ans[k] = v.eval(env, stdlib)
assert isinstance(self.type, Type.Object)
return Value.Struct(self.type, ans)
class IfThenElse(Base):
"""
Ternary conditional expression
"""
condition: Base
"""
:type: WDL.Expr.Base
A Boolean expression for the condition
"""
consequent: Base
"""
:type: WDL.Expr.Base
Expression evaluated when the condition is true
"""
alternative: Base
"""
:type: WDL.Expr.Base
Expression evaluated when the condition is false
"""
def __init__(
self, pos: SourcePosition, condition: Base, consequent: Base, alternative: Base
) -> None:
super().__init__(pos)
self.condition = condition
self.consequent = consequent
self.alternative = alternative
def __str__(self):
return "if {} then {} else {}".format(
str(self.condition), str(self.consequent), str(self.alternative)
)
@property
def children(self) -> Iterable[SourceNode]:
yield self.condition
yield self.consequent
yield self.alternative
def _infer_type(self, type_env: Env.Bindings[Type.Base]) -> Type.Base:
# check for Boolean condition
if self.condition.type != Type.Boolean():
raise Error.StaticTypeMismatch(
self, Type.Boolean(), self.condition.type, "in if condition"
)
ty = Type.unify(
[self.consequent.type, self.alternative.type], check_quant=self._check_quant
)
if isinstance(ty, Type.Any):
raise Error.StaticTypeMismatch(
self,
self.consequent.type,
self.alternative.type,
"(unable to unify consequent & alternative types)",
)
return ty
def _eval(
self, env: Env.Bindings[Value.Base], stdlib: "Optional[StdLib.Base]" = None
) -> Value.Base:
""
if self.condition.eval(env, stdlib).expect(Type.Boolean()).value:
ans = self.consequent.eval(env, stdlib)
else:
ans = self.alternative.eval(env, stdlib)
return ans
class Ident(Base):
"""
An identifier referencing a named value or call output.
``Ident`` nodes are wrapped in ``Get`` nodes, as discussed below.
"""
name: str
""":type: str
Name, possibly including a dot-separated namespace
"""
referee: "Union[None, Tree.Decl, Tree.Call, Tree.Scatter, Tree.Gather]"
"""
After typechecking within a task or workflow, stores the AST node to which the identifier
refers: a ``WDL.Tree.Decl`` for value references; a ``WDL.Tree.Call`` for call outputs; a
``WDL.Tree.Scatter`` for scatter variables; or a ``WDL.Tree.Gather`` object representing a
value or call output that resides within a scatter or conditional section.
"""
def __init__(self, pos: SourcePosition, name: str) -> None:
super().__init__(pos)
assert name and not name.endswith(".") and not name.startswith(".") and ".." not in name
self.name = name
self.referee = None
def __str__(self):
return self.name
@property
def children(self) -> Iterable[SourceNode]:
return []
def _infer_type(self, type_env: Env.Bindings[Type.Base]) -> Type.Base:
# The following Env.resolve will never fail, as Get._infer_type does
# the heavy lifting for us.
b = type_env.resolve_binding(self.name)
ans = b.value
# referee comes from the type environment's info value
referee = b.info
if referee:
assert referee.__class__.__name__ in [
"Decl",
"Call",
"Scatter",
"Gather",
], referee.__class__.__name__
self.referee = referee
return ans
def _eval(
self, env: Env.Bindings[Value.Base], stdlib: "Optional[StdLib.Base]" = None
) -> Value.Base:
""
return env[self.name]
@property
def _ident(self) -> str:
return self.name
class _LeftName(Base):
# This AST node is a placeholder involved in disambiguating dot-separated
# identifiers (e.g. "leftname.midname.rightname") as elaborated in the Get
# docstring below. The parser, lacking the context to resolve this syntax,
# creates this node simply to represent the leftmost (sometimes only) name,
# as the innard of a Get node, potentially (not necessarily) with a
# member name. Later during typechecking, Get._infer_type folds _LeftName
# into an `Ident` expression; the library user should never have to work
# with _LeftName.
name: str
def __init__(self, pos: SourcePosition, name: str) -> None:
super().__init__(pos)
assert name
self.name = name
def __str__(self):
return self.name
def _infer_type(self, type_env: Env.Bindings[Type.Base]) -> Type.Base:
raise NotImplementedError()
def _eval(
self, env: Env.Bindings[Value.Base], stdlib: "Optional[StdLib.Base]" = None
) -> Value.Base:
raise NotImplementedError()
@property
def _ident(self) -> str:
return self.name
class Get(Base):
"""
AST node representing access to a value by identifier (including namespaced
ones), or accessing a member of a pair or struct as ``.member``.
    The entanglement of these two cases is inherent in WDL. Consider the syntax
``leftname.midname.rightname``. One interpretation is that ``leftname`` is
an identifier for a struct value, and ``.midname.rightname`` represents a
chain of struct member accesses. But another possibility is that
``leftname`` is a call, ``midname`` is a struct output of that call, and
``rightname`` is a member of that struct. These cases can't be
distinguished by the syntax parser alone, but must be resolved during
typechecking with reference to the calls and identifiers available in the
environment.
The typechecker does conveniently resolve such cases, and to minimize the
extent to which it has to restructure the AST in doing so, all identifiers
(with or without a namespace) are represented as a ``Get`` node wrapping an
``Ident`` node. The ``Get`` node may specify a member name to access, but
may not if the identifier is to be accessed directly. On the other hand,
the expression inside a ``Get`` node need not be a simple identifier, e.g.
    ``arr[1].memb.left`` is represented as:
``Get(Get(Apply("_at", Get(Ident("arr")), 1),"memb"),"left")``
"""
expr: Base
"""
:type: | |
        this method rebuilds a new instance of DistributedOptimizer, which has the
        basic Optimizer functionality plus special features for distributed training.
Args:
optimizer(Optimizer): The executor to run for init server.
strategy(DistributedStrategy): Extra properties for distributed optimizer.
It is recommended to use DistributedStrategy in fleet.init(). The strategy
here is for compatibility. If the strategy in fleet.distributed_optimizer()
is not None, then it will overwrite the DistributedStrategy in fleet.init(),
which will take effect in distributed training.
Returns:
Fleet: instance of fleet.
Examples:
.. code-block:: python
import paddle
import paddle.distributed.fleet as fleet
fleet.init(is_collective=True)
strategy = fleet.DistributedStrategy()
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
"""
self.user_defined_optimizer = optimizer
if strategy is not None:
if self._is_collective:
warnings.warn(
"It is recommended to use DistributedStrategy "
"in fleet.init(). The strategy here is only for compatibility. "
"If the strategy in fleet.distributed_optimizer() is "
"not None, then it will overwrite the DistributedStrategy in fleet.init(), "
"which will take effect in distributed training.")
self._user_defined_strategy = copy.deepcopy(strategy)
self._context = {}
# TODO(shenliang03): This is a temporary solution to support amp. In the case of a dynamic graph,
# the optimizer is returned directly. This problem will be fixed in the future.
if paddle.fluid.framework.in_dygraph_mode():
return optimizer
return self
@dygraph_only
def distributed_model(self, model):
"""
Return distributed data parallel model (Only work in dygraph mode)
Args:
            model (Layer): the user-defined model, which inherits Layer.
Returns:
distributed data parallel model which inherits Layer.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.distributed import fleet
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear1 = nn.Linear(10, 10)
self._linear2 = nn.Linear(10, 1)
def forward(self, x):
return self._linear2(self._linear1(x))
# 1. initialize fleet environment
fleet.init(is_collective=True)
# 2. create layer & optimizer
layer = LinearNet()
loss_fn = nn.MSELoss()
adam = paddle.optimizer.Adam(
learning_rate=0.001, parameters=layer.parameters())
# 3. get data_parallel model using fleet
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
# 4. run layer
inputs = paddle.randn([10, 10], 'float32')
outputs = dp_layer(inputs)
labels = paddle.randn([10, 1], 'float32')
loss = loss_fn(outputs, labels)
print("loss:", loss.numpy())
loss.backward()
adam.step()
adam.clear_grad()
"""
assert model is not None
self.model = paddle.DataParallel(
model,
comm_buffer_size=self._user_defined_strategy.fuse_grad_size_in_MB,
last_comm_buffer_size=self._user_defined_strategy.
last_comm_group_size_MB,
find_unused_parameters=self._user_defined_strategy.
find_unused_parameters)
return self.model
@dygraph_only
def state_dict(self):
"""
Get state dict information from optimizer.
(Only work in dygraph mode)
Returns:
state_dict(dict) : dict contains all the Tensor used by optimizer
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
fleet.init(is_collective=True)
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
state_dict = adam.state_dict()
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.state_dict()
@dygraph_only
def set_state_dict(self, state_dict):
"""
Load optimizer state dict.
(Only work in dygraph mode)
Args:
state_dict(dict) : Dict contains all the Tensor needed by optimizer
Returns:
None
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
fleet.init(is_collective=True)
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
state_dict = adam.state_dict()
paddle.save(state_dict, "paddle_dy")
para_state_dict = paddle.load("paddle_dy")
adam.set_state_dict(para_state_dict)
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.set_state_dict(state_dict)
@dygraph_only
def set_lr(self, value):
"""
Set the value of the learning rate manually in the optimizer.
(Only work in dygraph mode)
Args:
value (float|Tensor): the value of learning rate
Returns:
None
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
fleet.init(is_collective=True)
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
for i in range(5):
adam.set_lr(lr_list[i])
lr = adam.get_lr()
print("current lr is {}".format(lr))
# Print:
# current lr is 0.2
# current lr is 0.3
# current lr is 0.4
# current lr is 0.5
# current lr is 0.6
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.set_lr(value)
@dygraph_only
def get_lr(self):
"""
Get current step learning rate.
(Only work in dygraph mode)
Returns:
float: The learning rate of the current step.
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
fleet.init(is_collective=True)
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
lr = adam.get_lr()
print(lr) # 0.01
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.get_lr()
@dygraph_only
def step(self):
"""
Execute the optimizer once.
(Only work in dygraph mode)
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.distributed import fleet
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear1 = nn.Linear(10, 10)
self._linear2 = nn.Linear(10, 1)
def forward(self, x):
return self._linear2(self._linear1(x))
# 1. initialize fleet environment
fleet.init(is_collective=True)
# 2. create layer & optimizer
layer = LinearNet()
loss_fn = nn.MSELoss()
adam = paddle.optimizer.Adam(
learning_rate=0.001, parameters=layer.parameters())
# 3. get data_parallel model using fleet
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
# 4. run layer
inputs = paddle.randn([10, 10], 'float32')
outputs = dp_layer(inputs)
labels = paddle.randn([10, 1], 'float32')
loss = loss_fn(outputs, labels)
print("loss:", loss.numpy())
loss.backward()
adam.step()
adam.clear_grad()
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.step()
@dygraph_only
def clear_grad(self):
"""
Clear the gradients of all optimized parameters for model.
(Only work in dygraph mode)
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.distributed import fleet
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear1 = nn.Linear(10, 10)
self._linear2 = nn.Linear(10, 1)
def forward(self, x):
return self._linear2(self._linear1(x))
# 1. initialize fleet environment
fleet.init(is_collective=True)
# 2. create layer & optimizer
layer = LinearNet()
loss_fn = nn.MSELoss()
adam = paddle.optimizer.Adam(
learning_rate=0.001, parameters=layer.parameters())
# 3. get data_parallel model using fleet
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
# 4. run layer
inputs = paddle.randn([10, 10], 'float32')
outputs = dp_layer(inputs)
labels = paddle.randn([10, 1], 'float32')
loss = loss_fn(outputs, labels)
print("loss:", loss.numpy())
loss.backward()
adam.step()
adam.clear_grad()
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.clear_grad()
def amp_init(self,
place,
scope=None,
test_program=None,
use_fp16_test=False):
"""
Init the amp training, such as cast fp32 parameters to fp16 type.
Args:
place(CUDAPlace): place is used to initialize
fp16 parameters with fp32 values.
scope(Scope): The scope is used to find fp32 parameters.
test_program(Program): The program is used for testing.
use_fp16_test(bool): Whether to use fp16 testing.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn.functional as F
paddle.enable_static()
def run_example_code():
place = paddle.CUDAPlace(0)
exe = paddle.static.Executor(place)
data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)
# 1) Use fp16_guard to control the range of fp16 kernels used.
with paddle.static.amp.fp16_guard():
bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
pool = F.max_pool2d(bn, kernel_size=2, stride=2)
hidden = paddle.static.nn.fc(pool, size=10)
loss = paddle.mean(hidden)
# 2) Create the optimizer and set `multi_precision` to True.
# Setting `multi_precision` to True can help avoid poor accuracy
# or slow convergence in some cases.
optimizer = paddle.optimizer.Momentum(learning_rate=0.01, multi_precision=True)
# 3) The ops in `custom_black_list` will stay in the float32 computation type.
amp_list = paddle.static.amp.CustomOpLists(
custom_black_list=['pool2d'])
# 4) The entry of Paddle AMP.
# Enable pure fp16 training by setting `use_pure_fp16` to True.
optimizer = paddle.static.amp.decorate(
optimizer,
amp_list,
init_loss_scaling=128.0,
use_dynamic_loss_scaling=True,
use_pure_fp16=True)
# If you don't use the default_startup_program(), you should pass
# your defined `startup_program` into `minimize`.
optimizer.minimize(loss)
exe.run(paddle.static.default_startup_program())
# 5) Use `amp_init` after FP32 parameter initialization (such as `exe.run(startup_program)`).
# If you want to perform the testing process, you should pass `test_program` into `amp_init`.
optimizer.amp_init(place, scope=paddle.static.global_scope())
if paddle.is_compiled_with_cuda() and len(paddle.static.cuda_places()) > 0:
run_example_code()
"""
# imitate target optimizer retrieval
amp_optimizer = None
for optimizer in self.strategy_compiler._get_applied_meta_optimizer():
if hasattr(optimizer, 'amp_init'):
amp_optimizer = optimizer
break
if amp_optimizer is None:
if hasattr(self.user_defined_optimizer, 'amp_init'):
amp_optimizer = self.user_defined_optimizer
assert amp_optimizer is not None, \
"amp_init can only be used when the amp(auto mixed precision) strategy is turned on."
return amp_optimizer.amp_init(place, scope, test_program, use_fp16_test)
def _final_strategy(self):
if "valid_strategy" not in self._context:
print(
"WARNING: You may need to call minimize function before this function is called"
)
return {}
else:
return self._context["valid_strategy"]
def _get_applied_meta_list(self):
if "applied_meta_list" not in self._context:
print(
"WARNING: You | |
15 %)', 'abstract': '', 'values': [43], 'color': '#95f4f0', 'alpha': 1},
{'title': 'Cultivated Aquatic Vegetated: Scattered (1 to 4 %)', 'abstract': '', 'values': [44], 'color': '#bbfffc', 'alpha': 1},
# {'title': 'Cultivated Aquatic Vegetated: Woody Closed (> 65 %)', 'abstract': '', 'values': [45], 'color': '#2bd2cb', 'alpha': 1},
# {'title': 'Cultivated Aquatic Vegetated: Woody Open (40 to 65 %)', 'abstract': '', 'values': [46], 'color': '#49ded8', 'alpha': 1},
# {'title': 'Cultivated Aquatic Vegetated: Woody Open (15 to 40 %)', 'abstract': '', 'values': [47], 'color': '#6ee9e4', 'alpha': 1},
# {'title': 'Cultivated Aquatic Vegetated: Woody Sparse (4 to 15 %)', 'abstract': '', 'values': [48], 'color': '#95f4f0', 'alpha': 1},
# {'title': 'Cultivated Aquatic Vegetated: Woody Scattered (1 to 4 %)', 'abstract': '', 'values': [49], 'color': '#bbfffc', 'alpha': 1},
{'title': 'Cultivated Aquatic Vegetated: Herbaceous Closed (> 65 %)', 'abstract': '', 'values': [50], 'color': '#52e7c4', 'alpha': 1},
{'title': 'Cultivated Aquatic Vegetated: Herbaceous Open (40 to 65 %)', 'abstract': '', 'values': [51], 'color': '#71edd0', 'alpha': 1},
{'title': 'Cultivated Aquatic Vegetated: Herbaceous Open (15 to 40 %)', 'abstract': '', 'values': [52], 'color': '#90f3dc', 'alpha': 1},
{'title': 'Cultivated Aquatic Vegetated: Herbaceous Sparse (4 to 15 %)', 'abstract': '', 'values': [53], 'color': '#aff9e8', 'alpha': 1},
{'title': 'Cultivated Aquatic Vegetated: Herbaceous Scattered (1 to 4 %)', 'abstract': '', 'values': [54], 'color': '#cffff4', 'alpha': 1},
{'title': '', 'abstract': '', 'values': [55], 'color': '#1ebf79', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody', 'abstract': '', 'values': [56], 'color': '#128e94', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous', 'abstract': '', 'values': [57], 'color': '#70ea86', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Closed (> 65 %)', 'abstract': '', 'values': [58], 'color': '#19ad6d', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Open (40 to 65 %)', 'abstract': '', 'values': [59], 'color': '#35b884', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Open (15 to 40 %)', 'abstract': '', 'values': [60], 'color': '#5dc39b', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Sparse (4 to 15 %)', 'abstract': '', 'values': [61], 'color': '#87ceb2', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Scattered (1 to 4 %)', 'abstract': '', 'values': [62], 'color': '#b0dac9', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Closed (> 65 %)', 'abstract': '', 'values': [63], 'color': '#19ad6d', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Closed (> 65 %) Water > 3 months (semi-) permanent', 'abstract': '', 'values': [64], 'color': '#19ad6d', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Closed (> 65 %) Water < 3 months (temporary or seasonal)', 'abstract': '', 'values': [65], 'color': '#19ad6d', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Open (40 to 65 %)', 'abstract': '', 'values': [66], 'color': '#35b884', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Open (40 to 65 %) Water > 3 months (semi-) permanent', 'abstract': '', 'values': [67], 'color': '#35b884', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Open (40 to 65 %) Water < 3 months (temporary or seasonal)', 'abstract': '', 'values': [68], 'color': '#35b884', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Open (15 to 40 %)', 'abstract': '', 'values': [69], 'color': '#5dc39b', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Open (15 to 40 %) Water > 3 months (semi-) permanent', 'abstract': '', 'values': [70], 'color': '#5dc39b', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Open (15 to 40 %) Water < 3 months (temporary or seasonal)', 'abstract': '', 'values': [71], 'color': '#5dc39b', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Sparse (4 to 15 %)', 'abstract': '', 'values': [72], 'color': '#87ceb2', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Sparse (4 to 15 %) Water > 3 months (semi-) permanent', 'abstract': '', 'values': [73], 'color': '#87ceb2', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Sparse (4 to 15 %) Water < 3 months (temporary or seasonal)', 'abstract': '', 'values': [74], 'color': '#87ceb2', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Scattered (1 to 4 %)', 'abstract': '', 'values': [75], 'color': '#b0dac9', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Scattered (1 to 4 %) Water > 3 months (semi-) permanent', 'abstract': '', 'values': [76], 'color': '#b0dac9', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Woody Scattered (1 to 4 %) Water < 3 months (temporary or seasonal)', 'abstract': '', 'values': [77], 'color': '#b0dac9', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Closed (> 65 %)', 'abstract': '', 'values': [78], 'color': '#27cc8b', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Closed (> 65 %) Water > 3 months (semi-) permanent', 'abstract': '', 'values': [79], 'color': '#27cc8b', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Closed (> 65 %) Water < 3 months (temporary or seasonal)', 'abstract': '', 'values': [80], 'color': '#27cc8b', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Open (40 to 65 %)', 'abstract': '', 'values': [81], 'color': '#42d89f', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Open (40 to 65 %) Water > 3 months (semi-) permanent', 'abstract': '', 'values': [82], 'color': '#42d89f', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Open (40 to 65 %) Water < 3 months (temporary or seasonal)', 'abstract': '', 'values': [83], 'color': '#42d89f', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Open (15 to 40 %)', 'abstract': '', 'values': [84], 'color': '#63e3b4', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Open (15 to 40 %) Water > 3 months (semi-) permanent', 'abstract': '', 'values': [85], 'color': '#63e3b4', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Open (15 to 40 %) Water < 3 months (temporary or seasonal)', 'abstract': '', 'values': [86], 'color': '#63e3b4', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Sparse (4 to 15 %)', 'abstract': '', 'values': [87], 'color': '#87efc9', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Sparse (4 to 15 %) Water > 3 months (semi-) permanent', 'abstract': '', 'values': [88], 'color': '#87efc9', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Sparse (4 to 15 %) Water < 3 months (temporary or seasonal)', 'abstract': '', 'values': [89], 'color': '#87efc9', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Scattered (1 to 4 %)', 'abstract': '', 'values': [90], 'color': '#abfadd', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Scattered (1 to 4 %) Water > 3 months (semi-) permanent', 'abstract': '', 'values': [91], 'color': '#abfadd', 'alpha': 1},
{'title': 'Natural Aquatic Vegetated: Herbaceous Scattered (1 to 4 %) Water < 3 months (temporary or seasonal)', 'abstract': '', 'values': [92], 'color': '#abfadd', 'alpha': 1},
{'title': 'Artificial Surface', 'abstract': '', 'values': [93], 'color': '#da5c69', 'alpha': 1},
{'title': '', 'abstract': '', 'values': [94], 'color': '#f3ab69', 'alpha': 1},
{'title': 'Natural Surface: Sparsely vegetated', 'abstract': '', 'values': [95], 'color': '#ffe68c', 'alpha': 1},
{'title': 'Natural Surface: Very sparsely vegetated', 'abstract': '', 'values': [96], 'color': '#fad26e', 'alpha': 1},
{'title': 'Natural Surface: Bare areas, unvegetated', 'abstract': '', 'values': [97], 'color': '#f3ab69', 'alpha': 1},
{'title': 'Water', 'abstract': '', 'values': [98], 'color': '#4d9fdc', 'alpha': 1},
{'title': '', 'abstract': '', 'values': [99], 'color': '#4d9fdc', 'alpha': 1},
{'title': 'Water: Tidal area', 'abstract': '', 'values': [100], 'color': '#bbdce9', 'alpha': 1},
{'title': 'Water: Perennial (> 9 months)', 'abstract': '', 'values': [101], 'color': '#1b55ba', 'alpha': 1},
{'title': 'Water: Non-perennial (7 to 9 months)', 'abstract': '', 'values': [102], 'color': '#3479c9', 'alpha': 1},
{'title': 'Water: Non-perennial (4 to 6 months)', 'abstract': '', 'values': [103], 'color': '#4f9dd9', 'alpha': 1},
{'title': 'Water: Non-perennial (1 to 3 months)', 'abstract': '', 'values': [104], 'color': '#85cafd', 'alpha': 1},
# {'title': 'Water: (Snow)', 'abstract': '', 'values': [105], 'color': '#fafafa', 'alpha': 1},
]
},
# "pq_masks": [
# {
# "band": "land",
# "invert": True,
# "enum": 0,
# }
# ],
"legend": {
"show_legend": True,
"url": "https://dea-public-data-dev.s3.ap-southeast-2.amazonaws.com/lccs/level4-web-legend.png"
},
}
layers = {
"title": "DEA Land Cover",
"name": "",
"layers": [
{
"title": "DEA Land Cover Calendar Year (Landsat)",
"name": "ga_ls_landcover",
"abstract": """DEA Land Cover Calendar Year (Landsat)
Land cover is the observed physical cover on the Earth's surface including trees, shrubs, grasses, soils, exposed rocks, water bodies, plantations, crops and built structures. A consistent, Australia-wide land cover product helps the understanding of how the different parts of the environment change and inter-relate. Earth observation data recorded over a period of time allows the observation of the state of land cover at specific times and therefore the way that land cover changes.
For service status information, see
"""
Checks if a pair of minterms differs by only one bit. If yes, returns
index, else returns -1.
"""
index = -1
for x, (i, j) in enumerate(zip(minterm1, minterm2)):
if i != j:
if index == -1:
index = x
else:
return -1
return index
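# Illustrative sketch (not from the original module): how _check_pair behaves on
# hypothetical minterms written as 0/1 lists. Terms that differ in exactly one
# position return that position's index; anything else returns -1.
#
#     _check_pair([0, 0, 1, 0], [0, 1, 1, 0])   # -> 1  (only bit 1 differs)
#     _check_pair([0, 0, 1, 0], [1, 1, 1, 0])   # -> -1 (bits 0 and 1 differ)
#     _check_pair([0, 0, 1, 0], [0, 0, 1, 0])   # -> -1 (identical terms)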
def _convert_to_varsSOP(minterm, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for SOP).
"""
temp = []
for i, m in enumerate(minterm):
if m == 0:
temp.append(Not(variables[i]))
elif m == 1:
temp.append(variables[i])
return And(*temp)
def _convert_to_varsPOS(maxterm, variables):
"""
Converts a term in the expansion of a function from binary to its
variable form (for POS).
"""
temp = []
for i, m in enumerate(maxterm):
if m == 1:
temp.append(Not(variables[i]))
elif m == 0:
temp.append(variables[i])
return Or(*temp)
def _simplified_pairs(terms):
"""
Reduces a set of minterms, if possible, to a simplified set of minterms
with one less variable in the terms using QM method.
"""
simplified_terms = []
todo = list(range(len(terms)))
for i, ti in enumerate(terms[:-1]):
for j_i, tj in enumerate(terms[(i + 1):]):
index = _check_pair(ti, tj)
if index != -1:
todo[i] = todo[j_i + i + 1] = None
newterm = ti[:]
newterm[index] = 3
if newterm not in simplified_terms:
simplified_terms.append(newterm)
simplified_terms.extend(
[terms[i] for i in [_ for _ in todo if _ is not None]])
return simplified_terms
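# Illustrative sketch (not from the original module): combining two adjacent
# minterms eliminates the differing variable, which is marked with a 3.
#
#     _simplified_pairs([[0, 0, 1], [0, 1, 1]])   # -> [[0, 3, 1]]
#
# Terms that cannot be paired with anything are carried over unchanged via the
# `todo` bookkeeping list.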
def _compare_term(minterm, term):
"""
Return True if a binary term is satisfied by the given term. Used
for recognizing prime implicants.
"""
for i, x in enumerate(term):
if x not in (3, minterm[i]):
return False
return True
def _rem_redundancy(l1, terms):
"""
After the truth table has been sufficiently simplified, use the prime
implicant table method to recognize and eliminate redundant pairs,
and return the essential arguments.
"""
essential = []
for x in terms:
temporary = []
for y in l1:
if _compare_term(x, y):
temporary.append(y)
if len(temporary) == 1:
if temporary[0] not in essential:
essential.append(temporary[0])
for x in terms:
for y in essential:
if _compare_term(x, y):
break
else:
for z in l1: # pragma: no branch
if _compare_term(x, z):
assert z not in essential
essential.append(z)
break
return essential
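# Illustrative sketch (not from the original module): the prime implicant chart
# step. With candidate implicants l1 = [[0, 3, 1], [3, 1, 1]] and required
# minterms terms = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]:
#   - [0, 0, 1] is covered only by [0, 3, 1], so [0, 3, 1] is essential;
#   - [1, 1, 1] is covered only by [3, 1, 1], so [3, 1, 1] is essential;
#   - [0, 1, 1] is covered by both, so it adds nothing new.
#
#     _rem_redundancy([[0, 3, 1], [3, 1, 1]], [[0, 0, 1], [0, 1, 1], [1, 1, 1]])
#     # -> [[0, 3, 1], [3, 1, 1]]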
def SOPform(variables, minterms, dontcares=None):
"""
The SOPform function uses simplified_pairs and a redundant group-
eliminating algorithm to convert the list of all input combos that
generate '1' (the minterms) into the smallest Sum of Products form.
The variables must be given as the first argument.
Return a logical Or function (i.e., the "sum of products" or "SOP"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1],
... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> SOPform([t, x, y, z], minterms, dontcares)
(y & z) | (z & ~t)
References
==========
* https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError(f'{d} in minterms is also in dontcares')
old = None
new = minterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, minterms)
return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
def POSform(variables, minterms, dontcares=None):
"""
The POSform function uses simplified_pairs and a redundant-group
eliminating algorithm to convert the list of all input combinations
that generate '1' (the minterms) into the smallest Product of Sums form.
The variables must be given as the first argument.
Return a logical And function (i.e., the "product of sums" or "POS"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1],
... [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> POSform([t, x, y, z], minterms, dontcares)
z & (y | ~t)
References
==========
* https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError(f'{d} in minterms is also in dontcares')
maxterms = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if (t not in minterms) and (t not in dontcares):
maxterms.append(t)
old = None
new = maxterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, maxterms)
return And(*[_convert_to_varsPOS(x, variables) for x in essential])
def _find_predicates(expr):
"""Helper to find logical predicates in BooleanFunctions.
A logical predicate is defined here as anything within a BooleanFunction
that is not a BooleanFunction itself.
"""
if not isinstance(expr, BooleanFunction):
return {expr}
return set().union(*(_find_predicates(i) for i in expr.args))
def simplify_logic(expr, form=None, deep=True):
"""
This function simplifies a boolean function to its simplified version
in SOP or POS form. The return type is an Or or And object in Diofant.
Parameters
==========
expr : string or boolean expression
form : string ('cnf' or 'dnf') or None (default).
If 'cnf' or 'dnf', the simplest expression in the corresponding
normal form is returned; if None, the answer is returned
according to the form with fewest args (in CNF by default).
deep : boolean (default True)
indicates whether to recursively simplify any
non-boolean functions contained within the input.
Examples
========
>>> b = (~x & ~y & ~z) | (~x & ~y & z)
>>> simplify_logic(b)
~x & ~y
>>> sympify(b)
(z & ~x & ~y) | (~x & ~y & ~z)
>>> simplify_logic(_)
~x & ~y
"""
if form == 'cnf' or form == 'dnf' or form is None:
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
variables = _find_predicates(expr)
truthtable = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if expr.xreplace(dict(zip(variables, t))):
truthtable.append(t)
if deep:
from ..simplify import simplify
variables = [simplify(v) for v in variables]
if form == 'dnf' or \
(form is None and len(truthtable) >= (2 ** (len(variables) - 1))):
return SOPform(variables, truthtable)
elif form == 'cnf' or form is None: # pragma: no branch
return POSform(variables, truthtable)
else:
raise ValueError('form can be cnf or dnf only')
def _finger(eq):
"""
Assign a 5-item fingerprint to each symbol in the equation:
[
# of times it appeared as a Symbol,
# of times it appeared as a Not(symbol),
# of times it appeared as a Symbol in an And or Or,
# of times it appeared as a Not(Symbol) in an And or Or,
sum of the number of arguments with which it appeared,
counting Symbol as 1 and Not(Symbol) as 2
]
>>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y))
>>> dict(_finger(eq))
{(0, 0, 1, 0, 2): [x],
(0, 0, 1, 0, 3): [a, b],
(0, 0, 1, 2, 8): [y]}
So y and x have unique fingerprints, but a and b do not.
"""
f = eq.free_symbols
d = {fi: [0] * 5 for fi in f}
for a in eq.args:
if a.is_Symbol:
d[a][0] += 1
elif a.is_Not:
d[a.args[0]][1] += 1
else:
o = len(a.args) + sum(isinstance(ai, Not) for ai in a.args)
for ai in a.args:
if ai.is_Symbol:
d[ai][2] += 1
d[ai][-1] += o
else:
d[ai.args[0]][3] += 1
d[ai.args[0]][-1] += o
inv = defaultdict(list)
for k, v in ordered(d.items()):
inv[tuple(v)].append(k)
return inv
def bool_map(bool1, bool2):
"""
Return the simplified version of bool1, and the mapping
optional
Value of the random seed used for the conditional luminosity function.
This variable is set to ``1235`` default.
dv : `float`, optional
Value for the ``velocity bias`` parameter. It is the difference
between the galaxy and matter velocity profiles.
.. math::
dv = \\frac{v_{g} - v_{c}}{v_{m} - v_{c}}
where :math:`v_g` is the galaxy's velocity; :math:`v_m`, the
matter velocity.
sigma_clf_c : `float`, optional
Value of the scatter in log(L) for central galaxies, when being
assigned during the `conditional luminosity function` (CLF).
This variable is set to ``0.1417`` by default.
sample : {'18', '19', '20', '21'}, `str`, optional
Luminosity of the SDSS volume-limited sample to analyze.
This variable is set to ``'19'`` by default.
Options:
- ``'18'``: :math:`M_r = 18` volume-limited sample
- ``'19'``: :math:`M_r = 19` volume-limited sample
- ``'20'``: :math:`M_r = 20` volume-limited sample
- ``'21'``: :math:`M_r = 21` volume-limited sample
type_am : {'mr', 'mstar'}, `str`, optional
Type of Abundance matching used in the catalogue. This
variable is set to ``'mr'`` by default.
Options:
- ``'mr'``: Luminosity-based abundance matching used
- ``'mstar'``: Stellar-mass-based abundance matching used.
perf_opt : `bool`, optional
If `True`, it chooses to analyze the ``perfect`` version of
the synthetic galaxy/group galaxy catalogues. Otherwise,
it downloads the catalogues with group-finding errors
included. This variable is set to ``False`` by default.
Returns
---------
catl_pre_str : `str`
String of the prefix for each file based on `input` parameters.
"""
file_msg = cfutils.Program_Msg(__file__)
## Checking input parameters
# `catl_kind`
check_input_params(catl_kind, 'catl_kind', check_type='type')
check_input_params(catl_kind, 'catl_kind', check_type='vals')
# `hod_n`
check_input_params(hod_n, 'hod_n', check_type='type')
check_input_params(hod_n, 'hod_n', check_type='vals')
# `halotype`
check_input_params(halotype, 'halotype', check_type='type')
check_input_params(halotype, 'halotype', check_type='vals')
# `clf_method`
check_input_params(clf_method, 'clf_method', check_type='type')
check_input_params(clf_method, 'clf_method', check_type='vals')
# `clf_seed`
check_input_params(clf_seed, 'clf_seed', check_type='type')
# `dv`
check_input_params(dv, 'dv', check_type='type')
# `sigma_clf_c`
check_input_params(sigma_clf_c, 'sigma_clf_c', check_type='type')
# `sample`
check_input_params(sample, 'sample', check_type='type')
check_input_params(sample, 'sample', check_type='vals')
# `type_am`
check_input_params(type_am, 'type_am', check_type='type')
check_input_params(type_am, 'type_am', check_type='vals')
# `perf_opt`
check_input_params(perf_opt, 'perf_opt', check_type='type')
# Setting `perf_opt` to `False` if necessary
if (catl_kind == 'data'):
perf_opt = False
# Extra parameters
sample_Mr = 'Mr{0}'.format(sample)
##
## Parsing prefix path
# `Data`
if (catl_kind == 'data'):
# List of variables to include in string
catl_pre_arr = ['data', sample_Mr, type_am]
# Prefix string
catl_pre_str = '{0}_{1}_am_{2}'
catl_pre_str = catl_pre_str.format(*catl_pre_arr)
# `Mocks`
if (catl_kind == 'mocks'):
# List of variables to include in string
catl_pre_arr = [sample_Mr,
halotype,
dv,
hod_n,
clf_seed,
clf_method,
sigma_clf_c,
type_am,
perf_opt]
# Prefix string
catl_pre_str = '{0}_halo_{1}_dv_{2}_hn_{3}_clfs_{4}_clfm_{5}_'
catl_pre_str += 'sigclf_{6}_am_{7}_pf_{8}'
catl_pre_str = catl_pre_str.format(*catl_pre_arr)
return catl_pre_str
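# Illustrative sketch (not from the original module), assuming the inputs pass
# the `check_input_params` checks above:
#
#     catl_prefix_str(catl_kind='data', sample='19', type_am='mr')
#     # -> 'data_Mr19_am_mr'
#
# The ``mocks`` branch instead folds the halo definition, velocity bias, HOD
# number, CLF settings and `perf_opt` flag into the prefix string.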
# Prefix path to catalogues
def catl_prefix_path(catl_kind=md.catl_kind, hod_n=md.hod_n,
halotype=md.halotype, clf_method=md.clf_method, clf_seed=md.clf_seed,
dv=md.dv, sigma_clf_c=md.sigma_clf_c, sample=md.sample, type_am=md.type_am,
perf_opt=md.perf_opt):
"""
Prefix of the paths based on the type of catalogues and input parameters
chosen. It returns the typical path to the galaxy/group catalogues.
Parameters
-----------
catl_kind : {``data``, ``mocks``} `str`
Kind of catalogues to download. This variable is set to
``mocks`` by default.
Options:
- ``data``: Downloads the SDSS DR7 real catalogues.
- ``mocks``: Downloads the synthetic catalogues of SDSS DR7.
hod_n : `int`, optional
Number of the HOD model to use. This value is set to `0` by
default.
halotype : {'so', 'fof'}, `str`, optional
Type of dark matter definition to use. This value is set to
``so`` by default.
Options:
- ``so``: Spherical Overdensity halo definition.
- ``fof``: Friends-of-Friends halo definition.
clf_method : {1, 2, 3}, `int`, optional
Method for assigning galaxy properties to mock galaxies.
This variable dictates how galaxies are assigned
luminosities or stellar masses based on their galaxy type
and host halo's mass. This variable is set to ``1`` by
default.
Options:
- ``1``: Independent assignment of (g-r) colour, sersic, and specific star formation rate (`logssfr`)
- ``2``: (g-r) colour dictates active/passive designation and draws values independently.
- ``3``: (g-r) colour dictates active/passive designation, and assigns other galaxy properties for that given galaxy.
clf_seed : `int`, optional
Value of the random seed used for the conditional luminosity function.
This variable is set to ``1235`` default.
dv : `float`, optional
Value for the ``velocity bias`` parameter. It is the difference
between the galaxy and matter velocity profiles.
.. math::
dv = \\frac{v_{g} - v_{c}}{v_{m} - v_{c}}
where :math:`v_g` is the galaxy's velocity; :math:`v_m`, the
matter velocity.
sigma_clf_c : `float`, optional
Value of the scatter in log(L) for central galaxies, when being
assigned during the `conditional luminosity function` (CLF).
This variable is set to ``0.1417`` by default.
sample : {'18', '19', '20', '21'}, `str`, optional
Luminosity of the SDSS volume-limited sample to analyze.
This variable is set to ``'19'`` by default.
Options:
- ``'18'``: :math:`M_r = 18` volume-limited sample
- ``'19'``: :math:`M_r = 19` volume-limited sample
- ``'20'``: :math:`M_r = 20` volume-limited sample
- ``'21'``: :math:`M_r = 21` volume-limited sample
type_am : {'mr', 'mstar'}, `str`, optional
Type of Abundance matching used in the catalogue. This
variable is set to ``'mr'`` by default.
Options:
- ``'mr'``: Luminosity-based abundance matching used
- ``'mstar'``: Stellar-mass-based abundance matching used.
perf_opt : `bool`, optional
If `True`, it chooses to analyze the ``perfect`` version of
the synthetic galaxy/group galaxy catalogues. Otherwise,
it downloads the catalogues with group-finding errors
included. This variable is set to ``False`` by default.
Returns
---------
catl_prefix : `str`
Prefix of the paths based on the type of catalogues and input
parameters.
"""
file_msg = cfutils.Program_Msg(__file__)
## Checking input parameters
# `catl_kind`
check_input_params(catl_kind, 'catl_kind', check_type='type')
check_input_params(catl_kind, 'catl_kind', check_type='vals')
# `hod_n`
check_input_params(hod_n, 'hod_n', check_type='type')
check_input_params(hod_n, 'hod_n', check_type='vals')
# `halotype`
check_input_params(halotype, 'halotype', check_type='type')
check_input_params(halotype, 'halotype', check_type='vals')
# `clf_method`
check_input_params(clf_method, 'clf_method', check_type='type')
check_input_params(clf_method, 'clf_method', check_type='vals')
# `clf_seed`
check_input_params(clf_seed, 'clf_seed', check_type='type')
# `dv`
check_input_params(dv, 'dv', check_type='type')
# `sigma_clf_c`
check_input_params(sigma_clf_c, 'sigma_clf_c', check_type='type')
# `sample`
check_input_params(sample, 'sample', check_type='type')
check_input_params(sample, 'sample', check_type='vals')
# `type_am`
check_input_params(type_am, 'type_am', check_type='type')
check_input_params(type_am, 'type_am', check_type='vals')
# `perf_opt`
check_input_params(perf_opt, 'perf_opt', check_type='type')
# Setting `perf_opt` to `False` if necessary
if (catl_kind == 'data'):
perf_opt = False
# Extra parameters
sample_Mr = 'Mr{0}'.format(sample)
##
## Parsing prefix path
# `Data`
if (catl_kind == 'data'):
catl_path_prefix = os.path.join('data',
type_am,
sample_Mr)
# `Mocks`
if (catl_kind == 'mocks'):
catl_path_prefix = os.path.join(
'mocks',
'halos_{0}'.format(halotype),
'dv_{0}'.format(dv),
'hod_model_{0}'.format(hod_n),
'clf_seed_{0}'.format(clf_seed),
'clf_method_{0}'.format(clf_method),
'sigma_c_{0}'.format(sigma_clf_c),
type_am,
sample_Mr)
return catl_path_prefix
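# Illustrative sketch (not from the original module), assuming valid inputs:
#
#     catl_prefix_path(catl_kind='data', sample='19', type_am='mr')
#     # -> os.path.join('data', 'mr', 'Mr19'), i.e. 'data/mr/Mr19' on POSIX
#
# The ``mocks`` branch nests one directory per model choice (halo type, dv,
# HOD number, CLF seed/method, sigma_c, abundance matching type, sample).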
# Catalogue prefix of the catalogues
def catl_prefix_main(catl_type='memb', catl_kind=md.catl_kind, hod_n=md.hod_n,
halotype=md.halotype, clf_method=md.clf_method, clf_seed=md.clf_seed,
dv=md.dv, sigma_clf_c=md.sigma_clf_c, sample=md.sample, type_am=md.type_am,
perf_opt=md.perf_opt):
"""
Prefix of the paths based on the type of catalogues and input parameters
chosen.
Parameters
-----------
catl_type : {``memb``, ``gal``, ``group``} `str`, optional
Type of catalog to analyze. This option is set to ``memb`` by
default.
Options:
- ``memb``: Analyzes the member galaxy catalogues of a group catalog
- ``gal``: Analyzes a simple galaxy catalogue
- ``group``: Analyzes a ``group`` galaxy catalogues with galaxy groups.
catl_kind : {``data``, ``mocks``} `str`
Kind of catalogues to download. This variable is set to
``mocks`` by default.
Options:
- ``data``: Downloads the SDSS DR7 real catalogues.
- ``mocks``: Downloads the synthetic catalogues of SDSS DR7.
hod_n : `int`, optional
Number of the HOD model to use. This value is set to `0` by
default.
halotype : {'so', 'fof'}, `str`, optional
Type of dark matter definition to use. This value is set to
``so`` by default.
Options:
- ``so``: Spherical Overdensity halo definition.
- ``fof``: Friends-of-Friends halo definition.
clf_method : {1, 2, 3}, `int`, optional
Method for assigning galaxy properties to mock galaxies.
This variable dictates how galaxies are assigned
luminosities or stellar masses based on their galaxy type
and host halo's mass. This variable is set to ``1`` by
default.
Options:
- ``1``: Independent assignment of (g-r) colour, sersic, and specific star formation rate (`logssfr`)
- ``2``: (g-r) colour dictates active/passive designation and draws values independently.
- ``3``: (g-r) colour dictates active/passive designation, and assigns other galaxy properties for that given galaxy.
clf_seed : `int`, optional
Value of the random seed used for the conditional luminosity function.
This variable is set to ``1235`` default.
dv : `float`, optional
Value for the ``velocity bias`` parameter. It is the difference
between the galaxy and matter velocity profiles.
.. math::
dv = \\frac{v_{g} - v_{c}}{v_{m} - v_{c}}
paramflags )
libvlc_vlm_get_media_instance_seekable.errcheck = check_vlc_exception
libvlc_vlm_get_media_instance_seekable.__doc__ = """Is libvlc instance seekable ?
\bug will always return 0
@param p_instance a libvlc instance
@param psz_name name of vlm media instance
@param i_instance instance id
@param p_e an initialized exception pointer
@return 1 if seekable, 0 if not
"""
if hasattr(dll, 'mediacontrol_RGBPicture__free'):
prototype=ctypes.CFUNCTYPE(None, ctypes.POINTER(RGBPicture))
paramflags=( (1, ), )
mediacontrol_RGBPicture__free = prototype( ("mediacontrol_RGBPicture__free", dll), paramflags )
mediacontrol_RGBPicture__free.__doc__ = """Free a RGBPicture structure.
@param pic: the RGBPicture structure
"""
if hasattr(dll, 'mediacontrol_StreamInformation__free'):
prototype=ctypes.CFUNCTYPE(None, ctypes.POINTER(MediaControlStreamInformation))
paramflags=( (1, ), )
mediacontrol_StreamInformation__free = prototype( ("mediacontrol_StreamInformation__free", dll), paramflags )
mediacontrol_StreamInformation__free.__doc__ = """Free a StreamInformation structure.
@param pic: the StreamInformation structure
"""
if hasattr(dll, 'mediacontrol_exception_create'):
prototype=ctypes.CFUNCTYPE(MediaControlException)
paramflags= tuple()
mediacontrol_exception_create = prototype( ("mediacontrol_exception_create", dll), paramflags )
mediacontrol_exception_create.__doc__ = """Instantiate and initialize an exception structure.
@return the exception
"""
if hasattr(dll, 'mediacontrol_exception_init'):
prototype=ctypes.CFUNCTYPE(None, MediaControlException)
paramflags=( (1, ), )
mediacontrol_exception_init = prototype( ("mediacontrol_exception_init", dll), paramflags )
mediacontrol_exception_init.__doc__ = """Initialize an existing exception structure.
@param p_exception the exception to initialize.
"""
if hasattr(dll, 'mediacontrol_exception_cleanup'):
prototype=ctypes.CFUNCTYPE(None, MediaControlException)
paramflags=( (1, ), )
mediacontrol_exception_cleanup = prototype( ("mediacontrol_exception_cleanup", dll), paramflags )
mediacontrol_exception_cleanup.__doc__ = """Clean up an existing exception structure after use.
@param p_exception the exception to clean up.
"""
if hasattr(dll, 'mediacontrol_exception_free'):
prototype=ctypes.CFUNCTYPE(None, MediaControlException)
paramflags=( (1, ), )
mediacontrol_exception_free = prototype( ("mediacontrol_exception_free", dll), paramflags )
mediacontrol_exception_free.__doc__ = """Free an exception structure created with mediacontrol_exception_create().
@return the exception
"""
if hasattr(dll, 'mediacontrol_new'):
prototype=ctypes.CFUNCTYPE(MediaControl, ctypes.c_int, ListPOINTER(ctypes.c_char_p), MediaControlException)
paramflags=(1,), (1,), (1,)
mediacontrol_new = prototype( ("mediacontrol_new", dll), paramflags )
mediacontrol_new.__doc__ = """Create a MediaControl instance with parameters
@param argc the number of arguments
@param argv parameters
@param exception an initialized exception pointer
@return a mediacontrol_Instance
"""
if hasattr(dll, 'mediacontrol_new_from_instance'):
prototype=ctypes.CFUNCTYPE(MediaControl, Instance, MediaControlException)
paramflags=(1,), (1,)
mediacontrol_new_from_instance = prototype( ("mediacontrol_new_from_instance", dll), paramflags )
mediacontrol_new_from_instance.__doc__ = """Create a MediaControl instance from an existing libvlc instance
@param p_instance the libvlc instance
@param exception an initialized exception pointer
@return a mediacontrol_Instance
"""
if hasattr(dll, 'mediacontrol_get_libvlc_instance'):
prototype=ctypes.CFUNCTYPE(Instance, MediaControl)
paramflags=( (1, ), )
mediacontrol_get_libvlc_instance = prototype( ("mediacontrol_get_libvlc_instance", dll), paramflags )
mediacontrol_get_libvlc_instance.__doc__ = """Get the associated libvlc instance
@param self: the mediacontrol instance
@return a libvlc instance
"""
if hasattr(dll, 'mediacontrol_get_media_player'):
prototype=ctypes.CFUNCTYPE(MediaPlayer, MediaControl)
paramflags=( (1, ), )
mediacontrol_get_media_player = prototype( ("mediacontrol_get_media_player", dll), paramflags )
mediacontrol_get_media_player.__doc__ = """Get the associated libvlc_media_player
@param self: the mediacontrol instance
@return a libvlc_media_player_t instance
"""
if hasattr(dll, 'mediacontrol_get_media_position'):
prototype=ctypes.CFUNCTYPE(ctypes.POINTER(MediaControlPosition), MediaControl, PositionOrigin, PositionKey, MediaControlException)
paramflags=(1,), (1,), (1,), (1,)
mediacontrol_get_media_position = prototype( ("mediacontrol_get_media_position", dll), paramflags )
mediacontrol_get_media_position.__doc__ = """Get the current position
@param self the mediacontrol instance
@param an_origin the position origin
@param a_key the position unit
@param exception an initialized exception pointer
@return a mediacontrol_Position
"""
if hasattr(dll, 'mediacontrol_set_media_position'):
prototype=ctypes.CFUNCTYPE(None, MediaControl, ctypes.POINTER(MediaControlPosition), MediaControlException)
paramflags=(1,), (1,), (1,)
mediacontrol_set_media_position = prototype( ("mediacontrol_set_media_position", dll), paramflags )
mediacontrol_set_media_position.__doc__ = """Set the position
@param self the mediacontrol instance
@param a_position a mediacontrol_Position
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_start'):
prototype=ctypes.CFUNCTYPE(None, MediaControl, ctypes.POINTER(MediaControlPosition), MediaControlException)
paramflags=(1,), (1,), (1,)
mediacontrol_start = prototype( ("mediacontrol_start", dll), paramflags )
mediacontrol_start.__doc__ = """Play the movie at a given position
@param self the mediacontrol instance
@param a_position a mediacontrol_Position
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_pause'):
prototype=ctypes.CFUNCTYPE(None, MediaControl, MediaControlException)
paramflags=(1,), (1,)
mediacontrol_pause = prototype( ("mediacontrol_pause", dll), paramflags )
mediacontrol_pause.__doc__ = """Pause the movie at a given position
@param self the mediacontrol instance
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_resume'):
prototype=ctypes.CFUNCTYPE(None, MediaControl, MediaControlException)
paramflags=(1,), (1,)
mediacontrol_resume = prototype( ("mediacontrol_resume", dll), paramflags )
mediacontrol_resume.__doc__ = """Resume the movie at a given position
@param self the mediacontrol instance
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_stop'):
prototype=ctypes.CFUNCTYPE(None, MediaControl, MediaControlException)
paramflags=(1,), (1,)
mediacontrol_stop = prototype( ("mediacontrol_stop", dll), paramflags )
mediacontrol_stop.__doc__ = """Stop the movie at a given position
@param self the mediacontrol instance
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_exit'):
prototype=ctypes.CFUNCTYPE(None, MediaControl)
paramflags=( (1, ), )
mediacontrol_exit = prototype( ("mediacontrol_exit", dll), paramflags )
mediacontrol_exit.__doc__ = """Exit the player
@param self the mediacontrol instance
"""
if hasattr(dll, 'mediacontrol_set_mrl'):
prototype=ctypes.CFUNCTYPE(None, MediaControl, ctypes.c_char_p, MediaControlException)
paramflags=(1,), (1,), (1,)
mediacontrol_set_mrl = prototype( ("mediacontrol_set_mrl", dll), paramflags )
mediacontrol_set_mrl.__doc__ = """Set the MRL to be played.
@param self the mediacontrol instance
@param psz_file the MRL
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_get_mrl'):
prototype=ctypes.CFUNCTYPE(ctypes.c_char_p, MediaControl, MediaControlException)
paramflags=(1,), (1,)
mediacontrol_get_mrl = prototype( ("mediacontrol_get_mrl", dll), paramflags )
mediacontrol_get_mrl.__doc__ = """Get the MRL to be played.
@param self the mediacontrol instance
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_snapshot'):
prototype=ctypes.CFUNCTYPE(ctypes.POINTER(RGBPicture), MediaControl, ctypes.POINTER(MediaControlPosition), MediaControlException)
paramflags=(1,), (1,), (1,)
mediacontrol_snapshot = prototype( ("mediacontrol_snapshot", dll), paramflags )
mediacontrol_snapshot.__doc__ = """Get a snapshot
@param self the mediacontrol instance
@param a_position the desired position (ignored for now)
@param exception an initialized exception pointer
@return a RGBpicture
"""
if hasattr(dll, 'mediacontrol_display_text'):
prototype=ctypes.CFUNCTYPE(None, MediaControl, ctypes.c_char_p, ctypes.POINTER(MediaControlPosition), ctypes.POINTER(MediaControlPosition), MediaControlException)
paramflags=(1,), (1,), (1,), (1,), (1,)
mediacontrol_display_text = prototype( ("mediacontrol_display_text", dll), paramflags )
mediacontrol_display_text.__doc__ = """ Displays the message string, between "begin" and "end" positions.
@param self the mediacontrol instance
@param message the message to display
@param begin the begin position
@param end the end position
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_get_stream_information'):
prototype=ctypes.CFUNCTYPE(ctypes.POINTER(MediaControlStreamInformation), MediaControl, PositionKey, MediaControlException)
paramflags=(1,), (1,), (1,)
mediacontrol_get_stream_information = prototype( ("mediacontrol_get_stream_information", dll), paramflags )
mediacontrol_get_stream_information.__doc__ = """ Get information about a stream
@param self the mediacontrol instance
@param a_key the time unit
@param exception an initialized exception pointer
@return a mediacontrol_StreamInformation
"""
if hasattr(dll, 'mediacontrol_sound_get_volume'):
prototype=ctypes.CFUNCTYPE(ctypes.c_short, MediaControl, MediaControlException)
paramflags=(1,), (1,)
mediacontrol_sound_get_volume = prototype( ("mediacontrol_sound_get_volume", dll), paramflags )
mediacontrol_sound_get_volume.__doc__ = """Get the current audio level, normalized in [0..100]
@param self the mediacontrol instance
@param exception an initialized exception pointer
@return the volume
"""
if hasattr(dll, 'mediacontrol_sound_set_volume'):
prototype=ctypes.CFUNCTYPE(None, MediaControl, ctypes.c_short, MediaControlException)
paramflags=(1,), (1,), (1,)
mediacontrol_sound_set_volume = prototype( ("mediacontrol_sound_set_volume", dll), paramflags )
mediacontrol_sound_set_volume.__doc__ = """Set the audio level
@param self the mediacontrol instance
@param volume the volume (normalized in [0..100])
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_set_visual'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaControl, ctypes.c_ulong, MediaControlException)
paramflags=(1,), (1,), (1,)
mediacontrol_set_visual = prototype( ("mediacontrol_set_visual", dll), paramflags )
mediacontrol_set_visual.__doc__ = """Set the video output window
@param self the mediacontrol instance
@param visual_id the Xid or HWND, depending on the platform
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_get_rate'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaControl, MediaControlException)
paramflags=(1,), (1,)
mediacontrol_get_rate = prototype( ("mediacontrol_get_rate", dll), paramflags )
mediacontrol_get_rate.__doc__ = """Get the current playing rate, in percent
@param self the mediacontrol instance
@param exception an initialized exception pointer
@return the rate
"""
if hasattr(dll, 'mediacontrol_set_rate'):
prototype=ctypes.CFUNCTYPE(None, MediaControl, ctypes.c_int, MediaControlException)
paramflags=(1,), (1,), (1,)
mediacontrol_set_rate = prototype( ("mediacontrol_set_rate", dll), paramflags )
mediacontrol_set_rate.__doc__ = """Set the playing rate, in percent
@param self the mediacontrol instance
@param rate the desired rate
@param exception an initialized exception pointer
"""
if hasattr(dll, 'mediacontrol_get_fullscreen'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, MediaControl, MediaControlException)
paramflags=(1,), (1,)
mediacontrol_get_fullscreen = prototype( ("mediacontrol_get_fullscreen", dll), paramflags )
mediacontrol_get_fullscreen.__doc__ = """Get current fullscreen status
@param self the mediacontrol instance
@param exception an initialized exception pointer
@return the fullscreen status
"""
if hasattr(dll, 'mediacontrol_set_fullscreen'):
prototype=ctypes.CFUNCTYPE(None, MediaControl, ctypes.c_int, MediaControlException)
paramflags=(1,), (1,), (1,)
mediacontrol_set_fullscreen = prototype( ("mediacontrol_set_fullscreen", dll), paramflags )
mediacontrol_set_fullscreen.__doc__ = """Set fullscreen status
@param self the mediacontrol instance
@param b_fullscreen the desired status
@param exception an initialized exception pointer
"""
### Start of footer.py ###
class MediaEvent(ctypes.Structure):
_fields_ = [
('media_name', ctypes.c_char_p),
('instance_name', ctypes.c_char_p),
]
class EventUnion(ctypes.Union):
_fields_ = [
('meta_type', ctypes.c_uint),
('new_child', ctypes.c_uint),
('new_duration', ctypes.c_longlong),
('new_status', ctypes.c_int),
('media', ctypes.c_void_p),
('new_state', ctypes.c_uint),
# Media instance
('new_position', ctypes.c_float),
('new_time', ctypes.c_longlong),
('new_title', ctypes.c_int),
('new_seekable', ctypes.c_longlong),
('new_pausable', ctypes.c_longlong),
# FIXME: Skipped MediaList and MediaListView...
('filename', ctypes.c_char_p),
('new_length', ctypes.c_longlong),
('media_event', MediaEvent),
]
class Event(ctypes.Structure):
_fields_ = [
('type', EventType),
('object', ctypes.c_void_p),
('u', EventUnion),
]
# Decorator for callback methods
callbackmethod=ctypes.CFUNCTYPE(None, Event, ctypes.c_void_p)
# Example callback method
@callbackmethod
def debug_callback(event, data):
print "Debug callback method"
print "Event:", event.type
print "Data", data
if __name__ == '__main__':
try:
from msvcrt import getch
except ImportError:
def getch():
import tty
import termios
fd=sys.stdin.fileno()
old_settings=termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch=sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
@callbackmethod
def end_callback(event, data):
print "End of stream"
sys.exit(0)
if sys.argv[1:]:
instance=Instance()
media=instance.media_new(sys.argv[1])
player=instance.media_player_new()
player.set_media(media)
player.play()
event_manager=player.event_manager()
event_manager.event_attach(EventType.MediaPlayerEndReached, end_callback, None)
def print_info():
"""Print information about the media."""
media=player.get_media()
print "State:", player.get_state()
print "Media:", media.get_mrl()
try:
print "Current time:", player.get_time(), "/", media.get_duration()
print "Position:", player.get_position()
print "FPS:", player.get_fps()
print "Rate:", player.get_rate()
print "Video size: (%d, %d)" % (player.video_get_width(), player.video_get_height())
except Exception:
pass
def forward():
"""Go forward 1s"""
player.set_time(player.get_time() + 1000)
def one_frame_forward():
"""Go forward one frame"""
player.set_time(player.get_time() + long(1000 / (player.get_fps() or 25)))
def one_frame_backward():
"""Go backward one frame"""
player.set_time(player.get_time() - long(1000 / (player.get_fps() or 25)))
def backward():
"""Go backward 1s"""
player.set_time(player.get_time() - 1000)
def print_help():
"""Print help
"""
print "Commands:"
for k, m in keybindings.iteritems():
print " %s: %s" % (k, (m.__doc__ or m.__name__).splitlines()[0])
print " 1-9: go to the given fraction of the movie"
def quit_app():
"""Exit."""
sys.exit(0)
keybindings={
'f': player.toggle_fullscreen,
' ': player.pause,
'+': forward,
'-': backward,
'.': one_frame_forward,
',': one_frame_backward,
'?': print_help,
'i': print_info,
*args)
def name(self, *args):
"""
name(self) -> str
"""
return _casadi.DM_name(self, *args)
def dep(self, *args):
"""
dep(self, int ch) -> DM
"""
return _casadi.DM_dep(self, *args)
def n_dep(self, *args):
"""
n_dep(self) -> int
"""
return _casadi.DM_n_dep(self, *args)
def set_precision(*args):
"""
set_precision(int precision)
Set the 'precision, width & scientific' used in printing and serializing to
streams.
"""
return _casadi.DM_set_precision(*args)
set_precision = staticmethod(set_precision)
def set_width(*args):
"""
set_width(int width)
Set the 'precision, width & scientific' used in printing and serializing to
streams.
"""
return _casadi.DM_set_width(*args)
set_width = staticmethod(set_width)
def set_scientific(*args):
"""
set_scientific(bool scientific)
Set the 'precision, width & scientific' used in printing and serializing to
streams.
"""
return _casadi.DM_set_scientific(*args)
set_scientific = staticmethod(set_scientific)
def rng(*args):
"""
rng(int seed)
"""
return _casadi.DM_rng(*args)
rng = staticmethod(rng)
def rand(*args):
"""
Create a matrix with uniformly distributed random numbers.
rand(int nrow, int ncol) -> DM
rand((int,int) rc) -> DM
rand(Sparsity sp) -> DM
"""
return _casadi.DM_rand(*args)
rand = staticmethod(rand)
def export_code(self, *args):
"""
Export matrix in specific language.
export_code(self, str lang, dict options)
lang: only 'matlab' supported for now
::
* options:
* inline: Indicates if you want everything on a single line (default: False)
* name: Name of exported variable (default: 'm')
* indent_level: Level of indentation (default: 0)
* spoof_zero: Replace numerical zero by a 1e-200 (default: false)
* might be needed for matlab sparse construct,
* which doesn't allow numerical zero
*
"""
return _casadi.DM_export_code(self, *args)
def info(self, *args):
"""
Obtain information about sparsity
info(self) -> dict
"""
return _casadi.DM_info(self, *args)
def from_info(*args):
"""
from_info(dict info) -> DM
"""
return _casadi.DM_from_info(*args)
from_info = staticmethod(from_info)
def to_file(self, *args):
"""
Export numerical matrix to file
to_file(self, str filename, str format)
Supported formats: .mtx Matrix Market
"""
return _casadi.DM_to_file(self, *args)
def __init__(self, *args):
"""
DM()
DM(Sparsity sp)
DM(float val)
DM([[float]] m)
DM([int] x)
DM(IM x)
DM(DM m)
DM([SXElem] x)
DM(SX x)
DM(int nrow, int ncol)
DM(Sparsity sp, DM d)
"""
this = _casadi.new_DM(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def assign(self, *args):
"""
assign(self, DM rhs)
"""
return _casadi.DM_assign(self, *args)
@property
def shape(self):
return (self.size1(),self.size2())
def reshape(self,arg):
return _casadi.reshape(self,arg)
@property
def T(self):
return _casadi.transpose(self)
def __getitem__(self, s):
if isinstance(s, tuple) and len(s)==2:
if s[1] is None: raise TypeError("Cannot slice with None")
return self.get(False, s[0], s[1])
return self.get(False, s)
def __setitem__(self,s,val):
if isinstance(s,tuple) and len(s)==2:
return self.set(val, False, s[0], s[1])
return self.set(val, False, s)
@property
def nz(self):
return NZproxy(self)
def full(self, *args):
"""
full(self) -> PyObject *
"""
return _casadi.DM_full(self, *args)
def sparse(self, *args):
"""
sparse(self) -> PyObject *
"""
return _casadi.DM_sparse(self, *args)
__array_priority__ = 999.0
def __array_wrap__(self,out_arr,context=None):
if context is None:
return out_arr
name = context[0].__name__
args = list(context[1])
if len(context[1])==3:
raise Exception("Error with %s. Looks like you are using an assignment operator, such as 'a+=b' where 'a' is a numpy type. This is not supported, and cannot be supported without changing numpy." % name)
if "vectorized" in name:
name = name[:-len(" (vectorized)")]
conversion = {"multiply": "mul", "divide": "div", "true_divide": "div", "subtract":"sub","power":"pow","greater_equal":"ge","less_equal": "le", "less": "lt", "greater": "gt"}
if name in conversion:
name = conversion[name]
if len(context[1])==2 and context[1][1] is self and not(context[1][0] is self):
name = 'r' + name
args.reverse()
if not(hasattr(self,name)) or ('mul' in name):
name = '__' + name + '__'
fun=getattr(self, name)
return fun(*args[1:])
def __array__(self,*args,**kwargs):
import numpy as n
if len(args) > 1 and isinstance(args[1],tuple) and isinstance(args[1][0],n.ufunc) and len(args[1])>1 and args[1][0].nin==len(args[1][1]):
if len(args[1][1])==3:
raise Exception("Error with %s. Looks like you are using an assignment operator, such as 'a+=b'. This is not supported when 'a' is a numpy type, and cannot be supported without changing numpy itself. Either upgrade a to a CasADi type first, or use 'a = a + b'. " % args[1][0].__name__)
return n.array([n.nan])
else:
if hasattr(self,'__array_custom__'):
return self.__array_custom__(*args,**kwargs)
else:
try:
return self.full()
except:
raise Exception("Implicit conversion of symbolic CasADi type to numeric matrix not supported.\n"
+ "This may occur when you pass a CasADi object to a numpy function.\n"
+ "Use an equivalent CasADi function instead of that numpy function.")
def __array_custom__(self,*args,**kwargs):
if "dtype" in kwargs and not(isinstance(kwargs["dtype"],n.double)):
return n.array(self.full(),dtype=kwargs["dtype"])
else:
return self.full()
def tocsc(self):
import numpy as np
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from scipy.sparse import csc_matrix
return csc_matrix( (self.nonzeros(),self.row(),self.colind()), shape = self.shape, dtype=np.double )
def toarray(self,simplify=False):
import numpy as np
if simplify:
if self.is_scalar():
return float(self)
elif self.is_vector():
return np.array(self.T.elements())
return np.array(self.T.elements()).reshape(self.shape)
def __nonzero__(self):
if self.numel()!=1:
raise Exception("Only a scalar can be cast to a float")
if self.nnz()==0:
return False
return float(self)!=0
def __abs__(self):
return abs(float(self))
def __setstate__(self, state):
self.__init__(self.from_info(state))
def __getstate__(self):
return self.info()
__swig_destroy__ = _casadi.delete_DM
DM_swigregister = _casadi.DM_swigregister
DM_swigregister(DM)
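# Hedged usage sketch (not from the original wrapper): the constructors and
# accessors documented above can be exercised roughly as follows.
#
#     x = DM(2, 2)                       # 2x2 matrix of structural zeros
#     y = DM([[1.0, 2.0], [3.0, 4.0]])   # dense 2x2 from nested lists
#     y[0, 1]                            # element access via __getitem__
#     y.T                                # transpose property
#     y.full()                           # dense array suitable for numpy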
def DM_binary(*args):
"""
binary(int op, DM x, DM y) -> DM
"""
return _casadi.DM_binary(*args)
def DM_unary(*args):
"""
unary(int op, DM x) -> DM
"""
return _casadi.DM_unary(*args)
def DM_scalar_matrix(*args):
"""
scalar_matrix(int op, DM x, DM y) -> DM
"""
return _casadi.DM_scalar_matrix(*args)
def DM_matrix_scalar(*args):
"""
matrix_scalar(int op, DM x, DM y) -> DM
"""
return _casadi.DM_matrix_scalar(*args)
def DM_matrix_matrix(*args):
"""
matrix_matrix(int op, DM x, DM y) -> DM
"""
return _casadi.DM_matrix_matrix(*args)
def DM_set_max_depth(*args):
"""
set_max_depth(int eq_depth)
"""
return _casadi.DM_set_max_depth(*args)
def DM_get_max_depth(*args):
"""
get_max_depth() -> int
"""
return _casadi.DM_get_max_depth(*args)
def DM_get_input(*args):
"""
get_input(Function f) -> [DM]
"""
return _casadi.DM_get_input(*args)
def DM_get_free(*args):
"""
get_free(Function f) -> [DM]
"""
return _casadi.DM_get_free(*args)
def DM_type_name(*args):
"""
type_name() -> str
"""
return _casadi.DM_type_name(*args)
def DM_triplet(*args):
"""
triplet([int] row, [int] col, DM d) -> DM
triplet([int] row, [int] col, DM d, (int,int) rc) -> DM
triplet([int] row, [int] col, DM d, int nrow, int ncol) -> DM
"""
return _casadi.DM_triplet(*args)
def DM_inf(*args):
"""
create a matrix with all inf
inf(int nrow, int ncol) -> DM
inf((int,int) rc) -> DM
inf(Sparsity sp) -> DM
"""
return _casadi.DM_inf(*args)
def DM_nan(*args):
"""
create a matrix with all nan
nan(int nrow, int ncol) -> DM
nan((int,int) rc) -> DM
nan(Sparsity sp) -> DM
"""
return _casadi.DM_nan(*args)
def DM_eye(*args):
"""
eye(int ncol) -> DM
"""
return _casadi.DM_eye(*args)
def DM_set_precision(*args):
"""
set_precision(int precision)
Set the 'precision, width & scientific' used in printing and serializing to
streams.
"""
return _casadi.DM_set_precision(*args)
def DM_set_width(*args):
"""
set_width(int width)
Set the 'precision, width & scientific' used in printing and serializing to
streams.
"""
return _casadi.DM_set_width(*args)
def DM_set_scientific(*args):
"""
set_scientific(bool scientific)
Set the 'precision, width & scientific' used in printing and serializing to
streams.
"""
return _casadi.DM_set_scientific(*args)
def DM_rng(*args):
"""
rng(int seed)
"""
return _casadi.DM_rng(*args)
def DM_rand(*args):
"""
Create a matrix with uniformly distributed random numbers.
rand(int nrow, int ncol) -> DM
rand((int,int) rc) -> DM
rand(Sparsity sp) -> DM
"""
return _casadi.DM_rand(*args)
def DM_from_info(*args):
"""
from_info(dict info) -> DM
"""
return _casadi.DM_from_info(*args)
class IM(MatrixCommon, GenericExpressionCommon, GenIM, PrintableCommon):
"""
"""
__swig_setmethods__ = {}
for _s in [MatrixCommon, GenericExpressionCommon, GenIM, PrintableCommon]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, IM, name, value)
__swig_getmethods__ = {}
for _s in [MatrixCommon, GenericExpressionCommon, GenIM, PrintableCommon]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, IM, name)
__repr__ = _swig_repr
def sanity_check(self, *args):
"""
[DEPRECATED] Correctness is checked during construction
sanity_check(self, bool complete)
"""
return _casadi.IM_sanity_check(self, *args)
def has_nz(self, *args):
"""
Returns true if the matrix has a non-zero at location rr, cc.
has_nz(self, int rr, int cc) -> bool
"""
return _casadi.IM_has_nz(self, *args)
def __nonzero__(self, *args):
"""
[INTERNAL]
__nonzero__(self) -> bool
"""
return _casadi.IM___nonzero__(self, *args)
def get(self, *args):
"""
get(self, bool ind1, Sparsity sp) -> IM
get(self, bool ind1, Slice rr) -> IM
get(self, bool ind1, IM rr) -> IM
get(self, bool ind1, Slice rr, Slice cc) -> IM
get(self, bool ind1, Slice rr, IM cc) -> IM
get(self, bool ind1, IM rr, Slice cc) -> IM
get(self, bool ind1, IM rr, IM cc) -> IM
"""
return _casadi.IM_get(self, *args)
def set(self, *args):
"""
set(self, IM m, bool ind1, Sparsity sp)
set(self, IM m, bool ind1, Slice rr)
set(self, IM m, bool ind1, IM rr)
set(self, IM m, bool ind1, Slice rr, Slice cc)
set(self, IM m, bool ind1, Slice rr, IM cc)
set(self, IM m, bool ind1, IM rr, Slice cc)
set(self, IM m, bool ind1, IM rr, IM cc)
<filename>ppms/__init__.py
from astropy.io.ascii import basic, core
from astropy.table import Table, MaskedColumn
from astropy import units as u, constants as c
import numpy as np
import dateutil.parser as dparser
from scipy.ndimage import median_filter
class MaglabHeader(basic.CsvHeader):
comment = r'\s*;'
write_comment = ';'
start_line = 1
def get_cols(self, lines):
lines = self.process_lines(lines)
start_line = self.start_line
for i, line in enumerate(lines):
if i == start_line:
break
else: # No header line matching
raise ValueError('No header line found in table')
self.names = [x.strip() for x in next(self.splitter([line]))]
self.units = next(self.splitter([next(lines)]))
self._set_cols_from_names()
for col, unit in zip(self.cols, self.units):
setattr(col, 'unit', unit)
class MaglabData(core.BaseData):
comment = r'\s*;'
write_comment = ';'
start_line = 5
class Maglab(basic.Csv):
"""Reads a Oxford Instruments Maglab data file."""
_format_name = 'maglab'
_io_registry_can_write = False
_description = 'Oxford Instruments Maglab data file reader'
header_class = MaglabHeader
data_class = MaglabData
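# Registering the class above makes the format available to astropy's unified
# I/O machinery; a hypothetical read (the file name is a placeholder):
#
#   >>> from astropy.table import Table
#   >>> tbl = Table.read('maglab_scan.dat', format='ascii.maglab')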
def normalize(table):
data = []
for col in table.columns.values():
if isinstance(col, MaskedColumn) and col.mask.all():
# drop completely empty columns
continue
data.append(col)
return Table(data)
class PPMSHeader(basic.CsvHeader):
UNITS = {
# Heat Capacity Option units
'Seconds': 'second',
'seconds': 'second',
'Oersted': '0.0001 * T',
'Kelvin': 'K',
'µJ/K': 'uJ/K',
'µJ/K/K': 'uJ/K/K',
# ACMS option units
'sec': 'second',
'emu': 'erg/gauss',
'Oe': '0.0001 * T',
'code': None,
}
comment = r'\s*;'
write_comment = ';'
def start_line(self, lines):
return list(lines).index('[Data]') + 1
def _set_cols_from_names(self):
names, units = [], []
for header in self.names:
if '(' in header:
h, u = [x.strip() for x in header.replace(')', '').split('(')]
else:
h, u = header.strip(), None
names.append(h)
if u in self.UNITS:
units.append(self.UNITS[u])
else:
units.append(u)
self.names = names
super(PPMSHeader, self)._set_cols_from_names()
for col, unit in zip(self.cols, units):
if unit:
col.unit = unit
class PPMSData(basic.CsvData):
comment = r'\s*;'
write_comment = ';'
def start_line(self, lines):
return list(lines).index('[Data]') + 2
class PPMSOutputter(core.TableOutputter):
def __call__(self, cols, meta):
cols = [c for c in cols if any(c.str_vals)]
return normalize(super(PPMSOutputter, self).__call__(cols, meta))
class PPMS(basic.Csv):
"""Reads a Quantum Design PPMS data file."""
_format_name = 'ppms'
_io_registry_can_write = False
_description = 'Quantum Design PPMS data file reader'
header_class = PPMSHeader
data_class = PPMSData
outputter_class = PPMSOutputter
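# Likewise for the PPMS reader; this is how the helper functions below load
# raw files (the file name is a placeholder):
#
#   >>> from astropy.table import Table
#   >>> source = Table.read('acms_run.dat', format='ascii.ppms')
#   >>> source.colnames   # e.g. 'Magnetic Field', 'Temperature', 'Measure Type', ...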
fu = u.def_unit(['f.u.', 'formula unit'], u.dimensionless_unscaled)
def acms_legacy(path, volume=None, formula_units=None, mode='acdc'):
"""Reduce and preprocess acms dataset.
..note::
The colnames were changed. This function still uses the old
schema.
:param volume: The sample volume.
:param formula_units: The number of formula units of the sample.
:param mode: Data modes, either 'acdc', 'ac' or 'dc'.
"""
if volume:
if not isinstance(volume, u.Quantity):
raise ValueError('volume must be an astropy Quantity.')
source = Table.read(path, format='ascii.ppms')
# Boolean mask, True for DC magnetisation measurements
dc_mask = source['Measure Type'] == 0
ac_mask = source['Measure Type'] == 5
if mode == 'acdc' and (np.sum(dc_mask) != np.sum(ac_mask)):
raise ValueError('Nonequal number of ac ({}) and dc ({}) measurements'.format(np.sum(ac_mask), np.sum(dc_mask)) )
data = Table(masked=False)
if mode == 'ac':
data['B'] = source[ac_mask]['Magnetic Field'].to(u.T).round(4)
data['T'] = source[ac_mask]['Temperature']
else:
data['B'] = source[dc_mask]['Magnetic Field'].to(u.T).round(4)
data['T'] = source[dc_mask]['Temperature']
if (mode == 'ac') or (mode == 'acdc'):
data['B_ac'] = source[ac_mask]["Amplitude"].to(u.T)
data['m_ac'] = source[ac_mask]["M'"]
data["m'_ac"] = source[ac_mask]["M''"]
if volume:
H = data['B_ac'].quantity / c.mu0
M = data['m_ac'].quantity / volume
M_imag = data["m'_ac"].quantity / volume
data["χ"] = (M / H).si
data["χ'"] = (M_imag / H).si
if (mode == 'dc') or (mode == 'acdc'):
data['m'] = source[dc_mask]['M-DC']
if formula_units:
# calculate magnetic moment per formula unit
data['m_fu'] = data['m'].to(c.muB) / formula_units
if volume:
# calculate magnetisation.
data['M'] = (data['m'].quantity / volume).si
data.meta['temperature'] = np.round(data['T'].mean(), 1)
if volume:
data.meta['volume'] = volume
data.meta['z'] = source['Sample Center'].quantity[0].value
if mode == 'ac' or mode == 'acdc':
data.meta['frequency'] = source[ac_mask]['Frequency'][0]
data.meta['path'] = path
try:
# Try to extract date information from filepath
data.meta['date'] = dparser.parse(path,fuzzy=True)
except ValueError:
pass
return data
def acms(path, volume=None, formula_units=None, demag=None, mode='acdc', scan=None, masked=False):
"""Reduce and preprocess acms dataset.
:param volume: The sample volume.
:param formula_units: The number of formula units of the sample.
:param demag: The demagnetizing factor. To calculate the demagnetizing correction,
the magnetisation M is needed. Therefore the volume is mandatory and it only
works with modes 'dc' or 'acdc'.
:param mode: Data modes, either 'acdc', 'ac' or 'dc'.
:param scan: The scan variable. if scan is 'B' then dM/dH can be calculated.
"""
if mode not in {'ac', 'dc', 'acdc'}:
raise ValueError("invalid mode. Must be one of 'ac', 'dc' or 'acdc'")
if volume:
if not isinstance(volume, u.Quantity):
raise ValueError('volume must be an astropy Quantity.')
if demag:
if volume is None:
raise ValueError(
'volume parameter is necessary to calculate the '
'magnetisation used for the demagnetizing correction.')
if mode == 'ac':
raise ValueError(
"Can't calculate demagnetizing correction with mode"
"'ac'. Magnetisation is neccessary.")
source = Table.read(path, format='ascii.ppms')
if masked:
data = Table()
dc_mask = ac_mask = np.ones(len(source), dtype=bool)
else:
data = Table(masked=False)
# Boolean mask, True for DC magnetisation measurements
dc_mask = source['Measure Type'] == 0
ac_mask = source['Measure Type'] == 5
if mode == 'acdc' and (np.sum(dc_mask) != np.sum(ac_mask)):
raise ValueError('Nonequal number of ac ({}) and dc ({}) measurements'.format(np.sum(ac_mask), np.sum(dc_mask)) )
if mode == 'ac':
data['B'] = source[ac_mask]['Magnetic Field'].to(u.T).round(4)
data['T'] = source[ac_mask]['Temperature']
else:
data['B'] = source[dc_mask]['Magnetic Field'].to(u.T).round(4)
data['T'] = source[dc_mask]['Temperature']
data['H'] = data['B'] / c.mu0
if (mode == 'ac') or (mode == 'acdc'):
data['t_ac'] = source[ac_mask]['Time Stamp']
data['f'] = source[ac_mask]['Frequency']
data['B_ac'] = source[ac_mask]["Amplitude"].to(u.T)
data["m'_ac"] = source[ac_mask]["M'"]
data["m''_ac"] = source[ac_mask]["M''"]
if volume:
H = data['B_ac'].quantity / c.mu0
M = data["m'_ac"].quantity / volume
M_imag = data["m''_ac"].quantity / volume
data["χ'"] = (M / H).si
data["χ''"] = (M_imag / H).si
# Handle higher harmonic susceptibilities
harmonics_real = [x for x in source[ac_mask].columns if x.startswith("M '[")]
for colname in harmonics_real:
i = int(colname[4:-1])
data["m'_ac[{}]".format(i)] = source[ac_mask][colname]
if volume:
M_i = data["m'_ac[{}]".format(i)].quantity / volume
data["χ'[{}]".format(i)] = (M_i / H).si
harmonics_imag = [x for x in source[ac_mask].columns if x.startswith("M ''[")]
for colname in harmonics_imag:
i = int(colname[5:-1])
data["m''_ac[{}]".format(i)] = source[ac_mask][colname]
if volume:
M_imag_i = data["m''_ac[{}]".format(i)].quantity / volume
data["χ''[{}]".format(i)] = (M_imag_i / H).si
if (mode == 'dc') or (mode == 'acdc'):
data['t_dc'] = source[dc_mask]['Time Stamp']
data['m'] = source[dc_mask]['M-DC']
if formula_units:
# calculate magnetic moment per formula unit
data['m_fu'] = data['m'].to(c.muB) / formula_units
if volume:
# calculate magnetisation.
data['M'] = M = (data['m'].quantity / volume).si
if scan == 'B':
data['dM/dH'] = np.gradient(M) / np.gradient(data['H'])
if demag:
demagnetizing_correction(data, demag=demag)
data.meta['temperature'] = np.round(data['T'].mean(), 1)
if volume:
data.meta['volume'] = volume
data.meta['z'] = source['Sample Center'].quantity[0].value
data.meta['path'] = path
try:
# Try to extract date information from filepath
data.meta['date'] = dparser.parse(path, fuzzy=True)
except ValueError:
pass
return data
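# Hypothetical call of acms(); the path, volume and formula-unit count are
# placeholders, not values tied to any real sample:
#
#   >>> import astropy.units as u
#   >>> data = acms('2021-05-12_MvsH_5K.dat',
#   ...             volume=2.1e-3 * u.mm**3,
#   ...             formula_units=4.7e18,
#   ...             demag=1/3,       # demagnetizing factor of a sphere
#   ...             mode='acdc',
#   ...             scan='B')
#   >>> data.colnames                # includes 'B', 'T', 'H', "χ'", "χ''", 'M', 'dM/dH', ...
#   >>> data.meta['temperature']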
def demagnetizing_correction(data, demag):
"""Calculates the demagnetizing correction.
The ac susceptibility is corrected according to [1]
[1]: <NAME>., <NAME>. & <NAME>. AC magnetic
susceptibility technique for the characterization of high
temperature superconductors. Egyptian Journal of Solids 23,
231–250 (2000).
"""
Hext, M = data['H'], data['M']
data['H_int'] = Hint = Hext - demag * M
data['B_int'] = c.mu0 * Hint
#scale = Hext / Hint
#idx = Hext == 0
#scale[idx] = median_filter(scale, size=5)[idx]
for name, col in data.columns.items():
if name == 'dM/dH':
data['dM/dH_int'] = np.gradient(M) / np.gradient(data['H_int'])
elif name == "χ'" or name.startswith("χ'["):
chi_r = col
chi_i = data[name.replace("'", "''")]
data[name + '_int'] = (chi_r - demag * (chi_r**2 + chi_i**2)) / (demag**2 * (chi_r**2 + chi_i**2) - 2 * demag * chi_r + 1)
elif name == "χ''" or name.startswith("χ''["):
chi_i = col
chi_r = data[name.replace("''", "'")]
data[name + '_int'] = chi_i / (demag**2 * (chi_r**2 + chi_i**2) - 2 * demag * chi_r + 1)
#data[name + '_int'] = col * scale
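# For reference, the in-phase correction applied in the loop above is
#   χ'_int  = (χ' − N(χ'² + χ''²)) / (N²(χ'² + χ''²) − 2Nχ' + 1)
#   χ''_int =  χ'' / (N²(χ'² + χ''²) − 2Nχ' + 1)
# with N the demagnetizing factor. A scalar sanity check (numbers are
# illustrative only):
#
#   >>> N, chi_r, chi_i = 1/3, 0.8, 0.05
#   >>> den = N**2 * (chi_r**2 + chi_i**2) - 2 * N * chi_r + 1
#   >>> (chi_r - N * (chi_r**2 + chi_i**2)) / den   # χ'_int
#   >>> chi_i / den                                 # χ''_int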
def magnetic_moment_in_fu(m, formula_units):
"""Converts the magnetic moment from si units to Bohr magneton per formula
units.
:param m: Magnetic moment.
:param formula_units: The number of formula units.
"""
return m.to(c.muB) / formula_units
def heatcapacity(path):
# The HC option sometimes creates comment lines without commas.
with open(path, 'r', encoding='cp1252', newline='') as f:
buffer = ''.join([l for l in f.readlines() if 'Error' not in l])
source = Table.read(buffer, format='ascii.ppms')
data = Table(masked=False)
data['B'] = source['Field'].to(u.T).round(4)
data['T'] = source['System Temp']
data['Tsample'] = source['Sample Temp']
data['Tpuck'] = source['Puck Temp']
data['C'] = source['Total HC']
| |
to the command.
"""
nagios_message = 'Database:%s, Time:%s, Size %s, Quantity : %%s' % \
(dbname, str(row[0]), row[1], )
result = row[2]
error_code, message = nagios_eval(result, warning, critical,
nagios_message, 'MB', verbosity)
return error_code, message
def analyze_resstat(dbname, counter, row, warning, critical, verbosity):
"""Check resource status by comparing total woker time with warning
and critical ranges
dbname - name of database
counter - entry in the COUNTERS list
row - perf data from SQL server as an output of the SQL query/DMV
warning - warning range argument to the command
critical - critical range argument to the command
verbosity - verbose argument to the command.
"""
start_time = str(row[0])
end_time = str(row[1])
usage = str(row[4])
size = str(row[5])
nagios_message = 'Database:%s, Sku:%s - start_time:%s, end_time:%s, '\
'Size:%s, Usage:%%s,' % (row[2], row[3], start_time,
end_time, size, )
result = row[5]
error_code, message = nagios_eval(result, warning, critical,
nagios_message, 'MB', verbosity)
return error_code, message
def analyze_resusage(dbname, counter, row, warning, critical, verbosity):
"""Check resource usage with warning and critical ranges.
Does not use size
dbname - name of database
counter - entry in the COUNTERS list
row - perf data from SQL server as an output of the SQL query/DMV
warning - warning range argument to the command
critical - critical range argument to the command
verbosity - verbose argument to the command.
"""
query_time = str(row[0])
size = str(row[4])
nagios_message = 'Database:%s, Sku:%s - time:%s, Size:%s, Usage:%%s' \
% ( row[1], row[2], query_time, size,)
result = row[3]
error_code, message = nagios_eval(result, warning, critical,
nagios_message, '%', verbosity)
return error_code, message
def analyze_opstatus(dbname, counter, row, warning, critical, verbosity):
"""Analyze operation error_code - success, warning, or error -
comparing the status code with warning and critical ranges
opstatus values are: 0 - success, 1 - warning and 2 - error
use warning <1 (e.g. 0.9) and critical < 2 (e.g. 1.9) to get Nagios status
dbname - name of database
counter - entry in the COUNTERS list
row - perf data from SQL server as an output of the SQL query/DMV
warning - warning range argument to the command
critical - critical range argument to the command
verbosity - verbose argument to the command.
"""
nagios_message = 'Resource desc:%s, Operation:%s, error_desc:%s, '\
'Severity:%s, error_code:%%s' % \
( row[0], row[1], row[3], row[4], )
result = row[2]
error_code, message = nagios_eval(result, warning, critical,
nagios_message, '', verbosity)
return error_code, message
def analyze_conection(dbname, counter, row, warning, critical, verbosity):
"""Analyze connection failures by comparing total woker
time with warning and critical ranges
dbname - name of database
counter - entry in the COUNTERS list
row - perf data from SQL server as an output of the SQL query/DMV
warning - warning range argument to the command
critical - critical range argument to the command
verbosity - verbose argument to the command.
"""
start_time = str(row[1])
end_time = str(row[2])
success = str(row[3])
conn_failure = str(row[5])
term_conn = str(row[6])
throttled_conn = str(row[7])
nagios_message = 'Database:%s, - start_time:%s, end_time:%s, '\
'Success Count:%s, Conn Failure Count:%s, '\
'Terminated Conn: %s, Throttled conn:%s, '\
'Total Failure Count:%%s, ' % (row[0], start_time,
end_time, success,
conn_failure, term_conn,
throttled_conn,)
result = row[4]
error_code, message = nagios_eval(result, warning, critical,
nagios_message, '', verbosity)
return error_code, message
def analyze_eventlog(dbname, counter, row, warning, critical, verbosity):
"""Analyze SQL event log by comparing severity of the log message
with warning and critical ranges
dbname - name of database
counter - entry in the COUNTERS list
row - perf data from SQL server as an output of the SQL query/DMV
warning - warning range argument to the command
critical - critical range argument to the command
verbosity - verbose argument to the command.
"""
database = row[0]
start_time = str(row[1])
count = str(row[8])
nagios_message = 'Database:%s, - start_time:%s, Sub-type descr:%s, '\
'Event count: %s, description:%s, Severity:%%s' % \
(database, start_time, row[6], count, row[9], )
result = row[7]
error_code, message = nagios_eval(result, warning, critical,
nagios_message, '', verbosity)
return error_code, message
SQL_QUERIES = {
'dbsize' : { 'help' : 'Database size',
'query' : DBSIZE_DMV,
'size' : 'single',
'printfn' : analyze_dbsize,
},
'objsize' : { 'help' : 'Database Object Size ',
'query' : OBJSIZE_DMV,
'size' : 'multiple',
'printfn' : analyze_objsize,
},
'connections' : { 'help' : 'Database Connections',
'query' : DBCONNECTIONS_DMV,
'size' : 'multiple',
'printfn' : analyze_conn_info,
},
'top5queries' : { 'help' : 'Top5 Queries',
'query' : TOP5QUERIES_DMV,
'size' : 'multiple',
'printfn' : analyze_top5_queries,
},
'queryplan' : { 'help' : 'Monitor Query Plan',
'query' : QUERYPLAN_DMV,
'size' : 'multiple',
'printfn' : analyze_queryplan,
},
'bwusage' : { 'help' : 'Bandwidth Usage (Cumulative)',
'query' : BWUSAGE_VIEW,
'frequency' : 'hourly',
'size' : 'multiple',
'printfn' : analyze_bwusage,
'db' : 'master',
},
'dbusage' : { 'help' : 'Database usage (Daily)',
'query' : DBUSAGE_VIEW,
'size' : 'multiple',
'printfn' : analyze_dbusage,
'db' : 'master',
'frequency' : 'daily'
},
'resstat' : { 'help' : 'Database Resource Status (Daily)',
'query' : RESSTAT_VIEW,
'size' : 'multiple',
'printfn' : analyze_resstat,
'db' : 'master',
'frequency' : 'hourly'
},
'resusage' : { 'help' : 'Database Resource usage (Daily)',
'query' : RESUSAGE_VIEW,
'size' : 'multiple',
'printfn' : analyze_resusage,
'db' : 'master',
'frequency' : 'daily'
},
'opstatus' : { 'help' : 'Database Op Status (Daily)',
'query' : OPSTATUS_VIEW,
'size' : 'multiple',
'printfn' : analyze_opstatus,
'db' : 'master',
},
'dbconnection' : { 'help' : 'Database connection stat (Daily)',
'query' : DBCONNECTION_VIEW,
'size' : 'multiple',
'printfn' : analyze_conection,
'db' : 'master',
'frequency' : '5min'
},
'eventlog' : { 'help' : 'Event_log',
'query' : EVENTLOG_VIEW,
'size' : 'multiple',
'printfn' : analyze_eventlog,
'db' : 'master',
'frequency' : 'hourly'
},
}
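# Rough sketch of how an SQL_QUERIES entry is consumed (the row contents here
# are made up and only mimic how analyze_dbsize indexes its row; the real
# dispatch happens in execute_query below):
#
#   entry = SQL_QUERIES['dbsize']
#   row = ('2021-01-01 00:00:00', 250, 260)   # (time, size, quantity) -- guessed shape
#   code, msg = entry['printfn']('mydb', entry, row,
#                                warning='500', critical='1000', verbosity=0)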
def handle_args():
"""Create the parser, parse the args, and return them."""
parser = argparse.ArgumentParser(description='Check SQL Azure',
epilog='(c) MS Open Tech')
parser.add_argument('hostname', help='Azure SQL Server Address to check')
parser.add_argument(
'-u', '--username',
required=True,
help='Specify MSSQL User Name',
dest='user')
parser.add_argument(
'-p', '--password',
required=False,
help='Specify MSSQL Password',
dest='password')
parser.add_argument(
'-d', '--database',
required=True,
help='Specify Azure DB',
dest='database')
parser.add_argument('-w', '--warning', required=False, dest='warning',
help='Specify warning range')
parser.add_argument('-c', '--critical', required=False, dest='critical',
help='Specify critical range')
parser.add_argument('-k', '--key', required=True, dest='key',
help='Specify key for the DMV or SQL view')
parser.add_argument('-v', '--verbose', action='count',
default=0, help='verbosity')
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
return parser.parse_args()
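# A hypothetical invocation of this plugin (script name, host and thresholds
# are placeholders):
#
#   python check_azure_sql.py myserver.database.windows.net \
#       -u monitor -p secret -d mydb -k dbsize -w 500 -c 1000 -vv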
def setup_logger(verbose):
"""Creates a logger, using the verbosity, and returns it."""
global logger
logger = logging.getLogger()
if verbose >= 3:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARNING)
logger.addHandler(logging.StreamHandler())
def connect_db(options, db):
"""Connects to SQL azure database, using command line options."""
host = options.hostname
start = datetime.now()
if os.name != 'nt':
mssql = pyodbc.connect(
driver='FreeTDS',
TDS_Version = '8.0', # TDS protocol version needed for SQL Azure
server = host,
port = 1433,
database = db,
uid = options.user,
pwd = options.password)
else:
try:
connstr = 'Driver={SQL Server Native Client 10.0};Server=tcp:'+\
host+',1433;Database='+db+';Uid='+options.user+';Pwd='+\
options.password+';Encrypt=yes;Connection Timeout=30;'
mssql = pyodbc.connect(connstr)
except Exception:
return None, 0
total = datetime.now() - start
return mssql, total
def execute_query(mssql, dbname, sq_query, warning = None, critical = None,
verbosity = 0):
"""execute SQL query and by comparing severity of the log message with
warning and critical ranges
mssql - mssql object
dbname - name of database
sq_query - entry in the COUNTERS list output of the SQL query/DMV
warning - warning range argument to the command
critical - critical range argument to the command
verbosity - verbose argument to the command.
"""
total_error_code = 0
errors = []
query = sq_query['query']
if 'frequency' in sq_query:
if sq_query['frequency'] == 'hourly':
latest_utctime = datetime.utcnow()
hourly_query_clause = (latest_utctime -
timedelta(hours = 1, minutes = 30)).\
strftime('\'%Y%m%d %H:00:00\'')
query = query % hourly_query_clause
elif sq_query['frequency'] == 'daily':
latest_utctime = datetime.utcnow()
daily_query_clause = (latest_utctime -
timedelta(days = 1, hours = 12)).\
strftime('\'%Y%m%d %H:00:00\'')
query = query % daily_query_clause
elif sq_query['frequency'] == '5min':
latest_utctime = datetime.utcnow()
daily_query_clause = (latest_utctime -
timedelta(minutes = 8)).\
strftime('\'%Y%m%d %H:%M:00\'')
query = query % daily_query_clause
cur = | |
colors=["#FF0000", "#A00000"],
style='number-style-var-arg',
label=[_('Python'), 'f(x,y,z)', ' '],
prim_name='myfunction3',
default=['x+y+z', 100, 100, 100],
string_or_number=True,
help_string=_('a programmable block: used to add \
advanced multi-variable math equations, e.g., sin(x+y+z)'))
self.tw.lc.def_prim(
'myfunction3', 4,
Primitive(self.tw.lc.prim_myfunction, return_type=TYPE_FLOAT,
arg_descs=[ArgSlot(TYPE_STRING), ArgSlot(TYPE_FLOAT),
ArgSlot(TYPE_FLOAT), ArgSlot(TYPE_FLOAT)]))
palette.add_block('cartesian',
style='basic-style-extended-vertical',
label=_('Cartesian'),
prim_name='cartesian',
help_string=_('displays Cartesian coordinates'))
self.tw.lc.def_prim('cartesian', 0,
lambda self: self.tw.set_cartesian(True))
palette.add_block('userdefined',
style='basic-style-var-arg',
label=' ',
prim_name='userdefined',
string_or_number=True,
special_name=_('Python block'),
default=100,
help_string=_('runs code found in the tamyblock.py \
module found in the Journal'))
self.tw.lc.def_prim('userdefined', 1,
Primitive(self.tw.lc.prim_myblock,
arg_descs=[ArgSlot(TYPE_OBJECT)]))
BLOCKS_WITH_SKIN.append('userdefined')
PYTHON_SKIN.append('userdefined')
palette.add_block('userdefined2args',
hidden=True,
colors=["#FF0000", "#A00000"],
style='basic-style-var-arg',
label=' ',
prim_name='userdefined2',
string_or_number=True,
special_name=_('Python block'),
default=[100, 100],
help_string=_('runs code found in the tamyblock.py \
module found in the Journal'))
self.tw.lc.def_prim('userdefined2', 2,
Primitive(self.tw.lc.prim_myblock,
arg_descs=[ArgSlot(TYPE_OBJECT),
ArgSlot(TYPE_OBJECT)]))
BLOCKS_WITH_SKIN.append('userdefined2args')
PYTHON_SKIN.append('userdefined2args')
palette.add_block('userdefined3args',
hidden=True,
colors=["#FF0000", "#A00000"],
style='basic-style-var-arg',
label=' ',
prim_name='userdefined3',
special_name=_('Python block'),
default=[100, 100, 100],
string_or_number=True,
help_string=_('runs code found in the tamyblock.py \
module found in the Journal'))
self.tw.lc.def_prim('userdefined3', 3,
Primitive(self.tw.lc.prim_myblock,
arg_descs=[ArgSlot(TYPE_OBJECT),
ArgSlot(TYPE_OBJECT),
ArgSlot(TYPE_OBJECT)]))
BLOCKS_WITH_SKIN.append('userdefined3args')
PYTHON_SKIN.append('userdefined3args')
MEDIA_SHAPES.append('pythonsmall')
MEDIA_SHAPES.append('pythonoff')
MEDIA_SHAPES.append('pythonon')
palette.add_block('getfromurl',
style='number-style-1arg',
#TRANS: URL is universal resource locator
label=_('URL'),
default=\
'http://wiki.sugarlabs.org/images/2/2c/Logo_alt_3.svg',
prim_name='getfromurl',
help_string=\
_('gets a text string or an image from a URL'))
self.tw.lc.def_prim('getfromurl', 1,
Primitive(self.tw.lc.get_from_url,
arg_descs=[ArgSlot(TYPE_STRING)]))
palette.add_block('skin',
hidden=True,
colors=["#FF0000", "#A00000"],
style='basic-style-1arg',
label=_('turtle shell'),
prim_name='skin',
help_string=_("put a custom 'shell' on the turtle"))
self.tw.lc.def_prim('skin', 1,
Primitive(self.tw.lc.reskin,
arg_descs=[ArgSlot(TYPE_OBJECT)]))
# macro
palette.add_block('reskin',
style='basic-style-1arg',
label=_('turtle shell'),
help_string=_("put a custom 'shell' on the turtle"))
palette.add_block('addturtle',
style='basic-style-1arg',
label=_('turtle'),
prim_name='addturtle',
default=1,
string_or_number=True,
help_string=_('chooses which turtle to command'))
self.tw.lc.def_prim('addturtle', 1,
Primitive(self.tw.lc.prim_turtle,
arg_descs=[ArgSlot(TYPE_STRING)]))
palette.add_block('turtlex',
style='number-style-1arg',
label=_('turtle x'),
prim_name='turtlex',
default=['Yertle'],
help_string=_('Returns x coordinate of turtle'))
self.tw.lc.def_prim(
'turtlex', 1,
Primitive(self.tw.turtles.get_turtle_x,
arg_descs=[ArgSlot(TYPE_OBJECT)],
return_type=TYPE_BOX))
palette.add_block('turtley',
style='number-style-1arg',
label=_('turtle y'),
prim_name='turtley',
default=['Yertle'],
help_string=_('Returns y coordinate of turtle'))
self.tw.lc.def_prim(
'turtley', 1,
Primitive(self.tw.turtles.get_turtle_y,
arg_descs=[ArgSlot(TYPE_OBJECT)],
return_type=TYPE_BOX))
palette.add_block('activeturtle',
style='box-style',
label=_('active turtle'),
prim_name='activeturtle',
value_block=True,
help_string=_('the name of the active turtle'))
self.tw.lc.def_prim(
'activeturtle', 0,
Primitive(Turtle.get_name,
return_type=TYPE_BOX))
palette.add_block('turtleh',
style='number-style-1arg',
label=_('turtle heading'),
prim_name='turtleh',
default=['Yertle'],
help_string=_('Returns heading of turtle'))
self.tw.lc.def_prim(
'turtleh', 1,
Primitive(self.tw.turtles.get_turtle_heading,
arg_descs=[ArgSlot(TYPE_OBJECT)],
return_type=TYPE_BOX))
palette.add_block('sandwichclampcollapsed',
hidden=True,
style='clamp-style-collapsed',
label=_('click to open'),
prim_name='clamp',
special_name=_('top'),
help_string=_('top of a collapsed stack'))
palette.add_block('loadpalette',
style='basic-style-1arg',
string_or_number=True,
label=_('select palette'),
prim_name='loadpalette',
default=_('turtle'),
help_string=_('selects a palette'))
self.tw.lc.def_prim('loadpalette', 1,
Primitive(self.tw.prim_load_palette,
export_me=False,
arg_descs=[ArgSlot(TYPE_STRING)]))
palette.add_block('loadblock',
style='basic-style-var-arg',
label=_('load'),
prim_name='loadblock',
default=_('forward'),
help_string=_('loads a block'))
self.tw.lc.def_prim('loadblock', 1,
Primitive(self.tw.prim_load_block,
export_me=False,
arg_descs=[ArgSlot(TYPE_STRING)]))
palette.add_block('loadblock2arg',
style='basic-style-var-arg',
hidden=True,
label=_('load'),
prim_name='loadblock2',
string_or_number=True,
default=[_('forward'), 100],
help_string=_('loads a block'))
self.tw.lc.def_prim('loadblock2', 2,
Primitive(self.tw.prim_load_block,
export_me=False,
arg_descs=[ArgSlot(TYPE_STRING),
ArgSlot(TYPE_OBJECT)]))
palette.add_block('loadblock3arg',
style='basic-style-var-arg',
hidden=True,
label=_('load'),
string_or_number=True,
prim_name='loadblock3',
default=[_('setxy'), 0, 0],
help_string=_('loads a block'))
self.tw.lc.def_prim('loadblock3', 3,
Primitive(self.tw.prim_load_block,
export_me=False,
arg_descs=[ArgSlot(TYPE_STRING),
ArgSlot(TYPE_OBJECT),
ArgSlot(TYPE_OBJECT)]))
# macro
palette.add_block('indexblock',
style='basic-style-extended-vertical',
label=_('index'),
help_string=_('return the text of the positions'))
palette.add_block('index',
hidden=True,
style='number-style-var-3arg',
label=[_('index') + '\n\n', _('string'),
_('start'), _('end')],
prim_name='index',
default=[_('text'), 0, 1],
string_or_number=True,
help_string=_('return the text of the positions'))
self.tw.lc.def_prim('index', 3,
Primitive(self.prim_index,
return_type=TYPE_STRING,
arg_descs=[ArgSlot(TYPE_STRING),
ArgSlot(TYPE_INT),
ArgSlot(TYPE_INT)]))
def _portfolio_palette(self):
palette = make_palette('portfolio',
colors=["#0606FF", "#0606A0"],
help_string=_('Palette of presentation \
templates'),
position=9,
translation=_('portfolio'))
palette.add_block('hideblocks',
style='basic-style-extended-vertical',
label=_('hide blocks'),
prim_name='hideblocks',
help_string=_('declutters canvas by hiding blocks'))
self.tw.lc.def_prim(
'hideblocks', 0,
Primitive(self._prim_hideblocks, export_me=False))
palette.add_block('showblocks',
style='basic-style-extended-vertical',
label=_('show blocks'),
prim_name='showblocks',
help_string=_('restores hidden blocks'))
self.tw.lc.def_prim(
'showblocks', 0,
Primitive(self._prim_showblocks, export_me=False))
palette.add_block('fullscreen',
style='basic-style-extended-vertical',
label=_('Fullscreen').lower(),
prim_name='fullscreen',
help_string=_('hides the Sugar toolbars'))
self.tw.lc.def_prim(
'fullscreen', 0,
Primitive(self.tw.set_fullscreen, export_me=False))
primitive_dictionary['bulletlist'] = self._prim_list
palette.add_block('bulletlist',
hidden=True,
colors=["#0606FF", "#0606A0"],
style='bullet-style',
label=_('list'),
string_or_number=True,
prim_name='bulletlist',
default=['∙ ', '∙ '],
help_string=_('presentation bulleted list'))
self.tw.lc.def_prim('bulletlist', 1,
primitive_dictionary['bulletlist'], True)
# macros
palette.add_block('list',
style='basic-style-extended',
label=' ',
help_string=_('presentation template: list of \
bullets'))
MEDIA_SHAPES.append('list')
palette.add_block('1x1a',
style='basic-style-extended',
label=' ',
help_string=_('presentation template: select \
Journal object (no description)'))
MEDIA_SHAPES.append('1x1a')
palette.add_block('1x1',
style='basic-style-extended',
label=' ',
help_string=_('presentation template: select \
Journal object (with description)'))
MEDIA_SHAPES.append('1x1')
palette.add_block('2x2',
style='basic-style-extended',
label=' ',
help_string=_('presentation template: select four \
Journal objects'))
MEDIA_SHAPES.append('2x2')
palette.add_block('2x1',
style='basic-style-extended',
label=' ',
help_string=_('presentation template: select two \
Journal objects'))
MEDIA_SHAPES.append('2x1')
palette.add_block('1x2',
style='basic-style-extended',
label=' ',
help_string=_('presentation template: select two \
Journal objects'))
MEDIA_SHAPES.append('1x2')
# Display-dependent constants
palette.add_block('leftpos',
style='box-style',
label=_('left'),
prim_name='lpos',
logo_command='lpos',
help_string=_('xcor of left of screen'))
self.tw.lc.def_prim(
'lpos', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('leftpos')]))
palette.add_block('bottompos',
style='box-style',
label=_('bottom'),
prim_name='bpos',
logo_command='bpos',
help_string=_('ycor of bottom of screen'))
self.tw.lc.def_prim(
'bpos', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('bottompos')]))
palette.add_block('width',
style='box-style',
label=_('width'),
prim_name='hres',
logo_command='width',
help_string=_('the canvas width'))
self.tw.lc.def_prim(
'hres', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('width')]))
palette.add_block('rightpos',
style='box-style',
label=_('right'),
prim_name='rpos',
logo_command='rpos',
help_string=_('xcor of right of screen'))
self.tw.lc.def_prim(
'rpos', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('rightpos')]))
palette.add_block('toppos',
style='box-style',
label=_('top'),
prim_name='tpos',
logo_command='tpos',
help_string=_('ycor of top of screen'))
self.tw.lc.def_prim(
'tpos', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('toppos')]))
palette.add_block('height',
style='box-style',
label=_('height'),
prim_name='vres',
logo_command='height',
help_string=_('the canvas height'))
self.tw.lc.def_prim(
'vres', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('height')]))
palette.add_block('titlex',
hidden=True,
colors=["#0606FF", "#0606A0"],
style='box-style',
label=_('title x'),
logo_command='titlex',
prim_name='titlex')
self.tw.lc.def_prim(
'titlex', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('titlex')]))
palette.add_block('titley',
hidden=True,
colors=["#0606FF", "#0606A0"],
style='box-style',
label=_('title y'),
logo_command='titley',
prim_name='titley')
self.tw.lc.def_prim(
'titley', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('titley')]))
palette.add_block('leftx',
hidden=True,
colors=["#0606FF", "#0606A0"],
style='box-style',
label=_('left x'),
prim_name='leftx',
logo_command='leftx')
self.tw.lc.def_prim(
'leftx', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('leftx')]))
palette.add_block('topy',
hidden=True,
colors=["#0606FF", "#0606A0"],
style='box-style',
label=_('top y'),
prim_name='topy',
logo_command='topy')
self.tw.lc.def_prim(
'topy', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('topy')]))
palette.add_block('rightx',
hidden=True,
colors=["#0606FF", "#0606A0"],
style='box-style',
label=_('right x'),
prim_name='rightx',
logo_command='rightx')
self.tw.lc.def_prim(
'rightx', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('rightx')]))
palette.add_block('bottomy',
hidden=True,
colors=["#0606FF", "#0606A0"],
style='box-style',
label=_('bottom y'),
prim_name='bottomy',
logo_command='bottomy')
self.tw.lc.def_prim(
'bottomy', 0,
Primitive(CONSTANTS.get, return_type=TYPE_INT,
arg_descs=[ConstantArg('bottomy')]))
def _myblocks_palette(self):
''' User-defined macros are saved as a json-encoded file;
these get loaded into a palette on startup '''
if hasattr(self.tw, 'macros_path') and \
os.path.exists(self.tw.macros_path):
files = get_endswith_files(self.tw.macros_path, ".tb")
if len(files) > 0:
palette = make_palette(
'myblocks',
colors=["#FFFF00", "#A0A000"],
help_string=_('Palette of user-defined operators'),
translation=_('my blocks'))
for tafile in files:
data = data_from_file(tafile)
name = os.path.basename(tafile)[:-3]
# print 'loading macro %s' % (name)
MACROS['user-defined-' + name] = hat_on_top(listify(data))
palette.add_block('user-defined-' + name,
style='basic-style-extended-vertical',
label=name)
# Block primitives
def after_keypress(self):
if self.tw.lc.update_values:
if self.tw.keypress in KEY_DICT:
if KEY_DICT[self.tw.keypress] in REVERSE_KEY_DICT:
self.tw.lc.update_label_value(
'keyboard', REVERSE_KEY_DICT[
KEY_DICT[self.tw.keypress]])
else:
self.tw.lc.update_label_value(
'keyboard', chr(KEY_DICT[self.tw.keypress]))
elif self.tw.keyboard > 0:
self.tw.lc.update_label_value('keyboard',
chr(self.tw.keyboard))
self.tw.keypress = ''
def after_pop(self, *ignored_args):
if self.tw.lc.update_values:
if not self.tw.lc.heap:
self.tw.lc.update_label_value('pop')
else:
self.tw.lc.update_label_value('pop', self.tw.lc.heap[-1])
def after_push(self, *ignored_args):
if self.tw.lc.update_values:
if not self.tw.lc.heap:
self.tw.lc.update_label_value('pop')
else:
self.tw.lc.update_label_value('pop', self.tw.lc.heap[-1])
def prim_speak(self, text):
""" Speak text """
if type(text) == float and int(text) == text:
text = int(text)
lang = os.environ.get('LANG', 'en')[0:2]
if lang in VOICES:
language_option = '-v ' + VOICES[lang]
else:
language_option = ''
os.system('espeak %s "%s" --stdout | aplay' %
(language_option, str(text)))
if self.tw.sharing():
if language_option == '':
event = 'S|%s' % (data_to_string([self.tw.nick, 'None', text]))
else:
event = 'S|%s' % (data_to_string([self.tw.nick,
language_option, text]))
self.tw.send_event(event)
def prim_sinewave(self, pitch, amplitude, duration):
""" Create a Csound score to play a sine wave. """
self.orchlines = []
self.scorelines = []
self.instrlist = []
try:
pitch = abs(float(pitch))
amplitude = abs(float(amplitude))
duration = abs(float(duration))
except ValueError:
self.tw.lc.stop_logo()
raise logoerror("#notanumber")
self._play_sinewave(pitch, amplitude, duration)
if self.tw.running_sugar:
path = os.path.join(get_path(self.tw.activity, 'instance'),
'tmp.csd')
else:
path = os.path.join(tempfile.gettempdir(), 'tmp.csd')
# Create a csound file from the score.
self._audio_write(path)
# Play the csound file.
os.system('csound ' + path + ' > /dev/null 2>&1')
def _play_sinewave(self, pitch, amplitude, duration, starttime=0,
pitch_envelope=99, amplitude_envelope=100,
instrument=1):
pitenv = pitch_envelope
ampenv = amplitude_envelope
if 1 not in self.instrlist:
self.orchlines.append("instr 1\n")
self.orchlines.append("kpitenv oscil 1, 1/p3, p6\n")
self.orchlines.append("aenv oscil 1, 1/p3, p7\n")
self.orchlines.append("asig oscil p5*aenv, p4*kpitenv, p8\n")
self.orchlines.append("out asig\n")
self.orchlines.append("endin\n\n")
self.instrlist.append(1)
self.scorelines.append("i1 %s %s %s %s %s %s %s\n" %
(str(starttime), str(duration), str(pitch),
str(amplitude), str(pitenv), str(ampenv),
str(instrument)))
def _audio_write(self, file):
""" Compile a .csd file. """
csd = open(file, "w")
csd.write("<CsoundSynthesizer>\n\n")
csd.write("<CsOptions>\n")
csd.write("-+rtaudio=alsa -odevaudio -m0 -d -b256 -B512\n")
csd.write("</CsOptions>\n\n")
csd.write("<CsInstruments>\n\n")
csd.write("sr=16000\n")
csd.write("ksmps=50\n")
csd.write("nchnls=1\n\n")
for line in self.orchlines:
csd.write(line)
csd.write("\n</CsInstruments>\n\n")
csd.write("<CsScore>\n\n")
csd.write("f1 0 2048 10 1\n")
csd.write("f2 0 2048 10 1 0 .33 0 .2 0 .143 0 .111\n")
csd.write("f3 0 2048 10 1 .5 .33 .25 .2 .175 .143 .125 .111 .1\n")
csd.write("f10 0 2048 10 1 0 0 .3 0 .2 0 0 .1\n")
csd.write("f99 0 2048 7 1 2048 1\n")
csd.write("f100 0 2048 7 0. 10 1. 1900 1. 132 0.\n")
csd.write(self.scorelines.pop())
csd.write("e\n")
csd.write("\n</CsScore>\n")
csd.write("\n</CsoundSynthesizer>")
csd.close()
def after_mouse_x(self):
""" Show mouse x coordinate """
if self.tw.lc.update_values:
self.tw.lc.update_label_value('mousex', self.tw.get_mouse_x())
def after_mouse_y(self):
""" Show mouse y coordinate """
if self.tw.lc.update_values:
self.tw.lc.update_label_value('mousey', self.tw.get_mouse_y())
def after_see(self):
""" Show color under turtle """
if self.tw.lc.update_values:
self.tw.lc.update_label_value(
'see',
self.tw.turtles.get_active_turtle().get_color_index())
def _prim_list(self, blklist):
""" Expandable list block """
self.tw.lc.showlist(blklist)
self.tw.lc.ireturn()
yield True
def after_time(self, elapsed_time):
""" Update the label of the 'time' block after computing the new
value. """
if self.tw.lc.update_values:
self.tw.lc.update_label_value('time', elapsed_time)
def _prim_hideblocks(self):
""" hide blocks and show showblocks button """
self.tw.hideblocks()
self.tw.lc.trace = 0
self.tw.step_time = 0
if self.tw.running_sugar:
self.tw.activity.stop_turtle_button.set_icon("hideshowoff")
self.tw.activity.stop_turtle_button.set_tooltip(_('Show blocks'))
def _prim_showblocks(self):
""" show blocks and show stop turtle button """
self.tw.showblocks()
self.tw.lc.trace = 1
self.tw.step_time = 3
if self.tw.running_sugar:
self.tw.activity.stop_turtle_button.set_icon("stopiton")
self.tw.activity.stop_turtle_button.set_tooltip(_('Stop turtle'))
def after_set(self, name, value=None):
''' Update the associated value blocks '''
| |
= {}
for var in dep:
dep_per[var] = dep[var] / total_corr * 100
sorted_deps = dict(sorted(dep_per.items(),
key=lambda item: item[1], reverse=True))
for dep in sorted_deps:
name = dep
if name == 'bpm':
name = 'BPM'
else:
name = name.capitalize()
dep_str += '`{:<6}:` `{:<6}%`\n'.format(name, round(sorted_deps[dep], 2))
else:
dep_str = ''
# favorite mappers
sorted_mappers = dict(sorted(freq_list['mappers'].items(),
key=lambda item: item[1], reverse=True))
mappers_str = ''
counter = 0
for mapper in sorted_mappers: # limit to the top few mappers
# mod combo (count, pp, % of total)
top_idx = [i for i, x in enumerate(stats['mappers']) if x == mapper]
pp_mod_combo = np.array(stats['pp'])[top_idx]
pp_w_mod_combo = np.array(stats['pp_w'])[top_idx]
mappers_str += '`{:<15}:` `{:<2}` [`{:<8}`(`{:<5}%`)|`{:<8}`(`{:<5}%`)]\n'.format(
mapper, sorted_mappers[mapper],
round(np.sum(pp_w_mod_combo), 1),
round(np.sum(pp_w_mod_combo)/pp_w_total * 100, 1),
round(np.sum(pp_mod_combo), 1),
round(np.sum(pp_mod_combo)/pp_total * 100, 1))
counter += 1
if counter > 5:
break
# footer----------------
# get online/offline status
if 'is_online' in user.keys():
icon_url = self._get_online_icon(user['is_online'])
else:
icon_url = self.owoAPI.get_server_avatar(api)
# check if verified
verified = ""
if 'discord' in user.keys():
user_discrim_str = "{0.name}#{0.discriminator}".format(server_user)
if user['discord'] == user_discrim_str:
verified = " | Verified"
# embed -----------
em = discord.Embed(description='', colour=server_user.colour)
em.set_author(name="Top {} Stats in {} for {}".format(
len(stats['pp']), gamemode_text, user['username']),
icon_url = flag_url, url = user_url)
em.add_field(name='Performance Spread (┃= avg):',
value=stats_str, inline=False)
if dep_str:
em.add_field(name='PP Attr. Dependencies (/100):', value=dep_str, inline=True)
em.add_field(name='Favourite Mods:', value=mod_str, inline=True)
em.add_field(name='Favourite Mod Combos (#, Weighted|Unweighted PP):', value=mod_combos_str, inline=False)
em.add_field(name='Favourite Mappers (#, Weighted|Unweighted PP):', value=mappers_str, inline=False)
em.set_thumbnail(url=profile_url)
"""
if 'cover' in user.keys():
em.set_image(url=user['cover']['url'])
"""
em.set_footer(text = "On osu! {} Server{}".format(
self.owoAPI.get_server_name(api), verified), icon_url=icon_url)
return em
def _count_freq(self, data_list):
freq_dict = {}
for el in data_list:
if el not in freq_dict:
freq_dict[str(el)] = 0
freq_dict[str(el)] += 1
return freq_dict
def _draw_box_whisker(self, data_list, name='None', total_length=20):
start_char = '╠'
end_char = '╣'
fill_char = '═'
mean_char = '┃'
med_char = '┋'
d_mean = np.mean(data_list)
d_min = np.min(data_list)
d_max = np.max(data_list)
d_median = np.median(data_list)
bx_whisker = fill_char * total_length
bx_whisker = list(bx_whisker)
med_idx = int(round((d_median - d_min) /(d_max - d_min) * total_length))
# bx_whisker[med_idx] = med_char
mean_idx = int(round((d_mean - d_min) /(d_max - d_min) * total_length))
bx_whisker[mean_idx] = mean_char
# replace the letters with parameter
mean_str = '{:.2f}'.format(d_mean)
if mean_idx/total_length > 0.5:
bx_whisker[mean_idx-len(mean_str):mean_idx] = '{:.2f}'.format(d_mean)
else:
bx_whisker[mean_idx+1:mean_idx+len(mean_str)+1] = '{:.2f}'.format(d_mean)
bx_whisker = ''.join(bx_whisker)
# print(bx_whisker)
# make pretty
bx_whisker_full = '`{:<6}:` `{:<6.2f}` `{}{}{}` `{:<6.2f}`'.format(
name, float(d_min), start_char, bx_whisker, end_char, float(d_max))
"""
bx_whisker_full = '`{:<6}: {}{}{}`\n'.format(
name, start_char, bx_whisker, end_char)
labels = '`{}{:<6.2f}{}{:<6.2f}{}{:<6.2f}`'.format(
8*' ', float(d_min), (mean_idx-len('{:<6.2f}'.format(float(d_min))))*' ',
d_mean, 6*' ', float(d_max))
bx_whisker_full += labels"""
return bx_whisker_full
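# Illustrative call of the helper above (numbers are made up); it returns a
# monospace string with min/max at the ends and '┃' marking the mean,
# roughly:  `PP    :` `92.10 ` `╠══...┃...══╣` `145.70`
#
#   >>> self._draw_box_whisker([92.1, 110.5, 130.0, 145.7], name='PP')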
async def _create_graph_embed(self, ctx, user, gamemode=0, api='bancho'):
server = ctx.message.guild
server_user = ctx.message.author
# info
profile_url = await self.owoAPI.get_user_avatar(user['user_id'], api)
user_url = self.owoAPI.get_user_url(user['user_id'], api)
gamemode_text = utils.get_gamemode_text(gamemode)
em = discord.Embed(description='', colour=server_user.colour)
em.set_author(name="{} Rank graph for {}".format(gamemode_text, user['username']), icon_url = profile_url, url = user_url)
colour = (server_user.colour.r/255, server_user.colour.g/255, server_user.colour.b/255)
#try:
replays_watched = user["replays_watched_counts"]
monthly_playcounts = user["monthly_playcounts"]
discord_file, url = await drawing.plot_profile(user['rank_history']["data"],
replays_watched, monthly_playcounts, color = colour)
em.set_image(url=url)
return em, discord_file
async def _create_recent_embed(self, ctx, user, gamemode=0, api='bancho'):
server = ctx.message.guild
server_user = ctx.message.author
recent = await self.owoAPI.get_user_activity(user['user_id'], api=api)
# info
profile_url = await self.owoAPI.get_user_avatar(user['user_id'], api)
user_url = self.owoAPI.get_user_url(user['user_id'], api)
em = discord.Embed(description='', colour=server_user.colour)
em.set_author(name="Recent activity for {}".format(user['username']), icon_url = profile_url, url = user_url)
colour = (server_user.colour.r/255, server_user.colour.g/255, server_user.colour.b/255)
msg = ''
for activity in recent:
act_time = datetime.datetime.strptime(activity['date'], '%Y-%m-%d %H:%M:%S')
try:
timeago = utils.time_ago(datetime.datetime.now(), act_time, shift = 4, abbr=True)
except Exception:
# fall back to no timezone shift if the shifted conversion fails
timeago = utils.time_ago(datetime.datetime.now(), act_time, shift = 0, abbr=True)
if activity['type'] == 'rank':
beatmap_id = activity['beatmap']['url']
beatmap_id = beatmap_id[int(beatmap_id.rfind('/'))+1:]
beatmap_id = beatmap_id.replace('/','')
url = 'https://osu.ppy.sh/b/' + beatmap_id
msg += "▸ Achieved {} (`#{}`) on [{}]({}) ({})\n".format(
self.RANK_EMOTES[activity['scoreRank']],
activity['rank'],activity['beatmap']['title'], url, timeago)
elif activity['type'] == 'rankLost':
beatmap_id = activity['beatmap']['url']
beatmap_id = beatmap_id[int(beatmap_id.rfind('/'))+1:]
beatmap_id = beatmap_id.replace('/','')
url = 'https://osu.ppy.sh/b/' + beatmap_id
msg += "▸ Lost first place on [{}]({}) ({})\n".format(
activity['beatmap']['title'], url, timeago)
elif activity['type'] == 'nameChange':
pass
elif activity['type'] == 'beatmapsetUpload':
beatmapset_id = activity['beatmapset']['url']
beatmapset_id = beatmapset_id[int(beatmapset_id.rfind('/'))+1:]
beatmapset_id = beatmapset_id.replace('/','')
url = 'https://osu.ppy.sh/s/' + beatmapset_id
msg += "▸ Updated beatmap set [{}]({}) ({})\n".format(
activity['beatmapset']['title'], url, timeago)
if msg == '':
msg = "No recent activity."
em.description = msg
return em
async def _create_most_played_embed(self, ctx, user, gamemode=0, api='bancho'):
server = ctx.message.guild
server_user = ctx.message.author
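# NOTE: `soup` below is assumed to be a parsed (BeautifulSoup-style) copy of
# the player's osu! profile page fetched elsewhere in this cog; it is not
# defined in this snippet. The same applies to the scraping helpers that follow.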
info = soup.find("script", {"id": "json-extras"}, type='application/json')
web_data = json.loads(info.text)
most_played = web_data['beatmapPlaycounts']
gamemode_text = utils.get_gamemode_text(gamemode)
# info
user_url = 'https://osu.ppy.sh/u/{}'.format(user['user_id'])
profile_url ='https://a.ppy.sh/{}'.format(user['user_id'])
em = discord.Embed(description='', colour=server_user.colour)
em.set_author(name="Most played maps for {}".format(user['username']), icon_url = profile_url, url = user_url)
colour = (server_user.colour.r/255, server_user.colour.g/255, server_user.colour.b/255)
msg = ''
count = 1
for ct_beatmap in most_played:
beatmap = ct_beatmap['beatmap']
beatmapset = ct_beatmap['beatmapset']
url = 'https://osu.ppy.sh/s/' + str(beatmapset['id'])
dl_links = self._get_dl_links(beatmapset['id'], beatmap['id'])
msg += "{}. [{} - {} [{}]]({}) ({} times) [[download]({})]\n".format(count,
beatmapset['artist'], beatmapset['title'], beatmap['version'], url,
ct_beatmap['count'], dl_links[0])
count += 1
em.description = msg
return em
async def _create_first_embed(self, ctx, user, gamemode=0, api='bancho'):
server = ctx.message.guild
server_user = ctx.message.author
info = soup.find("script", {"id": "json-extras"}, type='application/json')
web_data = json.loads(info.text)
firsts = web_data['scoresFirsts']
# info
user_url = 'https://osu.ppy.sh/u/{}'.format(user['user_id'])
profile_url ='https://a.ppy.sh/{}'.format(user['user_id'])
em = discord.Embed(description='', colour=server_user.colour)
em.set_author(name="First Place Maps for {}".format(user['username']), icon_url = profile_url, url = user_url)
colour = (server_user.colour.r/255, server_user.colour.g/255, server_user.colour.b/255)
msg = ''
for ft_info in firsts:
beatmap = ft_info['beatmap']
beatmapset = ft_info['beatmapset']
dl_links = self._get_dl_links(beatmapset['id'], beatmap['id'])
url = 'https://osu.ppy.sh/b/' + str(beatmap['id'])
if ft_info['pp']:
pp = float(ft_info['pp'])
pp = "{:.1f}".format(pp)
else:
pp = "0"
msg += "▸ [{} - {} [{}]]({}) (`{}`|`{}pp`|+`{}`) [[download]({})]\n".format(
beatmapset['artist'], beatmapset['title'], beatmap['version'], url,
ft_info['rank'], pp, ''.join(ft_info['mods']), dl_links[0])
if msg == '':
msg = "None."
em.description = msg
return em
async def _create_favorites_embed(self, ctx, user, gamemode=0, api='bancho'):
server = ctx.message.guild
server_user = ctx.message.author
info = soup.find("script", {"id": "json-extras"}, type='application/json')
web_data = json.loads(info.text)
favorite_maps = web_data['favouriteBeatmapsets']
# info
user_url = 'https://osu.ppy.sh/u/{}'.format(user['user_id'])
profile_url ='https://a.ppy.sh/{}'.format(user['user_id'])
em = discord.Embed(description='', colour=server_user.colour)
em.set_author(name="Favorite Maps for {}".format(user['username']), icon_url = profile_url, url = user_url)
colour = (server_user.colour.r/255, server_user.colour.g/255, server_user.colour.b/255)
msg = ''
for fav_info in favorite_maps:
dl_links = self._get_dl_links(fav_info['id'], fav_info['beatmaps'][0]['id'])
url = 'https://osu.ppy.sh/s/' + str(fav_info['id'])
user_url = 'https://osu.ppy.sh/users/' + str(fav_info['user_id'])
msg += "▸ [{} - {}]({}) by [{}]({}) [[download]({})]\n".format(
fav_info['artist'], fav_info['title'], url, fav_info['creator'], user_url, dl_links[0])
if msg == '':
msg = "None"
em.description = msg
return em
async def _create_beatmaps_embed(self, ctx, user, gamemode=0, api='bancho'):
server = ctx.message.guild
server_user = ctx.message.author
info = soup.find("script", {"id": "json-extras"}, type='application/json')
web_data = json.loads(info.text)
user_maps_approved = web_data['rankedAndApprovedBeatmapsets']
user_maps_unranked = web_data["unrankedBeatmapsets"]
user_maps_graveyard = web_data["graveyardBeatmapsets"]
gamemode_text = utils.get_gamemode_text(gamemode)
# info
user_url = 'https://osu.ppy.sh/u/{}'.format(user['user_id'])
profile_url ='https://a.ppy.sh/{}'.format(user['user_id'])
em = discord.Embed(description='', colour=server_user.colour)
em.set_author(name="Beatmaps created by {}".format(user['username']), icon_url = profile_url, url = user_url)
colour = (server_user.colour.r/255, server_user.colour.g/255, server_user.colour.b/255)
types = ['Ranked', 'Unranked', 'Graveyard']
for status in types:
if status == 'Ranked':
maps = user_maps_approved
elif status == "Unranked":
maps = user_maps_unranked
else:
maps = user_maps_graveyard
msg = ''
for beatmap in maps:
dl_links = self._get_dl_links(beatmap['id'], beatmap['beatmaps'][0]['id'])
url = 'https://osu.ppy.sh/s/' + str(beatmap['id'])
msg += "▸ [{} - {}]({}) (Maps: `{}` ♥: `{}`) [[download]({})]\n".format(
beatmap['artist'], beatmap['title'], url, len(beatmap['beatmaps']), beatmap['favourite_count'],
dl_links[0])
if not maps:
msg += "None currently.\n"
em.add_field(name = "__{} Maps ({})__\n".format(status, len(maps)), value = msg, inline = False)
return em
# ------------------------- some database commands -------------------
async def update_user_servers_list(self, discord_user, server):
# update discord user server and username
caller_user = await self.players.find_one(
{"user_id":str(discord_user.id)})
if caller_user:
# print("found user")
if "servers" in caller_user and caller_user["servers"]:
if str(server.id) not in caller_user["servers"]:
caller_user["servers"].append(str(server.id))
# print("adding to list")
else:
caller_user["servers"] = [str(server.id)]
# print("single server")
await self.players.update_many({"user_id":str(discord_user.id)},
{"$set": {"servers": caller_user["servers"]}})
"""
async def update_user_servers_leave(self, member):
db_user = db.osu_settings.find_one({"user_id":member.id})
if not db_user:
return
if "servers" in db_user and member.str(server.id) in db_user["servers"]:
try:
db_user["servers"].remove(member.str(server.id))
except:
pass
else:
db_user["servers"] = []
await self.players.update_one({"user_id":member.id},
{"$set": {"servers":db_user["servers"]}})
async def update_user_servers_join(self, member):
db_user = db.osu_settings.find_one({"user_id":member.id})
if not db_user:
None
if "servers" in db_user and member.str(server.id) not in db_user["servers"]:
db_user["servers"].append(member.str(server.id))
else:
db_user["servers"] = [member.str(server.id)]
await self.players.update_one({"user_id":member.id},
{"$set": {"servers":db_user["servers"]}})
"""
# -------------------------- live map feed ---------------------------------------------
@checks.mod_or_permissions(manage_guild=True)
@commands.command(name="mapfeed", aliases = ['mf', 'feed'])
async def mapfeed(self, ctx, *options):
"""
Set channel for map feeds (default all maps/modes). Sets channel you execute | |
{{ versions: {} }}}}".format(val)
else:
version_policy_str = "{ all { }}"
# Use a different model name for the non-batching variant
model_name = tu.get_model_name(
"libtorch_nobatch" if max_batch == 0 else "libtorch", input_dtype,
output0_dtype, output1_dtype)
config_dir = models_dir + "/" + model_name
config = '''
name: "{}"
platform: "pytorch_libtorch"
max_batch_size: {}
version_policy: {}
input [
{{
name: "INPUT__0"
data_type: {}
dims: [ {} ]
}},
{{
name: "INPUT__1"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT__0"
data_type: {}
dims: [ {} ]
label_filename: "output0_labels.txt"
}},
{{
name: "OUTPUT__1"
data_type: {}
dims: [ {} ]
}}
]
'''.format(model_name, max_batch, version_policy_str,
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(output0_dtype),
tu.shape_to_dims_str(output0_shape),
np_to_model_dtype(output1_dtype),
tu.shape_to_dims_str(output1_shape))
try:
os.makedirs(config_dir)
except OSError as ex:
pass # ignore existing dir
with open(config_dir + "/config.pbtxt", "w") as cfile:
cfile.write(config)
with open(config_dir + "/output0_labels.txt", "w") as lfile:
for l in range(output0_label_cnt):
lfile.write("label" + str(l) + "\n")
def create_openvino_modelfile(models_dir,
max_batch,
model_version,
input_shape,
output0_shape,
output1_shape,
input_dtype,
output0_dtype,
output1_dtype,
swap=False):
batch_dim = () if max_batch == 0 else (max_batch,)
if not tu.validate_for_openvino_model(
input_dtype, output0_dtype, output1_dtype, batch_dim + input_shape,
batch_dim + output0_shape, batch_dim + output1_shape):
return
# Create the model
model_name = tu.get_model_name(
"openvino_nobatch" if max_batch == 0 else "openvino", input_dtype,
output0_dtype, output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
in0 = ng.parameter(shape=batch_dim + input_shape,
dtype=input_dtype,
name="INPUT0")
in1 = ng.parameter(shape=batch_dim + input_shape,
dtype=input_dtype,
name="INPUT1")
r0 = ng.add(in0, in1) if not swap else ng.subtract(in0, in1)
r1 = ng.subtract(in0, in1) if not swap else ng.add(in0, in1)
result0 = ng.reshape(r0, batch_dim + output0_shape, special_zero=False)
result1 = ng.reshape(r1, batch_dim + output1_shape, special_zero=False)
op0 = ng.convert(result0, destination_type=output0_dtype, name="OUTPUT0")
op1 = ng.convert(result1, destination_type=output1_dtype, name="OUTPUT1")
function = ng.impl.Function([op0, op1], [in0, in1], model_name)
ie_network = IENetwork(ng.impl.Function.to_capsule(function))
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
ie_network.serialize(model_version_dir + "/model.xml",
model_version_dir + "/model.bin")
def create_openvino_modelconfig(models_dir, max_batch, model_version,
input_shape, output0_shape, output1_shape,
input_dtype, output0_dtype, output1_dtype,
output0_label_cnt, version_policy):
batch_dim = () if max_batch == 0 else (max_batch,)
if not tu.validate_for_openvino_model(
input_dtype, output0_dtype, output1_dtype, batch_dim + input_shape,
batch_dim + output0_shape, batch_dim + output1_shape):
return
# Unpack version policy
version_policy_str = "{ latest { num_versions: 1 }}"
if version_policy is not None:
type, val = version_policy
if type == 'latest':
version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(
val)
elif type == 'specific':
version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
else:
version_policy_str = "{ all { }}"
# Use a different model name for the non-batching variant
model_name = tu.get_model_name(
"openvino_nobatch" if max_batch == 0 else "openvino", input_dtype,
output0_dtype, output1_dtype)
config_dir = models_dir + "/" + model_name
# platform is empty and backend is 'openvino' for openvino model
config = '''
name: "{}"
backend: "openvino"
max_batch_size: {}
version_policy: {}
input [
{{
name: "INPUT0"
data_type: {}
dims: [ {} ]
}},
{{
name: "INPUT1"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT0"
data_type: {}
dims: [ {} ]
label_filename: "output0_labels.txt"
}},
{{
name: "OUTPUT1"
data_type: {}
dims: [ {} ]
}}
]
instance_group [ {{ kind: KIND_CPU }}]
'''.format(model_name, max_batch, version_policy_str,
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(output0_dtype),
tu.shape_to_dims_str(output0_shape),
np_to_model_dtype(output1_dtype),
tu.shape_to_dims_str(output1_shape))
try:
os.makedirs(config_dir)
except OSError as ex:
pass # ignore existing dir
with open(config_dir + "/config.pbtxt", "w") as cfile:
cfile.write(config)
with open(config_dir + "/output0_labels.txt", "w") as lfile:
for l in range(output0_label_cnt):
lfile.write("label" + str(l) + "\n")
def create_models(models_dir,
input_dtype,
output0_dtype,
output1_dtype,
input_shape,
output0_shape,
output1_shape,
output0_label_cnt,
version_policy=None):
model_version = 1
# Create two models, one that supports batching with a max-batch
# of 8, and one that does not with a max-batch of 0
if FLAGS.graphdef:
# max-batch 8
create_graphdef_modelconfig(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_graphdef_modelfile(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_graphdef_modelconfig(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_graphdef_modelfile(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
if FLAGS.savedmodel:
# max-batch 8
create_savedmodel_modelconfig(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_savedmodel_modelfile(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_savedmodel_modelconfig(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_savedmodel_modelfile(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
if FLAGS.tensorrt:
# max-batch 8
suffix = ()
if input_dtype == np.int8 or output0_dtype == np.int8 or output1_dtype == np.int8:
suffix = (1, 1)
create_plan_modelconfig(models_dir, 8, model_version,
input_shape + suffix, output0_shape + suffix,
output1_shape + suffix, input_dtype,
output0_dtype, output1_dtype, output0_label_cnt,
version_policy)
create_plan_modelfile(models_dir, 8, model_version,
input_shape + suffix, output0_shape + suffix,
output1_shape + suffix, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_plan_modelconfig(models_dir, 0, model_version,
input_shape + suffix, output0_shape + suffix,
output1_shape + suffix, input_dtype,
output0_dtype, output1_dtype, output0_label_cnt,
version_policy)
create_plan_modelfile(models_dir, 0, model_version,
input_shape + suffix, output0_shape + suffix,
output1_shape + suffix, input_dtype,
output0_dtype, output1_dtype)
if -1 in input_shape:
# models for testing optimization profiles
create_plan_modelconfig(models_dir,
8,
model_version,
input_shape + suffix,
output0_shape + suffix,
output1_shape + suffix,
input_dtype,
output0_dtype,
output1_dtype,
output0_label_cnt,
version_policy,
min_dim=4,
max_dim=32)
create_plan_modelfile(models_dir,
8,
model_version,
input_shape + suffix,
output0_shape + suffix,
output1_shape + suffix,
input_dtype,
output0_dtype,
output1_dtype,
min_dim=4,
max_dim=32)
if FLAGS.onnx:
# max-batch 8
create_onnx_modelconfig(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype, output0_label_cnt,
version_policy)
create_onnx_modelfile(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_onnx_modelconfig(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype, output0_label_cnt,
version_policy)
create_onnx_modelfile(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
if FLAGS.libtorch:
# max-batch 8
create_libtorch_modelconfig(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_libtorch_modelfile(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_libtorch_modelconfig(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_libtorch_modelfile(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
if FLAGS.openvino:
# max-batch 8
create_openvino_modelconfig(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_openvino_modelfile(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_openvino_modelconfig(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_openvino_modelfile(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
if FLAGS.ensemble:
for pair in emu.platform_types_and_validation():
if not pair[1](input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
continue
config_input_shape = input_shape
config_output0_shape = output0_shape
config_output1_shape = output1_shape
if pair[0] == "plan":
if len(input_shape) == 1 and input_dtype == np.int8:
config_input_shape = (input_shape[0], 1, 1)
if len(output0_shape) == 1 and output0_dtype == np.int8:
config_output0_shape = (output0_shape[0], 1, 1)
if len(output1_shape) == 1 and output1_dtype == np.int8:
config_output1_shape = (output1_shape[0], 1, 1)
# max-batch 8
emu.create_ensemble_modelconfig(pair[0], models_dir, 8,
model_version, config_input_shape,
config_output0_shape,
config_output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
emu.create_ensemble_modelfile(pair[0], models_dir, 8, model_version,
config_input_shape,
config_output0_shape,
config_output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
emu.create_ensemble_modelconfig(pair[0], models_dir, 0,
model_version, config_input_shape,
config_output0_shape,
config_output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
emu.create_ensemble_modelfile(pair[0], models_dir, 0, model_version,
config_input_shape,
config_output0_shape,
config_output1_shape, input_dtype,
output0_dtype, output1_dtype)
def create_fixed_models(models_dir,
input_dtype,
output0_dtype,
output1_dtype,
version_policy=None):
input_size = 16
create_models(models_dir, input_dtype, output0_dtype, output1_dtype,
(input_size,), (input_size,), (input_size,), input_size,
version_policy)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--models_dir',
type=str,
required=True,
help='Top-level model directory')
parser.add_argument('--graphdef',
required=False,
action='store_true',
help='Generate GraphDef models')
parser.add_argument('--savedmodel',
required=False,
action='store_true',
help='Generate SavedModel models')
parser.add_argument('--tensorrt',
required=False,
action='store_true',
help='Generate TensorRT PLAN models')
parser.add_argument('--onnx',
required=False,
action='store_true',
help='Generate Onnx Runtime Onnx models')
parser.add_argument(
'--onnx_opset',
type=int,
required=False,
default=0,
help='Opset used for Onnx models. Default is to use ONNXRT default')
parser.add_argument('--libtorch',
required=False,
action='store_true',
help='Generate Pytorch LibTorch models')
parser.add_argument('--openvino',
required=False,
action='store_true',
help='Generate Openvino models')
parser.add_argument('--variable',
required=False,
action='store_true',
                        help='Use variable-shape tensors for input/output')
parser.add_argument('--ensemble',
required=False,
action='store_true',
help='Generate ensemble models against the models' +
' in all platforms. Note that the models generated' +
' are not completed.')
FLAGS, unparsed = parser.parse_known_args()
if FLAGS.graphdef or FLAGS.savedmodel:
import tensorflow as tf
from tensorflow.python.framework import graph_io
if FLAGS.tensorrt:
import tensorrt as trt
if FLAGS.onnx:
import onnx
if FLAGS.libtorch:
import torch
from torch import nn
if FLAGS.openvino:
from openvino.inference_engine import IENetwork
import ngraph as ng
import test_util as tu
# Tests with models that accept fixed-shape input/output tensors
if not FLAGS.variable:
create_fixed_models(FLAGS.models_dir, np.int8, np.int8, np.int8,
('latest', 1))
create_fixed_models(FLAGS.models_dir, np.int16, np.int16, np.int16,
('latest', 2))
create_fixed_models(FLAGS.models_dir, np.int32, np.int32, np.int32,
('all', None))
create_fixed_models(FLAGS.models_dir, np.int64, np.int64, np.int64)
create_fixed_models(FLAGS.models_dir, np.float16, np.float16,
np.float16, ('specific', [
1,
]))
create_fixed_models(FLAGS.models_dir, np.float32, np.float32,
np.float32, ('specific', [1, 3]))
create_fixed_models(FLAGS.models_dir, np.float16, np.float32,
np.float32)
create_fixed_models(FLAGS.models_dir, np.int32, np.int8, np.int8)
create_fixed_models(FLAGS.models_dir, np.int8, np.int32, np.int32)
create_fixed_models(FLAGS.models_dir, np.int32, np.int8, np.int16)
create_fixed_models(FLAGS.models_dir, np.int32, np.float32, np.float32)
| |
    given explicitly
u,v = np.atleast_2d(u,v); N=u.shape[1]; # K... number of iso-q-lines
else:
u,v = self.__resample_aperture_border(N)# aperture borders, shape (K,N)
if vp is None: vp = (self.Npx/2,self.Npx);# set reasonable start value
trapz = trafo.TrapezoidalDistortion(vp); # initialize trafo
param0= list(vp)+[0]*len(u); # param: [vp, u0]
# deviation of given trapezoidal distortion from observed values u,v
def residuals(param,u,v,T,leastsq=True):
T.vp = param[:2]; u0 = param[2:]; # fit parameters
return self.__residuals(u,v,T,u0,leastsq);
# perform fitting
fit,_ = opt.leastsq(residuals, param0, args=(u,v,trapz));
trapz,u0,res = residuals(fit,u,v,trapz,False);
self.__debug_distortion(trapz,u,v,u0,res,
title="DEBUG: fit_trapezoidal_distortions()");
    # save results, slit borders in slit coordinates
self.s2u = trapz;
self.s2u.sl=self.u2x.ul;
self.s2u.sr=self.u2x.ur; # same as borders in normalized coords
# history
self.history.extend(self.s2u.info(3).split("\n"));
return trapz,u0;
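  # Hedged sketch (not part of the original class): with vanishing point
  # vp=(u_vp,v_vp) the trapezoidal model maps slit coordinates (s,t) to the
  # observed coordinates via u = s + (u_vp - s)*t/v_vp, v = t, which is
  # consistent with the initial polynomial coefficients set up in
  # fit_polynomial_distortions() below (C00=0, C01=u_vp/v_vp, C10=1, C11=-1/v_vp).
  @staticmethod
  def _trapezoid_forward_sketch(s,t,vp):
    " illustrative forward model only; the actual trafo is trafo.TrapezoidalDistortion "
    u = s + (vp[0]-s)*t/float(vp[1]);
    v = t;
    return u,v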
def fit_polynomial_distortions(self,N=None,I=1,J=1,c0=None,const='fixed_slit'):
"""
Least-square fitting of all iso-q-lines (u,v) by polynomial functions
of order J along the energy axis t=v with the restriction, that the
coefficients of different iso-q-lines can be expressed as polynoms in q
of order I. This corresponds to a transformation T:(s,t)->(u,v)
u(s,t) = sum_ij C_ij s^i t^j; v(s,t) = t.
    Note that the exact position s_k of the k'th iso-q-line at E=0
    (v=0) is also not known exactly and is included in the fit. As the fitting
    parameters s_k and C_ij are not independent, we add further constraints
according to the parameter 'const'.
N ... (opt) number of sampling points along y-direction for each image
J ... (opt) degree of fit u = sum_j c_j v^j for a single aperture border
I ... (opt) degree of polynomial c_j = sum_i C_ij s^i for coefficients
c0 ... (opt) initial guess for the coefficients C_ij, overwrites I,J
const.. (opt) constraints for fitting parameters:
'fixed_slit': T(s,0) = (s,0), the trafo will not
change the coordinates at the slit position t=0;
'aperture_calibration': T(0,0) = (0,0) to avoid shifts
and constant aperture size s[k/2+2]-s[k/2]=1
RETURN (poly,u)
poly ... PolynomialDistortion object
s_k ... 1d-array; slit coordinate for each iso-q-line
"""
# data + initial parameters
u,v = self.__resample_aperture_border(N); # aperture borders, shape (K,N)
# fit approximate trapezoidal distortions, used to
# -> calculate initial parameters for polynomial fit
# -> distinguish between multiple solutions in inverse() of PolynomialDistortion
self.verbosity-=10;
    trapz,u0_trapz = self.fit_trapezoidal_distortions(u=u, v=v);
self.verbosity+=10;
# initial fit parameters
if c0 is not None: c0 = np.asarray(c0,dtype=float);
else:
c0 = np.zeros((I+1,J+1),dtype=float); # i=0,...,I; j=0,...;J
c0[0,:2] = [0, trapz.vp[0]/trapz.vp[1]];
c0[1,:2] = [1, -1/trapz.vp[1] ];
I = c0.shape[0]-1; J = c0.shape[1]-1;
K = u.shape[0]; # K=2Nap (# of left+right aperture borders)
# 1. FIXED-SLIT CONSTRAINTS
# restrictions for fitting C_ij and s_k:
# T(s,0)=(s,0) <=> C_i0 = 1 if i==1 else 0
# remaining fit parameters:
# param[0:(I+1)*J] ... C_ij for j=1,...,J
# param[(I+1)*J:(I+1)*J+K] ... s_k for k=0,...,K-1
if const=='fixed_slit':
c0[:,0]= 0; c0[1,0] = 1; # => T(s,0) = (s,0)
poly = trafo.PolynomialDistortion(c0,T=trapz); # initialize trafo
param0 = np.hstack((c0[:,1:].flatten(),u0_trapz)); # param: [c0, u0]
def residuals(param,u,v,T,leastsq=True):
T.coeff[:,1:] = param[:(I+1)*J].reshape(I+1,J);
s_k = param[(I+1)*J:];
return self.__residuals(u,v,T,s_k,leastsq);
# 2. FIXED APERTURE SIZE
# restrictions for fitting C_ij and s_k:
# T(0,0)=(0,0) <=> C_00 = 0;
# s[k/2+1] - s[k/2] = 1 for all k
# Note: k=0 mod K/2 for left border, k=1 mod K/2 for right border
# remaining fit parameters:
# param[0:Nc] ... C_ij for all i,j except C_00, Nc=(I+1)(J+1)-1
# param[Nc:Nc+K/2] ... s_k for k=0,2,...,K
elif const=='aperture_calibration':
assert K%2==0; # even number of lines required
poly = trafo.PolynomialDistortion(c0,T=trapz);
param0 = np.hstack((c0.flatten()[1:], u0_trapz[::2]));
DS = np.mean(u0_trapz[1::2]-u0_trapz[::2]);
def residuals(param,u,v,T,leastsq=True):
T.coeff = np.insert(param[:(I+1)*(J+1)-1],0,0).reshape((I+1,J+1)); # C_00=0
s_k = np.asarray([[s,s+DS] for s in param[(I+1)*(J+1)-1:]]).flat;
# set s[k+1]-s[k]=DS instead of 1 such that the total slit width remains close
# to 1 like in the case of const='fixed slit' (exact value is not important)
return self.__residuals(u,v,T,s_k,leastsq);
else: raise ValueError("Parameter const='%s' is not allowed."%const);
# perform fitting
fit,_ = opt.leastsq(residuals, param0, args=(u,v,poly));
#fit = param0
poly,s_k,res = residuals(fit,u,v,poly,False);
self.__debug_distortion(poly,u,v,s_k,res,
title="DEBUG: fit_polynomial_distortions(), %s, I=%d, J=%d"%(const,I,J));
# save results and slit borders in slit coordinates
self.s2u = poly;
self.s2u.sl,_=poly.inverse(self.u2x.ul,0);
self.s2u.sr,_=poly.inverse(self.u2x.ur,0);
# history
self.history.extend(self.s2u.info(3).split("\n"));
self.history.append("|- I=%d, J=%d, const=%s"%(I,J,const));
return poly,s_k
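  # Hedged sketch (not part of the original class): the fitted polynomial
  # distortion evaluates u(s,t) = sum_ij C_ij s^i t^j with v(s,t) = t; the
  # helper below spells out that double sum with plain numpy for a coefficient
  # matrix C of shape (I+1,J+1).
  @staticmethod
  def _polynomial_forward_sketch(s,t,C):
    " illustrative forward model only; the actual trafo is trafo.PolynomialDistortion "
    s = np.asarray(s,dtype=float); t = np.asarray(t,dtype=float);
    u = np.zeros_like(s);
    for i in range(C.shape[0]):
      for j in range(C.shape[1]):
        u = u + C[i,j] * s**i * t**j;
    return u,t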
def linearize_qaxis(self,N=20,ord=2,dq=1):
"""
    Fit the transformation (q,r)->(s,t) that linearizes the q-axis.
N ... (opt) number of sampling points along y-direction for each image
ord ... (opt) order of fitting polynomial
dq ... (opt) size of the aperture q-coordinates
RETURNS aperture size and position in px, shape (k, n)
"""
# 1. get undistorted coordinates of aperture borders
u,v = self.__resample_aperture_border(N); # aperture borders, shape (K,N)
s,t = self.s2u.inverse(u,v); # correct distortions
    # 2. calculate apparent aperture size
s = s.reshape(self.Nap,2,N); # shape (k,2,N)
size = s[:,1] - s[:,0]; # right-left
pos = 0.5*(s[:,1]+s[:,0]); # (right+left)/2
# 3. fit polynomial (common for all v-values)
size_dispersion = np.poly1d(np.polyfit(pos.flatten(),size.flatten(),ord));
if self.verbosity>2: # DEBUG: plot aperture size + quadratic fit
smin,smax,slen = s.min(),s.max(),s.max()-s.min();
x = np.mgrid[smin-0.1*slen:smax+0.1*slen:100j];
fig=plt.figure();
plt.title("DEBUG: Normalized aperture size for different y");
      plt.gca().set_color_cycle([plt.cm.winter(1.*i/N) for i in range(N)]); # continuous colors
plt.plot(pos,size,'o',alpha=0.5);
plt.plot(x,size_dispersion(x),'k-');
plt.xlabel("slit position s");
plt.ylabel("appearent aperture size ds");
self.__dbg_fig.append(fig);
# 4. create transformation object (q,r) -> (s,t)
self.q2s=trafo.NonlinearDispersion(size_dispersion,scale=dq);
# 5. write history
self.history.extend(self.q2s.info(3).split('\n'));
# TEST: check positive dispersion within the slit
if self.q2s.xrange[0]>=self.s2u.sl or self.q2s.xrange[1]<=self.s2u.sr:
print self.q2s.info(3);
plt.show();
raise ValueError("Unexpected xrange in QDispersion.linearize_qaxis().\n"\
"Check polynomial fit of appearent aperture size using verbosity>2");
if self.verbosity>2:
print self.q2s.info(3);
# TEST: aperture size should be roughly dq in q coordinates
q,r=self.q2s.inverse(s,t.reshape(self.Nap,2,N));
qsize = np.mean(q[:,1]-q[:,0],axis=1); # average over energies
# - deviation of single aperture from dq by >5%
if not np.allclose(qsize,dq,rtol=0.05) and self.verbosity>0:
print "WARNING: in QDispersion.linearize_qaxis(): \n"+ \
" calculated aperture size deviates by more than 5% from scale dq: \n"+ \
" dq: %8.3f, %8.3f < qsize < %8.3f \n " % (dq,qsize.min(),qsize.max());
# - variation of aperture size
if np.std(qsize)/np.mean(qsize)>0.01 and self.verbosity>0: # rel error > 1%
print "WARNING: in QDispersion.linearize_qaxis(): \n"+ \
" calculated aperture size varies by more than 1%: \n"+ \
" mean(dq): %8.3g, std(dq): %8.3g, variation: %5.2f%%\n"\
%(np.mean(qsize),np.std(qsize),100*np.std(qsize)/np.mean(qsize));
return size,pos
def get_q2u(self):
"""
RETURN combined transformation from linearized coordinates
to normalized slit coordinates (q,r)->(s,t)->(u,v)
"""
return trafo.Seq(self.s2u,self.q2s);
def get_absolute_qs(self,line,verbosity=3):
"""
OLD!
determine two points on y-axis with known q-distance
(low-loss w-q reference with central spot and bragg spot)
line ... 1D array with N-points containing two peaks
"""
    N = len(line); x = np.arange(N,dtype='float');
ref=gaussfilt1D(line, 5); peaks=[];
for i in range(2): # fit 2 peaks
imax = np.argmax(ref); # initial guess for peak
p, pconv = \
opt.curve_fit(models.gauss,x,ref,p0=(imax, np.sum(ref[imax-5:imax+5]), 10));
peaks.append(p); # gauss fit
imin = max(p[0]-5*p[2],0);
imax = min(p[0]+5*p[2],N);
ref[imin:imax]=0; # remove peak from line (5*fwhm around x0)
if verbosity>2:
plt.figure(); plt.title("DEBUG: Fit q-reference");
plt.plot(x,line,'k');
plt.plot(x,models.gauss(x,*peaks[0]),'r');
plt.plot(x,models.gauss(x,*peaks[1]),'g');
return peaks[0][0], peaks[1][0];
def get_status(self):
return "\n".join(self.history);
def calibrate_qaxis(q2s,sl,sr,G):
"""
Calibration of q-axis with two symmetric Bragg spots -G,G.
    q2s   ... NonlinearDispersion object mapping linearized q to slit coordinates
sl,sr ... slit coordinates of -G,G Bragg spot
G ... length of G in reciprocal units [1/A]
Note: we test for consistency of sl and sr, the direct beam is at s=0;
RETURNS: rescaled trafo q2s
"""
# calculate linearized coordinates corresponding to u-values
Q2s = copy.deepcopy(q2s);
ql,_= q2s.inverse(sl,0);
qr,_= q2s.inverse(sr,0);
assert ql < 0 and qr > 0;
assert np.allclose( (0,0), q2s.inverse(0,0) ); # direct beam at coordinate u=0=q
# calculate scaling factor and check consistency
q =(qr-ql)/2.;
scale = G/q;
Q2s.scale_u(scale); # change scale in NonlinearDispersion
# check consistency (ql vs qr)
rel_err=np.abs(qr-q)/q;
if rel_err > 0.01 : # relative error of 1%
print "WARNING in calibrate_qaxis(): left and right q-vector deviate:"
print " ql=%.3f, qr=%.3f, rel_err=%.1f%% " %(scale*ql,scale*qr, rel_err*100)
return Q2s;
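# Hedged numeric sketch (hypothetical values, not part of the module): the
# rescaling done in calibrate_qaxis() boils down to scale = G/((qr-ql)/2).
def _example_q_calibration(ql=-2.1, qr=1.9, G=1.70):
  " with these numbers q=2.0, scale=0.85 and rel_err=0.05, so the 1% warning above would fire "
  q = (qr-ql)/2.;
  scale = G/q;
  rel_err = np.abs(qr-q)/q;
  return scale, rel_err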
def fit_aperture_borders(ap_series,illu_ref=None,reverse_y=False,verbosity=1,offset=0,**kwargs):
" wrapper for backward compatibility "
QDisp=QDispersion(ap_series, illu_ref,reverse_y=reverse_y,verbosity=verbosity);
QDisp.crop_img(ymin=offset);
return QDisp.fit_aperture_borders(**kwargs);
# -- main ----------------------------------------
if __name__ == '__main__':
try:
# filenames
    aperture_files = ["../tests/qseries%d.tif" % (i) for i in range(1,10) if i != 2];
ref_illumination = "../tests/qreference.tif";
# fitting aperture borders + normalization
QDisp=QDispersion(aperture_files, | |
'contour'], {}, ''),
('error_bars', 'geom_errorbar', ['x', 'ymin', 'ymax'], ['alpha', 'color', 'group', 'linetype', 'size', 'width', 'position'], {'width': 0.25}, ''),
('error_barsh', 'geom_errorbarh', ['x', 'y', 'xmin', 'xmax'], ['alpha', 'color', 'group', 'linetype', 'size', 'width'], {'width': 0.25}, ''),
('freq_poly', 'geom_freqpoly', ['x'], ['alpha', 'color', 'linetype', 'size'], {}, ''),
('hex', 'geom_hex', ['x', 'y'], ['alpha', 'color', 'fill', 'size'], {}, ''),
    # ('histogram', this is its own function
    ('histogram', 'geom_histogram', ['x', 'y'], ['color', 'group', 'fill', 'position', 'add_text', 'binwidth', 'alpha', 'size', 'stat'], {'y': '..count..', 'position': 'dodge', 'stat': 'bin'}, ''),
(('horizontal_line', 'horizontal_bar', 'hline'), 'geom_hline', ['yintercept'], ['alpha', 'color', 'linetype', 'size'], {'alpha': 0.5, 'color': 'black', 'size': 1}, 'Renamed hline'),
    ('label', 'geom_label', ['x', 'y', 'label'], ['alpha', 'angle', 'color', 'family', 'fontface', 'hjust', 'vjust', 'lineheight', 'size', 'vjust', 'parse', 'nudge_x', 'nudge_y', 'label.padding', 'label.r', 'label.size', 'show.legend', 'check_overlap', 'position'], {'position': 'identity'}, ''),
('line', 'geom_line', ['x', 'y'], ['color', 'group', 'shape', 'alpha', 'size', 'stat', 'fun.y', 'linetype'], {}, ''),
('linerange', 'geom_linerange', ['x', 'ymax', 'ymin'], ['alpha', 'color', 'linetype', 'size'], {}, ''),
('map', 'geom_map', ['map_id'], ['alpha', 'color', 'fill', 'linetype', 'size'], {}, ''),
('path', 'geom_path', ['x', 'y'], ['alpha', 'color', 'fill', 'linetype', 'size', 'group'], {}, ''),
(('point', 'scatter'), 'geom_point', ['x', 'y'], ['color', 'group', 'shape', 'size', 'alpha', 'stat', 'fun.y', 'position'], {}, ''),
('jitter', 'geom_jitter', ['x', 'y'], ['color', 'group', 'shape', 'size', 'alpha', 'stat', 'fun.y', 'position'], {}, ''),
    ('pointrange', 'geom_pointrange', ['x', 'y', 'ymin', 'ymax'], ['alpha', 'color', 'fill', 'linetype', 'shape', 'size'], {}, ''),
('polygon', 'geom_polygon', ['x', 'y'], ['alpha', 'color', 'fill', 'linetype', 'size'], {}, ''),
('quantile', 'geom_quantile', ['x', 'y'], ['alpha', 'color', 'linetype', 'size', 'weight'], {}, ''),
('raster', 'geom_raster', ['x', 'y'], ['fill', 'alpha'], {}, ''),
('rect', 'geom_rect', ['xmin', 'xmax', 'ymin', 'ymax'], ['alpha', 'color', 'fill', 'linetype', 'size'], {'alpha': 1}, ''),
('ribbon', 'geom_ribbon', ['x', 'ymin', 'ymax'], ['alpha', 'color', 'fill', 'linetype', 'size', 'position'], {}, ''),
('rug', 'geom_rug', [], ['sides'], {'sides': 'bl'}, ''),
('segment', 'geom_segment', ['x', 'xend', 'y', 'yend'], ['alpha', 'color', 'linetype', 'size'], {'size': 0.5}, ''),
    ('smooth', 'geom_smooth', ['x', 'y'], ['alpha', 'color', 'fill', 'linetype', 'size', 'weight', 'method', 'group'], {}, ''),
('step', 'geom_step', ['x', 'y'], ['direction', 'stat', 'position', 'alpha', 'color', 'linetype', 'size'], {}, ''),
    ('text', 'geom_text', ['x', 'y', 'label'], ['alpha', 'angle', 'color', 'family', 'fontface', 'hjust', 'vjust', 'lineheight', 'size', 'vjust', 'parse', 'nudge_x', 'nudge_y', 'label.padding', 'label.r', 'label.size', 'show.legend', 'check_overlap', 'position'], {'position': 'identity'}, ''),
('tile', 'geom_tile', ['x', 'y'], ['alpha', 'color', 'fill', 'size', 'linetype', 'stat'], {}, ''),
('violin', 'geom_violin', ['x', 'y'], ['alpha', 'color', 'fill', 'linetype', 'size', 'weight', 'scale', 'stat', 'position', 'trim'], {'stat': 'ydensity'}, ''),
(('vertical_line', 'vertical_bar', 'vline'), 'geom_vline', ['xintercept'], ['alpha', 'color', 'size', 'linetype'], {'alpha': 0.5, 'color': 'black', 'size': 1}, ''),
('stacked_bar_plot', 'geom_bar', ['x', 'y', 'fill'], ['position', 'stat'], {'position': 'stack', 'stat': 'identity'}, ''), # do we still need this?
# annotations
('annotation_logticks', 'annotation_logticks', [], ['base', 'sides', 'scaled', 'short', 'mid', 'long'],
{
'base': 10,
'sides': "bl",
'scaled': True,
'short': robjects.r('unit')(0.1, "cm"),
'mid': robjects.r('unit')(0.2, "cm"),
'long': robjects.r('unit')(0.3, "cm"),
}, ''),
)
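# Hedged usage sketch (hypothetical column names, not part of this module): each
# tuple above is turned into an add_<name>() method on Plot by
# _add_geom_methods(), taking the required aesthetics first and the optional
# ones as keyword arguments; the ('point', 'scatter') entry is assumed to yield
# both add_point() and add_scatter().
def _example_scatter(df):
    """Minimal sketch: scatter plot of two dataframe columns written to a PNG."""
    p = Plot(df)
    p.add_scatter('x_column', 'y_column', color='group_column')
    p.render('example_scatter.png', width=4, height=3)
    return p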
class Plot(_RPlotBase):
def __init__(self, dataframe, *ignored):
"""Create a new ggplot2 object from DataFrame"""
load_r()
self.r = {}
self.r['ggplot'] = robjects.r['ggplot']
self.r['aes'] = robjects.r['aes']
if robjects.r("exists('ggplot2:::\"+.ggplot\"')")[0]:
self.r['add'] = robjects.r('ggplot2:::"+.ggplot"')
else:
self.r['add'] = robjects.r('ggplot2::"%+%"')
self.r['layer'] = robjects.r['layer']
self.r['facet_wrap'] = robjects.r['facet_wrap']
self.r['geom_text'] = robjects.r['geom_text']
self.r['ggsave'] = robjects.r['ggsave']
self.old_names = []
self.lab_rename = {}
self.dataframe = self._prep_dataframe(dataframe)
self._other_adds = []
self.to_rename = {}
self._add_geom_methods()
self.previous_mappings = {}
self.ipython_plot_width = ipython_plot_width
self.ipython_plot_height = ipython_plot_height
self.used_columns = set()
self.limitsize = True
self.default_theme()
self._log_y_scale = False
self._expected_y_scale = None
def default_theme(self):
        self.theme_grey()  # apply the default theme
def _build_plot(self):
if self._expected_y_scale is not None:
if ((self._expected_y_scale == 'log' and not self._log_y_scale) or
(self._expected_y_scale == 'normal' and self._log_y_scale)):
raise ValueError(
"Log/non log Y scale mismatch between add_alternating_background and scale_y"
)
plot = self.r['ggplot'](convert_dataframe_to_r(self.dataframe))
for obj in self._other_adds:
plot = self.r['add'](plot, obj)
for name, value in self.lab_rename.items():
plot = self.r['add'](plot,
robjects.r('labs(%s = "%s")' % (name, value)))
return plot
def render(self,
output_filename,
width=8,
height=6,
dpi=300,
din_size=None):
"""Save the plot to a file.
        If you set @din_size to A4, it will overwrite width and height with a portrait-oriented A4 sheet of paper
"""
if din_size == 'A4':
width = 8.267
height = 11.692
plot = self._build_plot()
output_filename = output_filename.replace(
'%', '%%'
) # R tries some kind of integer substitution on these, so we need to double the %
kwargs = {}
if output_filename.endswith('.png'):
kwargs['type'] = 'cairo'
self.r['ggsave'](
filename=output_filename,
plot=plot,
width=width,
height=height,
dpi=dpi,
limitsize=self.limitsize,
**kwargs)
def _prep_dataframe(self, df):
"""prepare the dataframe by renaming all the columns
        (we use this to get around R naming issues - the axes get labeled correctly later on)"""
if 'pydataframe.dataframe.DataFrame' in str(type(df)):
df = self._convert_pydataframe(df)
elif isinstance(df, dict):
df = pandas.DataFrame(df)
if isinstance(df.columns, pandas.MultiIndex):
df.columns = [' '.join(col).strip() for col in df.columns.values]
df = df.reset_index()
#df = dataframe.copy()
new_names = []
for name in df.columns:
if not name in self.old_names:
new_names.append(name)
self.old_names.extend(new_names)
rename = dict([(name, 'dat_%s' % self.old_names.index(name))
for name in df.columns])
df = df.rename(columns=rename)
return df
def _convert_pydataframe(self, pdf):
"""Compability shim for still being able to use old pydataframes with the new pandas interface"""
d = {}
for column in pdf.columns_ordered:
o = pdf.gcv(column)
if 'pydataframe.factors.Factor' in str(type(o)):
d[column] = pandas.Series(
pandas.Categorical(o.as_levels(), categories=o.levels))
else:
d[column] = o
return pandas.DataFrame(d)
def _translate_params(self, params):
"""Translate between the original dataframe names and the numbered ones we assign
to avoid r-parsing issues"""
aes_params = []
for aes_name, aes_column in params.items():
if aes_column in self.old_names:
self.used_columns.update([aes_column])
new_name = 'dat_%s' % self.old_names.index(aes_column)
aes_params.append('%s=%s' % (aes_name, new_name))
if aes_column in self.to_rename:
self._fix_axis_label(aes_name, new_name,
self.to_rename[aes_column])
else:
self._fix_axis_label(aes_name, new_name, aes_column)
            else:  # a fixed value
aes_params.append("%s=%s" % (aes_name, aes_column))
return aes_params
def _fix_axis_label(self, aes_name, new_name, real_name):
"""Reapply the correct (or new) labels to the axis, overwriting our dat_%i numbered dataframe
columns"""
which_legend = False
if aes_name == 'x':
which_legend = 'x'
elif aes_name == 'y':
which_legend = 'y'
elif aes_name == 'color' or aes_name == 'colour':
which_legend = 'colour'
elif aes_name == 'fill':
which_legend = 'fill'
elif aes_name == 'shape':
which_legend = 'shape'
elif aes_name == 'size':
which_legend = 'size'
elif aes_name == 'linetype':
which_legend = 'linetype'
if which_legend:
self.lab_rename[which_legend] = real_name
def _add(self, geom_name, required_mappings, optional_mappings, defaults,
args, kwargs, target):
"""The generic method to add a geom to the ggplot.
You need to call add_xyz (see _add_geom_methods for a list, with each variable mapping
        being one argument) with the respectively required parameters (see ggplot documentation).
You may optionally pass in an argument called data, which will replace the plot-global dataframe
for this particular geom
"""
mappings = {}
all_defined_mappings = required_mappings + optional_mappings
for a, b in zip(
all_defined_mappings, args
        ):  # so that you could in theory also pass the optional_mappings by position, after the required_mappings
mappings[a] = b
mappings.update(kwargs)
if 'data' in mappings:
data = mappings['data']
del mappings['data']
else:
data = None
for mapping in mappings:
if mapping not in required_mappings and mapping not in optional_mappings:
raise ValueError(
"%s does not take parameter %s" % (geom_name, mapping))
for mapping in required_mappings:
if mapping not in mappings:
if mapping in defaults:
if hasattr(
defaults[mapping],
'__call__',
):
mappings[mapping] = defaults[mapping](mappings)
else:
mappings[mapping] = defaults[mapping]
elif mapping in self.previous_mappings:
mappings[mapping] = self.previous_mappings[mapping]
else:
raise ValueError("Missing required mapping in %s: %s" %
(geom_name, mapping))
else:
self.previous_mappings[mapping] = mappings[mapping]
for mapping in optional_mappings:
if mapping not in mappings:
if mapping in defaults:
if hasattr(
defaults[mapping],
'__call__',
):
mappings[mapping] = defaults[mapping](mappings)
else:
mappings[mapping] = defaults[mapping]
else:
mappings[mapping] = None
self.reset_params(data)
for param in mappings:
self.parse_param(param, mappings[param])
if 'stat' in self.other_collection and 'y' in self.other_collection: # support ..count.. and so on
self.aes_collection['y'] = self.other_collection['y']
del self.other_collection['y']
if geom_name.startswith('annotation'):
target.append(robjects.r(geom_name)(**self.other_collection))
else:
target.append(
robjects.r(geom_name)(self._build_aesthetic(
self.aes_collection), **self.other_collection))
return self
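    # Hedged sketch (not part of the original class): _add() fills each required
    # aesthetic from, in order, the explicit argument, the per-geom default
    # (which may be callable) and finally the value remembered from a previous
    # add_* call, and raises if none applies. The standalone helper below
    # mirrors that resolution order without touching any plot state.
    @staticmethod
    def _resolve_mapping_sketch(name, mappings, defaults, previous):
        """Illustrative only; the real logic lives in _add() above."""
        if name in mappings:
            return mappings[name]
        if name in defaults:
            default = defaults[name]
            return default(mappings) if hasattr(default, '__call__') else default
        if name in previous:
            return previous[name]
        raise ValueError("Missing required mapping: %s" % (name,))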
def _add_geom_methods(self):
"""add add_xyz methods for all geoms in ggplot.
All geoms have required & optional attributes and take an optional data parameter with another
dataframe
"""
methods = _geoms()
        # python method name (add_ + name), geom (R) name, required attributes, optional attributes, default attribute values, doc string
for x in methods:
if len(x) != 6:
raise ValueError("Wrong number of arguments: %s" % (x, ))
for (names, geom, required, optional, defaults, doc_str) in methods:
def define(geom, required, optional,
defaults): # we need to capture the variables...
def | |
# Copyright 2018 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the server command group
"""
from __future__ import absolute_import, print_function
import os
import io
import pytest
from .cli_test_extensions import CLITestsBase
from .common_options_help_lines import CMD_OPTION_HELP_HELP_LINE
TEST_DIR = os.path.dirname(__file__)
# A mof file that defines basic qualifier decls, classes, and instances
# but not tied to the DMTF classes.
SIMPLE_MOCK_FILE = 'simple_mock_model.mof'
INVOKE_METHOD_MOCK_FILE = 'simple_mock_invokemethod.py'
MOCK_SERVER_MODEL = os.path.join(TEST_DIR, 'testmock',
'wbemserver_mock_script.py')
# The empty mock model is used to ensure that the new model that is added does
# not modify any existing elements in the repository.
EMPTY_MOCK_MODEL = 'empty_mock_model.mof'
SIMPLE_MOCK_MODEL = 'simple_mock_model.mof'
SIMPLE_MOCK_MODEL_FILEPATH = os.path.join(
os.path.dirname(__file__), SIMPLE_MOCK_MODEL)
SIMPLE_MOCK_MODEL_FILEPATH_REL = os.path.relpath(SIMPLE_MOCK_MODEL_FILEPATH)
with io.open(SIMPLE_MOCK_MODEL_FILEPATH, 'r', encoding='utf-8') as fp:
SIMPLE_MOCK_MODEL_CONTENT = fp.read()
MOF_WITH_ERROR_FILEPATH = os.path.join(
os.path.dirname(__file__), 'mof_with_error.mof')
QUALIFIER_FILTER_MODEL = "qualifier_filter_model.mof"
# The following lists define the help for each command in terms of particular
# parts of lines that are to be tested.
# For each list, try to include:
# 1. The usage line and in particular the argument component
# 2. The first line of the command comment (i.e. the summary sentence)
# 3. The last line CMD_OPTION_HELP_HELP_LINE
# 4. Each option including at least the long and short names
SERVER_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] server COMMAND [ARGS] '
'[COMMAND-OPTIONS]',
'Command group for WBEM servers.',
CMD_OPTION_HELP_HELP_LINE,
'brand Get the brand of the server.',
'info Get information about the server.',
'interop Get the Interop namespace of the server (deprecated).',
'namespaces List the namespaces of the server (deprecated).',
'add-mof Compile MOF and add/update CIM objects in the server.',
'remove-mof Compile MOF and remove CIM objects from the server.',
'schema Get information about the server schemas.'
]
SERVER_BRAND_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] server brand',
'Get the brand of the server.',
CMD_OPTION_HELP_HELP_LINE,
]
SERVER_INFO_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] server info [COMMAND-OPTIONS]',
'Get information about the server.',
CMD_OPTION_HELP_HELP_LINE,
]
SERVER_INTEROP_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] server interop [COMMAND-OPTIONS]',
'Get the Interop namespace of the server (deprecated).',
CMD_OPTION_HELP_HELP_LINE,
]
SERVER_NAMESPACES_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] server namespaces [COMMAND-OPTIONS]',
'List the namespaces of the server (deprecated)',
CMD_OPTION_HELP_HELP_LINE,
]
SERVER_SCHEMA_HELP_LINES = [
'Usage: pywbemcli [GENERAL-OPTIONS] server schema [COMMAND-OPTIONS]',
'Get information about the server schemas.',
'-n, --namespace NAMESPACE Namespace to use for this command, instead',
'-d, --detail Display details about each schema in the',
CMD_OPTION_HELP_HELP_LINE,
]
OK = True # mark tests OK when they execute correctly
RUN = True  # Mark the test case currently being created RUN (and set OK = False)
FAIL = False # Any test currently FAILING or not tested yet
# pylint: enable=line-too-long
TEST_CASES = [
# List of testcases.
# Each testcase is a list with the following items:
# * desc: Description of testcase.
# * inputs: String, or tuple/list of strings, or dict of 'env', 'args',
# 'general', and 'stdin'. See the 'inputs' parameter of
# CLITestsBase.command_test() in cli_test_extensions.py for detailed
# documentation.
# * exp_response: Dictionary of expected responses (stdout, stderr, rc) and
# test definition (test: <testname>). See the 'exp_response' parameter
# of CLITestsBase.command_test() in cli_test_extensions.py for
# detailed documentation.
# * mock: None, name of file (.mof or .py), or list thereof.
    # * condition: If True the test is executed, if 'pdb' the test breaks in
# the debugger, if 'verbose' print verbose messages, if False the test
# is skipped.
['Verify server command --help response',
'--help',
{'stdout': SERVER_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command -h response',
'-h',
{'stdout': SERVER_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command brand --help response',
['brand', '--help'],
{'stdout': SERVER_BRAND_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command brand -h response',
['brand', '-h'],
{'stdout': SERVER_BRAND_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command info --help response',
['info', '--help'],
{'stdout': SERVER_INFO_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command info -h response',
['info', '-h'],
{'stdout': SERVER_INFO_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command interop --help response',
['interop', '--help'],
{'stdout': SERVER_INTEROP_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command interop -h response',
['interop', '-h'],
{'stdout': SERVER_INTEROP_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command namespaces --help response',
['namespaces', '--help'],
{'stdout': SERVER_NAMESPACES_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command namespaces -h response',
['namespaces', '-h'],
{'stdout': SERVER_NAMESPACES_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command schema --help response',
['schema', '--help'],
{'stdout': SERVER_SCHEMA_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify server command schema -h response',
['schema', '-h'],
{'stdout': SERVER_SCHEMA_HELP_LINES,
'test': 'innows'},
None, OK],
#
# Verify the individual commands returning data
#
['Verify server command interop default output',
{'args': ['interop'], },
{'stdout': ['interop'],
'rc': 0,
'test': 'innows'},
MOCK_SERVER_MODEL, OK],
['Verify server command interop',
{'args': ['interop'],
'general': ['-o', 'text']},
{'stdout': ['interop'],
'rc': 0,
'test': 'innows'},
MOCK_SERVER_MODEL, OK],
['Verify server command namespaces',
{'args': ['namespaces'],
'general': ['-o', 'simple']},
{'stdout': ['Namespace Name',
'----------------',
'interop',
'root/cimv2'],
'rc': 0,
'test': 'lines'},
MOCK_SERVER_MODEL, OK],
['Verify server command namespaces, text output',
{'args': ['namespaces'],
'general': ['-o', 'text']},
{'stdout': ['interop', 'root/cimv2'],
'rc': 0,
'test': 'lines'},
MOCK_SERVER_MODEL, OK],
['Verify server command namespaces with sort option',
{'args': ['namespaces'], },
{'stdout': ['interop'],
'rc': 0,
'test': 'innows'},
MOCK_SERVER_MODEL, OK],
['Verify server command brand',
{'args': ['brand'], },
{'stdout': ['OpenPegasus'],
'rc': 0,
'test': 'innows'},
MOCK_SERVER_MODEL, OK],
['Verify server command brand with --output table fails',
{'args': ['brand'],
'general': ['-o', 'simple']},
{'stderr': ['Output format "simple" not allowed for this command'],
'rc': 1,
'test': 'innows'},
MOCK_SERVER_MODEL, OK],
['Verify server command info with mock server',
{'args': ['info'],
'general': ['-o', 'simple']},
{'stdout':
['Server General Information',
'Brand Version Interop Namespace Namespaces',
'----------- --------- ------------------- -------------------',
'OpenPegasus 2.15.0 interop interop, root/cimv2'],
'rc': 0,
'test': 'linesnows'},
MOCK_SERVER_MODEL, OK],
# MOF compile commands
['Verify server command add-mof of same mock file fails',
{'args': ['add-mof', SIMPLE_MOCK_MODEL_FILEPATH],
'general': []},
{'stdout': ["Class 'CIM_BaseRef' namespace 'root/cimv2' cannot be "
"modified because it has subclasses"],
'rc': 1,
'test': 'regex'},
SIMPLE_MOCK_MODEL, OK],
['Verify server command add-mof of new mock file with abs path',
{'args': ['add-mof', SIMPLE_MOCK_MODEL_FILEPATH],
'general': []},
{'stdout':
[],
'rc': 0,
'test': 'innows'},
EMPTY_MOCK_MODEL, OK],
['Verify server command add-mof of new mock file with rel path',
{'args': ['add-mof', SIMPLE_MOCK_MODEL_FILEPATH_REL],
'general': []},
{'stdout':
[],
'rc': 0,
'test': 'innows'},
EMPTY_MOCK_MODEL, OK],
['Verify server command add-mof of new mock file with --include rel',
{'args': ['add-mof', SIMPLE_MOCK_MODEL_FILEPATH, '--include', '.'],
'general': []},
{'stdout':
[],
'rc': 0,
'test': 'innows'},
EMPTY_MOCK_MODEL, OK],
['Verify server command add-mof of new mock file with --include abs',
{'args': ['add-mof', SIMPLE_MOCK_MODEL_FILEPATH, '--include', os.getcwd()],
'general': []},
{'stdout':
[],
'rc': 0,
'test': 'innows'},
EMPTY_MOCK_MODEL, OK],
['Verify server command add-mof of same mock file with --dry-run',
{'args': ['add-mof', SIMPLE_MOCK_MODEL_FILEPATH, '--dry-run'],
'general': []},
{'stdout':
['Executing in dry-run mode'],
'rc': 0,
'test': 'innows'},
SIMPLE_MOCK_MODEL, OK],
['Verify server command add-mof of new mock file with --verbose',
{'args': ['add-mof', SIMPLE_MOCK_MODEL_FILEPATH],
'general': ['--verbose']},
{'stdout':
['Setting qualifier root/cimv2:Key',
'Creating class root/cimv2:CIM_Foo',
'Creating instance of class root/cimv2:CIM_Foo'],
'rc': 0,
'test': 'innows'},
EMPTY_MOCK_MODEL, OK],
['Verify server command add-mof of same mock file with --dry-run and '
'--verbose',
{'args': ['add-mof', SIMPLE_MOCK_MODEL_FILEPATH, '--dry-run'],
'general': ['--verbose']},
{'stdout':
['Setting qualifier root/cimv2:Key',
'Creating class root/cimv2:CIM_Foo',
'Creating instance of class root/cimv2:CIM_Foo'],
'rc': 0,
'test': 'innows'},
SIMPLE_MOCK_MODEL, OK],
['Verify server command add-mof of new mock file with --verbose via '
'stdin',
{'args': ['server', 'add-mof', '-'],
'general': ['--verbose'],
'stdin': SIMPLE_MOCK_MODEL_CONTENT},
{'stdout':
['Setting qualifier root/cimv2:Key',
'Creating class root/cimv2:CIM_Foo',
'Creating instance of class root/cimv2:CIM_Foo'],
'rc': 0,
'test': 'innows'},
EMPTY_MOCK_MODEL, OK],
['Verify server command add-mof of invalid MOF file',
{'args': ['add-mof', MOF_WITH_ERROR_FILEPATH],
'general': ['--verbose']},
{'stdout':
['MOF grammar error'],
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_MODEL, OK],
['Verify server command remove-mof of same mock file with abs path',
{'args': ['remove-mof', SIMPLE_MOCK_MODEL_FILEPATH],
'general': []},
{'stdout':
[],
'rc': 0,
'test': 'innows'},
SIMPLE_MOCK_MODEL, OK],
['Verify server command remove-mof of same mock file with rel path',
{'args': ['remove-mof', SIMPLE_MOCK_MODEL_FILEPATH_REL],
'general': []},
{'stdout':
[],
'rc': 0,
'test': 'innows'},
SIMPLE_MOCK_MODEL, OK],
['Verify server command remove-mof of same mock file with --include rel',
{'args': ['remove-mof', SIMPLE_MOCK_MODEL_FILEPATH, '--include', '.'],
'general': | |
<reponame>nomike/spambayes
# Test sb_imapfilter script.
import re
import sys
import time
import email
import types
import socket
import threading
import imaplib
import unittest
import asyncore
import StringIO
try:
IMAPError = imaplib.error
except AttributeError:
IMAPError = imaplib.IMAP4.error
import sb_test_support
sb_test_support.fix_sys_path()
from spambayes import message
from spambayes import Dibbler
from spambayes.Options import options
from spambayes.classifier import Classifier
from sb_imapfilter import run, BadIMAPResponseError, LoginFailure
from sb_imapfilter import IMAPSession, IMAPMessage, IMAPFolder, IMAPFilter
IMAP_PORT = 8143
IMAP_USERNAME = "testu"
IMAP_PASSWORD = "<PASSWORD>"
IMAP_FOLDER_LIST = ["INBOX", "unsure", "ham_to_train", "spam",
"spam_to_train"]
# Must be different.
SB_ID_1 = "<EMAIL>"
SB_ID_2 = "14102004"
# Key is UID.
IMAP_MESSAGES = {
# 101 should be valid and have a MessageID header, but no
# X-Spambayes-MessageID header.
101 : """Subject: Test\r
Message-ID: <%s>\r
\r
Body test.""" % (SB_ID_1,),
# 102 should be valid and have both a MessageID header and a
# X-Spambayes-MessageID header.
102 : """Subject: Test2\r
Message-ID: <%s>\r
%s: %s\r
\r
Another body test.""" % (SB_ID_1, options["Headers", "mailid_header_name"],
SB_ID_2),
# 103 is taken from Anthony's email torture test (the
# test_zero-length-boundary file).
103 : """Received: from noisy-2-82-67-182-141.fbx.proxad.net(192.168.127.12)
via SMTP by mx1.example.com, id smtpdAAAzMayUR; Tue Apr 27 18:56:48 2004
Return-Path: " Freeman" <<EMAIL>>
Received: from rly-xn05.mx.aol.com (rly-xn05.mail.aol.com [1172.16.17.328]) by air-xn02.mail.aol.com (v98.10) with ESMTP id MAILINXN22-6504043449c151; Tue, 27 Apr 2004 16:57:46 -0300
Received: from 172.16.31.10 by 192.168.127.12; Tue, 27 Apr 2004 14:54:46 -0500
From: " Gilliam" <.<EMAIL>>
To: To: <EMAIL>
Subject: Your Source For Online Prescriptions....Soma-Watson..VALIUM-Roche .
Date: Wed, 28 Apr 2004 00:52:46 +0500
Mime-Version: 1.0
Content-Type: multipart/alternative;
boundary=""
X-Mailer: AOL 7.0 for Windows US sub 118
X-AOL-IP: 172.16.58.3
X-AOL-SCOLL-SCORE: 1:XXX:XX
X-AOL-SCOLL-URL_COUNT: 2
Message-ID: <<EMAIL>>
--
Content-Type: text/html;
charset="iso-8859-1"
Content-Transfer-Encoding: quoted-printable
<strong><a href=3D"http://www.ibshels454drugs.biz/c39/">ENTER HERE</a> to
ORDER MEDS Online, such as XANAX..VALIUM..SOMA..Much MORE SHIPPED
OVERNIGHT,to US and INTERNATIONAL</strong>
---
""",
# 104 should be valid and have neither a MessageID header nor a
# X-Spambayes-MessageID header.
104 : """Subject: Test2\r
\r
Yet another body test.""",
}
# Map of ID -> UID
IMAP_UIDS = {1 : 101, 2: 102, 3:103, 4:104}
# Messages that are UNDELETED
UNDELETED_IDS = (1,2)
class TestListener(Dibbler.Listener):
"""Listener for TestIMAP4Server."""
def __init__(self, socketMap=asyncore.socket_map):
Dibbler.Listener.__init__(self, IMAP_PORT, TestIMAP4Server,
(socketMap,), socketMap=socketMap)
# If true, the next command will fail, whatever it is.
FAIL_NEXT = False
class TestIMAP4Server(Dibbler.BrighterAsyncChat):
"""Minimal IMAP4 server, for testing purposes. Accepts a limited
subset of commands, and also a KILL command, to terminate."""
def __init__(self, clientSocket, socketMap):
# Grumble: asynchat.__init__ doesn't take a 'map' argument,
# hence the two-stage construction.
Dibbler.BrighterAsyncChat.__init__(self)
Dibbler.BrighterAsyncChat.set_socket(self, clientSocket, socketMap)
self.set_terminator('\r\n')
# okCommands are just ignored (we pass back a happy this-was-fine
        # answer, and do nothing).
self.okCommands = ['NOOP', 'LOGOUT', 'CAPABILITY', 'KILL']
# These commands actually result in something.
self.handlers = {'LIST' : self.onList,
'LOGIN' : self.onLogin,
'SELECT' : self.onSelect,
'FETCH' : self.onFetch,
'SEARCH' : self.onSearch,
'UID' : self.onUID,
'APPEND' : self.onAppend,
'STORE' : self.onStore,
}
self.push("* OK [CAPABILITY IMAP4REV1 AUTH=LOGIN] " \
"localhost IMAP4rev1\r\n")
self.request = ''
self.next_id = 0
self.in_literal = (0, None)
def collect_incoming_data(self, data):
"""Asynchat override."""
if self.in_literal[0] > 0:
# Also add the line breaks.
self.request = "%s\r\n%s" % (self.request, data)
else:
self.request = self.request + data
def found_terminator(self):
"""Asynchat override."""
global FAIL_NEXT
if self.in_literal[0] > 0:
if len(self.request) >= self.in_literal[0]:
self.push(self.in_literal[1](self.request,
*self.in_literal[2]))
self.in_literal = (0, None)
self.request = ''
return
id, command = self.request.split(None, 1)
if FAIL_NEXT:
FAIL_NEXT = False
self.push("%s NO Was told to fail.\r\n" % (id,))
if ' ' in command:
command, args = command.split(None, 1)
else:
args = ''
command = command.upper()
if command in self.okCommands:
self.push("%s OK (we hope)\r\n" % (id,))
if command == 'LOGOUT':
self.close_when_done()
if command == 'KILL':
self.socket.shutdown(2)
self.close()
raise SystemExit()
else:
handler = self.handlers.get(command, self.onUnknown)
self.push(handler(id, command, args, False)) # Or push_slowly for testing
self.request = ''
def push_slowly(self, response):
"""Useful for testing."""
for c in response:
self.push(c)
time.sleep(0.02)
def onLogin(self, id, command, args, uid=False):
"""Log in to server."""
username, password = args.split(None, 1)
username = username.strip('"')
password = password.strip('"')
if username == IMAP_USERNAME and password == IMAP_PASSWORD:
return "%s OK [CAPABILITY IMAP4REV1] User %s " \
"authenticated.\r\n" % (id, username)
return "%s NO LOGIN failed\r\n" % (id,)
def onList(self, id, command, args, uid=False):
"""Return list of folders."""
base = '\r\n* LIST (\\NoInferiors \\UnMarked) "/" '
return "%s%s\r\n%s OK LIST completed\r\n" % \
(base[2:], base.join(IMAP_FOLDER_LIST), id)
def onStore(self, id, command, args, uid=False):
# We ignore flags.
return "%s OK STORE completed\r\n" % (id,)
def onSelect(self, id, command, args, uid=False):
exists = "* %d EXISTS" % (len(IMAP_MESSAGES),)
recent = "* 0 RECENT"
uidv = "* OK [UIDVALIDITY 1091599302] UID validity status"
next_uid = "* OK [UIDNEXT 23] Predicted next UID"
flags = "* FLAGS (\Answered \Flagged \Deleted \Draft \Seen)"
perm_flags = "* OK [PERMANENTFLAGS (\* \Answered \Flagged " \
"\Deleted \Draft \Seen)] Permanent flags"
complete = "%s OK [READ-WRITE] SELECT completed" % (id,)
return "%s\r\n" % ("\r\n".join([exists, recent, uidv, next_uid,
flags, perm_flags, complete]),)
def onAppend(self, id, command, args, uid=False):
# Only stores for this session.
folder, args = args.split(None, 1)
# We ignore the folder.
if ')' in args:
flags, args = args.split(')', 1)
flags = flags[1:]
# We ignore the flags.
unused, date, args = args.split('"', 2)
# We ignore the date.
if '{' in args:
# A literal.
size = int(args[2:-1])
self.in_literal = (size, self.appendLiteral, (id,))
return "+ Ready for argument\r\n"
# Strip off the space at the front.
return self.appendLiteral(args[1:], id)
def appendLiteral(self, message, command_id):
while True:
id = self.next_id
self.next_id += 1
if id not in IMAP_MESSAGES:
break
IMAP_MESSAGES[id] = message
return "* APPEND %s\r\n%s OK APPEND succeeded\r\n" % \
(id, command_id)
def onSearch(self, id, command, args, uid=False):
args = args.upper()
results = ()
if args.find("UNDELETED") != -1:
for msg_id in UNDELETED_IDS:
if uid:
results += (IMAP_UIDS[msg_id],)
else:
results += (msg_id,)
if uid:
command_string = "UID " + command
else:
command_string = command
return "%s\r\n%s OK %s completed\r\n" % \
("* SEARCH " + ' '.join([str(r) for r in results]), id,
command_string)
def onFetch(self, id, command, args, uid=False):
msg_nums, msg_parts = args.split(None, 1)
msg_nums = msg_nums.split()
response = {}
for msg in msg_nums:
response[msg] = []
if msg_parts.find("UID") != -1:
if uid:
for msg in msg_nums:
response[msg].append("FETCH (UID %s)" % (msg,))
else:
for msg in msg_nums:
response[msg].append("FETCH (UID %s)" %
(IMAP_UIDS[int(msg)]))
if msg_parts.find("BODY.PEEK[]") != -1:
for msg in msg_nums:
if uid:
msg_uid = int(msg)
else:
msg_uid = IMAP_UIDS[int(msg)]
response[msg].append(("FETCH (BODY[] {%s}" %
(len(IMAP_MESSAGES[msg_uid])),
IMAP_MESSAGES[msg_uid]))
if msg_parts.find("RFC822.HEADER") != -1:
for msg in msg_nums:
if uid:
msg_uid = int(msg)
else:
msg_uid = IMAP_UIDS[int(msg)]
msg_text = IMAP_MESSAGES[msg_uid]
headers, unused = msg_text.split('\r\n\r\n', 1)
response[msg].append(("FETCH (RFC822.HEADER {%s}" %
(len(headers),), headers))
if msg_parts.find("FLAGS INTERNALDATE") != -1:
# We make up flags & dates.
for msg in msg_nums:
response[msg].append('FETCH (FLAGS (\Seen \Deleted) '
'INTERNALDATE "27-Jul-2004 13:1'
'1:56 +1200')
for msg in msg_nums:
try:
simple = " ".join(response[msg])
except TypeError:
simple = []
for part in response[msg]:
if isinstance(part, types.StringTypes):
simple.append(part)
else:
simple.append('%s\r\n%s)' % (part[0], part[1]))
simple = " ".join(simple)
response[msg] = "* %s %s" % (msg, simple)
response_text = "\r\n".join(response.values())
return "%s\r\n%s OK FETCH completed\r\n" % (response_text, id)
def onUID(self, id, command, args, uid=False):
actual_command, args = args.split(None, 1)
handler = self.handlers.get(actual_command, self.onUnknown)
return handler(id, actual_command, args, uid=True)
def onUnknown(self, id, command, args, uid=False):
"""Unknown IMAP4 command."""
return "%s BAD Command unrecognised: %s\r\n" % (id, repr(command))
class BaseIMAPFilterTest(unittest.TestCase):
def setUp(self):
# shorten for testing so we don't wait forever
IMAPSession.timeout = 5
self.imap = IMAPSession("localhost:%d" % IMAP_PORT)
def tearDown(self):
try:
self.imap.logout()
except IMAPError:
pass
class IMAPSessionTest(BaseIMAPFilterTest):
def testConnection(self):
# Connection is made in setup, just need to check
# that it worked.
self.assert_(self.imap.connected)
def testGoodLogin(self):
self.imap.login(IMAP_USERNAME, IMAP_PASSWORD)
self.assert_(self.imap.logged_in)
def testBadLogin(self):
self.assertRaises(LoginFailure, self.imap.login, IMAP_USERNAME,
"wrong password")
def test_check_response(self):
test_data = "IMAP response data"
response = ("OK", test_data)
data = self.imap.check_response("", response)
self.assertEqual(data, test_data)
response = ("NO", test_data)
self.assertRaises(BadIMAPResponseError, self.imap.check_response,
"", response)
def testSelectFolder(self):
# This test will fail if testGoodLogin fails.
self.imap.login(IMAP_USERNAME, IMAP_PASSWORD)
# Check handling of Python (not SpamBayes) bug #845560.
self.assertRaises(BadIMAPResponseError, self.imap.SelectFolder, "")
# Check selection.
self.imap.SelectFolder("Inbox")
response = self.imap.response('OK')
self.assertEquals(response[0], "OK")
self.assert_(response[1] != [None])
# Check that we don't reselect if we are already in that folder.
self.imap.SelectFolder("Inbox")
response = self.imap.response('OK')
self.assertEquals(response[0], "OK")
self.assertEquals(response[1], [None])
def test_folder_list(self):
global FAIL_NEXT
# This test will fail if testGoodLogin fails.
self.imap.login(IMAP_USERNAME, IMAP_PASSWORD)
# Everything working.
folders = self.imap.folder_list()
correct = IMAP_FOLDER_LIST[:]
correct.sort()
| |
<reponame>treehopper-electronics/treehopper-sdk
### This file was auto-generated by RegisterGenerator. Any changes to it will be overwritten!
from treehopper.libraries.register_manager_adapter import RegisterManagerAdapter
from treehopper.libraries.register_manager import RegisterManager, Register, sign_extend
class Gains:
x1 = 0
x4 = 1
x2 = 2
x8 = 3
x16 = 4
x32 = 5
x64 = 6
x128 = 7
class Vldoes:
mV_4500 = 0
mV_4200 = 1
mV_3900 = 2
mV_3600 = 3
mV_3300 = 4
mV_3000 = 5
mV_2700 = 6
mV_2400 = 7
class CalMods:
OffsetCalibrationInternal = 0
Reserved = 1
OffsetCalibrationSystem = 2
GainCalibrationSystem = 3
class ConversionRates:
Sps_10 = 0
Sps_20 = 1
Sps_40 = 2
Sps_80 = 3
Sps_320 = 7
class AdcVcms:
ExtendedCommonModeRefp = 3
ExtendedCommonModeRefn = 2
disable = 0
class RegChpFreqs:
off = 3
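# Hedged sketch (not part of the auto-generated API): CTRL1 packs gain into bits
# [2:0] and vldo into bits [5:3], so gain=x128 (7) with vldo=3.3 V (4) packs to
# 0x27 -- the same value Nau7802Registers.Ctrl1Register.getValue() below would
# return for those fields.
def _example_pack_ctrl1(gain=Gains.x128, vldo=Vldoes.mV_3300):
    """Standalone illustration of the CTRL1 bit packing used below."""
    return ((gain & 0x7) << 0) | ((vldo & 0x7) << 3)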
class Nau7802Registers(RegisterManager):
def __init__(self, manager: RegisterManagerAdapter):
RegisterManager.__init__(self, manager, True)
self.puCtrl = self.PuCtrlRegister(self)
self.registers.append(self.puCtrl)
self.ctrl1 = self.Ctrl1Register(self)
self.registers.append(self.ctrl1)
self.ctrl2 = self.Ctrl2Register(self)
self.registers.append(self.ctrl2)
self.i2cCtrl = self.I2cCtrlRegister(self)
self.registers.append(self.i2cCtrl)
self.adcResult = self.AdcResultRegister(self)
self.registers.append(self.adcResult)
self.adc = self.AdcRegister(self)
self.registers.append(self.adc)
self.pga = self.PgaRegister(self)
self.registers.append(self.pga)
self.powerCtrl = self.PowerCtrlRegister(self)
self.registers.append(self.powerCtrl)
class PuCtrlRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x00, 1, False)
self.registerReset = 0
self.powerUpDigital = 0
self.powerUpAnalog = 0
self.powerUpReady = 0
self.cycleStart = 0
self.cycleReady = 0
self.useExternalCrystal = 0
self.useInternalLdo = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.registerReset & 0x1) << 0) | ((self.powerUpDigital & 0x1) << 1) | ((self.powerUpAnalog & 0x1) << 2) | ((self.powerUpReady & 0x1) << 3) | ((self.cycleStart & 0x1) << 4) | ((self.cycleReady & 0x1) << 5) | ((self.useExternalCrystal & 0x1) << 6) | ((self.useInternalLdo & 0x1) << 7)
def setValue(self, value: int):
self.registerReset = ((value >> 0) & 0x1)
self.powerUpDigital = ((value >> 1) & 0x1)
self.powerUpAnalog = ((value >> 2) & 0x1)
self.powerUpReady = ((value >> 3) & 0x1)
self.cycleStart = ((value >> 4) & 0x1)
self.cycleReady = ((value >> 5) & 0x1)
self.useExternalCrystal = ((value >> 6) & 0x1)
self.useInternalLdo = ((value >> 7) & 0x1)
def __str__(self):
retVal = ""
retVal += "RegisterReset: {} (offset: 0, width: 1)\r\n".format(self.registerReset)
retVal += "PowerUpDigital: {} (offset: 1, width: 1)\r\n".format(self.powerUpDigital)
retVal += "PowerUpAnalog: {} (offset: 2, width: 1)\r\n".format(self.powerUpAnalog)
retVal += "PowerUpReady: {} (offset: 3, width: 1)\r\n".format(self.powerUpReady)
retVal += "CycleStart: {} (offset: 4, width: 1)\r\n".format(self.cycleStart)
retVal += "CycleReady: {} (offset: 5, width: 1)\r\n".format(self.cycleReady)
retVal += "UseExternalCrystal: {} (offset: 6, width: 1)\r\n".format(self.useExternalCrystal)
retVal += "UseInternalLdo: {} (offset: 7, width: 1)\r\n".format(self.useInternalLdo)
return retVal
class Ctrl1Register(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x01, 1, False)
self.gain = 0
self.vldo = 0
self.drdySelect = 0
self.conversionReadyPinPolarity = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.gain & 0x7) << 0) | ((self.vldo & 0x7) << 3) | ((self.drdySelect & 0x1) << 6) | ((self.conversionReadyPinPolarity & 0x1) << 7)
def setValue(self, value: int):
self.gain = ((value >> 0) & 0x7)
self.vldo = ((value >> 3) & 0x7)
self.drdySelect = ((value >> 6) & 0x1)
self.conversionReadyPinPolarity = ((value >> 7) & 0x1)
def __str__(self):
retVal = ""
retVal += "Gain: {} (offset: 0, width: 3)\r\n".format(self.gain)
retVal += "Vldo: {} (offset: 3, width: 3)\r\n".format(self.vldo)
retVal += "DrdySelect: {} (offset: 6, width: 1)\r\n".format(self.drdySelect)
retVal += "ConversionReadyPinPolarity: {} (offset: 7, width: 1)\r\n".format(self.conversionReadyPinPolarity)
return retVal
class Ctrl2Register(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x02, 1, False)
self.calMod = 0
self.calStart = 0
self.calError = 0
self.conversionRate = 0
self.channelSelect = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.calMod & 0x3) << 0) | ((self.calStart & 0x1) << 2) | ((self.calError & 0x1) << 3) | ((self.conversionRate & 0x7) << 4) | ((self.channelSelect & 0x1) << 7)
def setValue(self, value: int):
self.calMod = ((value >> 0) & 0x3)
self.calStart = ((value >> 2) & 0x1)
self.calError = ((value >> 3) & 0x1)
self.conversionRate = ((value >> 4) & 0x7)
self.channelSelect = ((value >> 7) & 0x1)
def __str__(self):
retVal = ""
retVal += "CalMod: {} (offset: 0, width: 2)\r\n".format(self.calMod)
retVal += "CalStart: {} (offset: 2, width: 1)\r\n".format(self.calStart)
retVal += "CalError: {} (offset: 3, width: 1)\r\n".format(self.calError)
retVal += "ConversionRate: {} (offset: 4, width: 3)\r\n".format(self.conversionRate)
retVal += "ChannelSelect: {} (offset: 7, width: 1)\r\n".format(self.channelSelect)
return retVal
class I2cCtrlRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x11, 1, False)
self.bgpCp = 0
self.ts = 0
self.boPga = 0
self.si = 0
self.wpd = 0
self.spe = 0
self.frd = 0
self.crsd = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.bgpCp & 0x1) << 0) | ((self.ts & 0x1) << 1) | ((self.boPga & 0x1) << 2) | ((self.si & 0x1) << 3) | ((self.wpd & 0x1) << 4) | ((self.spe & 0x1) << 5) | ((self.frd & 0x1) << 6) | ((self.crsd & 0x1) << 7)
def setValue(self, value: int):
self.bgpCp = ((value >> 0) & 0x1)
self.ts = ((value >> 1) & 0x1)
self.boPga = ((value >> 2) & 0x1)
self.si = ((value >> 3) & 0x1)
self.wpd = ((value >> 4) & 0x1)
self.spe = ((value >> 5) & 0x1)
self.frd = ((value >> 6) & 0x1)
self.crsd = ((value >> 7) & 0x1)
def __str__(self):
retVal = ""
retVal += "BgpCp: {} (offset: 0, width: 1)\r\n".format(self.bgpCp)
retVal += "Ts: {} (offset: 1, width: 1)\r\n".format(self.ts)
retVal += "BoPga: {} (offset: 2, width: 1)\r\n".format(self.boPga)
retVal += "Si: {} (offset: 3, width: 1)\r\n".format(self.si)
retVal += "Wpd: {} (offset: 4, width: 1)\r\n".format(self.wpd)
retVal += "Spe: {} (offset: 5, width: 1)\r\n".format(self.spe)
retVal += "Frd: {} (offset: 6, width: 1)\r\n".format(self.frd)
retVal += "Crsd: {} (offset: 7, width: 1)\r\n".format(self.crsd)
return retVal
class AdcResultRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x12, 3, True)
self.value = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.value & 0xFFFFFF) << 0)
def setValue(self, value: int):
            # the 3-byte conversion result is two's complement, so sign-extend from bit 23
            self.value = sign_extend((value >> 0) & 0xFFFFFF, 24)
def __str__(self):
retVal = ""
retVal += "Value: {} (offset: 0, width: 24)\r\n".format(self.value)
return retVal
class AdcRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x15, 1, False)
self.regChp = 0
self.adcVcm = 0
self.regChpFreq = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.regChp & 0x3) << 0) | ((self.adcVcm & 0x3) << 2) | ((self.regChpFreq & 0x3) << 4)
def setValue(self, value: int):
self.regChp = ((value >> 0) & 0x3)
self.adcVcm = ((value >> 2) & 0x3)
self.regChpFreq = ((value >> 4) & 0x3)
def __str__(self):
retVal = ""
retVal += "RegChp: {} (offset: 0, width: 2)\r\n".format(self.regChp)
retVal += "AdcVcm: {} (offset: 2, width: 2)\r\n".format(self.adcVcm)
retVal += "RegChpFreq: {} (offset: 4, width: 2)\r\n".format(self.regChpFreq)
return retVal
class PgaRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x1B, 1, False)
self.disableChopper = 0
self.pgaInv = 0
self.pgaBypass = 0
self.ldoMode = 0
self.rdOptSel = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.disableChopper & 0x1) << 0) | ((self.pgaInv & 0x1) << 3) | ((self.pgaBypass & 0x1) << 4) | ((self.ldoMode & 0x1) << 5) | ((self.rdOptSel & 0x1) << 6)
def setValue(self, value: int):
self.disableChopper = ((value >> 0) & 0x1)
self.pgaInv = ((value >> 3) & 0x1)
self.pgaBypass = ((value >> 4) & 0x1)
self.ldoMode = ((value >> 5) & 0x1)
self.rdOptSel = ((value >> 6) & 0x1)
def __str__(self):
retVal = ""
retVal += "DisableChopper: {} (offset: 0, width: 1)\r\n".format(self.disableChopper)
retVal += "PgaInv: {} (offset: 3, width: 1)\r\n".format(self.pgaInv)
retVal += "PgaBypass: {} (offset: 4, width: 1)\r\n".format(self.pgaBypass)
retVal += "LdoMode: {} (offset: 5, width: 1)\r\n".format(self.ldoMode)
retVal += "RdOptSel: {} (offset: 6, width: 1)\r\n".format(self.rdOptSel)
return retVal
class PowerCtrlRegister(Register):
def __init__(self, reg_manager: RegisterManager):
Register.__init__(self, reg_manager, 0x1C, 1, False)
self.pgaCurr = 0
self.adcCurr = 0
self.masterBiasCurr = 0
self.pgaCapEn = 0
def read(self):
self._manager.read(self)
return self
def getValue(self):
return ((self.pgaCurr & 0x3) << 0) | ((self.adcCurr & 0x3) << 2) | ((self.masterBiasCurr & 0x7) << 4) | ((self.pgaCapEn & 0x1) << 7)
def setValue(self, value: int):
self.pgaCurr = ((value >> 0) & 0x3)
self.adcCurr = ((value >> 2) & 0x3)
self.masterBiasCurr = ((value >> 4) & 0x7)
self.pgaCapEn = ((value >> 7) & 0x1)
def __str__(self):
retVal = ""
| |
Nebraska',
'Banner County, Nebraska',
'Blaine County, Nebraska',
'Boone County, Nebraska',
'Box Butte County, Nebraska',
'Boyd County, Nebraska',
'Brown County, Nebraska',
'Buffalo County, Nebraska',
'Burt County, Nebraska',
'Butler County, Nebraska',
'Cass County, Nebraska',
'Cedar County, Nebraska',
'Chase County, Nebraska',
'Cherry County, Nebraska',
'Cheyenne County, Nebraska',
'Clay County, Nebraska',
'Colfax County, Nebraska',
'Cuming County, Nebraska',
'Custer County, Nebraska',
'Dakota County, Nebraska',
'Dawes County, Nebraska',
'Dawson County, Nebraska',
'Deuel County, Nebraska',
'Dixon County, Nebraska',
'Dodge County, Nebraska',
'Douglas County, Nebraska',
'Dundy County, Nebraska',
'Fillmore County, Nebraska',
'Franklin County, Nebraska',
'Frontier County, Nebraska',
'Furnas County, Nebraska',
'Gage County, Nebraska',
'Garden County, Nebraska',
'Garfield County, Nebraska',
'Gosper County, Nebraska',
'Grant County, Nebraska',
'Greeley County, Nebraska',
'Hall County, Nebraska',
'Hamilton County, Nebraska',
'Harlan County, Nebraska',
'Hayes County, Nebraska',
'Hitchcock County, Nebraska',
'Holt County, Nebraska',
'Hooker County, Nebraska',
'Howard County, Nebraska',
'Jefferson County, Nebraska',
'Johnson County, Nebraska',
'Kearney County, Nebraska',
'Keith County, Nebraska',
'Keya Paha County, Nebraska',
'Kimball County, Nebraska',
'Knox County, Nebraska',
'Lancaster County, Nebraska',
'Lincoln County, Nebraska',
'Logan County, Nebraska',
'Loup County, Nebraska',
'McPherson County, Nebraska',
'Madison County, Nebraska',
'Merrick County, Nebraska',
'Morrill County, Nebraska',
'Nance County, Nebraska',
'Nemaha County, Nebraska',
'Nuckolls County, Nebraska',
'Otoe County, Nebraska',
'Pawnee County, Nebraska',
'Perkins County, Nebraska',
'Phelps County, Nebraska',
'Pierce County, Nebraska',
'Platte County, Nebraska',
'Polk County, Nebraska',
'Red Willow County, Nebraska',
'Richardson County, Nebraska',
'Rock County, Nebraska',
'Saline County, Nebraska',
'Sarpy County, Nebraska',
'Saunders County, Nebraska',
'Scotts Bluff County, Nebraska',
'Seward County, Nebraska',
'Sheridan County, Nebraska',
'Sherman County, Nebraska',
'Sioux County, Nebraska',
'Stanton County, Nebraska',
'Thayer County, Nebraska',
'Thomas County, Nebraska',
'Thurston County, Nebraska',
'Valley County, Nebraska',
'Washington County, Nebraska',
'Wayne County, Nebraska',
'Webster County, Nebraska',
'Wheeler County, Nebraska',
'York County, Nebraska',
'Churchill County, Nevada',
'Clark County, Nevada',
'Douglas County, Nevada',
'Elko County, Nevada',
'Esmeralda County, Nevada',
'Eureka County, Nevada',
'Humboldt County, Nevada',
'Lander County, Nevada',
'Lincoln County, Nevada',
'Lyon County, Nevada',
'Mineral County, Nevada',
'Nye County, Nevada',
'Pershing County, Nevada',
'Storey County, Nevada',
'Washoe County, Nevada',
'White Pine County, Nevada',
'Carson City, Nevada',
'Belknap County, New Hampshire',
'Carroll County, New Hampshire',
'Cheshire County, New Hampshire',
'Coos County, New Hampshire',
'Grafton County, New Hampshire',
'Hillsborough County, New Hampshire',
'Merrimack County, New Hampshire',
'Rockingham County, New Hampshire',
'Strafford County, New Hampshire',
'Sullivan County, New Hampshire',
'Atlantic County, New Jersey',
'Bergen County, New Jersey',
'Burlington County, New Jersey',
'Camden County, New Jersey',
'Cape May County, New Jersey',
'Cumberland County, New Jersey',
'Essex County, New Jersey',
'Gloucester County, New Jersey',
'Hudson County, New Jersey',
'Hunterdon County, New Jersey',
'Mercer County, New Jersey',
'Middlesex County, New Jersey',
'Monmouth County, New Jersey',
'Morris County, New Jersey',
'Ocean County, New Jersey',
'Passaic County, New Jersey',
'Salem County, New Jersey',
'Somerset County, New Jersey',
'Sussex County, New Jersey',
'Union County, New Jersey',
'Warren County, New Jersey',
'Bernalillo County, New Mexico',
'Catron County, New Mexico',
'Chaves County, New Mexico',
'Cibola County, New Mexico',
'Colfax County, New Mexico',
'Curry County, New Mexico',
'De Baca County, New Mexico',
'Doña Ana County, New Mexico',
'Eddy County, New Mexico',
'Grant County, New Mexico',
'Guadalupe County, New Mexico',
'Harding County, New Mexico',
'Hidalgo County, New Mexico',
'Lea County, New Mexico',
'Lincoln County, New Mexico',
'Los Alamos County, New Mexico',
'Luna County, New Mexico',
'McKinley County, New Mexico',
'Mora County, New Mexico',
'Otero County, New Mexico',
'Quay County, New Mexico',
'Rio Arriba County, New Mexico',
'Roosevelt County, New Mexico',
'Sandoval County, New Mexico',
'San Juan County, New Mexico',
'San Miguel County, New Mexico',
'Santa Fe County, New Mexico',
'Sierra County, New Mexico',
'Socorro County, New Mexico',
'Taos County, New Mexico',
'Torrance County, New Mexico',
'Union County, New Mexico',
'Valencia County, New Mexico',
'Albany County, New York',
'Allegany County, New York',
'Bronx County, New York',
'Broome County, New York',
'Cattaraugus County, New York',
'Cayuga County, New York',
'Chautauqua County, New York',
'Chemung County, New York',
'Chenango County, New York',
'Clinton County, New York',
'Columbia County, New York',
'Cortland County, New York',
'Delaware County, New York',
'Dutchess County, New York',
'Erie County, New York',
'Essex County, New York',
'Franklin County, New York',
'Fulton County, New York',
'Genesee County, New York',
'Greene County, New York',
'Hamilton County, New York',
'Herkimer County, New York',
'Jefferson County, New York',
'Kings County, New York',
'Lewis County, New York',
'Livingston County, New York',
'Madison County, New York',
'Monroe County, New York',
'Montgomery County, New York',
'Nassau County, New York',
'New York County, New York',
'Niagara County, New York',
'Oneida County, New York',
'Onondaga County, New York',
'Ontario County, New York',
'Orange County, New York',
'Orleans County, New York',
'Oswego County, New York',
'Otsego County, New York',
'Putnam County, New York',
'Queens County, New York',
'Rensselaer County, New York',
'Richmond County, New York',
'Rockland County, New York',
'St. Lawrence County, New York',
'Saratoga County, New York',
'Schenectady County, New York',
'Schoharie County, New York',
'Schuyler County, New York',
'Seneca County, New York',
'Steuben County, New York',
'Suffolk County, New York',
'Sullivan County, New York',
'Tioga County, New York',
'Tompkins County, New York',
'Ulster County, New York',
'Warren County, New York',
'Washington County, New York',
'Wayne County, New York',
'Westchester County, New York',
'Wyoming County, New York',
'Yates County, New York',
'Alamance County, North Carolina',
'Alexander County, North Carolina',
'Alleghany County, North Carolina',
'Anson County, North Carolina',
'Ashe County, North Carolina',
'Avery County, North Carolina',
'Beaufort County, North Carolina',
'Bertie County, North Carolina',
'Bladen County, North Carolina',
'Brunswick County, North Carolina',
'Buncombe County, North Carolina',
'Burke County, North Carolina',
'Cabarrus County, North Carolina',
'Caldwell County, North Carolina',
'Camden County, North Carolina',
'Carteret County, North Carolina',
'Caswell County, North Carolina',
'Catawba County, North Carolina',
'Chatham County, North Carolina',
'Cherokee County, North Carolina',
'Chowan County, North Carolina',
'Clay County, North Carolina',
'Cleveland County, North Carolina',
'Columbus County, North Carolina',
'Craven County, North Carolina',
'Cumberland County, North Carolina',
'Currituck County, North Carolina',
'Dare County, North Carolina',
'Davidson County, North Carolina',
'Davie County, North Carolina',
'Duplin County, North Carolina',
'Durham County, North Carolina',
'Edgecombe County, North Carolina',
'Forsyth County, North Carolina',
'Franklin County, North Carolina',
'Gaston County, North Carolina',
'Gates County, North Carolina',
'Graham County, North Carolina',
'Granville County, North Carolina',
'Greene County, North Carolina',
'Guilford County, North Carolina',
'Halifax County, North Carolina',
'Harnett County, North Carolina',
'Haywood County, North Carolina',
'Henderson County, North Carolina',
'Hertford County, North Carolina',
'Hoke County, North Carolina',
'Hyde County, North Carolina',
'Iredell County, North Carolina',
'Jackson County, North Carolina',
'Johnston County, North Carolina',
'Jones County, North Carolina',
'Lee County, North Carolina',
'Lenoir County, North Carolina',
'Lincoln County, North Carolina',
'McDowell County, North Carolina',
'Macon County, North Carolina',
'Madison County, North Carolina',
'Martin County, North Carolina',
'Mecklenburg County, North Carolina',
'Mitchell County, North Carolina',
'Montgomery County, North Carolina',
'Moore County, North Carolina',
'Nash County, North Carolina',
'New Hanover County, North Carolina',
'Northampton County, North Carolina',
'Onslow County, North Carolina',
'Orange County, North Carolina',
'Pamlico County, North Carolina',
'Pasquotank County, North Carolina',
'Pender County, North Carolina',
'Perquimans County, North Carolina',
'Person County, North Carolina',
'Pitt County, North Carolina',
'Polk County, North Carolina',
'Randolph County, North Carolina',
'Richmond County, North Carolina',
'Robeson County, North Carolina',
'Rockingham County, North Carolina',
'Rowan County, North Carolina',
'Rutherford County, North Carolina',
'Sampson County, North Carolina',
'Scotland County, North Carolina',
'Stanly County, North Carolina',
'Stokes County, North Carolina',
'Surry County, North Carolina',
'Swain County, North Carolina',
'Transylvania County, North Carolina',
'Tyrrell County, North Carolina',
'Union County, North Carolina',
'Vance County, North Carolina',
'Wake County, North Carolina',
'Warren County, North Carolina',
'Washington County, North Carolina',
'Watauga County, North Carolina',
'Wayne County, North Carolina',
'Wilkes County, North Carolina',
'Wilson County, North Carolina',
'Yadkin County, North Carolina',
'Yancey County, North Carolina',
'Adams County, North Dakota',
'Barnes County, North Dakota',
'Benson County, North Dakota',
'Billings County, North Dakota',
'Bottineau County, North Dakota',
'Bowman County, North Dakota',
'Burke County, North Dakota',
'Burleigh County, North Dakota',
'Cass County, North Dakota',
'Cavalier County, | |
'0X0')
test(1, "-#X", '0X1')
test(-1, "-#X", '-0X1')
test(-1, "-#5X", ' -0X1')
test(1, "+#5X", ' +0X1')
test(100, "+#X", '+0X64')
test(100, "#012X", '0X0000000064')
test(-100, "#012X", '-0X000000064')
test(123456, "#012X", '0X000001E240')
test(-123456, "#012X", '-0X00001E240')
test(123, ',', '123')
test(-123, ',', '-123')
test(1234, ',', '1,234')
test(-1234, ',', '-1,234')
test(123456, ',', '123,456')
test(-123456, ',', '-123,456')
test(1234567, ',', '1,234,567')
test(-1234567, ',', '-1,234,567')
# issue 5782, commas with no specifier type
test(1234, '010,', '00,001,234')
# Unified type for integers
test(10**100, 'd', '1' + '0' * 100)
test(10**100+100, 'd', '1' + '0' * 97 + '100')
# make sure these are errors
# precision disallowed
self.assertRaises(ValueError, 3 .__format__, "1.3")
# sign not allowed with 'c'
self.assertRaises(ValueError, 3 .__format__, "+c")
# format spec must be string
self.assertRaises(TypeError, 3 .__format__, None)
self.assertRaises(TypeError, 3 .__format__, 0)
# can't have ',' with 'n'
self.assertRaises(ValueError, 3 .__format__, ",n")
# can't have ',' with 'c'
self.assertRaises(ValueError, 3 .__format__, ",c")
# can't have '#' with 'c'
self.assertRaises(ValueError, 3 .__format__, "#c")
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, 0 .__format__, format_spec)
self.assertRaises(ValueError, 1 .__format__, format_spec)
self.assertRaises(ValueError, (-1) .__format__, format_spec)
# ensure that float type specifiers work; format converts
# the int to a float
for format_spec in 'eEfFgG%':
for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]:
self.assertEqual(value.__format__(format_spec),
float(value).__format__(format_spec))
# Issue 6902
test(123456, "0<20", '12345600000000000000')
test(123456, "1<20", '12345611111111111111')
test(123456, "*<20", '123456**************')
test(123456, "0>20", '00000000000000123456')
test(123456, "1>20", '11111111111111123456')
test(123456, "*>20", '**************123456')
test(123456, "0=20", '00000000000000123456')
test(123456, "1=20", '11111111111111123456')
test(123456, "*=20", '**************123456')
# TODO: RUSTPYTHON
@unittest.expectedFailure
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_float__format__locale(self):
# test locale support for __format__ code 'n'
for i in range(-10, 10):
x = 1234567890.0 * (10.0 ** i)
self.assertEqual(locale.format_string('%g', x, grouping=True), format(x, 'n'))
self.assertEqual(locale.format_string('%.10g', x, grouping=True), format(x, '.10n'))
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_int__format__locale(self):
# test locale support for __format__ code 'n' for integers
x = 123456789012345678901234567890
for i in range(0, 30):
self.assertEqual(locale.format_string('%d', x, grouping=True), format(x, 'n'))
# move to the next integer to test
x = x // 10
rfmt = ">20n"
lfmt = "<20n"
cfmt = "^20n"
for x in (1234, 12345, 123456, 1234567, 12345678, 123456789, 1234567890, 12345678900):
self.assertEqual(len(format(0, rfmt)), len(format(x, rfmt)))
self.assertEqual(len(format(0, lfmt)), len(format(x, lfmt)))
self.assertEqual(len(format(0, cfmt)), len(format(x, cfmt)))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_float__format__(self):
def test(f, format_spec, result):
self.assertEqual(f.__format__(format_spec), result)
self.assertEqual(format(f, format_spec), result)
test(0.0, 'f', '0.000000')
# the default is 'g', except for empty format spec
test(0.0, '', '0.0')
test(0.01, '', '0.01')
test(0.01, 'g', '0.01')
# test for issue 3411
test(1.23, '1', '1.23')
test(-1.23, '1', '-1.23')
test(1.23, '1g', '1.23')
test(-1.23, '1g', '-1.23')
test( 1.0, ' g', ' 1')
test(-1.0, ' g', '-1')
test( 1.0, '+g', '+1')
test(-1.0, '+g', '-1')
test(1.1234e200, 'g', '1.1234e+200')
test(1.1234e200, 'G', '1.1234E+200')
test(1.0, 'f', '1.000000')
test(-1.0, 'f', '-1.000000')
test( 1.0, ' f', ' 1.000000')
test(-1.0, ' f', '-1.000000')
test( 1.0, '+f', '+1.000000')
test(-1.0, '+f', '-1.000000')
# Python versions <= 3.0 switched from 'f' to 'g' formatting for
# values larger than 1e50. No longer.
f = 1.1234e90
for fmt in 'f', 'F':
# don't do a direct equality check, since on some
# platforms only the first few digits of dtoa
# will be reliable
result = f.__format__(fmt)
self.assertEqual(len(result), 98)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
f = 1.1234e200
for fmt in 'f', 'F':
result = f.__format__(fmt)
self.assertEqual(len(result), 208)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
test( 1.0, 'e', '1.000000e+00')
test(-1.0, 'e', '-1.000000e+00')
test( 1.0, 'E', '1.000000E+00')
test(-1.0, 'E', '-1.000000E+00')
test(1.1234e20, 'e', '1.123400e+20')
test(1.1234e20, 'E', '1.123400E+20')
# No format code means use g, but must have a decimal
# and a number after the decimal. This is tricky, because
# a totally empty format specifier means something else.
# So, just use a sign flag
test(1e200, '+g', '+1e+200')
test(1e200, '+', '+1e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
# 0 padding
test(1234., '010f', '1234.000000')
test(1234., '011f', '1234.000000')
test(1234., '012f', '01234.000000')
test(-1234., '011f', '-1234.000000')
test(-1234., '012f', '-1234.000000')
test(-1234., '013f', '-01234.000000')
test(-1234.12341234, '013f', '-01234.123412')
test(-123456.12341234, '011.2f', '-0123456.12')
# issue 5782, commas with no specifier type
test(1.2, '010,.2', '0,000,001.2')
# 0 padding with commas
test(1234., '011,f', '1,234.000000')
test(1234., '012,f', '1,234.000000')
test(1234., '013,f', '01,234.000000')
test(-1234., '012,f', '-1,234.000000')
test(-1234., '013,f', '-1,234.000000')
test(-1234., '014,f', '-01,234.000000')
test(-12345., '015,f', '-012,345.000000')
test(-123456., '016,f', '-0,123,456.000000')
test(-123456., '017,f', '-0,123,456.000000')
test(-123456.12341234, '017,f', '-0,123,456.123412')
test(-123456.12341234, '013,.2f', '-0,123,456.12')
# % formatting
test(-1.0, '%', '-100.000000%')
# format spec must be string
self.assertRaises(TypeError, 3.0.__format__, None)
self.assertRaises(TypeError, 3.0.__format__, 0)
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# Alternate float formatting
test(1.0, '.0e', '1e+00')
test(1.0, '#.0e', '1.e+00')
test(1.0, '.0f', '1')
test(1.0, '#.0f', '1.')
test(1.1, 'g', '1.1')
test(1.1, '#g', '1.10000')
test(1.0, '.0%', '100%')
test(1.0, '#.0%', '100.%')
# Issue 7094: Alternate formatting (specified by #)
test(1.0, '0e', '1.000000e+00')
test(1.0, '#0e', '1.000000e+00')
test(1.0, '0f', '1.000000' )
test(1.0, '#0f', '1.000000')
test(1.0, '.1e', '1.0e+00')
test(1.0, '#.1e', '1.0e+00')
test(1.0, '.1f', '1.0')
test(1.0, '#.1f', '1.0')
test(1.0, '.1%', '100.0%')
test(1.0, '#.1%', '100.0%')
# Issue 6902
test(12345.6, "0<20", '12345.60000000000000')
test(12345.6, "1<20", '12345.61111111111111')
test(12345.6, "*<20", '12345.6*************')
test(12345.6, "0>20", '000000000000012345.6')
test(12345.6, "1>20", '111111111111112345.6')
test(12345.6, "*>20", '*************12345.6')
test(12345.6, "0=20", '000000000000012345.6')
test(12345.6, "1=20", '111111111111112345.6')
test(12345.6, "*=20", '*************12345.6')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_format_spec_errors(self):
# int, float, and string all share the same format spec
# mini-language parser.
# Check that we can't ask for too many digits. This is
# probably a CPython specific test. It tries to put the width
# into a C long.
self.assertRaises(ValueError, format, 0, '1'*10000 + 'd')
# Similar with the precision.
self.assertRaises(ValueError, format, 0, '.' + '1'*10000 + 'd')
# And may as well test both.
self.assertRaises(ValueError, format, 0, '1'*1000 + '.' + '1'*10000 + 'd')
# Make sure commas aren't allowed with various type codes
for code in 'xXobns':
self.assertRaises(ValueError, format, 0, ',' + code)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_internal_sizes(self):
self.assertGreater(object.__basicsize__, 0)
self.assertGreater(tuple.__itemsize__, 0)
def test_slot_wrapper_types(self):
self.assertIsInstance(object.__init__, types.WrapperDescriptorType)
self.assertIsInstance(object.__str__, types.WrapperDescriptorType)
self.assertIsInstance(object.__lt__, types.WrapperDescriptorType)
self.assertIsInstance(int.__lt__, types.WrapperDescriptorType)
def test_method_wrapper_types(self):
self.assertIsInstance(object().__init__, types.MethodWrapperType)
self.assertIsInstance(object().__str__, types.MethodWrapperType)
self.assertIsInstance(object().__lt__, types.MethodWrapperType)
self.assertIsInstance((42).__lt__, types.MethodWrapperType)
def test_method_descriptor_types(self):
self.assertIsInstance(str.join, types.MethodDescriptorType)
self.assertIsInstance(list.append, types.MethodDescriptorType)
self.assertIsInstance(''.join, types.BuiltinMethodType)
self.assertIsInstance([].append, types.BuiltinMethodType)
self.assertIsInstance(int.__dict__['from_bytes'], types.ClassMethodDescriptorType)
self.assertIsInstance(int.from_bytes, types.BuiltinMethodType)
self.assertIsInstance(int.__new__, types.BuiltinMethodType)
class MappingProxyTests(unittest.TestCase):
mappingproxy = types.MappingProxyType
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constructor(self):
class userdict(dict):
pass
mapping = {'x': 1, 'y': 2}
self.assertEqual(self.mappingproxy(mapping), mapping)
mapping = userdict(x=1, y=2)
self.assertEqual(self.mappingproxy(mapping), mapping)
mapping = collections.ChainMap({'x': 1}, {'y': 2})
self.assertEqual(self.mappingproxy(mapping), mapping)
self.assertRaises(TypeError, self.mappingproxy, 10)
self.assertRaises(TypeError, self.mappingproxy, ("a", "tuple"))
self.assertRaises(TypeError, self.mappingproxy, ["a", "list"])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_methods(self):
attrs = set(dir(self.mappingproxy({}))) - set(dir(object()))
self.assertEqual(attrs, {
'__contains__',
'__getitem__',
'__class_getitem__',
'__ior__',
'__iter__',
'__len__',
'__or__',
'__reversed__',
'__ror__',
'copy',
'get',
'items',
'keys',
'values',
})
def test_get(self):
view = self.mappingproxy({'a': 'A', 'b': 'B'})
self.assertEqual(view['a'], 'A')
self.assertEqual(view['b'], 'B')
self.assertRaises(KeyError, view.__getitem__, 'xxx')
self.assertEqual(view.get('a'), 'A')
self.assertIsNone(view.get('xxx'))
self.assertEqual(view.get('xxx', 42), 42)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_missing(self):
class dictmissing(dict):
def __missing__(self, key):
return "missing=%s" % key
view = self.mappingproxy(dictmissing(x=1))
self.assertEqual(view['x'], 1)
self.assertEqual(view['y'], 'missing=y')
self.assertEqual(view.get('x'), 1)
self.assertEqual(view.get('y'), None)
self.assertEqual(view.get('y', 42), 42)
self.assertTrue('x' in view)
self.assertFalse('y' in view)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_customdict(self):
class customdict(dict):
def __contains__(self, key):
if key == 'magic':
return True
else:
return dict.__contains__(self, key)
def __iter__(self):
return iter(('iter',))
def __len__(self):
return 500
def copy(self):
return 'copy'
def keys(self):
return 'keys'
def items(self):
return 'items'
def values(self):
return 'values'
def __getitem__(self, key):
return "getitem=%s" % dict.__getitem__(self, key)
def get(self, key, default=None):
return "get=%s" % dict.get(self, key, 'default=%r' % default)
custom = customdict({'key': 'value'})
view = self.mappingproxy(custom)
self.assertTrue('key' in view)
self.assertTrue('magic' in view)
self.assertFalse('xxx' in view)
self.assertEqual(view['key'], 'getitem=value')
self.assertRaises(KeyError, view.__getitem__, 'xxx')
self.assertEqual(tuple(view), ('iter',))
self.assertEqual(len(view), 500)
self.assertEqual(view.copy(), 'copy')
self.assertEqual(view.get('key'), 'get=value')
self.assertEqual(view.get('xxx'), 'get=default=None')
self.assertEqual(view.items(), 'items')
self.assertEqual(view.keys(), 'keys')
self.assertEqual(view.values(), 'values')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_chainmap(self):
d1 = {'x': 1}
d2 = {'y': 2}
mapping = collections.ChainMap(d1, d2)
view = self.mappingproxy(mapping)
self.assertTrue('x' in view)
self.assertTrue('y' in view)
self.assertFalse('z' | |
instance from a saved dictionary representation.
Parameters
----------
d : dict
Returns
-------
LargeMultinomialLogitStep
"""
check_choicemodels_version()
from choicemodels import MultinomialLogitResults
# Pass values from the dictionary to the __init__() method
obj = cls(choosers=d['choosers'], alternatives=d['alternatives'],
model_expression=d['model_expression'], choice_column=d['choice_column'],
chooser_filters=d['chooser_filters'],
chooser_sample_size=d['chooser_sample_size'],
alt_filters=d['alt_filters'], alt_sample_size=d['alt_sample_size'],
out_choosers=d['out_choosers'], out_alternatives=d['out_alternatives'],
out_column=d['out_column'], out_chooser_filters=d['out_chooser_filters'],
out_alt_filters=d['out_alt_filters'],
constrained_choices=d['constrained_choices'], alt_capacity=d['alt_capacity'],
chooser_size=d['chooser_size'], max_iter=d['max_iter'],
mct_intx_ops=d.get('mct_intx_ops', None), name=d['name'],
tags=d['tags'])
# Load model fit data
obj.summary_table = d['summary_table']
obj.fitted_parameters = d['fitted_parameters']
if obj.fitted_parameters is not None:
obj.model = MultinomialLogitResults(model_expression=obj.model_expression,
fitted_parameters=obj.fitted_parameters)
return obj
def to_dict(self):
"""
Create a dictionary representation of the object.
Returns
-------
dict
"""
d = {
'template': self.template,
'template_version': self.template_version,
'name': self.name,
'tags': self.tags,
'choosers': self.choosers,
'alternatives': self.alternatives,
'model_expression': self.model_expression,
'choice_column': self.choice_column,
'chooser_filters': self.chooser_filters,
'chooser_sample_size': self.chooser_sample_size,
'alt_filters': self.alt_filters,
'alt_sample_size': self.alt_sample_size,
'out_choosers': self.out_choosers,
'out_alternatives': self.out_alternatives,
'out_column': self.out_column,
'out_chooser_filters': self.out_chooser_filters,
'out_alt_filters': self.out_alt_filters,
'constrained_choices': self.constrained_choices,
'alt_capacity': self.alt_capacity,
'chooser_size': self.chooser_size,
'max_iter': self.max_iter,
'mct_intx_ops': self.mct_intx_ops,
'summary_table': self.summary_table,
'fitted_parameters': self.fitted_parameters,
}
return d
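    # to_dict()/from_dict() give a plain-dict representation of the step (constructor
    # arguments plus fit results), e.g. for saving and reloading a configured model.
    # Round-trip sketch (illustrative only; `step` is an already-configured instance):
    #     d = step.to_dict()
    #     restored = LargeMultinomialLogitStep.from_dict(d)
    #     assert restored.fitted_parameters == step.fitted_parameters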
# TO DO - there has got to be a less verbose way to handle getting and setting
@property
def choosers(self):
return self.__choosers
@choosers.setter
def choosers(self, value):
self.__choosers = self._normalize_table_param(value)
self.send_to_listeners('choosers', value)
@property
def alternatives(self):
return self.__alternatives
@alternatives.setter
def alternatives(self, value):
self.__alternatives = self._normalize_table_param(value)
self.send_to_listeners('alternatives', value)
@property
def model_expression(self):
return self.__model_expression
@model_expression.setter
def model_expression(self, value):
self.__model_expression = value
self.send_to_listeners('model_expression', value)
@property
def choice_column(self):
return self.__choice_column
@choice_column.setter
def choice_column(self, value):
self.__choice_column = value
self.send_to_listeners('choice_column', value)
@property
def chooser_filters(self):
return self.__chooser_filters
@chooser_filters.setter
def chooser_filters(self, value):
self.__chooser_filters = value
self.send_to_listeners('chooser_filters', value)
@property
def chooser_sample_size(self):
return self.__chooser_sample_size
@chooser_sample_size.setter
def chooser_sample_size(self, value):
self.__chooser_sample_size = value
self.send_to_listeners('chooser_sample_size', value)
@property
def alt_filters(self):
return self.__alt_filters
@alt_filters.setter
def alt_filters(self, value):
self.__alt_filters = value
self.send_to_listeners('alt_filters', value)
@property
def alt_sample_size(self):
return self.__alt_sample_size
@alt_sample_size.setter
def alt_sample_size(self, value):
self.__alt_sample_size = value
self.send_to_listeners('alt_sample_size', value)
@property
def out_choosers(self):
return self.__out_choosers
@out_choosers.setter
def out_choosers(self, value):
self.__out_choosers = self._normalize_table_param(value)
self.send_to_listeners('out_choosers', value)
@property
def out_alternatives(self):
return self.__out_alternatives
@out_alternatives.setter
def out_alternatives(self, value):
self.__out_alternatives = self._normalize_table_param(value)
self.send_to_listeners('out_alternatives', value)
@property
def out_column(self):
return self.__out_column
@out_column.setter
def out_column(self, value):
self.__out_column = value
self.send_to_listeners('out_column', value)
@property
def out_chooser_filters(self):
return self.__out_chooser_filters
@out_chooser_filters.setter
def out_chooser_filters(self, value):
self.__out_chooser_filters = value
self.send_to_listeners('out_chooser_filters', value)
@property
def out_alt_filters(self):
return self.__out_alt_filters
@out_alt_filters.setter
def out_alt_filters(self, value):
self.__out_alt_filters = value
self.send_to_listeners('out_alt_filters', value)
@property
def constrained_choices(self):
return self.__constrained_choices
@constrained_choices.setter
def constrained_choices(self, value):
self.__constrained_choices = value
self.send_to_listeners('constrained_choices', value)
@property
def alt_capacity(self):
return self.__alt_capacity
@alt_capacity.setter
def alt_capacity(self, value):
self.__alt_capacity = value
self.send_to_listeners('alt_capacity', value)
@property
def chooser_size(self):
return self.__chooser_size
@chooser_size.setter
def chooser_size(self, value):
self.__chooser_size = value
self.send_to_listeners('chooser_size', value)
@property
def max_iter(self):
return self.__max_iter
@max_iter.setter
def max_iter(self, value):
self.__max_iter = value
self.send_to_listeners('max_iter', value)
@property
def mct_intx_ops(self):
return self.__mct_intx_ops
@mct_intx_ops.setter
def mct_intx_ops(self, value):
self.__mct_intx_ops = value
self.send_to_listeners('mct_intx_ops', value)
def perform_mct_intx_ops(self, mct, nan_handling='zero'):
"""
Method to dynamically update a MergedChoiceTable object according to
a pre-defined set of operations specified in the model .yaml config.
Operations are performed sequentially as follows: 1) Pandas merges
with other Orca tables; 2) Pandas group-by aggregations; 3) rename
existing columns; 4) create new columns via Pandas `eval()`.
Parameters
----------
mct : choicemodels.tools.MergedChoiceTable
nan_handling : str
Either 'zero' or 'drop', where the former will replace all NaN's
and None's with 0 integers and the latter will drop all rows with
any NaN or Null values.
Returns
-------
MergedChoiceTable
"""
intx_ops = self.mct_intx_ops
mct_df = mct.to_frame()
og_mct_index = mct_df.index.names
mct_df.reset_index(inplace=True)
mct_df.index.name = 'mct_index'
# merges
intx_df = mct_df.copy()
for merge_args in intx_ops.get('successive_merges', []):
# make sure mct index is preserved during merge
left_cols = merge_args.get('mct_cols', intx_df.columns)
left_idx = merge_args.get('left_index', False)
if intx_df.index.name == mct_df.index.name:
if not left_idx:
intx_df.reset_index(inplace=True)
if mct_df.index.name not in left_cols:
left_cols += [mct_df.index.name]
elif mct_df.index.name in intx_df.columns:
if mct_df.index.name not in left_cols:
left_cols += [mct_df.index.name]
else:
raise KeyError(
'Column {0} must be preserved in intx ops!'.format(
mct_df.index.name))
left = intx_df[left_cols]
right = get_data(
merge_args['right_table'],
extra_columns=merge_args.get('right_cols', None))
intx_df = pd.merge(
left, right,
how=merge_args.get('how', 'inner'),
on=merge_args.get('on_cols', None),
left_on=merge_args.get('left_on', None),
right_on=merge_args.get('right_on', None),
left_index=left_idx,
right_index=merge_args.get('right_index', False),
suffixes=merge_args.get('suffixes', ('_x', '_y')))
# aggs
aggs = intx_ops.get('aggregations', False)
if aggs:
intx_df = intx_df.groupby('mct_index').agg(aggs)
# rename cols
if intx_ops.get('rename_cols', False):
intx_df = intx_df.rename(
columns=intx_ops['rename_cols'])
# update mct
mct_df = pd.merge(mct_df, intx_df, on='mct_index')
# create new cols from expressions
for eval_op in intx_ops.get('sequential_eval_ops', []):
new_col = eval_op['name']
expr = eval_op['expr']
engine = eval_op.get('engine', 'numexpr')
mct_df[new_col] = mct_df.eval(expr, engine=engine)
# restore original mct index
mct_df.set_index(og_mct_index, inplace=True)
# handle NaNs and Nones
if mct_df.isna().values.any():
if nan_handling == 'zero':
print("Replacing MCT None's and NaN's with 0")
mct_df = mct_df.fillna(0)
elif nan_handling == 'drop':
print("Dropping rows with None's/NaN's from MCT")
mct_df = mct_df.dropna(axis=0)
return MergedChoiceTable.from_df(mct_df)
def fit(self, mct=None):
"""
Fit the model; save and report results. This uses the ChoiceModels estimation
engine (originally from UrbanSim MNL).
The `fit()` method can be run as many times as desired. Results will not be saved
with Orca or ModelManager until the `register()` method is run.
After sampling alternatives for each chooser, the merged choice table is saved to
the class object for diagnostic use (`mergedchoicetable` with type
choicemodels.tools.MergedChoiceTable).
Parameters
----------
mct : choicemodels.tools.MergedChoiceTable
This parameter is a temporary backdoor allowing us to pass in a more
complicated choice table than can be generated within the template, for
example including sampling weights or interaction terms.
Returns
-------
None
"""
check_choicemodels_version()
from choicemodels import MultinomialLogit
from choicemodels.tools import MergedChoiceTable
if (mct is not None):
df_from_mct = mct.to_frame()
idx_names = df_from_mct.index.names
df_from_mct = df_from_mct.reset_index()
df_from_mct = apply_filter_query(
df_from_mct, self.chooser_filters).set_index(idx_names)
mct = MergedChoiceTable.from_df(df_from_mct)
else:
observations = get_data(tables=self.choosers,
filters=self.chooser_filters,
model_expression=self.model_expression,
extra_columns=self.choice_column)
if (self.chooser_sample_size is not None):
observations = observations.sample(self.chooser_sample_size)
alternatives = get_data(tables=self.alternatives,
filters=self.alt_filters,
model_expression=self.model_expression)
mct = MergedChoiceTable(observations=observations,
alternatives=alternatives,
chosen_alternatives=self.choice_column,
sample_size=self.alt_sample_size)
model = MultinomialLogit(data=mct,
model_expression=self.model_expression)
results = model.fit()
self.name = self._generate_name()
self.summary_table = str(results)
print(self.summary_table)
coefs = results.get_raw_results()['fit_parameters']['Coefficient']
self.fitted_parameters = coefs.tolist()
self.model = results
# Save merged choice table to the class object for diagnostics
self.mergedchoicetable = mct
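    # Illustrative fit sketch (table and column names are hypothetical, not from the
    # original source):
    #     step = LargeMultinomialLogitStep(
    #         choosers='households', alternatives='zones',
    #         model_expression='job_density + median_rent',
    #         choice_column='zone_id', alt_sample_size=30)
    #     step.fit()                                   # prints the summary table
    #     step.mergedchoicetable.to_frame().head()     # estimation data, for diagnostics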
def run(self, chooser_batch_size=None, interaction_terms=None):
"""
Run the model step: simulate choices and use them to update an Orca column.
The simulated choices are saved to the class object for diagnostics. If choices
are unconstrained, the choice table and the probabilities of sampled alternatives
are saved as well.
Parameters
----------
chooser_batch_size : int
This parameter gets passed to
choicemodels.tools.simulation.iterative_lottery_choices and is a temporary
workaround for dealing with memory issues that arise from generating massive
merged choice tables for simulations that involve large numbers of choosers,
large numbers of alternatives, and large numbers of predictors. It allows the
user to specify a batch size for simulating choices one chunk at a time.
interaction_terms : pandas.Series, pandas.DataFrame, or list of either, optional
Additional column(s) of interaction terms whose values depend on the
combination of observation and alternative, to be merged onto the final data
table. If passed as a Series or DataFrame, it should include a two-level
MultiIndex. One level's name and values should match an index or column from
the observations table, and the other should match an index or column from the
alternatives table.
Returns
-------
None
"""
check_choicemodels_version()
from choicemodels import MultinomialLogit
from choicemodels.tools import (MergedChoiceTable, monte_carlo_choices,
iterative_lottery_choices)
# Clear simulation attributes from the class object
self.mergedchoicetable = None
self.probabilities = None
self.choices = None
if interaction_terms is not None:
uniq_intx_idx_names = set([
idx for intx in interaction_terms for idx in intx.index.names])
obs_extra_cols = to_list(self.chooser_size) + \
list(uniq_intx_idx_names)
alts_extra_cols = to_list(
self.alt_capacity) + list(uniq_intx_idx_names)
else:
obs_extra_cols = to_list(self.chooser_size)
alts_extra_cols = to_list(self.alt_capacity)
# get any necessary extra columns from the mct intx operations spec
if self.mct_intx_ops:
intx_extra_obs_cols = self.mct_intx_ops.get('extra_obs_cols', [])
intx_extra_obs_cols = to_list(intx_extra_obs_cols)
obs_extra_cols += intx_extra_obs_cols
intx_extra_alts_cols = self.mct_intx_ops.get('extra_alts_cols', [])
intx_extra_alts_cols = to_list(intx_extra_alts_cols)
alts_extra_cols += intx_extra_alts_cols
observations = get_data(tables=self.out_choosers,
fallback_tables=self.choosers,
filters=self.out_chooser_filters,
model_expression=self.model_expression,
extra_columns=obs_extra_cols)
if len(observations) == 0:
print("No valid choosers")
return
alternatives = get_data(tables=self.out_alternatives,
fallback_tables=self.alternatives,
filters=self.out_alt_filters,
model_expression=self.model_expression,
extra_columns=alts_extra_cols)
if len(alternatives) == 0:
print("No valid alternatives")
return
# Remove filter columns before merging, in case column names overlap
expr_cols = columns_in_formula(self.model_expression)
obs_cols = set(observations.columns) & set(
expr_cols + to_list(obs_extra_cols))
observations = observations[list(obs_cols)]
alt_cols = set(alternatives.columns) & set(
expr_cols + to_list(alts_extra_cols))
alternatives | |
executed when the task runs.
:param pulumi.Input[str] user_task_managed_initial_warehouse_size: Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. (Conflicts with warehouse)
:param pulumi.Input[int] user_task_timeout_ms: Specifies the time limit on a single run of the task before it times out (in milliseconds).
        :param pulumi.Input[str] warehouse: The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size)
:param pulumi.Input[str] when: Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported.
"""
if after is not None:
pulumi.set(__self__, "after", after)
if comment is not None:
pulumi.set(__self__, "comment", comment)
if database is not None:
pulumi.set(__self__, "database", database)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if error_integration is not None:
pulumi.set(__self__, "error_integration", error_integration)
if name is not None:
pulumi.set(__self__, "name", name)
if schedule is not None:
pulumi.set(__self__, "schedule", schedule)
if schema is not None:
pulumi.set(__self__, "schema", schema)
if session_parameters is not None:
pulumi.set(__self__, "session_parameters", session_parameters)
if sql_statement is not None:
pulumi.set(__self__, "sql_statement", sql_statement)
if user_task_managed_initial_warehouse_size is not None:
pulumi.set(__self__, "user_task_managed_initial_warehouse_size", user_task_managed_initial_warehouse_size)
if user_task_timeout_ms is not None:
pulumi.set(__self__, "user_task_timeout_ms", user_task_timeout_ms)
if warehouse is not None:
pulumi.set(__self__, "warehouse", warehouse)
if when is not None:
pulumi.set(__self__, "when", when)
@property
@pulumi.getter
def after(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the predecessor task in the same database and schema of the current task. When a run of the predecessor task finishes successfully, it triggers this task (after a brief lag). (Conflict with schedule)
"""
return pulumi.get(self, "after")
@after.setter
def after(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "after", value)
@property
@pulumi.getter
def comment(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a comment for the task.
"""
return pulumi.get(self, "comment")
@comment.setter
def comment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "comment", value)
@property
@pulumi.getter
def database(self) -> Optional[pulumi.Input[str]]:
"""
The database in which to create the task.
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies if the task should be started (enabled) after creation or should remain suspended (default).
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="errorIntegration")
def error_integration(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the notification integration used for error notifications.
"""
return pulumi.get(self, "error_integration")
@error_integration.setter
def error_integration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "error_integration", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the identifier for the task; must be unique for the database and schema in which the task is created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def schedule(self) -> Optional[pulumi.Input[str]]:
"""
The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflict with after)
"""
return pulumi.get(self, "schedule")
@schedule.setter
def schedule(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schedule", value)
@property
@pulumi.getter
def schema(self) -> Optional[pulumi.Input[str]]:
"""
The schema in which to create the task.
"""
return pulumi.get(self, "schema")
@schema.setter
def schema(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schema", value)
@property
@pulumi.getter(name="sessionParameters")
def session_parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Specifies session parameters to set for the session when the task runs. A task supports all session parameters.
"""
return pulumi.get(self, "session_parameters")
@session_parameters.setter
def session_parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "session_parameters", value)
@property
@pulumi.getter(name="sqlStatement")
def sql_statement(self) -> Optional[pulumi.Input[str]]:
"""
Any single SQL statement, or a call to a stored procedure, executed when the task runs.
"""
return pulumi.get(self, "sql_statement")
@sql_statement.setter
def sql_statement(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_statement", value)
@property
@pulumi.getter(name="userTaskManagedInitialWarehouseSize")
def user_task_managed_initial_warehouse_size(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. (Conflicts with warehouse)
"""
return pulumi.get(self, "user_task_managed_initial_warehouse_size")
@user_task_managed_initial_warehouse_size.setter
def user_task_managed_initial_warehouse_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_task_managed_initial_warehouse_size", value)
@property
@pulumi.getter(name="userTaskTimeoutMs")
def user_task_timeout_ms(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the time limit on a single run of the task before it times out (in milliseconds).
"""
return pulumi.get(self, "user_task_timeout_ms")
@user_task_timeout_ms.setter
def user_task_timeout_ms(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "user_task_timeout_ms", value)
@property
@pulumi.getter
def warehouse(self) -> Optional[pulumi.Input[str]]:
"""
        The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size)
"""
return pulumi.get(self, "warehouse")
@warehouse.setter
def warehouse(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "warehouse", value)
@property
@pulumi.getter
def when(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported.
"""
return pulumi.get(self, "when")
@when.setter
def when(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "when", value)
class Task(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
after: Optional[pulumi.Input[str]] = None,
comment: Optional[pulumi.Input[str]] = None,
database: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
error_integration: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
schedule: Optional[pulumi.Input[str]] = None,
schema: Optional[pulumi.Input[str]] = None,
session_parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
sql_statement: Optional[pulumi.Input[str]] = None,
user_task_managed_initial_warehouse_size: Optional[pulumi.Input[str]] = None,
user_task_timeout_ms: Optional[pulumi.Input[int]] = None,
warehouse: Optional[pulumi.Input[str]] = None,
when: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Example Usage
```python
import pulumi
import pulumi_snowflake as snowflake
task = snowflake.Task("task",
comment="my task",
database="db",
schema="schema",
warehouse="warehouse",
schedule="10 MINUTE",
sql_statement="select * from foo;",
session_parameters={
"foo": "bar",
},
user_task_timeout_ms=10000,
after="preceding_task",
when="foo AND bar",
enabled=True)
serverless_task = snowflake.Task("serverlessTask",
comment="my serverless task",
database="db",
schema="schema",
schedule="10 MINUTE",
sql_statement="select * from foo;",
session_parameters={
"foo": "bar",
},
user_task_timeout_ms=10000,
user_task_managed_initial_warehouse_size="XSMALL",
after="preceding_task",
when="foo AND bar",
enabled=True)
```
## Import
# format is database name | schema name | task name
```sh
$ pulumi import snowflake:index/task:Task example 'dbName|schemaName|taskName'
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] after: Specifies the predecessor task in the same database and schema of the current task. When a run of the predecessor task finishes successfully, it triggers this task (after a brief lag). (Conflict with schedule)
:param pulumi.Input[str] comment: Specifies a comment for the task.
:param pulumi.Input[str] database: The database in which to create the task.
:param pulumi.Input[bool] enabled: Specifies if the task should be started (enabled) after creation or should remain suspended (default).
:param pulumi.Input[str] error_integration: Specifies the name of the notification integration used for error notifications.
:param pulumi.Input[str] name: Specifies the identifier for the task; must be unique for the database and schema in which the task is created.
:param pulumi.Input[str] schedule: The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflict with after)
:param pulumi.Input[str] schema: The schema in which to create the task.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] session_parameters: Specifies session parameters to set for the session when the task runs. A task supports all session parameters.
:param pulumi.Input[str] sql_statement: Any single SQL statement, or a call to a stored procedure, executed when the task runs.
:param pulumi.Input[str] user_task_managed_initial_warehouse_size: Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. (Conflicts with warehouse)
:param pulumi.Input[int] user_task_timeout_ms: Specifies the time limit on a single run of the task before it times out (in milliseconds).
        :param pulumi.Input[str] warehouse: The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size)
:param pulumi.Input[str] when: Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TaskArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Example Usage
```python
import pulumi
import pulumi_snowflake as snowflake
task = snowflake.Task("task",
comment="my task",
database="db",
schema="schema",
warehouse="warehouse",
schedule="10 MINUTE",
sql_statement="select * from foo;",
session_parameters={
"foo": "bar",
},
user_task_timeout_ms=10000,
after="preceding_task",
when="foo AND bar",
enabled=True)
serverless_task = snowflake.Task("serverlessTask",
comment="my serverless task",
database="db",
schema="schema",
schedule="10 MINUTE",
sql_statement="select * from foo;",
session_parameters={
"foo": "bar",
},
user_task_timeout_ms=10000,
user_task_managed_initial_warehouse_size="XSMALL",
after="preceding_task",
when="foo AND bar",
enabled=True)
| |
import math
import torch
import torch.nn as nn
import torchvision
from . import block as B
from . import spectral_norm as SN
####################
# Generator
####################
class SRResNet(nn.Module):
def __init__(self, in_nc, out_nc, nf, nb, upscale=4, norm_type='batch', act_type='relu', \
mode='NAC', res_scale=1, upsample_mode='upconv'):
super(SRResNet, self).__init__()
n_upscale = int(math.log(upscale, 2))
if upscale == 3:
n_upscale = 1
fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
resnet_blocks = [B.ResNetBlock(nf, nf, nf, norm_type=norm_type, act_type=act_type,\
mode=mode, res_scale=res_scale) for _ in range(nb)]
LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)
if upsample_mode == 'upconv':
upsample_block = B.upconv_blcok
elif upsample_mode == 'pixelshuffle':
upsample_block = B.pixelshuffle_block
else:
raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
if upscale == 3:
upsampler = upsample_block(nf, nf, 3, act_type=act_type)
else:
upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]
HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
self.model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*resnet_blocks, LR_conv)),\
*upsampler, HR_conv0, HR_conv1)
def forward(self, x):
x = self.model(x)
return x
class RRDBNet(nn.Module):
def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None, \
act_type='leakyrelu', mode='CNA', upsample_mode='upconv'):
super(RRDBNet, self).__init__()
n_upscale = int(math.log(upscale, 2))
if upscale == 3:
n_upscale = 1
fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
        # pass the constructor's gc (growth-channel) argument through instead of ignoring it
        rb_blocks = [B.RRDB(nf, kernel_size=3, gc=gc, stride=1, bias=True, pad_type='zero', \
norm_type=norm_type, act_type=act_type, mode='CNA') for _ in range(nb)]
LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)
if upsample_mode == 'upconv':
upsample_block = B.upconv_blcok
elif upsample_mode == 'pixelshuffle':
upsample_block = B.pixelshuffle_block
else:
raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
if upscale == 3:
upsampler = upsample_block(nf, nf, 3, act_type=act_type)
else:
upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]
HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
self.model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*rb_blocks, LR_conv)),\
*upsampler, HR_conv0, HR_conv1)
def forward(self, x):
x = self.model(x)
return x
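# Hedged usage sketch (not part of the original file): driving an ESRGAN-style
# generator built from RRDBNet above. The channel/block counts and the 4x scale
# are illustrative assumptions, not values read from this repo's option files.
def _demo_rrdbnet():
    netG = RRDBNet(in_nc=3, out_nc=3, nf=64, nb=23, gc=32, upscale=4)
    netG.eval()
    lr = torch.randn(1, 3, 32, 32)  # dummy low-resolution RGB patch
    with torch.no_grad():
        sr = netG(lr)  # (1, 3, 128, 128) for a 4x model
    return sr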
####################
# Discriminator
####################
# VGG style Discriminator with input size 128*128
class Discriminator_VGG_128(nn.Module):
def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA'):
super(Discriminator_VGG_128, self).__init__()
# features
# hxw, c
# 128, 64
conv0 = B.conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, \
mode=mode)
conv1 = B.conv_block(base_nf, base_nf, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 64, 64
conv2 = B.conv_block(base_nf, base_nf*2, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv3 = B.conv_block(base_nf*2, base_nf*2, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 32, 128
conv4 = B.conv_block(base_nf*2, base_nf*4, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv5 = B.conv_block(base_nf*4, base_nf*4, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 16, 256
conv6 = B.conv_block(base_nf*4, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv7 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 8, 512
conv8 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv9 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 4, 512
self.features = B.sequential(conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\
conv9)
# classifier
self.classifier = nn.Sequential(
nn.Linear(512 * 4 * 4, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
# VGG style Discriminator with input size 128*128, Spectral Normalization
class Discriminator_VGG_128_SN(nn.Module):
def __init__(self):
super(Discriminator_VGG_128_SN, self).__init__()
# features
# hxw, c
# 128, 64
self.lrelu = nn.LeakyReLU(0.2, True)
self.conv0 = SN.spectral_norm(nn.Conv2d(3, 64, 3, 1, 1))
self.conv1 = SN.spectral_norm(nn.Conv2d(64, 64, 4, 2, 1))
# 64, 64
self.conv2 = SN.spectral_norm(nn.Conv2d(64, 128, 3, 1, 1))
self.conv3 = SN.spectral_norm(nn.Conv2d(128, 128, 4, 2, 1))
# 32, 128
self.conv4 = SN.spectral_norm(nn.Conv2d(128, 256, 3, 1, 1))
self.conv5 = SN.spectral_norm(nn.Conv2d(256, 256, 4, 2, 1))
# 16, 256
self.conv6 = SN.spectral_norm(nn.Conv2d(256, 512, 3, 1, 1))
self.conv7 = SN.spectral_norm(nn.Conv2d(512, 512, 4, 2, 1))
# 8, 512
self.conv8 = SN.spectral_norm(nn.Conv2d(512, 512, 3, 1, 1))
self.conv9 = SN.spectral_norm(nn.Conv2d(512, 512, 4, 2, 1))
# 4, 512
# classifier
self.linear0 = SN.spectral_norm(nn.Linear(512 * 4 * 4, 100))
self.linear1 = SN.spectral_norm(nn.Linear(100, 1))
def forward(self, x):
x = self.lrelu(self.conv0(x))
x = self.lrelu(self.conv1(x))
x = self.lrelu(self.conv2(x))
x = self.lrelu(self.conv3(x))
x = self.lrelu(self.conv4(x))
x = self.lrelu(self.conv5(x))
x = self.lrelu(self.conv6(x))
x = self.lrelu(self.conv7(x))
x = self.lrelu(self.conv8(x))
x = self.lrelu(self.conv9(x))
x = x.view(x.size(0), -1)
x = self.lrelu(self.linear0(x))
x = self.linear1(x)
return x
class Discriminator_VGG_96(nn.Module):
def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA'):
super(Discriminator_VGG_96, self).__init__()
# features
# hxw, c
# 96, 64
conv0 = B.conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, \
mode=mode)
conv1 = B.conv_block(base_nf, base_nf, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 48, 64
conv2 = B.conv_block(base_nf, base_nf*2, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv3 = B.conv_block(base_nf*2, base_nf*2, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 24, 128
conv4 = B.conv_block(base_nf*2, base_nf*4, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv5 = B.conv_block(base_nf*4, base_nf*4, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 12, 256
conv6 = B.conv_block(base_nf*4, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv7 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 6, 512
conv8 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv9 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 3, 512
self.features = B.sequential(conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\
conv9)
# classifier
self.classifier = nn.Sequential(
nn.Linear(512 * 3 * 3, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class Discriminator_VGG_192(nn.Module):
def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA'):
super(Discriminator_VGG_192, self).__init__()
# features
# hxw, c
# 192, 64
conv0 = B.conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, \
mode=mode)
conv1 = B.conv_block(base_nf, base_nf, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 96, 64
conv2 = B.conv_block(base_nf, base_nf*2, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv3 = B.conv_block(base_nf*2, base_nf*2, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 48, 128
conv4 = B.conv_block(base_nf*2, base_nf*4, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv5 = B.conv_block(base_nf*4, base_nf*4, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 24, 256
conv6 = B.conv_block(base_nf*4, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv7 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 12, 512
conv8 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv9 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 6, 512
conv10 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \
act_type=act_type, mode=mode)
conv11 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \
act_type=act_type, mode=mode)
# 3, 512
self.features = B.sequential(conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\
conv9, conv10, conv11)
# classifier
self.classifier = nn.Sequential(
nn.Linear(512 * 3 * 3, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
####################
# Perceptual Network
####################
# Assume input range is [0, 1]
class VGGFeatureExtractor(nn.Module):
def __init__(self,
feature_layer=34,
use_bn=False,
use_input_norm=True,
device=torch.device('cpu')):
super(VGGFeatureExtractor, self).__init__()
if use_bn:
model = torchvision.models.vgg19_bn(pretrained=True)
else:
model = torchvision.models.vgg19(pretrained=True)
self.use_input_norm = use_input_norm
if self.use_input_norm:
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
# [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
# [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]
self.register_buffer('mean', mean)
self.register_buffer('std', std)
self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])
        # No need to backprop through the feature extractor, so freeze its parameters
for k, v in self.features.named_parameters():
v.requires_grad = False
def forward(self, x):
if self.use_input_norm:
x = (x - self.mean) / self.std
output = self.features(x)
return output
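# Hedged usage sketch (not part of the original file): using the extractor above for
# a perceptual (feature) loss between a generated image and its ground truth. The L1
# criterion and feature_layer=34 are the usual SRGAN/ESRGAN choices, assumed here.
def _demo_perceptual_loss():
    device = torch.device('cpu')
    netF = VGGFeatureExtractor(feature_layer=34, use_bn=False, device=device)
    netF.eval()
    criterion = nn.L1Loss()
    fake_hr = torch.rand(1, 3, 128, 128)  # generator output, assumed in [0, 1]
    real_hr = torch.rand(1, 3, 128, 128)  # ground-truth patch, assumed in [0, 1]
    return criterion(netF(fake_hr), netF(real_hr).detach())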
# Assume input range is [0, 1]
class ResNet101FeatureExtractor(nn.Module):
def __init__(self, use_input_norm=True, device=torch.device('cpu')):
super(ResNet101FeatureExtractor, self).__init__()
model = torchvision.models.resnet101(pretrained=True)
self.use_input_norm = use_input_norm
if self.use_input_norm:
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
# [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
# [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]
self.register_buffer('mean', mean)
self.register_buffer('std', std)
self.features = nn.Sequential(*list(model.children())[:8])
        # No need to backprop through the feature extractor, so freeze its parameters
for k, v in self.features.named_parameters():
v.requires_grad = False
def forward(self, x):
if self.use_input_norm:
x = (x - self.mean) / self.std
output = self.features(x)
return output
class MINCNet(nn.Module):
def __init__(self):
super(MINCNet, self).__init__()
self.ReLU = nn.ReLU(True)
self.conv11 = nn.Conv2d(3, 64, 3, 1, 1)
self.conv12 = nn.Conv2d(64, 64, 3, 1, 1)
self.maxpool1 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
self.conv21 = nn.Conv2d(64, 128, 3, 1, 1)
self.conv22 = nn.Conv2d(128, 128, 3, 1, 1)
self.maxpool2 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
self.conv31 = nn.Conv2d(128, 256, 3, 1, 1)
self.conv32 = nn.Conv2d(256, 256, 3, 1, 1)
self.conv33 = nn.Conv2d(256, 256, 3, 1, 1)
self.maxpool3 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)
self.conv41 = nn.Conv2d(256, 512, 3, | |
# paddlespeech/text/speechtask/punctuation_restoration/modules/crf.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
__all__ = ['CRF']
class CRF(nn.Layer):
"""
Linear-chain Conditional Random Field (CRF).
Args:
nb_labels (int): number of labels in your tagset, including special symbols.
bos_tag_id (int): integer representing the beginning of sentence symbol in
your tagset.
eos_tag_id (int): integer representing the end of sentence symbol in your tagset.
pad_tag_id (int, optional): integer representing the pad symbol in your tagset.
If None, the model will treat the PAD as a normal tag. Otherwise, the model
will apply constraints for PAD transitions.
batch_first (bool): Whether the first dimension represents the batch dimension.
"""
def __init__(self,
nb_labels: int,
bos_tag_id: int,
eos_tag_id: int,
pad_tag_id: int=None,
batch_first: bool=True):
super().__init__()
self.nb_labels = nb_labels
self.BOS_TAG_ID = bos_tag_id
self.EOS_TAG_ID = eos_tag_id
self.PAD_TAG_ID = pad_tag_id
self.batch_first = batch_first
# initialize transitions from a random uniform distribution between -0.1 and 0.1
self.transitions = self.create_parameter(
[self.nb_labels, self.nb_labels],
default_initializer=nn.initializer.Uniform(-0.1, 0.1))
self.init_weights()
def init_weights(self):
        # enforce constraints (rows=from, columns=to) with a big negative number
# so exp(-10000) will tend to zero
# no transitions allowed to the beginning of sentence
self.transitions[:, self.BOS_TAG_ID] = -10000.0
        # no transitions allowed from the end of sentence
self.transitions[self.EOS_TAG_ID, :] = -10000.0
if self.PAD_TAG_ID is not None:
# no transitions from padding
self.transitions[self.PAD_TAG_ID, :] = -10000.0
# no transitions to padding
self.transitions[:, self.PAD_TAG_ID] = -10000.0
# except if the end of sentence is reached
# or we are already in a pad position
self.transitions[self.PAD_TAG_ID, self.EOS_TAG_ID] = 0.0
self.transitions[self.PAD_TAG_ID, self.PAD_TAG_ID] = 0.0
def forward(self,
emissions: paddle.Tensor,
tags: paddle.Tensor,
mask: paddle.Tensor=None) -> paddle.Tensor:
"""Compute the negative log-likelihood. See `log_likelihood` method."""
nll = -self.log_likelihood(emissions, tags, mask=mask)
return nll
def log_likelihood(self, emissions, tags, mask=None):
"""Compute the probability of a sequence of tags given a sequence of
emissions scores.
Args:
emissions (paddle.Tensor): Sequence of emissions for each label.
Shape of (batch_size, seq_len, nb_labels) if batch_first is True,
(seq_len, batch_size, nb_labels) otherwise.
tags (paddle.LongTensor): Sequence of labels.
Shape of (batch_size, seq_len) if batch_first is True,
(seq_len, batch_size) otherwise.
mask (paddle.FloatTensor, optional): Tensor representing valid positions.
If None, all positions are considered valid.
Shape of (batch_size, seq_len) if batch_first is True,
(seq_len, batch_size) otherwise.
Returns:
paddle.Tensor: sum of the log-likelihoods for each sequence in the batch.
Shape of ()
"""
# fix tensors order by setting batch as the first dimension
if not self.batch_first:
emissions = emissions.transpose(0, 1)
tags = tags.transpose(0, 1)
if mask is None:
            mask = paddle.ones(emissions.shape[:2], dtype=paddle.float32)
scores = self._compute_scores(emissions, tags, mask=mask)
partition = self._compute_log_partition(emissions, mask=mask)
return paddle.sum(scores - partition)
def decode(self, emissions, mask=None):
"""Find the most probable sequence of labels given the emissions using
the Viterbi algorithm.
Args:
emissions (paddle.Tensor): Sequence of emissions for each label.
Shape (batch_size, seq_len, nb_labels) if batch_first is True,
(seq_len, batch_size, nb_labels) otherwise.
mask (paddle.FloatTensor, optional): Tensor representing valid positions.
If None, all positions are considered valid.
Shape (batch_size, seq_len) if batch_first is True,
(seq_len, batch_size) otherwise.
Returns:
paddle.Tensor: the viterbi score for the for each batch.
Shape of (batch_size,)
list of lists: the best viterbi sequence of labels for each batch. [B, T]
"""
# fix tensors order by setting batch as the first dimension
if not self.batch_first:
emissions = emissions.transpose(0, 1)
if mask is None:
            mask = paddle.ones(emissions.shape[:2], dtype=paddle.float32)
scores, sequences = self._viterbi_decode(emissions, mask)
return scores, sequences
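    # Hedged usage sketch (comment only, not part of the original module). With a
    # five-label tagset {BOS, EOS, PAD, tag_a, tag_b}, training and decoding would
    # look roughly like this; all shapes and ids below are illustrative assumptions:
    #
    #   crf = CRF(nb_labels=5, bos_tag_id=0, eos_tag_id=1, pad_tag_id=2)
    #   emissions = paddle.randn([8, 20, 5])          # (batch, seq_len, nb_labels)
    #   tags = paddle.randint(3, 5, shape=[8, 20])    # gold label per token
    #   mask = paddle.ones([8, 20], dtype='float32')  # 1 for tokens, 0 for padding
    #   nll = crf(emissions, tags, mask=mask)         # negative log-likelihood
    #   scores, best_paths = crf.decode(emissions, mask=mask)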
def _compute_scores(self, emissions, tags, mask):
"""Compute the scores for a given batch of emissions with their tags.
Args:
emissions (paddle.Tensor): (batch_size, seq_len, nb_labels)
tags (Paddle.LongTensor): (batch_size, seq_len)
mask (Paddle.FloatTensor): (batch_size, seq_len)
Returns:
paddle.Tensor: Scores for each batch.
Shape of (batch_size,)
"""
batch_size, seq_length = tags.shape
scores = paddle.zeros([batch_size])
# save first and last tags to be used later
first_tags = tags[:, 0]
last_valid_idx = mask.int().sum(1) - 1
        # TODO(<NAME>): fancy indexing is not supported yet.
# last_tags = tags.gather(last_valid_idx.unsqueeze(1), axis=1).squeeze()
batch_idx = paddle.arange(batch_size, dtype=last_valid_idx.dtype)
gather_last_valid_idx = paddle.stack(
[batch_idx, last_valid_idx], axis=-1)
last_tags = tags.gather_nd(gather_last_valid_idx)
# add the transition from BOS to the first tags for each batch
# t_scores = self.transitions[self.BOS_TAG_ID, first_tags]
t_scores = self.transitions[self.BOS_TAG_ID].gather(first_tags)
# add the [unary] emission scores for the first tags for each batch
# for all batches, the first word, see the correspondent emissions
# for the first tags (which is a list of ids):
# emissions[:, 0, [tag_1, tag_2, ..., tag_nblabels]]
# e_scores = emissions[:, 0].gather(1, first_tags.unsqueeze(1)).squeeze()
gather_first_tags_idx = paddle.stack([batch_idx, first_tags], axis=-1)
e_scores = emissions[:, 0].gather_nd(gather_first_tags_idx)
# the scores for a word is just the sum of both scores
scores += e_scores + t_scores
# now lets do this for each remaining word
for i in range(1, seq_length):
# we could: iterate over batches, check if we reached a mask symbol
            # and stop the iteration, but vectorizing is faster on the GPU,
# so instead we perform an element-wise multiplication
is_valid = mask[:, i]
previous_tags = tags[:, i - 1]
current_tags = tags[:, i]
# calculate emission and transition scores as we did before
# e_scores = emissions[:, i].gather(1, current_tags.unsqueeze(1)).squeeze()
gather_current_tags_idx = paddle.stack(
[batch_idx, current_tags], axis=-1)
e_scores = emissions[:, i].gather_nd(gather_current_tags_idx)
# t_scores = self.transitions[previous_tags, current_tags]
gather_transitions_idx = paddle.stack(
[previous_tags, current_tags], axis=-1)
t_scores = self.transitions.gather_nd(gather_transitions_idx)
# apply the mask
e_scores = e_scores * is_valid
t_scores = t_scores * is_valid
scores += e_scores + t_scores
# add the transition from the end tag to the EOS tag for each batch
# scores += self.transitions[last_tags, self.EOS_TAG_ID]
scores += self.transitions.gather(last_tags)[:, self.EOS_TAG_ID]
return scores
def _compute_log_partition(self, emissions, mask):
"""Compute the partition function in log-space using the forward-algorithm.
Args:
emissions (paddle.Tensor): (batch_size, seq_len, nb_labels)
mask (Paddle.FloatTensor): (batch_size, seq_len)
Returns:
paddle.Tensor: the partition scores for each batch.
Shape of (batch_size,)
"""
batch_size, seq_length, nb_labels = emissions.shape
# in the first iteration, BOS will have all the scores
alphas = self.transitions[self.BOS_TAG_ID, :].unsqueeze(
0) + emissions[:, 0]
for i in range(1, seq_length):
# (bs, nb_labels) -> (bs, 1, nb_labels)
e_scores = emissions[:, i].unsqueeze(1)
# (nb_labels, nb_labels) -> (bs, nb_labels, nb_labels)
t_scores = self.transitions.unsqueeze(0)
# (bs, nb_labels) -> (bs, nb_labels, 1)
a_scores = alphas.unsqueeze(2)
scores = e_scores + t_scores + a_scores
new_alphas = paddle.logsumexp(scores, axis=1)
# set alphas if the mask is valid, otherwise keep the current values
is_valid = mask[:, i].unsqueeze(-1)
alphas = is_valid * new_alphas + (1 - is_valid) * alphas
# add the scores for the final transition
last_transition = self.transitions[:, self.EOS_TAG_ID]
end_scores = alphas + last_transition.unsqueeze(0)
# return a *log* of sums of exps
return paddle.logsumexp(end_scores, axis=1)
def _viterbi_decode(self, emissions, mask):
"""Compute the viterbi algorithm to find the most probable sequence of labels
given a sequence of emissions.
Args:
emissions (paddle.Tensor): (batch_size, seq_len, nb_labels)
mask (Paddle.FloatTensor): (batch_size, seq_len)
Returns:
paddle.Tensor: the viterbi score for the for each batch.
Shape of (batch_size,)
list of lists of ints: the best viterbi sequence of labels for each batch
"""
batch_size, seq_length, nb_labels = emissions.shape
# in the first iteration, BOS will have all the scores and then, the max
alphas = self.transitions[self.BOS_TAG_ID, :].unsqueeze(
0) + emissions[:, 0]
backpointers = []
for i in range(1, seq_length):
# (bs, nb_labels) -> (bs, 1, nb_labels)
e_scores = emissions[:, i].unsqueeze(1)
# (nb_labels, nb_labels) -> (bs, nb_labels, nb_labels)
t_scores = self.transitions.unsqueeze(0)
# (bs, nb_labels) -> (bs, nb_labels, 1)
a_scores = alphas.unsqueeze(2)
# combine current scores with previous alphas
scores = e_scores + t_scores + a_scores
# so far is exactly like the forward algorithm,
# but now, instead of calculating the logsumexp,
# we will find the highest score and the tag associated | |
self.body.default_name else names
# Replace commas in xephem string with tildes to avoid clashes with main structure
fields += [self.body.to_edb(edb_names).replace(',', '~')]
if fluxinfo:
fields += [fluxinfo]
return ', '.join(fields)
@classmethod
def from_description(cls, description):
"""Construct Target object from description string.
For more information on the description string format, see the help string
for :class:`Target`.
Parameters
----------
description : str
String containing target name(s), tags, location and flux model
Returns
-------
target : :class:`Target`
Constructed target object
Raises
------
ValueError
If *description* has the wrong format
"""
prefix = f"Target description '{description}'"
try:
description.encode('ascii')
except UnicodeError as err:
raise NonAsciiError(f"{prefix} contains non-ASCII characters") from err
fields = [s.strip() for s in description.split(',')]
if len(fields) < 2:
raise ValueError(f"{prefix} must have at least two fields")
# Check if first name starts with body type tag, while the next field does not
# This indicates a missing names field -> add an empty name list in front
body_types = ['azel', 'radec', 'gal', 'special', 'tle', 'xephem']
def tags_in(field): return any([field.startswith(s) for s in body_types])
if tags_in(fields[0]) and not tags_in(fields[1]):
fields.insert(0, '')
# Extract preferred name from name list (starred or first entry), and make the rest aliases
name_field = fields.pop(0)
names = [s.strip() for s in name_field.split('|')]
if len(names) == 0:
preferred_name, aliases = '', []
else:
try:
ind = [name.startswith('*') for name in names].index(True)
preferred_name, aliases = names[ind][1:], names[:ind] + names[ind + 1:]
except ValueError:
preferred_name, aliases = names[0], names[1:]
tag_field = fields.pop(0)
tags = [s.strip() for s in tag_field.split(' ')]
if not tags:
raise ValueError(f"{prefix} needs at least one tag (body type)")
body_type = tags.pop(0).lower()
# Remove empty fields starting from the end (useful when parsing CSV files with fixed number of fields)
while fields and not fields[-1]:
fields.pop()
# Create appropriate Body based on body type
if body_type == 'azel':
if len(fields) < 2:
raise ValueError(f"{prefix} contains *azel* body with no (az, el) coordinates")
az = fields.pop(0)
el = fields.pop(0)
body = StationaryBody(az, el)
elif body_type == 'radec':
if len(fields) < 2:
raise ValueError(f"{prefix} contains *radec* body with no (ra, dec) coordinates")
ra = to_angle(fields.pop(0), sexagesimal_unit=u.hour)
dec = to_angle(fields.pop(0))
# Extract epoch info from tags
if ('B1900' in tags) or ('b1900' in tags):
frame = FK4(equinox=Time(1900.0, format='byear'))
elif ('B1950' in tags) or ('b1950' in tags):
frame = FK4(equinox=Time(1950.0, format='byear'))
else:
frame = ICRS
body = FixedBody(SkyCoord(ra=ra, dec=dec, frame=frame))
elif body_type == 'gal':
if len(fields) < 2:
raise ValueError(f"{prefix} contains *gal* body with no (l, b) coordinates")
gal_l = to_angle(fields.pop(0))
gal_b = to_angle(fields.pop(0))
body = GalacticBody(SkyCoord(l=gal_l, b=gal_b, frame=Galactic))
elif body_type == 'tle':
if len(fields) < 2:
raise ValueError(f"{prefix} contains *tle* body without "
"the expected two comma-separated lines")
line1 = fields.pop(0)
line2 = fields.pop(0)
try:
body = EarthSatelliteBody.from_tle(line1, line2)
except ValueError as err:
raise ValueError(f"{prefix} contains malformed *tle* body: {err}") from err
elif body_type == 'special':
try:
if preferred_name.capitalize() != 'Nothing':
body = SolarSystemBody(preferred_name)
else:
body = NullBody()
except ValueError as err:
raise ValueError(f"{prefix} contains unknown "
f"*special* body '{preferred_name}'") from err
elif body_type == 'xephem':
if len(fields) < 1:
raise ValueError(f"Target description '{description}' contains *xephem* body "
"without EDB string")
edb_string = fields.pop(0).replace('~', ',')
edb_name_field, comma, edb_coord_fields = edb_string.partition(',')
edb_names = [name.strip() for name in edb_name_field.split('|')]
if not preferred_name:
preferred_name = edb_names[0]
for edb_name in edb_names:
if edb_name and edb_name != preferred_name and edb_name not in aliases:
aliases.append(edb_name)
try:
body = Body.from_edb(comma + edb_coord_fields)
except ValueError as err:
raise ValueError(f"{prefix} contains malformed *xephem* body: {err}") from err
else:
raise ValueError(f"{prefix} contains unknown body type '{body_type}'")
# Extract flux model if it is available
if fields and fields[0].strip(' ()'):
flux_model = FluxDensityModel.from_description(fields[0])
else:
flux_model = None
return cls(body, preferred_name, tags, aliases, flux_model)
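    # Hedged usage sketch (comment only): a description string of the kind parsed by
    # from_description() above; the name, tags and coordinates are made up for illustration:
    #
    #   t = Target.from_description('Test source | alias, radec B1950, 12:30:00.0, -60:00:00.0')
    #   t.name   -> 'Test source'   (preferred name)
    #   t.tags   -> ['radec', 'B1950']   (body type first, then user tags)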
@classmethod
def from_azel(cls, az, el):
"""Create unnamed stationary target (*azel* body type).
Parameters
----------
az, el : :class:`~astropy.coordinates.Angle` or equivalent, string or float
Azimuth and elevation, as anything accepted by `Angle`, in 'D:M:S' or
decimal degree string format, or as a float in radians
Returns
-------
target : :class:`Target`
Constructed target object
"""
return cls(StationaryBody(az, el))
@classmethod
def from_radec(cls, ra, dec):
"""Create unnamed fixed target (*radec* body type, ICRS frame).
Parameters
----------
ra : :class:`~astropy.coordinates.Angle` or equivalent, string or float
Right ascension, as anything accepted by `Angle`, in 'H:M:S' or
decimal degree string format, or as a float in radians
dec : :class:`~astropy.coordinates.Angle` or equivalent, string or float
Declination, as anything accepted by `Angle`, in 'D:M:S' or decimal
degree string format, or as a float in radians
Returns
-------
target : :class:`Target`
Constructed target object
"""
ra = to_angle(ra, sexagesimal_unit=u.hour)
dec = to_angle(dec)
return cls(FixedBody(SkyCoord(ra=ra, dec=dec, frame=ICRS)))
def add_tags(self, tags):
"""Add tags to target object.
This adds tags to a target, while checking the sanity of the tags. It
also prevents duplicate tags without resorting to a tag set, which would
be problematic since the tag order is meaningful (tags[0] is the body
type). Since tags should not contain whitespace, any string consisting of
whitespace-delimited words will be split into separate tags.
Parameters
----------
tags : str, list of str, or None
Tag or list of tags to add (strings will be split on whitespace)
Returns
-------
target : :class:`Target`
Updated target object
"""
if tags is None:
tags = []
if isinstance(tags, str):
tags = [tags]
for tag_str in tags:
for tag in tag_str.split():
if tag not in self.tags:
self.user_tags.append(tag)
return self
def _astropy_funnel(self, timestamp, antenna):
"""Turn time and location objects into their Astropy equivalents.
Parameters
----------
timestamp : :class:`~astropy.time.Time`, :class:`Timestamp` or equivalent
Timestamp(s) in katpoint or Astropy format
antenna : :class:`~astropy.coordinates.EarthLocation`, :class:`Antenna` or None
Antenna location(s) in katpoint or Astropy format (None uses default antenna)
Returns
-------
time : :class:`~astropy.time.Time`
Timestamp(s) in Astropy format
location : :class:`~astropy.coordinates.EarthLocation` or None
Antenna location(s) in Astropy format
"""
time = Timestamp(timestamp).time
if antenna is None:
antenna = self.antenna
location = antenna.location if isinstance(antenna, Antenna) else antenna
return time, location
def _valid_antenna(self, antenna):
"""Set default antenna if unspecified and check that antenna is valid.
If `antenna` is `None`, it is replaced by the default antenna for the
target (which could also be `None`, raising a :class:`ValueError`).
Parameters
----------
antenna : :class:`~astropy.coordinates.EarthLocation`, :class:`Antenna` or None
Antenna which points at target (or equivalent Astropy location)
Returns
-------
antenna : :class:`Antenna`
A valid katpoint Antenna
Raises
------
ValueError
If both `antenna` and default antenna are `None`
"""
if antenna is None:
antenna = self.antenna
if antenna is None:
raise ValueError('Antenna object needed to calculate target position')
return Antenna(antenna)
def azel(self, timestamp=None, antenna=None):
"""Calculate target (az, el) coordinates as seen from antenna at time(s).
Parameters
----------
timestamp : :class:`~astropy.time.Time`, :class:`Timestamp` or equivalent, optional
Timestamp(s), defaults to now
antenna : :class:`~astropy.coordinates.EarthLocation` or :class:`Antenna`, optional
Antenna which points at target (defaults to default antenna)
Returns
-------
azel : :class:`~astropy.coordinates.AltAz`, same shape as *timestamp*
Azimuth and elevation in `AltAz` frame
Raises
------
ValueError
If no antenna is specified and body type requires it for (az, el)
"""
time, location = self._astropy_funnel(timestamp, antenna)
altaz = AltAz(obstime=time, location=location)
return self.body.compute(altaz, time, location)
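    # Hedged usage sketch (comment only): computing the pointing direction of a target
    # for a given antenna and time; the antenna description below is an illustrative
    # assumption, not a real dish:
    #
    #   ant = Antenna('Test dish, -30:43:17.3, 21:24:38.5, 1038.0, 12.0')
    #   sun = Target.from_description('Sun, special')
    #   altaz = sun.azel(timestamp='2021-02-11 14:00:00', antenna=ant)
    #   altaz.az.deg, altaz.alt.deg   # azimuth and elevation in degrees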
def apparent_radec(self, timestamp=None, antenna=None):
"""Calculate target's apparent (ra, dec) coordinates as seen from antenna at time(s).
This calculates the *apparent topocentric position* of the target for
the epoch-of-date in equatorial coordinates. Take note that this is
*not* the "star-atlas" position of the target, but the position as is
actually seen from the antenna at the given times. The difference is on
the order of a few arcminutes. These are the coordinates that a telescope
with an equatorial mount would use to track the target.
Parameters
----------
timestamp : :class:`~astropy.time.Time`, :class:`Timestamp` or equivalent, optional
Timestamp(s), defaults to now
antenna : :class:`~astropy.coordinates.EarthLocation` or :class:`Antenna`, optional
Antenna which points at target (defaults to default antenna)
Returns
-------
radec : :class:`~astropy.coordinates.CIRS`, same shape as *timestamp*
Right ascension and declination in CIRS frame
Raises
------
ValueError
If no antenna is specified and body type requires | |
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='add_word')
headers.update(sdk_headers)
data = {'translation': translation, 'part_of_speech': part_of_speech}
url = '/v1/customizations/{0}/words/{1}'.format(
*self._encode_path_vars(customization_id, word))
request = self.prepare_request(method='PUT',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
def get_word(self, customization_id: str, word: str,
**kwargs) -> 'DetailedResponse':
"""
Get a custom word.
Gets the translation for a single word from the specified custom model. The output
shows the translation as it is defined in the model. You must use credentials for
the instance of the service that owns a model to list its words.
**Note:** This method is currently a beta release.
**See also:** [Querying a single word from a custom
model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordQueryModel).
:param str customization_id: The customization ID (GUID) of the custom
voice model. You must make the request with credentials for the instance of
the service that owns the custom model.
:param str word: The word that is to be queried from the custom voice
model.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if customization_id is None:
raise ValueError('customization_id must be provided')
if word is None:
raise ValueError('word must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_word')
headers.update(sdk_headers)
url = '/v1/customizations/{0}/words/{1}'.format(
*self._encode_path_vars(customization_id, word))
request = self.prepare_request(method='GET', url=url, headers=headers)
response = self.send(request)
return response
def delete_word(self, customization_id: str, word: str,
**kwargs) -> 'DetailedResponse':
"""
Delete a custom word.
Deletes a single word from the specified custom voice model. You must use
credentials for the instance of the service that owns a model to delete its words.
**Note:** This method is currently a beta release.
**See also:** [Deleting a word from a custom
model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordDelete).
:param str customization_id: The customization ID (GUID) of the custom
voice model. You must make the request with credentials for the instance of
the service that owns the custom model.
:param str word: The word that is to be deleted from the custom voice
model.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if customization_id is None:
raise ValueError('customization_id must be provided')
if word is None:
raise ValueError('word must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_word')
headers.update(sdk_headers)
url = '/v1/customizations/{0}/words/{1}'.format(
*self._encode_path_vars(customization_id, word))
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
#########################
# User data
#########################
def delete_user_data(self, customer_id: str,
**kwargs) -> 'DetailedResponse':
"""
Delete labeled data.
Deletes all data that is associated with a specified customer ID. The method
deletes all data for the customer ID, regardless of the method by which the
information was added. The method has no effect if no data is associated with the
customer ID. You must issue the request with credentials for the same instance of
the service that was used to associate the customer ID with the data.
You associate a customer ID with data by passing the `X-Watson-Metadata` header
with a request that passes the data.
**See also:** [Information
security](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-information-security#information-security).
:param str customer_id: The customer ID for which all data is to be
deleted.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if customer_id is None:
raise ValueError('customer_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_user_data')
headers.update(sdk_headers)
params = {'customer_id': customer_id}
url = '/v1/user_data'
request = self.prepare_request(method='DELETE',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
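# Hedged usage sketch (comment only, not part of the generated SDK): exercising the
# custom-word methods above. The service class name, API key and customization ID are
# illustrative assumptions:
#
#   from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
#   tts = TextToSpeechV1(authenticator=IAMAuthenticator('my-apikey'))
#   word = tts.get_word('my-customization-id', 'IEEE').get_result()
#   tts.delete_word('my-customization-id', 'IEEE')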
class GetVoiceEnums(object):
class Voice(Enum):
"""
The voice for which information is to be returned.
"""
AR_AR_OMARVOICE = 'ar-AR_OmarVoice'
DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice'
DE_DE_BIRGITV3VOICE = 'de-DE_BirgitV3Voice'
DE_DE_DIETERVOICE = 'de-DE_DieterVoice'
DE_DE_DIETERV3VOICE = 'de-DE_DieterV3Voice'
DE_DE_ERIKAV3VOICE = 'de-DE_ErikaV3Voice'
EN_GB_KATEVOICE = 'en-GB_KateVoice'
EN_GB_KATEV3VOICE = 'en-GB_KateV3Voice'
EN_US_ALLISONVOICE = 'en-US_AllisonVoice'
EN_US_ALLISONV3VOICE = 'en-US_AllisonV3Voice'
EN_US_EMILYV3VOICE = 'en-US_EmilyV3Voice'
EN_US_HENRYV3VOICE = 'en-US_HenryV3Voice'
EN_US_KEVINV3VOICE = 'en-US_KevinV3Voice'
EN_US_LISAVOICE = 'en-US_LisaVoice'
EN_US_LISAV3VOICE = 'en-US_LisaV3Voice'
EN_US_MICHAELVOICE = 'en-US_MichaelVoice'
EN_US_MICHAELV3VOICE = 'en-US_MichaelV3Voice'
EN_US_OLIVIAV3VOICE = 'en-US_OliviaV3Voice'
ES_ES_ENRIQUEVOICE = 'es-ES_EnriqueVoice'
ES_ES_ENRIQUEV3VOICE = 'es-ES_EnriqueV3Voice'
ES_ES_LAURAVOICE = 'es-ES_LauraVoice'
ES_ES_LAURAV3VOICE = 'es-ES_LauraV3Voice'
ES_LA_SOFIAVOICE = 'es-LA_SofiaVoice'
ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice'
ES_US_SOFIAVOICE = 'es-US_SofiaVoice'
ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice'
FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice'
FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice'
IT_IT_FRANCESCAVOICE = 'it-IT_FrancescaVoice'
IT_IT_FRANCESCAV3VOICE = 'it-IT_FrancescaV3Voice'
JA_JP_EMIVOICE = 'ja-JP_EmiVoice'
JA_JP_EMIV3VOICE = 'ja-JP_EmiV3Voice'
KO_KR_YOUNGMIVOICE = 'ko-KR_YoungmiVoice'
KO_KR_YUNAVOICE = 'ko-KR_YunaVoice'
NL_NL_EMMAVOICE = 'nl-NL_EmmaVoice'
NL_NL_LIAMVOICE = 'nl-NL_LiamVoice'
PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice'
PT_BR_ISABELAV3VOICE = 'pt-BR_IsabelaV3Voice'
ZH_CN_LINAVOICE = 'zh-CN_LiNaVoice'
ZH_CN_WANGWEIVOICE = 'zh-CN_WangWeiVoice'
ZH_CN_ZHANGJINGVOICE = 'zh-CN_ZhangJingVoice'
class SynthesizeEnums(object):
class Accept(Enum):
"""
The requested format (MIME type) of the audio. You can use the `Accept` header or
the `accept` parameter to specify the audio format. For more information about
specifying an audio format, see **Audio formats (accept types)** in the method
description.
"""
AUDIO_BASIC = 'audio/basic'
AUDIO_FLAC = 'audio/flac'
AUDIO_L16 = 'audio/l16'
AUDIO_OGG = 'audio/ogg'
AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus'
AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis'
AUDIO_MP3 = 'audio/mp3'
AUDIO_MPEG = 'audio/mpeg'
AUDIO_MULAW = 'audio/mulaw'
AUDIO_WAV = 'audio/wav'
AUDIO_WEBM = 'audio/webm'
AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus'
AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis'
class Voice(Enum):
"""
The voice to use for synthesis.
"""
AR_AR_OMARVOICE = 'ar-AR_OmarVoice'
DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice'
DE_DE_BIRGITV3VOICE = 'de-DE_BirgitV3Voice'
DE_DE_DIETERVOICE = 'de-DE_DieterVoice'
DE_DE_DIETERV3VOICE = 'de-DE_DieterV3Voice'
DE_DE_ERIKAV3VOICE = 'de-DE_ErikaV3Voice'
EN_GB_KATEVOICE = 'en-GB_KateVoice'
EN_GB_KATEV3VOICE = 'en-GB_KateV3Voice'
EN_US_ALLISONVOICE = 'en-US_AllisonVoice'
EN_US_ALLISONV3VOICE = 'en-US_AllisonV3Voice'
EN_US_EMILYV3VOICE = 'en-US_EmilyV3Voice'
EN_US_HENRYV3VOICE = 'en-US_HenryV3Voice'
EN_US_KEVINV3VOICE = 'en-US_KevinV3Voice'
EN_US_LISAVOICE = 'en-US_LisaVoice'
EN_US_LISAV3VOICE = 'en-US_LisaV3Voice'
EN_US_MICHAELVOICE = 'en-US_MichaelVoice'
EN_US_MICHAELV3VOICE = 'en-US_MichaelV3Voice'
EN_US_OLIVIAV3VOICE = 'en-US_OliviaV3Voice'
ES_ES_ENRIQUEVOICE = 'es-ES_EnriqueVoice'
ES_ES_ENRIQUEV3VOICE = 'es-ES_EnriqueV3Voice'
ES_ES_LAURAVOICE = 'es-ES_LauraVoice'
ES_ES_LAURAV3VOICE = 'es-ES_LauraV3Voice'
ES_LA_SOFIAVOICE = 'es-LA_SofiaVoice'
ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice'
ES_US_SOFIAVOICE = 'es-US_SofiaVoice'
ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice'
FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice'
FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice'
IT_IT_FRANCESCAVOICE = 'it-IT_FrancescaVoice'
IT_IT_FRANCESCAV3VOICE = 'it-IT_FrancescaV3Voice'
JA_JP_EMIVOICE = 'ja-JP_EmiVoice'
JA_JP_EMIV3VOICE = 'ja-JP_EmiV3Voice'
KO_KR_YOUNGMIVOICE = 'ko-KR_YoungmiVoice'
KO_KR_YUNAVOICE = 'ko-KR_YunaVoice'
NL_NL_EMMAVOICE = 'nl-NL_EmmaVoice'
NL_NL_LIAMVOICE = 'nl-NL_LiamVoice'
PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice'
PT_BR_ISABELAV3VOICE = 'pt-BR_IsabelaV3Voice'
ZH_CN_LINAVOICE = 'zh-CN_LiNaVoice'
ZH_CN_WANGWEIVOICE = 'zh-CN_WangWeiVoice'
ZH_CN_ZHANGJINGVOICE = 'zh-CN_ZhangJingVoice'
class GetPronunciationEnums(object):
class Voice(Enum):
"""
A voice that specifies the language in which the pronunciation is to be returned.
All voices for the same language (for example, `en-US`) return the same
translation.
"""
AR_AR_OMARVOICE = 'ar-AR_OmarVoice'
DE_DE_BIRGITVOICE = 'de-DE_BirgitVoice'
DE_DE_BIRGITV3VOICE = 'de-DE_BirgitV3Voice'
DE_DE_DIETERVOICE = 'de-DE_DieterVoice'
DE_DE_DIETERV3VOICE = 'de-DE_DieterV3Voice'
DE_DE_ERIKAV3VOICE = 'de-DE_ErikaV3Voice'
EN_GB_KATEVOICE = 'en-GB_KateVoice'
EN_GB_KATEV3VOICE = 'en-GB_KateV3Voice'
EN_US_ALLISONVOICE = 'en-US_AllisonVoice'
EN_US_ALLISONV3VOICE = 'en-US_AllisonV3Voice'
EN_US_EMILYV3VOICE = 'en-US_EmilyV3Voice'
EN_US_HENRYV3VOICE = 'en-US_HenryV3Voice'
EN_US_KEVINV3VOICE = 'en-US_KevinV3Voice'
EN_US_LISAVOICE = 'en-US_LisaVoice'
EN_US_LISAV3VOICE = 'en-US_LisaV3Voice'
EN_US_MICHAELVOICE = 'en-US_MichaelVoice'
EN_US_MICHAELV3VOICE = 'en-US_MichaelV3Voice'
EN_US_OLIVIAV3VOICE = 'en-US_OliviaV3Voice'
ES_ES_ENRIQUEVOICE = 'es-ES_EnriqueVoice'
ES_ES_ENRIQUEV3VOICE = 'es-ES_EnriqueV3Voice'
ES_ES_LAURAVOICE = 'es-ES_LauraVoice'
ES_ES_LAURAV3VOICE = 'es-ES_LauraV3Voice'
ES_LA_SOFIAVOICE = 'es-LA_SofiaVoice'
ES_LA_SOFIAV3VOICE = 'es-LA_SofiaV3Voice'
ES_US_SOFIAVOICE = 'es-US_SofiaVoice'
ES_US_SOFIAV3VOICE = 'es-US_SofiaV3Voice'
FR_FR_RENEEVOICE = 'fr-FR_ReneeVoice'
FR_FR_RENEEV3VOICE = 'fr-FR_ReneeV3Voice'
IT_IT_FRANCESCAVOICE = 'it-IT_FrancescaVoice'
IT_IT_FRANCESCAV3VOICE = 'it-IT_FrancescaV3Voice'
JA_JP_EMIVOICE = 'ja-JP_EmiVoice'
JA_JP_EMIV3VOICE = 'ja-JP_EmiV3Voice'
KO_KR_YOUNGMIVOICE = 'ko-KR_YoungmiVoice'
KO_KR_YUNAVOICE = 'ko-KR_YunaVoice'
NL_NL_EMMAVOICE = 'nl-NL_EmmaVoice'
NL_NL_LIAMVOICE = 'nl-NL_LiamVoice'
PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice'
PT_BR_ISABELAV3VOICE = 'pt-BR_IsabelaV3Voice'
ZH_CN_LINAVOICE = 'zh-CN_LiNaVoice'
ZH_CN_WANGWEIVOICE = 'zh-CN_WangWeiVoice'
ZH_CN_ZHANGJINGVOICE = 'zh-CN_ZhangJingVoice'
class Format(Enum):
"""
The phoneme format in which to return the pronunciation. The Arabic, Chinese,
Dutch, and Korean languages support only IPA. Omit the parameter to obtain the
pronunciation in the default format.
"""
IBM = 'ibm'
IPA = 'ipa'
class ListVoiceModelsEnums(object):
class Language(Enum):
"""
The language for which custom voice models that are owned by the requesting
credentials are to be returned. Omit the parameter to see all custom voice models
that are owned by the requester.
"""
AR_AR = 'ar-AR'
DE_DE = 'de-DE'
EN_GB = 'en-GB'
EN_US = 'en-US'
ES_ES = 'es-ES'
ES_LA = 'es-LA'
ES_US = 'es-US'
FR_FR = 'fr-FR'
IT_IT = 'it-IT'
JA_JP = 'ja-JP'
KO_KR = 'ko-KR'
NL_NL = 'nl-NL'
PT_BR = 'pt-BR'
ZH_CN = 'zh-CN'
##############################################################################
# Models
##############################################################################
class Pronunciation():
"""
The pronunciation of the specified text.
:attr str pronunciation: The pronunciation of the specified text in the
requested voice and format. If a custom voice model is specified, the
pronunciation also reflects that custom voice.
"""
def __init__(self, pronunciation: str) -> None:
"""
Initialize a Pronunciation object.
:param str pronunciation: The pronunciation of the specified text in the
requested voice and format. If a custom voice | |
"""
@Author <NAME>
@Title Final Year Project
@Institution - University of California, Berkeley
@Date - 2015-2016
"""
from __future__ import division
import os
import sys
import constants
import random
from pandas import DataFrame
import shutil
# we need to import python modules from the $SUMO_HOME/tools directory
try:
sys.path.append(os.path.join(os.path.dirname(
__file__), '..', '..', '..', '..', "tools")) # tutorial in tests
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(__file__), "..", "..", "..")), "tools")) # tutorial in docs
from sumolib import checkBinary
except ImportError:
sys.exit(
"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
import traci
from enum import Enum
import xml.etree.ElementTree as ET
import math
class Platoon:
#Class variables, default values
_vehicleList = [] #elements are of type 'Vehicle'
_id = ""
_headway = 0.1#[m]
_platoonDesSpeed = 12 #[m/s]
_baseRoute =[]
_edgeList = []
_laneList = []
#Access to classVariables
def GetVehicleList(self):
return self._vehicleList
def SetVehicleList(self, vehicleList):
self._vehicleList = vehicleList
def GetID(self):
return self._id
def SetHeadway(self, headway):
self._headway = headway
def GetHeadway(self):
return self._headway
    def SetPlatoonDesSpeed(self, speed):
self._platoonDesSpeed = speed
def GetPlatoonDesSpeed(self):
return self._platoonDesSpeed
def GetPosition(self):
#Return the position of the lead vehicle
        #Note: assumes that the vehicle list is ordered
if(len(self._vehicleList)>0):
return self._vehicleList[0].GetPosition()
else:
return None
def GetBaseRoute(self):
return self._baseRoute
def SetBaseRoute(self, newBaseRoute):
self._baseRoute = newBaseRoute
def Count(self):
return len(self._vehicleList)
def GetVehicleListID(self):
theList = []
for k in range(0, len(self._vehicleList)):
theList.append(self._vehicleList[k].GetID())
return theList
#Constructor
def __init__(self, platoonID):
#Provides an id for the platoon
self._id = platoonID
#Resets the class
self._vehicleList = []
self._baseRoute =[]
self._getEdgeIDList()
self._laneList = traci.lane.getIDList() #*This contains internal lanes...
#Methods
def Add(self, veh):
self._vehicleList.append(veh)#Adds the car to the platoon
veh.AddToPlatoon(self._id,len(self._vehicleList)-1)
def GetLeaderID(self):
        return self._vehicleList[0].GetID()
def Remove(self, veh):
self._vehicleList.remove(veh)#Removes the car from the platoon
veh.RemoveFromPlatoon()
def Update(self):
#Update
self.UpdatePlatoonOrder()#Makes sure they are in order
self.UpdateBaseRoute()#Updates the platoon route
self.UpdateVehicleLaneDynamics()#Make sure they are on the correct lanes
self.CheckRemovalVehicle()#Removes vehicle from the platoon if need be
self.CheckPlatoonIntruders()
self.UpdateVehicles()
def UpdateVehicles(self):
for v in self._vehicleList:
v.Update()
def _determineLeadVehicle(self, veh1, veh2): #Certified
#Returns the id of the lead vehicle or None otherwise, returns none if at a junction
#Returns 1 if first vehicle, 2 if second, 0 if undertermined, None if not relevant
#Gets the ID's of the vehicles
vehID1 = veh1.GetID()
vehID2 = veh2.GetID()
#Stores the roads the vehicles are on
Road1 = traci.vehicle.getRoadID(vehID1)#This could say it's at a junction
Road2 = traci.vehicle.getRoadID(vehID2)#Ditto
Route1 = traci.vehicle.getRoute(vehID1)
Route2 = traci.vehicle.getRoute(vehID2)
        #First determines if each vehicle is on its own route or if it's at a junction
if ((Road1 in Route1) and (Road2 in Route2)):
#Checks if they are both on the same edge
if (Road1 == Road2):
return (1 if (traci.vehicle.getLanePosition(vehID1) > traci.vehicle.getLanePosition(vehID2)) else 2)
else:
#The vehicles are on different edges --> check which one is ahead
#Assume vehicle 1 is ahead and if it isn't then return the other
if (Road1 in Route2):
ind1 = Route2.index(Road1)
ind2 = Route2.index(Road2)
return (1 if (ind1>ind2) else 2)
elif(Road2 in Route1):
ind1 = Route1.index(Road1)
ind2 = Route1.index(Road2)
return (1 if (ind1>ind2) else 2)
else:
raise ValueError('They should not be in the same platoon')
return None
return 0
else:
            #The routes just before the intersection
R1 = Route1[traci.vehicle.getRouteIndex(vehID1)]
R2 = Route2[traci.vehicle.getRouteIndex(vehID2)]
if((Road1 not in Route1) and (Road2 not in Route2)):#both at intersections
if(R1 == R2):
return (1 if (traci.vehicle.getLanePosition(vehID1) > traci.vehicle.getLanePosition(vehID2)) else 2)
else:
return self._determineLeadeHelper(Route1,Route2,R1,R2)
elif(Road1 not in Route1):#Veh 1 is at the intersection
return (1 if (R1 == R2) else self._determineLeadeHelper(Route1,Route2,R1,R2))
else:#Veh 2 is at the intersection
return(2 if (R1 == R2) else self._determineLeadeHelper(Route1,Route2,R1,R2))
def _determineLeadeHelper(self, Route1, Route2, R1, R2):
if (R1 in Route2):
return (1 if (Route2.index(R1)>Route2.index(R2)) else 2)
elif (R2 in Route1):
return (1 if (Route1.index(R1)>Route1.index(R2)) else 2)
else:
raise ValueError('They should not be in the same platoon')
return None
def UpdatePlatoonOrder(self):#Works but may be inneficient
#Updates the position of each vehicle within the platoon
i = 0
while i < len(self._vehicleList) - 1:#Loops through all vehicles in platoon
rank = self._determineLeadVehicle(self._vehicleList[i],self._vehicleList[i+1])#Compares the two first vehicles
if(rank == 2):#If the second vehicle is in front
#Swap the order of the vehicles
tempVeh = self._vehicleList[i]
self._vehicleList[i] = self._vehicleList[i+1]
self._vehicleList[i+1] = tempVeh
                i = 0 #Restart the looping.. this may be inefficient
else:
#Itterate
i+=1
        #Re-iterates and numbers the position of each vehicle
j = 0
while j < len(self._vehicleList):
self._vehicleList[j].SetPosition(j)
#print str(self._vehicleList[j].GetID()) + ': ' + str(self._vehicleList[j].GetPosition())
j+=1
def PrintPlatoon(self):
word = self.GetID() + ":"
for i in range(0,len(self._vehicleList)):
word += self._vehicleList[i].GetID() + ' - '
print word
def UpdateBaseRoute(self):
        #This updates the base route of the platoon. Should be updated every time a platoon
# is created or when a vehicle is added/removed
#Assumes that the _vehicleList is ordered
vid = self._vehicleList[len(self._vehicleList)-1].GetID()
#New test
ind = traci.vehicle.getRouteIndex(vid)
R = traci.vehicle.getRoute(vid)
for i in range(ind, len(R)):
valid = True
commonEdge = R[i]
for j in range(0,len(self._vehicleList)-1):
if(commonEdge not in traci.vehicle.getRoute(self._vehicleList[j].GetID())):
valid = False
break
if(valid):
break
#This is the most recent common edge they share (the last vehicle is on it)
#commonEdge = traci.vehicle.getRoadID(self._vehicleList[len(self._vehicleList)-1].GetID())
newBaseRoute = [commonEdge]#The first common road... Make sure it's not a junction
newListVeh = self._vehicleList
#We assume that the routes of all vehicles in the platoon contains the baseRoute
#Loop through all of the vehicles within the platoon
ex = False
j = 1#Continues looping until is has found a route which two vehicles are taking
while(len(newListVeh) > 1 and ex == False):
i = 0#Loops through all of the vehicles
myNewList = []
while i < len(newListVeh):
R = traci.vehicle.getRoute(newListVeh[i].GetID())#Get the route of veh i
edgeIndex = R.index(newBaseRoute[0])+j #Get the edge index within veh i's route
#If the curr veh has no more edges, get rid of it
if(edgeIndex >= len(R)):
ex = True
i+=1
break
#get the name of the edge
e = R[edgeIndex]
#Creates a list of edges and the number of vehicles
# travelling along the edges in the next 'step'
#Adds the vehicle to the list
if(len(myNewList)>0):
if (e in myNewList[0]) == False:
myNewList[0].append(e)
#Adds the vehicle to the correct list
myNewList.append([newListVeh[i]])
else:
#Adds the vehicle to the correct list
myNewList[myNewList[0].index(e)+1].append(newListVeh[i])
else:
myNewList.append([e])
#Adds the vehicle to the correct list
myNewList.append([newListVeh[i]])
i+=1#iterate
if(len(myNewList) < 1):
break
#default value for the index of the edge with the most vehicles
maxI = [-1]
m = 0
#print myNewList
#Determines which is the longest list
for k in range(0,len(myNewList[0])):
if(len(myNewList[k+1])>m):
maxI = [k]
m = len(myNewList[k+1])
elif (len(myNewList[k+1]) == m):
oldMaxI = maxI
maxI = [oldMaxI, k]
if(m < 1):
print 'm less than 1'
break
#If there are equally many vehicles travelling on some path,
            #then we need to look deeper and see how many follow the next edge
if(len(maxI) == 1):
newBaseRoute.append(myNewList[0][maxI[0]])
newListVeh = myNewList[maxI[0]+1]
#print newListVeh
else:
                print 'ERROR - HAVE NOT PROGRAMMED THIS YET'
j+=1
self.SetBaseRoute(newBaseRoute)#Update base route
def PrintPlatoonVehicleInfo(self):
word = ""
for i in range(0,len(self._vehicleList)):
word += str(traci.vehicle.getSpeed(self._vehicleList[i].GetID())) + ' - '
print word
def RemoveVehicle(self, veh):
for v in self._vehicleList:
if v == veh:
self._vehicleList.remove(v)
def CheckRemovalVehicle(self):
#Checks if the upcoming platoon edge is the same as it's own.
#If not then checks how long before it reaches end of lane. If it's less than a certain distance, it leaves the platoon
#This case is when a vehicle has a different course then the platoon
for v in self._vehicleList:
vid = v.GetID()
R = traci.vehicle.getRoute(vid)
i = traci.vehicle.getRouteIndex(vid)
distance = 0
buff = False
if(traci.vehicle.getRoadID(vid) == R[i]):#If the vehicle is on a road and not at a junction
distance += traci.lane.getLength(R[i] + "_0") - traci.vehicle.getLanePosition(vid)
while(i+1 < len(R)):#If it's not on the last edge of the route
buff = True
nextEdge =R[i+1]
if(nextEdge not in self.GetBaseRoute()):
break
else:
distance+=traci.lane.getLength(nextEdge + '_0')
if(distance > constants.CONST_EXIT_PLATOON_BUFFER):#If it's already bigger, don't waste time looping
break
i+=1
if(distance < constants.CONST_EXIT_PLATOON_BUFFER and buff):
#Remove the vehicle from the platoon
self.Remove(v)
#If the gap between vehicles becomes too large, split the platoon at that gap.
for i in range(1,len(self._vehicleList)):
#ASSUMES IN ORDER
#Vehicles
veh1 = self._vehicleList[i-1]
veh2 = self._vehicleList[i]
vid1 = veh1.GetID()
vid2 = veh2.GetID()
#routes of the subsequent vehicles & #index of the edge which the vehicle is on within its routs
Ro1 = traci.vehicle.getRoute(vid1)
ind1 = traci.vehicle.getRouteIndex(vid1)
Ro2 = traci.vehicle.getRoute(vid2)
ind2 = traci.vehicle.getRouteIndex(vid2)
splitDistance = 0#Distance between two vehicles
#If on the same edge -->Ignores fact they may be at a junction (Assumes that the junction length is negligible)
if(Ro1[ind1] == Ro2[ind2]):
splitDistance = traci.vehicle.getLanePosition(vid1) - traci.vehicle.getLanePosition(vid2)
else:#If not on the same edge
#If the second vehicle won't eventuaally be on the leaders edge, then skip them!
if(Ro1[ind1] not in Ro2):
continue
for j in range(ind2,Ro2.index(Ro1[ind1])):
splitDistance += traci.lane.getLength(Ro2[j] + "_0")
#Need to consider the case where one of the vehicles is at a junction
if(traci.vehicle.getRoadID(vid2)==Ro2[ind2]):#Not at junction
splitDistance-=traci.vehicle.getLanePosition(vid2)
else:#At junction
splitDistance-=traci.lane.getLength(Ro2[ind2] + "_0")
if(traci.vehicle.getRoadID(vid1)==Ro1[ind1]):
splitDistance+=traci.vehicle.getLanePosition(vid1)
if(splitDistance > constants.CONST_SPLIT_DISTANCE):
#May be a better way to do this but I just remove all subsequent vehicles from the platoon
#print 'Platoon Split (' + self.GetID() + '), distance = ' + str(splitDistance) + ' between ' + vid1 + ' and ' + vid2
#Keeps the first i vehicles
while(i<len(self._vehicleList)):
self.Remove(self._vehicleList[i])
break;
def UpdateVehicleSpeedDynamics(self):
if(len(self._vehicleList) <1):
return
#Limits the speed of the leader vehicle to the platoon speed
if(traci.vehicle.getSpeed(self._vehicleList[0].GetID()) > self._platoonDesSpeed):
traci.vehicle.setSpeed(self._vehicleList[0].GetID(), self._platoonDesSpeed)
if(len(self._vehicleList)>1):
#Update the second cars speed to get a fixed distance from the second car
K = 0.1
for j in range(1,len(self._vehicleList)):
lead = traci.vehicle.getLeader(self._vehicleList[j].GetID())
if(lead != None):
if (traci.vehicle.getSpeed(self._vehicleList[j-1].GetID()) != 0):
#print 'update'
speed = traci.vehicle.getSpeed(self._vehicleList[j-1].GetID()) + K*(lead[1]-self._headway)
#Makes sure the speed does't exceed the speed limit
speedLim = traci.lane.getMaxSpeed(traci.vehicle.getRoadID(self._vehicleList[j-1].GetID()) + "_0")
if(speed>speedLim):
speed = speedLim
traci.vehicle.setSpeed(self._vehicleList[j].GetID(), speed)
                        #If the vehicle is decelerating then match it
leadVAccel = traci.vehicle.getAccel(self._vehicleList[j-1].GetID())
if(traci.vehicle.getAccel(self._vehicleList[j-1].GetID()) < 0 or traci.vehicle.getAccel(self._vehicleList[j].GetID())<0):
print "ERROROOOROOR "
else:
#The vehicle follows the previous speed until is is given a new speed--?if the leader is out of sight, start the car model again
traci.vehicle.setSpeed(self._vehicleList[j].GetID(), -1)
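    #Hedged illustration (comment only): the follower update above is a proportional
    #spacing controller, speed = v_leader + K*(gap - headway). With K = 0.1,
    #v_leader = 10 m/s, gap = 5.1 m and the default headway of 0.1 m it commands
    #10 + 0.1*(5.1 - 0.1) = 10.5 m/s, so the follower speeds up to close the gap;
    #the result is then clipped to the lane speed limit as in the code above.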
def printSpeeds(self):
word = ""
for i in range(0,len(self._vehicleList)):
word += | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import traceback
import subprocess
import string
from datetime import datetime
from distutils.util import strtobool
import os
import re
import json
import urllib2
from slackclient import SlackClient
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
SOUNDS_DIR = os.path.join(BASE_DIR, 'sounds')
CONFIG_FILE = os.path.join(BASE_DIR, 'config.json')
LOGGING_FILE = os.path.join(BASE_DIR, 'commands.log')
VALID_CHARS = string.ascii_letters + string.digits + " .'_-"
FOLDER_SEP = ':/|'
PLAYER = 'mpg123'
FILETYPE = 'mp3'
EQUALIZER = ['mp3gain', '-r']
PAD_SILENCE = ['sox', 'in.mp3', 'out.mp3', 'pad', '0.5', '0']
TRIM = ['sox', 'in.mp3', 'out.mp3', 'trim', 'from', 'to']
FADE = ['sox', 'in.mp3', 'out.mp3', 'fade', '0', '-0', '2']
YOUTUBE_DOWNLOAD = ['youtube-dl', '--extract-audio', '--audio-format', 'mp3', 'url', '-o', '{}.%(ext)s']
DEFAULT_OPTIONS = {
"_token": None,
"throttling": True,
"throttling_reset": 10 * 60,
"throttling_count": 5,
"default_ban_length": 30,
}
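# Defaults: "_token" must be provided in config.json (keys starting with "_" cannot be changed through
# the "set x to y" command); throttling_reset is expressed in seconds, throttling_count is the number of
# plays allowed per window, and default_ban_length is expressed in minutes (see punish_user_action).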
PLAY_REGEX = re.compile(u"play\s([a-z0-9_' ]+)", re.IGNORECASE)
REMOVE_REGEX = re.compile(u"remove\s([a-z0-9_' ]+)", re.IGNORECASE)
UPDATE_CONF_REGEX = re.compile("^set\s([A-Z0-9_]+)\sto\s([A-Z0-9_]+)$", re.IGNORECASE)
SHOW_CONF_REGEX = re.compile("^show\sconf$", re.IGNORECASE)
LIST_SOUNDS_REGEX = re.compile("list\ssounds", re.IGNORECASE)
PUNISH_USER_REGEX = re.compile("punish\s<?@([A-Z0-9_-]+)>?\s?(\d+)?", re.IGNORECASE)
HELP_REGEX = re.compile("^help$", re.IGNORECASE)
SHOW_LOGS_REGEX = re.compile("^show\slogs$", re.IGNORECASE)
TRIM_REGEX = re.compile("^trim\s([a-z0-9_' ]+)\s([\d\.]+)\s([\d\.]+)$", re.IGNORECASE)
FADE_OUT_REGEX = re.compile("^fade\s([a-z0-9_' ]+)$", re.IGNORECASE)
YOUTUBE_REGEX = re.compile("^download\s<?(https?://[^\s/$.?#].[^\s]*)>?\s([a-z0-9_' :/|]+)$", re.IGNORECASE)
PAD_REGEX = re.compile("^pad\s([a-z0-9_' ]+)$", re.IGNORECASE)
users = {}
throttling_record = {}
punished = {}
logs = []
def load_config():
config = {}
with open(CONFIG_FILE, 'r') as f:
config = json.loads(f.read())
for key, value in DEFAULT_OPTIONS.iteritems():
config.setdefault(key, value)
return config
def write_config(config):
with open(CONFIG_FILE, 'w') as f:
f.write(json.dumps(config))
def find_sound(sound_name):
directories = (file_ for file_ in os.listdir(SOUNDS_DIR)
if os.path.isdir(os.path.join(SOUNDS_DIR, file_)))
for d in directories:
path = os.path.join(SOUNDS_DIR, d, '{}.{}'.format(sound_name.replace(' ', '_'), FILETYPE))
if os.path.isfile(path):
return path
def play_action(match, user, config):
sound_name = match.group(1).strip()
sound_file = find_sound(sound_name)
def throttle():
if not config["throttling"] or user["is_admin"]:
return False, None
record = throttling_record.get(user["name"], {"time": time.time(), "count": 0})
if (time.time() - record["time"]) < config["throttling_reset"]:
record["count"] += 1
else:
record["count"] = 1
record["time"] = time.time()
throttling_record[user["name"]] = record
return record["count"] > config["throttling_count"], record
def check_punished():
if user["is_admin"]:
return False
release = punished.get(user["name"], time.time())
if release > time.time():
return release
return False
if sound_file:
throttled, record = throttle()
punished_release = check_punished()
if throttled:
message = 'You reached your throttling limit. Try again later.'
elif punished_release:
message = 'You have been punished ! No sounds until {}.'.format(datetime.fromtimestamp(punished_release).strftime('%H:%M:%S'))
else:
logs.append((user, sound_name, time.time()))
message = 'Playing ' + sound_name
subprocess.Popen([PLAYER, "{}".format(sound_file)])
if record:
message += '\n {} plays left. Reset at {}.'.format(
max(config["throttling_count"] - record["count"], 0),
datetime.fromtimestamp(record["time"] + config["throttling_reset"]).strftime('%H:%M:%S')
)
else:
message = 'No sound matching ' + sound_name
return message
def remove_action(match, user, config):
if not user["is_admin"]:
return
sound_name = match.group(1).strip()
sound_file = find_sound(sound_name)
if sound_file:
os.remove(sound_file)
message = 'Removed ' + sound_name
else:
message = 'No sound matching ' + sound_name
return message
def show_logs_action(match, user, config):
return '\n'.join(['{} played {} at {}'.format(l[0]['name'], l[1], datetime.fromtimestamp(l[2]).strftime('%H:%M:%S'))
for l in logs[-10:]])
def list_sounds_action(match, user, config):
message = '```\nAvailable sounds are :\n'
directories = sorted(file_ for file_ in os.listdir(SOUNDS_DIR)
if os.path.isdir(os.path.join(SOUNDS_DIR, file_)))
def split_by_cols(l, n=4):
output = ''
for row in (l[i:i + n] for i in xrange(0, len(l), n)):
fmt = "| {:<30s} " * len(row)
output += fmt.format(*row) + '\n'
return output
for directory in directories:
message += '\n' + directory.upper() + ':\n'
sounds = sorted(s.split('.')[0].replace('_', ' ') for s in os.listdir(os.path.join(SOUNDS_DIR, directory)))
message += split_by_cols(sounds)
message += '```'
return message
def show_conf_action(match, user, config):
if not user["is_admin"]:
return
message = ''
for key, value in config.iteritems():
message += '{}: {}\n'.format(key, value)
return message
def show_help_action(match, user, config):
message = """
Welcome to sounds, the bot that brings fun to your team.
To interact with the bot, simply use these commands:
list sounds: shows the full list of all the sounds available
play replace_with_sound: plays the sound you chose from the list
show logs: shows a list of who played the last 10 sounds
pad replace_with_sound: adds 0.5s at the beginning of the sound
trim replace_with_sound 2.5 10: trims the selected sound to keep only the portion between 2.5 and 10 seconds
fade replace_with_sound: adds a 1s fadeout on your sound
download replace_with_youtube_url replace_with_sound: downloads a sound from youtube
help: shows this help"""
if user["is_admin"]:
message += """
remove sound_name: removes the sound from the list
show conf: show the config variables
set x to y: updates the x config variable with y value
punish @user 30: prevent user from playing a sound for 30 minutes"""
message += """
How to upload a sound ?
In the bot channel, upload your mp3 file. This file should already be cut properly and have 0.5s of silence at the beginning.
You can use various websites like sonyoutube.com to convert a youtube video to an mp3 file and then use a software like audacity or a website like audiotrimmer.com to edit it.
Be sure your filename ends with .mp3, and if you want to put your file in a specific folder, separate the folder from the filename like so: folder:filename.mp3
That's it with the instructions, have fun !"""
return message
def update_conf_action(match, user, config):
if not user["is_admin"]:
return
key = match.group(1)
value = match.group(2)
if key.startswith('_'):
return "Can't set private variables"
try:
value = int(value)
except ValueError:
try:
value = bool(strtobool(value))
except ValueError:
pass
config[key] = value
write_config(config)
return "Config set"
def punish_user_action(match, user, config):
if not user["is_admin"]:
return
who = match.group(1)
r = users.get(who)
if r:
who = r
else:
return "Couldn't find user {}".format(user)
try:
how_long = int(match.group(2) or config.get('default_ban_length'))
except ValueError:
how_long = 30
punished[who["name"]] = time.time() + how_long * 60
return "{} has been punished for {} minutes.".format(who["name"], how_long)
def trim_action(match, user, config):
sound_name = match.group(1).strip()
sound_file = find_sound(sound_name)
if sound_file:
    tmp_file = '__NEW__' + os.path.basename(sound_file)
trim_command = list(TRIM)
trim_command[1] = sound_file
trim_command[2] = tmp_file
trim_command[4] = match.group(2)
trim_command[5] = '=' + match.group(3)
process = subprocess.Popen(trim_command)
process.wait()
os.rename(tmp_file, sound_file)
message = 'Trimmed ' + sound_name
else:
message = 'No sound matching ' + sound_name
return message
def pad_action(match, user, config):
sound_name = match.group(1).strip()
sound_file = find_sound(sound_name)
if sound_file:
    tmp_file = '__NEW__' + os.path.basename(sound_file)
pad_command = list(PAD_SILENCE)
pad_command[1] = sound_file
pad_command[2] = tmp_file
process = subprocess.Popen(pad_command)
process.wait()
os.rename(tmp_file, sound_file)
message = 'Padded ' + sound_name
else:
message = 'No sound matching ' + sound_name
return message
def fade_out_action(match, user, config):
sound_name = match.group(1).strip()
sound_file = find_sound(sound_name)
if sound_file:
    tmp_file = '__NEW__' + os.path.basename(sound_file)
fade_command = list(FADE)
fade_command[1] = sound_file
fade_command[2] = tmp_file
process = subprocess.Popen(fade_command)
process.wait()
os.rename(tmp_file, sound_file)
message = 'Faded ' + sound_name
else:
message = 'No sound matching ' + sound_name
return message
def slugify(raw):
return "".join([x for x in raw if x in VALID_CHARS]).replace("-", "_").strip().replace(" ", "_").lower()
def download_action(match, user, config):
url = match.group(1)
filename = match.group(2)
folder = 'misc'
for sep in FOLDER_SEP:
if sep in filename:
folder, filename = filename.split(sep)
break
if filename.endswith('.mp3'):
filename = filename[:-4]
filename = slugify(filename)
dl_command = list(YOUTUBE_DOWNLOAD)
dl_command[-1] = dl_command[-1].format(filename)
dl_command[-3] = url
process = subprocess.Popen(dl_command)
process.wait()
path_to_sound = os.path.join(SOUNDS_DIR, slugify(folder), filename + '.mp3')
try:
os.makedirs(os.path.join(SOUNDS_DIR, slugify(folder)))
except OSError:
pass
os.rename(filename + '.mp3', path_to_sound)
subprocess.Popen(EQUALIZER + [path_to_sound])
return "Sound added correctly"
def add_sound(sc, file_id, config):
info = sc.api_call("files.info", file=file_id)
file_url = info.get("file").get("url_private") if info["ok"] else ''
filename = info.get("file").get("title") if info["ok"] else ''
if filename.endswith('.mp3') and file_url.endswith('.mp3'):
folder = 'misc'
for sep in FOLDER_SEP:
if sep in filename:
folder, filename = filename.split(sep)
break
try:
os.makedirs(os.path.join(SOUNDS_DIR, slugify(folder)))
except OSError:
pass
req = urllib2.Request(file_url, headers={"Authorization": "Bearer " + config["_token"]})
path_to_sound = os.path.join(SOUNDS_DIR, slugify(folder), slugify(filename))
with open(path_to_sound, 'w+') as f:
f.write(urllib2.urlopen(req).read())
subprocess.Popen(EQUALIZER + [path_to_sound])
ACTIONS = {
PLAY_REGEX: play_action,
REMOVE_REGEX: remove_action,
UPDATE_CONF_REGEX: update_conf_action,
SHOW_CONF_REGEX: show_conf_action,
PUNISH_USER_REGEX: punish_user_action,
HELP_REGEX: show_help_action,
LIST_SOUNDS_REGEX: list_sounds_action,
SHOW_LOGS_REGEX: show_logs_action,
YOUTUBE_REGEX: download_action,
PAD_REGEX: pad_action,
TRIM_REGEX: trim_action,
FADE_OUT_REGEX: fade_out_action,
}
def load_users(sc):
user_list = sc.api_call("users.list")
for user in user_list["members"]:
users[user["id"]] = {
"name": user["name"],
"is_admin": user.get("is_admin", False),
"id": user["id"]
}
def start():
config = load_config()
sc = SlackClient(config["_token"])
if sc.rtm_connect():
bot_id = sc.api_call("auth.test")["user_id"]
load_users(sc)
while True:
for event in sc.rtm_read():
event_type = event.get('type', None)
print event_type, event.get('text')
if event_type == 'message':
text = event.get('text', '').replace(u'’', "'")
user = users.get(event.get('user', None), None)
channel = event.get('channel', None)
if not user or not text or not channel:
continue
message = None
for regex, action in ACTIONS.iteritems():
match = regex.match(text)
if match:
message = action(match, user, config)
break
if message:
sc.api_call("chat.postEphemeral", channel=channel, text=message, user=user["id"])
elif event_type == 'file_created' or event_type == 'file_shared':
file_id = event.get('file', {}).get('id', None)
if file_id:
add_sound(sc, file_id, config)
time.sleep(1)
else:
print 'Connection failed, invalid token?'
if | |
Pointer = 0x664E3
I3Flag.append(["Woodman", GiveI3])
elif GetI3 == "Metalman":
Pointer = 0x664E5
I3Flag.append(["Metalman", GiveI3])
elif GetI3 == "Crashman":
Pointer = 0x664E9
I3Flag.append(["Crashman", GiveI3])
elif GetI3 == "Sparkman":
Pointer = 0x66503
I3Flag.append(["Sparkman", GiveI3])
elif GetI3 == "Snakeman":
Pointer = 0x66505
I3Flag.append(["Snakeman", GiveI3])
elif GetI3 == "Needleman":
Pointer = 0x66507
I3Flag.append(["Needleman", GiveI3])
elif GetI3 == "Hardman":
Pointer = 0x66509
I3Flag.append(["Hardman", GiveI3])
elif GetI3 == "Topman":
Pointer = 0x6650B
I3Flag.append(["Topman", GiveI3])
elif GetI3 == "Geminiman":
Pointer = 0x6650D
I3Flag.append(["Geminiman", GiveI3])
elif GetI3 == "Magnetman":
Pointer = 0x6650F
I3Flag.append(["Magnetman", GiveI3])
elif GetI3 == "Shadowman" :
Pointer = 0x66511
I3Flag.append(["Shadowman", GiveI3])
Seek = ROM.seek(Pointer,0)
Value = b'\x13' #Writes Item 3 value at address specified above
ROM.write(Value)
#!Doc Bot duo byte writing section
Pointer = [0x7B7F3,0x7B8DF,0x7BA41,0x7BC11,0x7BDF9,0x7BF1B,0x7C0DB,0x7C1C7] #Doc Robot Programming addresses
Pointer2 = [0x7B7FB,0x7B8E7,0x7BA49,0x7BC19,0x7BE01,0x7BF23,0x7C0D3,0x7C1CF] #Doc Robot Boss Addresses
DocRobots = []
DocHeat = b'\xA7' #What values to look for when determining duo byte
DocMetal = b'|'
DocQuick = b'\xA8'
DocAir = b'\xA9'
DocCrash = b'\xAA'
DocFlash = b'\xAB'
DocWood = b'\xAD'
DocBubble = b'\xAC'
DocRobots2 = [b'|',b'\xA7',b'\xA8',b'\xA9',b'\xAA',b'\xAB',b'\xAC',b'\xAD']
Docs0 = []
Docs2 = []
DocRobots0 = []
First = [b'|',b'\xA9',b'\xAB',b'\xAD']
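#Duplicate-resolution pass: read the 8 randomized Doc Robot values back from the ROM, detect any
#boss values that are missing (Docs0) or appear more than once (Docs2), and overwrite the extra
#occurrence of a duplicate with a missing value from the same stage group (First holds the
#group-1 values) so that every Doc Robot boss value appears exactly once.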
for x in range(8):
Seek = ROM.seek(Pointer[x],0) #Grab randomized values from addresses
Byte =(ROM.read(1))
DocRobots0.append(Byte)
Error = False
for x in range(8):
Value = DocRobots0.count(DocRobots2[x]) #Seeing if there is duplicates by counting
if Value == 0:
Error = True
Docs0.append(DocRobots2[x]) #If value not found, add to Docs0
elif Value >= 2:
Error = True
Docs2.append(DocRobots2[x])#If multiple value found, add to Docs2
if Value == 3:
Docs2.append(DocRobots2[x])
if Value >= 4:
print("Something went wrong when generating the ROM. Please delete the ROM, generate a new one, then run this script again.")
ROM.close()
time.sleep(4)
if Error == True:
Value = len(Docs0)
for x in range(Value):
Group1 = False
Group2 = False
Notfound = Docs0.pop()
Multiple = Docs2.pop()
for y in range(4):
if Notfound == First[y]: #If value from Docs0 is in Group 1, Group1 = True
Group1 = True
break
if Group1 == False: #If value from Docs0 not found in First list, Group2 = True
Group2 = True
if Group1 == True:
Gonext = False
Match = False
for z in range(Value):
Check = 0
if Match == True:
break
for y in range(4):
if Multiple == First[y]: #Checks to see if Docs2 is in Group 1
Match = True
break
elif Multiple != First[y]:
Check += 1
if Check == 4: #If no match found, go to next value in Doc2 list
Gonext = True
if Gonext == True: #Replace value with the next value and return first value
Multiple2 = Docs2.pop()
Docs2.insert(0, Multiple)
Multiple = Multiple2
Gonext = False
elif Group2 == True:
Gonext = False
Match = False
for z in range(Value):
Check = 0
if Match == True:
break
for y in range(4):
if Multiple != First[y]:
Check += 1
elif Multiple == First[y]:
Gonext = True
if Check == 4:
Match = True
if Gonext == True:
Multiple2 = Docs2.pop()
Docs2.insert(0, Multiple)
Multiple = Multiple2
Gonext = False
break
Index = DocRobots0.index(Multiple) #Getting second position of duplicate value
Index += 1
Index2 = DocRobots0.index(Multiple, Index)
Index = Index2
DocRobots0.pop(Index) #Remove duplicate value from list
print(DocRobots0)
DocRobots0.insert(Index, Notfound) #Insert new value into proper position
print(DocRobots0)
Pointer3 = Pointer[Index]
Seek = ROM.seek(Pointer3, 0)
ROM.write(Notfound)
for x in range(8):
DocRobots.append([DocRobots0[x], x])
for x in range(8):
if DocRobots[x][0] == DocHeat: #If value from DocRobots matches values, writes duo byte at that address from Pointer2
Value = DocRobots[x][1]
Pointer3 = Pointer2[Value]
Seek = ROM.seek(Pointer3,0)
ROM.write(b'\x48')
elif DocRobots[x][0] == DocMetal:
Value = DocRobots[x][1]
Pointer3 = Pointer2[Value]
Seek = ROM.seek(Pointer3,0)
ROM.write(b'\x43')
elif DocRobots[x][0] == DocQuick:
Value = DocRobots[x][1]
Pointer3 = Pointer2[Value]
Seek = ROM.seek(Pointer3,0)
ROM.write(b'\x46')
elif DocRobots[x][0] == DocAir:
Value = DocRobots[x][1]
Pointer3 = Pointer2[Value]
Seek = ROM.seek(Pointer3,0)
ROM.write(b'\x41')
elif DocRobots[x][0] == DocCrash:
Value = DocRobots[x][1]
Pointer3 = Pointer2[Value]
Seek = ROM.seek(Pointer3,0)
ROM.write(b'\x42')
elif DocRobots[x][0] == DocFlash:
Value = DocRobots[x][1]
Pointer3 = Pointer2[Value]
Seek = ROM.seek(Pointer3,0)
ROM.write(b'\x45')
elif DocRobots[x][0] == DocWood:
Value = DocRobots[x][1]
Pointer3 = Pointer2[Value]
Seek = ROM.seek(Pointer3,0)
ROM.write(b'\x44')
elif DocRobots[x][0] == DocBubble:
Value = DocRobots[x][1]
Pointer3 = Pointer2[Value]
Seek = ROM.seek(Pointer3,0)
ROM.write(b'\x47')
#!Wily Refight Capsule boss writing section
PointersC = [0x859B0,0x859B1,0x859B2,0x859B3,0x859B4,0x859B5,0x859B6,0x859B7] #Wily4 random capsule bosses values
Capsuleorder = []
Bossorder = []
Bossvalue = []
first = False
second = False
FlatSp4Arena = False
FlatSn4Arena = False
Adjust = False
Pointer2=[0x7C88D,0x7C89F,0x7C8D5,0x7C8E7]
Ypos = [b'\xC4',b'\xB4',b'\xB4',b'\xB5']
y = 9
y2 = 9
Order0 = []
Order2 = []
Wily2 = False
Wily3 = False
if Wily == True:
Pointer = 0x857FE
Seek = ROM.seek(Pointer,0)
Value = ROM.read(1)
if Value == b'\x20':
Wily2 = True
elif Value == b'\x38':
Wily3 = True
for x in range(8):
Seek = ROM.seek(PointersC[x],0)
Value = ROM.read(1)
Boss = int.from_bytes(Value, "big") #Grabs randomized values and converts them to int, appends to Capsuleorder
Boss -= 1
Capsuleorder.append(Boss)
Error = False
for x in range(8):
Value = Capsuleorder.count(x)
if Value == 0:
Error = True
Order0.append(x)
elif Value >= 2:
Error = True
Order2.append(x)
if Value > 2:
Order2.append(x)
if Error == True:
Value = len(Order0)
for x in range(Value):
Notfound = Order0.pop()
Multiple = Order2.pop()
Index = Capsuleorder.index(Multiple)
Capsuleorder.pop(Index)
Capsuleorder.insert(Index, Notfound)
if randomboss == False:
for x in range(8):
Value=Capsuleorder.pop() #Finds boss based on randomized value and appends them to Bossorder
if Value == pos[0][1]:
Bossorder.append(pos[0][0])
elif Value == pos[1][1]:
Bossorder.append(pos[1][0])
elif Value == pos[2][1]:
Bossorder.append(pos[2][0])
elif Value == pos[3][1]:
Bossorder.append(pos[3][0])
elif Value == pos[4][1]:
Bossorder.append(pos[4][0])
elif Value == pos[5][1]:
Bossorder.append(pos[5][0])
elif Value == pos[6][1]:
Bossorder.append(pos[6][0])
elif Value == pos[7][1]:
Bossorder.append(pos[7][0])
elif randomboss == True:
for x in range(8):
Value=Capsuleorder.pop() #Finds boss based on randomized value and appends them to Bossorder
if Value == posB[0][1]:
Bossorder.append(posB[0][0])
elif Value == posB[1][1]:
Bossorder.append(posB[1][0])
elif Value == posB[2][1]:
Bossorder.append(posB[2][0])
elif Value == posB[3][1]:
Bossorder.append(posB[3][0])
elif Value == posB[4][1]:
Bossorder.append(posB[4][0])
elif Value == posB[5][1]:
Bossorder.append(posB[5][0])
elif Value == posB[6][1]:
Bossorder.append(posB[6][0])
elif Value == posB[7][1]:
Bossorder.append(posB[7][0])
Bossorder.reverse() #Reverses boss order to be correct
for x in range(8):
if Bossorder[x] == "Flashman": #Appends byte to Bossvalue based on boss
Value = b'\x45'
Bossvalue.append(Value)
elif Bossorder[x] == "Cutman":
Value = b'\x18'
Bossvalue.append(Value)
elif Bossorder[x] == "Gutsman":
Value = b'\x19'
Bossvalue.append(Value)
elif Bossorder[x] == "Iceman":
Value = b'\x14'
Bossvalue.append(Value)
elif Bossorder[x] == "Bombman":
Value = b'\x15'
Bossvalue.append(Value)
elif Bossorder[x] == "Fireman":
Value = b'\x16'
Bossvalue.append(Value)
elif Bossorder[x] == "Elecman":
Value = b'\x17'
Bossvalue.append(Value)
elif Bossorder[x] == "Bubbleman":
Value = b'\x47'
Bossvalue.append(Value)
elif Bossorder[x] == "Airman":
Value = b'\x41'
Bossvalue.append(Value)
elif Bossorder[x] == "Quickman":
Value = b'\x46'
Bossvalue.append(Value)
elif Bossorder[x] == "Heatman":
Value = b'\x48'
Bossvalue.append(Value)
elif Bossorder[x] == "Woodman":
Value = b'\x44'
Bossvalue.append(Value)
elif Bossorder[x] == "Metalman":
Value = b'\x43'
Bossvalue.append(Value)
elif Bossorder[x] == "Crashman":
Value = b'\x42'
Bossvalue.append(Value)
elif Bossorder[x] == "Sparkman":
Value = b'\x78'
first = True
y = x
Bossvalue.append(Value)
elif Bossorder[x] == "Snakeman":
Value = b'\x76'
second = True
Bossvalue.append(Value)
elif Bossorder[x] == "Needleman":
Value = b'\x7A'
Bossvalue.append(Value)
elif Bossorder[x] == "Hardman":
Value = b'\x74'
Bossvalue.append(Value)
elif Bossorder[x] == "Topman":
Value = b'\x75'
Bossvalue.append(Value)
elif Bossorder[x] == "Geminiman":
Value = b'\x7B'
Bossvalue.append(Value)
elif Bossorder[x] == "Magnetman":
Value = b'\x77'
Bossvalue.append(Value)
elif Bossorder[x] == "Shadowman" :
Value = b'\x79'
Bossvalue.append(Value)
if Wily3 == True or MM3 == True:
if first == True: #If Sparkman is a boss, makes him have his capsule
Value = Bossvalue.pop(y)
Bossvalue.insert(0, Value)
if second == True: #If Snakeman is a boss, makes him have his capsule
y2 = Bossvalue.index(b'v')
Value = Bossvalue.pop(y2)
Bossvalue.insert(1, Value)
Value = b'\x14'
ValueC = b'\x18'
ValueG = b'\x19'
ValueB = b'\x15'
ValueF = b'\x16'
ValueE = b'\x17'
ValueL = b'\xB4'
if Wily == False:
Pointer = 0x7C869
if MM2 == True:
Pointer = 0x7A463
for x in range(8): #Writes bossvalue to ROM address
Seek = ROM.seek(Pointer,0)
ROM.write(Bossvalue[x])
if MM3 == True:
| |
import argparse
from bisect import bisect, bisect_left, bisect_right
import contextlib
import importlib
import io
import itertools
import math
import operator
import pathlib
import time
import os
import ast
import random
import re
import sys
import traceback
import bisect
from py2many.exceptions import AstUnsupportedOperation
from pyjl.global_vars import RESUMABLE
from pyjl.helpers import get_func_def
import pyjl.juliaAst as juliaAst
from tempfile import NamedTemporaryFile
from typing import Any, Callable, Dict, List, Tuple, Union
from py2many.ast_helpers import get_id
from py2many.tracer import find_node_by_name_and_type, find_node_by_type, is_class_type
try:
from dataclasses import dataclass
except ImportError:
ArgumentParser = "ArgumentParser"
ap_dataclass = "ap_dataclass"
class JuliaTranspilerPlugins:
########## Decorators ##########
def visit_jl_dataclass(t_self, node: ast.ClassDef, decorator):
t_self._usings.add("DataClass")
_, field_repr = JuliaTranspilerPlugins._generic_dataclass_visit(node, decorator)
# Visit class fields
fields = "\n".join([
node.fields_str,
"_initvars = [" + ", ".join(field_repr) + "]\n"
])
# Struct definition
bases = [t_self.visit(base) for base in node.jl_bases]
struct_def = f"mutable struct {node.name} <: {bases[0]}" \
if bases else f"mutable struct {node.name}"
body = []
for b in node.body:
if isinstance(b, ast.FunctionDef):
body.append(t_self.visit(b))
body = "\n".join(body)
if hasattr(node, "constructor_str"):
return f"""@dataclass {struct_def} begin
{fields}
{node.constructor_str}
end
{body}"""
return f"""
@dataclass {struct_def} begin
{fields}
end
{body}
"""
def visit_py_dataclass(t_self, node: ast.ClassDef, decorator) -> str:
dataclass_data = JuliaTranspilerPlugins._generic_dataclass_visit(node, decorator)
[d_fields, _] = dataclass_data[0], dataclass_data[1]
fields: str = node.fields_str
struct_fields = fields.split("\n") if fields else ""
# Abstract type
struct_name = "".join(["Abstract", get_id(node)])
# get struct variables using getfield
attr_vars = []
key_vars = []
str_struct_fields = []
for field in struct_fields:
field_name = field
field_type = None
field_split = field.split("::")
if len(field_split) > 1:
field_name = field_split[0]
field_type = field_split[1]
if field_type:
st_name = field_type[8:] if field_type.startswith("Abstract") else field_type
str_struct_fields.append(f"{field_name}::{field_type}"
if is_class_type(field_type, node.scopes)
else f"{field_name}::Abstract{field_type}")
key_vars.append(f"self.{field_name}"
if (not is_class_type(st_name, node.scopes)) else f"__key(self.{field_name})")
else:
str_struct_fields.append(f"{field_name}")
key_vars.append(f"self.{field_name}")
attr_vars.append(f"self.{field_name}")
# Convert into string
key_vars = ", ".join(key_vars)
attr_vars = ", ".join(attr_vars)
str_struct_fields = ", ".join(str_struct_fields)
# Visit class body
body = []
for b in node.body:
if isinstance(b, ast.FunctionDef):
body.append(t_self.visit(b))
# Add functions to body
if d_fields["repr"]:
body.append(f"""
function __repr__(self::{struct_name})::String
return {struct_name}({attr_vars})
end
""")
if d_fields["eq"]:
body.append(f"""
function __eq__(self::{struct_name}, other::{struct_name})::Bool
return __key(self) == __key(other)
end
""")
if d_fields["order"]:
body.append(f"""
function __lt__(self::{struct_name}, other::{struct_name})::Bool
return __key(self) < __key(other)
end\n
function __le__(self::{struct_name}, other::{struct_name})::Bool
return __key(self) <= __key(other)
end\n
function __gt__(self::{struct_name}, other::{struct_name})::Bool
return __key(self) > __key(other)
end\n
function __ge__(self::{struct_name}, other::{struct_name})::Bool
return __key(self) >= __key(other)
end
""")
if d_fields["unsafe_hash"]:
if d_fields["eq"]: # && ismutable
body.append(f"""
function __hash__(self::{struct_name})
return __key(self)
end
""")
body.append(f"""
function __key(self::{struct_name})
({key_vars})
end
""")
body = "\n".join(body)
bases = [t_self.visit(base) for base in node.jl_bases]
struct_def = f"mutable struct {node.name} <: {bases[0]}" \
if bases else f"mutable struct {node.name}"
if hasattr(node, "constructor_str"):
return f"{struct_def}\n{fields}\n{node.constructor_str}\nend\n{body}"
return f"{struct_def}\n{fields}\nend\n{body}"
def _generic_dataclass_visit(node, decorator):
fields = {}
field_repr = []
keywords = {'init': True, 'repr': True, 'eq': True, 'order': False,
'unsafe_hash': False, 'frozen': False}
parsed_decorators: Dict[str, Dict[str, str]] = node.parsed_decorators
# Parse received keywords if needed
if isinstance(decorator, ast.Call):
parsed_keywords: Dict[str, str] = parsed_decorators[get_id(decorator.func)]
for (key, value) in parsed_keywords.items():
keywords[key] = value
key_map = {False: "false", True: "true"}
for kw in keywords:
arg = kw
value = keywords[arg]
if value is None:
return (None, None)
fields[arg] = value
val = key_map[value] if value in key_map else value
field_repr.append(f"_{arg}={val}")
return fields, field_repr
def visit_JuliaClass(t_self, node: ast.ClassDef, decorator) -> Any:
t_self._usings.add("Classes")
# Struct definition
fields = []
bases = []
for b in node.jl_bases:
b_name = t_self.visit(b)
if b_name != f"Abstract{node.name}":
bases.append(b_name)
# Don't repeat elements of superclasses
base_class = find_node_by_name_and_type(b_name, ast.ClassDef, node.scopes)[0]
if base_class:
base_class_decs = list(map(lambda x: x[0], base_class.fields))
for (declaration, typename, _) in node.fields:
if declaration not in base_class_decs:
fields.append((declaration, typename))
# Change string representation if fields have been changed
if fields and fields != node.fields:
fields_str = list(map(lambda x: f"{x[0]}::{x[1]}" if x[1] else x[0], fields))
node.fields_str = ", ".join(fields_str) if fields else ""
struct_def = f"{node.name} <: {', '.join(bases)}" \
if bases else f"{node.name}"
body = []
for b in node.body:
if isinstance(b, ast.FunctionDef):
body.append(f"{t_self.visit(b)}")
body = "\n".join(body)
if hasattr(node, "constructor"):
return f"@class {struct_def}begin\n{node.fields_str}\n{node.constructor_str}\nend\n{body}"
return f"@class {struct_def} begin\n{node.fields_str}\nend\n{body}"
def visit_resumables(t_self, node, decorator):
# node.scopes[-2] because node.scopes[-1] is the current function
parent = node.scopes[-2]
if isinstance(parent, ast.FunctionDef):
raise AstUnsupportedOperation(
"Cannot use resumable functions when function is nested", node)
t_self._usings.add("ResumableFunctions")
funcdef = f"function {node.name}{node.template}({node.parsed_args}){node.return_type}"
# Visit function body
body = "\n".join(t_self.visit(n) for n in node.body)
if body == "...":
body = ""
maybe_main = "\nmain()" if node.is_python_main else ""
return f"@resumable {funcdef}\n{body}\nend\n{maybe_main}"
def visit_offsetArrays(t_self, node, decorator):
t_self._usings.add("OffsetArrays")
# def visit_array(self, node, vargs):
# type_code: str = re.sub(r"\"", "", vargs[0])
# if type_code in TYPE_CODE_MAP:
# return f"Vector{{{TYPE_CODE_MAP[type_code]}}}"
######################################################################
########## Range ##########
def visit_range(t_self, node, vargs: List[str]) -> str:
start = 0
stop = 0
step = None
if len(vargs) == 1:
stop = vargs[0]
else:
start = vargs[0]
stop = vargs[1]
if len(node.args) == 3:
step = vargs[2]
if step:
return f"{start}:{step}:{stop}"
return f"{start}:{stop}"
########## Builtin Functions ##########
def visit_getattr(t_self, node, vargs: list[str]):
parsed_args = JuliaTranspilerPlugins._parse_attrs(t_self, node)
if len(parsed_args) == 3:
# Cannot remove the quotes from default
default = t_self.visit(node.args[-1])
return f"""(hasfield(typeof({parsed_args[0]}), :{parsed_args[1]}) ?
getfield({parsed_args[0]}, :{parsed_args[1]}) : {default})"""
elif len(parsed_args) == 2:
return f"getfield({parsed_args[0]}, :{parsed_args[1]})"
return "getfield"
def visit_hasattr(t_self, node, vargs: list[str]):
parsed_args = JuliaTranspilerPlugins._parse_attrs(t_self, node)
if len(parsed_args) == 2:
return f"hasfield(typeof({parsed_args[0]}), :{parsed_args[1]})"
return "hasfield"
def _parse_attrs(t_self, node):
parsed_args = []
for arg in node.args:
if isinstance(arg, ast.Constant):
parsed_args.append(t_self.visit_Constant(arg, quotes=False))
else:
parsed_args.append(t_self.visit(arg))
return parsed_args
########## Cast ##########
def visit_cast_int(t_self, node, vargs) -> str:
if hasattr(node, "args") and node.args:
needs_parsing = False
is_float = False
for arg in node.args:
if isinstance(arg, ast.Compare):
continue
elif arg_type := t_self._typename_from_annotation(arg):
if arg_type.startswith("Float"):
is_float = True
elif not arg_type.startswith("Int"):
needs_parsing = True
break
if needs_parsing:
return f"parse(Int, {vargs[0]})"
elif is_float:
return f"Int(floor({vargs[0]}))"
else:
return f"Int({vargs[0]})"
return f"zero(Int)" # Default int value
########## String operations ##########
def visit_maketrans(t_self, node, vargs: list[str]):
original_lst = [vargs[0][i] for i in range(2, len(vargs[0]) - 1)]
replacement_lst = []
byte_replacements = str(vargs[1][2:-1])
i = 0
while i < len(byte_replacements):
if byte_replacements[i:i+2] == "\\x":
replacement_lst.append(str(byte_replacements[i:i+4]))
i += 4
else:
replacement_lst.append(byte_replacements[i])
i += 1
element_lst = []
for o, r in zip(original_lst, replacement_lst):
if o in t_self._str_special_character_map:
o = t_self._str_special_character_map[o]
if r in t_self._str_special_character_map:
r = t_self._str_special_character_map[r]
element_lst.append(f'b"{o}" => b"{r}"')
element_lst_str = ", ".join(element_lst)
return f"Dict({element_lst_str})"
def visit_join(t_self, node, vargs):
if len(vargs) == 2:
return f"join({vargs[1]}, {vargs[0]})"
elif len(vargs) == 1:
return f"x -> join(x, {vargs[0]})"
return "join"
# TODO: Optimize (possibly using regex)
def visit_format(t_self, node, vargs):
subst_values: list[str] = vargs[1:]
res: list[str] = re.split("{|}", vargs[0])
# Fill in empty curly braces
cnt = 0
for i in range(len(res)):
r = res[i]
if r == "":
res[i] = f"{cnt}"
cnt+=1
# Create replacement map to replace original strings
replacement_map = {}
for i in range(len(subst_values)):
subst_val = re.split(r"\s*=\s*", subst_values[i])
if len(subst_val) > 1:
original = subst_val[0]
replacement = subst_val[1]
else:
original = f"{i}"
replacement = subst_val[0]
replacement_map[original] = replacement
# Replace placeholders for values
for j in range(1, len(res), 2):
split_res = res[j].split(".")
split_res[0] = split_res[0].translate(str.maketrans(replacement_map))
res[j] = f"$({'.'.join(split_res)})"
return "".join(res)
def visit_translate(t_self,node, vargs):
if len(vargs) < 2:
return "replace!"
if len(vargs) == 2:
translation_map = vargs[1]
elif len(vargs) == 3:
# Include "delete" parameter
key_map = []
del_args = vargs[2][2:-1]
i = 0
while i < len(del_args):
if del_args[i] == "\\":
arg = del_args[i:i+2]
i += 2
else:
arg = del_args[i]
i += 1
if arg in t_self._str_special_character_map:
arg = t_self._str_special_character_map[arg]
key_map.append(f"b\"{arg}\" => b\"\"")
key_map = ", ".join(key_map)
translation_map = f"merge!({vargs[1]}, Dict({key_map}))"
return f"replace!(collect({vargs[0]}), {translation_map}...)"
########## Array Operations ##########
def visit_bytearray(t_self, node, vargs: list[str]):
if len(vargs) == 0:
return "Vector{UInt8}()"
else:
parsed_args = vargs[0]
if isinstance(node.args[0], ast.GeneratorExp) \
or getattr(node.args[0], "is_gen_expr", None):
parsed_args = parsed_args.removeprefix("(").removesuffix(")")
parsed_args = f"[{vargs[0][1:-1]}]"
return f"Vector{{UInt8}}({parsed_args})"
def visit_islice(t_self, node, vargs: list[str]) -> str:
node.is_gen_expr = True
return f"({vargs[0]} for _ in (0:{vargs[1]}))"
def visit_iter(t_self, node, vargs: list[str]) -> str:
node.is_gen_expr = True
return f"(x for x in {vargs[0]})"
def visit_next(t_self, node: ast.Call, vargs: list[str]) -> str:
func_def = get_func_def(node, vargs[0])
if get_id(getattr(func_def, "annotation", None)) == "Generator":
decs = getattr(func_def, "parsed_decorators", None)
if RESUMABLE in decs:
if len(vargs) > 1:
return f"{vargs[0]}({', '.split(vargs[1:])})"
elif getattr(node, | |
"""
ActionBase superclass
"""
try:
from urllib.parse import urlencode, urlparse # Python 3
from io import StringIO
from builtins import str
except ImportError:
from urllib import urlencode # Python 2
from urlparse import urlparse
from StringIO import StringIO
import importlib
import io
import json
import logging
import os
import random
import string
import requests
import pandas
from ..util import obtain_owner_org, package_search, dataset_query, create_output_dir
from ..catalog_query import ActionException
class ActionBase(object):
"""
Attributes
----------
catalog_api_url : str
URL of CKAN API to sumbit queries to
params_list: list
list of query parameters specified by input param (use query_params instead)
query_params : dict
dict of query params passed (note: this dict by nature won't include repeated keys, so beware. Already used in various code locations so kept for simplicity; use params_list if repeated query keys are necessary - e.g. for Solr filter queries)
action_name: str
name of the Action specified by input param
label: str
random 5 char string for labeling output
results_filename: file
output file to write results from the Action
errors_filename: file
output file to write errors encountered while running Action
out: file
output file for logging purposes
"""
# def __init__(self, *args, **kwargs):
def __init__(self, **kwargs):
# logging:
m = importlib.import_module(self.__module__)
self.logger = logging.getLogger(m.__name__)
self.logger.setLevel(logging.INFO)
log = logging.FileHandler(m.__name__.split(".")[-1] + ".log", mode='w')
log.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s'))
self.logger.addHandler(log)
# decode parameters:
self.catalog_api_url = kwargs.get("catalog_api_url")
# the query parameters for this action are all passed in list form in the 'query' parameter arg, and must be decoded:
# this is a bit of a hack to extract query parameter keys into instance variables to use in the queries
# expected values are along the lines of:
# -name:<organization_name>
# -resource_format: <format of a dataset's resource>
# -resource_name: <name of a dataset's resource>
# - more...?
if kwargs.get("query") is not None:
self.params_list = kwargs.get("query").split(",")
else:
self.params_list = []
# split the params by ':' into self.query_params:
# note: dicts do not accommodate repeating keys, so may lose repeated param keys passed (eg. res_format:, res_format:)
# self.query_params was already incorporated in the code, so kept, but should use self.params_list instead
if len(self.params_list) >= 1:
self.query_params = dict(param.split(":") for param in self.params_list)
else:
self.query_params = {}
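# e.g. query="name:ExampleOrg,resource_format:ERDDAP" (a hypothetical value) yields
#   params_list  = ["name:ExampleOrg", "resource_format:ERDDAP"]
#   query_params = {"name": "ExampleOrg", "resource_format": "ERDDAP"}
# (repeated keys survive only in params_list, not in query_params)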
# set the operator passed:
self.operator = kwargs.get("operator")
# get the Action file name to use in naming output file, using os.path.split and create a random string label:
# first need a reference to subclass __module__ to obtain __file__:
self.action_name = os.path.split(m.__file__)[1].split(".")[0]
self.label = "".join(random.choice(string.ascii_lowercase) for i in range(5))
# create the results_filename (path to results output file) depending on if an 'output' filename parameter was provided or not:
if "output" in kwargs:
self.results_filename = kwargs['output']
else:
# utf-8 issues resolved by just passing results_filename to DataFrame.to_csv, rather than opening filehandle here and echoing output to it:
if "name" in self.query_params.keys():
self.results_filename = os.path.join(self.query_params.get("name"), "_".join([self.query_params.get("name"), self.action_name, self.label]) + ".csv")
else:
self.results_filename = os.path.join(os.getcwd(), "_".join([self.action_name, self.label]) + ".csv")
# create the errors_filename (path to error output file) depending on whether an 'error_output' filename parameter was provided or not:
if "error_output" in kwargs:
self.errors_filename = kwargs['error_output']
else:
if "name" in self.query_params.keys():
self.errors_filename = os.path.join(self.query_params.get("name"), "_".join([self.query_params.get("name"), "error", self.action_name, self.label]) + ".csv")
else:
self.errors_filename = os.path.join(os.getcwd(), "_".join([self.action_name, "error", self.label]) + ".csv")
def obtain_owner_org(self, org_name):
"""
obtain_owner_org: return org info from the CKAN API via query by Org Name (org_name)
obtain the organization id:
https://data.ioos.us/api/3/action/organization_list?q=
"""
action = "organization_list"
payload = {'q': org_name, 'all_fields': 'true'}
url = ("/").join([self.catalog_api_url, "action", action])
if self.logger:
self.logger.info("Executing {action}. URL: {url}. Parameters {params}".format(action=action, url=url, params=payload))
r = requests.post(url=url, json=payload)
result = json.loads(r.text)
print(json.dumps(result, indent=4, sort_keys=True))
# the step to loop through the 'result' array isn't really necessary since we expect the org name
# to match what was passed in the query, but it is safer than assuming it will (API may return multiple)
for org in result['result']:
# could also use org['title'] it seems
if org['display_name'] == org_name:
org_result = org
# check to make sure a valid Org name was passed:
try:
org_result
except NameError:
raise ActionException("Error: no Organization matching {org} exists in the Catalog. Please try again (query is case sensitive).".format(org=org_name))
print("Organization id: {id}".format(id=org_result['id']))
return org_result
def package_search(self, org_id=None, params=None, operator=None, start_index=0, rows=100):
"""
package_search: run the package_search CKAN API query, filtering by org_id, iterating by 100, starting with 'start_index'
perform package_search by owner_org:
https://data.ioos.us/api/3/action/package_search?q=owner_org:
"""
action = "package_search"
payload = {'start': start_index, 'rows': rows}
if org_id is not None:
payload['owner_org'] = org_id
if params is not None:
query = " {} ".format(operator).join(params)
payload['q'] = query
print(payload)
url = ("/").join([self.catalog_api_url, "action", action])
if self.logger:
self.logger.info("Executing {action}. URL: {url}. Parameters {params}".format(action=action, url=url, params=payload))
#r = requests.get(url=url, headers = {'content-type': 'application/json'}, params=payload)
#r = requests.post(url=url, headers = {'content-type': 'application/json'}, data=json.dumps(payload))
r = requests.post(url=url, headers = {'content-type': 'application/json'}, json=payload)
# either works:
#result = json.loads(r.text)
result = r.json()
# this is the full package_search result:
#print(r.text)
#if out:
# out.write(json.dumps(result, indent=4, sort_keys=True, ensure_ascii=False))
return result
def dataset_query(self, org_id=None, params=None, operator=None, rows=100):
"""
Wrapper function that queries the CKAN package_search API endpoint via the package_search function and collects the results into a list
"""
count = 0
dataset_results = []
while True:
package_results = self.package_search(org_id=org_id, params=params, operator=operator, start_index=count, rows=rows)
# obtain the total result count to iterate if necessary:
result_count = package_results['result']['count']
if count == 0:
print("result_count: " + str(result_count))
# here we just append to dataset_results a nested dict with package['id'] and package JSON string
for package in package_results['result']['results']:
count += 1
#print(package)
"""
for resource in package['resources']:
# perform the resource filtering logic:
# this entails parsing out all the query_params that start with 'resource_', then parsing the
# remaining key string (after 'resource_') and using that as the attribute of the CKAN resource
# to filter by (ie 'resource_name' = resource['name'], resource_format = resource['format'], etc)
# NOTE: query parameters are ANDed together:
resource_query_keys = [key for key in self.query_params.keys() if key.startswith("resource_")]
for i, key in enumerate(resource_query_keys):
# this is the step where we filter out by resource['name'], resource['format'] etc, by taking
# the second part of the resource_query_key string after 'resource_' and filtering.
# break from loop if a query parameter check fails:
if resource[key.split("_", 1)[1]] != self.query_params[key]:
break
# if all checks pass, we add this to the resource_results list:
elif len(resource_query_keys) == i + 1:
resource_results.append(resource)
"""
dataset_results.append({
'id': package['id'],
'package': package
})
if count == result_count:
break
return dataset_results
def parse_dataset_results(self, results):
"""
parse the results list, write output to self.out
"""
# handle results (list of dicts):
# [{'id': 'package_id', 'package': 'package_json'},]
datasets = []
for result in results:
#print("id: {id}".format(id=result['id']))
#print("package: {package}".format(package=result['package']))
# for this action, we just want to extract some attributes of the dataset and dump to .csv:
# ['id']: dataset id
# ['name']: used to construct a URL
# ['dataset_url']: CKAN catalog URL for the dataset (constructed from 'name')
# ['title']: the real 'name'
# ['harvest_object_url']: CKAN harvest object URL (stored ISO XML)
# ['waf_location']: URL to the original harvested XML file
# ['type']: usually 'dataset', but whatever
# ['num_resources']: number of associated resources
# ['num_tags']: number of associated tags
# ['bbox']: the bounding box JSON (extracted from an 'extra' of the dataset with key='spatial')
# ['resources']['format']: resource format
# ['organization']['title']: the dataset's organization title
parsed_url = urlparse(self.catalog_api_url, allow_fragments=False)
try:
bbox = [extra['value'] for extra in result['package']['extras'] if extra['key'] == "spatial"][0]
except IndexError:
bbox = ""
try:
harvest_object_id = [extra['value'] for extra in result['package']['extras'] if extra['key'] == "harvest_object_id"][0]
harvest_object_url = "{scheme}://{netloc}/harvest/object/{id}".format(scheme=parsed_url.scheme, netloc=parsed_url.netloc, id=harvest_object_id)
except IndexError:
harvest_object_url = ""
try:
waf_location = [extra['value'] for extra in result['package']['extras'] if extra['key'] == "waf_location"][0]
except IndexError:
waf_location = ""
dataset_url = "{scheme}://{netloc}/dataset/{name}".format(scheme=parsed_url.scheme, netloc=parsed_url.netloc, name=result['package']['name'])
# necessary to quote ("") any fields that may have commas or semicolons for CSV output:
if any(x in result['package']['title'] for x in [",",";"]):
title = "\"{title}\"".format(title=result['package']['title'])
else:
title = result['package']['title']
resource_formats = [resource['format'] for resource in result['package']['resources']]
#formats_list = "\"{list}\"".format(list=",".join(resource_formats))
formats_list = "-".join(resource_formats)
organization = result['package']['organization']['title']
datasets.append({
'id': result['package']['id'],
'name': result['package']['name'],
| |
enabled = c_int()
dwf.FDwfAnalogInChannelEnableGet(self.hdwf, c_int(channel), byref(enabled))
return enabled.value
def analog_in_channel_filter_info(self):
filter = c_int()
dwf.FDwfAnalogInChannelFilterInfo(self.hdwf, byref(filter))
return filter
def analog_in_channel_filter_set(self, channel, filter):
dwf.FDwfAnalogInChannelFilterSet(self.hdwf, c_int(channel), filter._value)
def analog_in_channel_filter_get(self, channel):
filter = c_int()
dwf.FDwfAnalogInChannelFilterGet(self.hdwf, c_int(channel), byref(filter))
return AnalogAcquisitionFilter(filter)
def analog_in_channel_range_info(self):
"""
Returns
-------
volts_min : float
volts_max : float
volts_steps : float
"""
volts_min = c_double()
volts_max = c_double()
volts_steps = c_double()
dwf.FDwfAnalogInChannelRangeInfo(self.hdwf, byref(volts_min), byref(volts_max), byref(volts_steps))
return volts_min.value, volts_max.value, volts_steps.value
def analog_in_channel_range_set(self, channel, channel_range):
dwf.FDwfAnalogInChannelRangeSet(self.hdwf, c_int(channel), c_double(channel_range))
def analog_in_channel_range_get(self, channel):
channel_range = c_double()
dwf.FDwfAnalogInChannelRangeGet(self.hdwf, c_int(channel), byref(channel_range))
return channel_range.value
def analog_in_channel_offset_info(self):
volts_min = c_double()
volts_max = c_double()
steps = c_double()
dwf.FDwfAnalogInChannelOffsetInfo(self.hdwf, byref(volts_min), byref(volts_max), byref(steps))
return volts_min.value, volts_max.value, steps.value
def analog_in_channel_offset_set(self, channel, offset):
dwf.FDwfAnalogInChannelOffsetSet(self.hdwf, c_int(channel), c_double(offset))
def analog_in_channel_offset_get(self, channel):
offset = c_double()
dwf.FDwfAnalogInChannelOffsetGet(self.hdwf, c_int(channel), byref(offset))
return offset.value
def analog_in_channel_attenuation_set(self, channel, attenuation):
"""
Configures the attenuation for each channel. When channel index is specified as -1, each enabled AnalogIn
channel attenuation will be configured to the same level. The attenuation does not change the attenuation on the device, just informs the library about the externally applied attenuation.
Parameters
----------
channel : int
attenuation : float
"""
dwf.FDwfAnalogInChannelAttenuationSet(self.hdwf, c_int(channel), c_double(attenuation))
def analog_in_channel_attenuation_get(self, channel):
attenuation = c_double()
dwf.FDwfAnalogInChannelAttenuationGet(self.hdwf, c_int(channel), byref(attenuation))
return attenuation.value
def analog_in_trigger_source_set(self, source):
dwf.FDwfAnalogInTriggerSourceSet(self.hdwf, source._value)
def analog_in_trigger_source_get(self):
source = c_int()
dwf.FDwfAnalogInTriggerSourceGet(self.hdwf, byref(source))
return TriggerSource(source)
def analog_in_trigger_position_info(self):
"""
Returns the minimum and maximum values of the trigger position in seconds.
For Single/Repeated acquisition mode the horizontal trigger position is relative to the buffer middle point.
For Record mode the position is relative to the start of the capture.
.. todo:: The documentation specifies steps as double, but it makes more sense for it to be an integer. Other
methods like :meth:`~analog_in_trigger_auto_timeout_info` use an integer
Returns
-------
min_trigger : float
max_trigger : float
steps : float
"""
min_trigger = c_double()
max_trigger = c_double()
steps = c_double()
dwf.FDwfAnalogInTriggerPositionInfo(self.hdwf, byref(min_trigger), byref(max_trigger), byref(steps))
return min_trigger.value, max_trigger.value, steps.value
def analog_in_trigger_position_set(self, position):
dwf.FDwfAnalogInTriggerPositionSet(self.hdwf, c_double(position))
def analog_in_trigger_position_get(self):
position = c_double()
dwf.FDwfAnalogInTriggerPositionGet(self.hdwf, byref(position))
return position.value
def analog_in_trigger_auto_timeout_info(self):
min_timeout = c_double()
max_timeout = c_double()
steps = c_int()
dwf.FDwfAnalogInTriggerAutoTimeoutInfo(self.hdwf, byref(min_timeout), byref(max_timeout), byref(steps))
return min_timeout.value, max_timeout.value, steps.value
def analog_in_trigger_auto_timeout_set(self, timeout=0):
dwf.FDwfAnalogInTriggerAutoTimeoutSet(self.hdwf, c_double(timeout))
def analog_in_trigger_auto_timeout_get(self):
timeout = c_double()
dwf.FDwfAnalogInTriggerAutoTimeoutGet(self.hdwf, byref(timeout))
return timeout.value
def analog_in_trigger_holdoff_info(self):
""" Returns the supported range of the trigger Hold-Off time in Seconds. The trigger hold-off is an
adjustable period of time during which the acquisition will not trigger. This feature is used when you are triggering on burst waveform shapes, so the oscilloscope triggers only on the first eligible trigger point.
Returns
-------
min_holdoff : float
max_holdoff : float
steps : float
"""
min_holdoff = c_double()
max_holdoff = c_double()
steps = c_double()
dwf.FDwfAnalogInTriggerHoldOffInfo(self.hdwf, byref(min_holdoff), byref(max_holdoff), byref(steps))
return min_holdoff.value, max_holdoff.value, steps.value
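# Usage sketch (with a hypothetical device wrapper `dev`): clamp a requested hold-off to the
# supported range before applying it, e.g.
#   lo, hi, _ = dev.analog_in_trigger_holdoff_info()
#   dev.analog_in_trigger_holdoff_set(min(max(desired_holdoff, lo), hi))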
def analog_in_trigger_holdoff_set(self, holdoff):
dwf.FDwfAnalogInTriggerHoldOffSet(self.hdwf, c_double(holdoff))
def analog_in_trigger_holdoff_get(self):
holdoff = c_double()
dwf.FDwfAnalogInTriggerHoldOffGet(self.hdwf, byref(holdoff))
return holdoff.value
def analog_in_trigger_type_set(self, trig_type):
dwf.FDwfAnalogInTriggerTypeSet(self.hdwf, trig_type._value)
def analog_in_trigger_type_get(self):
trig_type = c_int()
dwf.FDwfAnalogInTriggerTypeGet(self.hdwf, byref(trig_type))
return AnalogInTriggerMode(trig_type)
def analog_in_trigger_channel_info(self):
min_channel = c_int()
max_channel = c_int()
dwf.FDwfAnalogInTriggerChannelInfo(self.hdwf, byref(min_channel), byref(max_channel))
return min_channel.value, max_channel.value
def analog_in_trigger_channel_set(self, channel):
"""Sets the trigger channel."""
dwf.FDwfAnalogInTriggerChannelSet(self.hdwf, c_int(channel))
def analog_in_trigger_filter_info(self):
""" Returns the supported trigger filters. They are returned (by reference) as a bit field which can be
parsed using the IsBitSet Macro. Individual bits are defined using the FILTER constants in DWF.h. Select
trigger detector sample source, FILTER:
- filterDecimate: Looks for trigger in each ADC conversion, can detect glitches.
- filterAverage: Looks for trigger only in average of N samples, given by :meth:`~analog_in_frequency_set`.
"""
filter_info = c_int()
dwf.FDwfAnalogInTriggerFilterInfo(self.hdwf, byref(filter_info))
return filter_info.value
def analog_in_trigger_filter_set(self, trig_filter):
dwf.FDwfAnalogInTriggerFilterSet(self.hdwf, trig_filter._value)
def analog_in_trigger_filter_get(self):
trig_filter = c_int()
dwf.FDwfAnalogInTriggerFilterGet(self.hdwf, byref(trig_filter))
return AnalogAcquisitionFilter(trig_filter)
def analog_in_trigger_channel_get(self):
channel = c_int()
dwf.FDwfAnalogInTriggerChannelGet(self.hdwf, byref(channel))
return channel.value
def analog_in_trigger_condition_info(self):
""" Returns the supported trigger type options for the instrument. They are returned (by reference) as a bit
field. This bit field can be parsed using the IsBitSet Macro. Individual bits are defined using the DwfTriggerSlope constants in dwf.h. These trigger condition options are:
- DwfTriggerSlopeRise (This is the default setting):
- For edge and transition trigger on rising edge.
- For pulse trigger on positive pulse; For window exiting.
- DwfTriggerSlopeFall
- For edge and transition trigger on falling edge.
- For pulse trigger on negative pulse; For window entering.
- DwfTriggerSlopeEither
- For edge and transition trigger on either edge.
- For pulse trigger on either positive or negative pulse.
Returns
-------
info : int
"""
info = c_int()
dwf.FDwfAnalogInTriggerConditionInfo(self.hdwf, byref(info))
return info.value
def analog_in_trigger_condition_set(self, condition):
dwf.FDwfAnalogInTriggerConditionSet(self.hdwf, condition._value)
def analog_in_trigger_condition_get(self):
condition = c_int()
dwf.FDwfAnalogInTriggerConditionGet(self.hdwf, byref(condition))
return TriggerSlope(condition)
def analog_in_trigger_level_info(self):
volts_min = c_double()
volts_max = c_double()
steps = c_int()
dwf.FDwfAnalogInTriggerLevelInfo(self.hdwf, byref(volts_min), byref(volts_max), byref(steps))
return volts_min.value, volts_max.value, steps.value
def analog_in_trigger_level_set(self, level):
dwf.FDwfAnalogInTriggerLevelSet(self.hdwf, c_double(level))
def analog_in_trigger_level_get(self):
level = c_double()
dwf.FDwfAnalogInTriggerLevelGet(self.hdwf, byref(level))
return level.value
def analog_in_trigger_hysteresis_info(self):
""" Retrieves the range of valid trigger hysteresis voltage levels for the AnalogIn instrument in Volts. The
trigger detector uses two levels: low level (TriggerLevel - Hysteresis) and high level (TriggerLevel + Hysteresis). Trigger hysteresis can be used to filter noise for Edge or Pulse trigger. The low and high levels are used in transition time triggering."""
volts_min = c_double()
volts_max = c_double()
steps = c_int()
dwf.FDwfAnalogInTriggerHysteresisInfo(self.hdwf, byref(volts_min), byref(volts_max), byref(steps))
return volts_min.value, volts_max.value, steps.value
def analog_in_trigger_hysteresis_set(self, level):
dwf.FDwfAnalogInTriggerHysteresisSet(self.hdwf, c_double(level))
def analog_in_trigger_hysteresis_get(self):
level = c_double()
dwf.FDwfAnalogInTriggerHysteresisGet(self.hdwf, byref(level))
return level.value
def analog_in_trigger_length_condition_info(self):
"""
Returns the supported trigger length condition options for the AnalogIn instrument. They are returned (by
reference) as a bit field. This bit field can be parsed using the IsBitSet Macro. Individual bits are defined
using the TRIGLEN constants in DWF.h. These trigger length condition options are:
- triglenLess: Trigger immediately when a shorter pulse or transition time is detected.
- triglenTimeout: Trigger immediately as the pulse length or transition time is reached.
- triglenMore: Trigger when the length/time is reached, and pulse or transition has ended.
Returns
-------
supported trigger length conditions
"""
condition = c_int()
dwf.FDwfAnalogInTriggerLengthConditionInfo(self.hdwf, byref(condition))
return condition.value
def analog_in_trigger_length_condition_set(self, length):
dwf.FDwfAnalogInTriggerLengthConditionSet(self.hdwf, length._value)
def analog_in_trigger_length_condition_get(self):
    condition = c_int()
    dwf.FDwfAnalogInTriggerLengthConditionGet(self.hdwf, byref(condition))
    return TriggerLength(condition)
def analog_in_trigger_length_info(self):
"""
Returns the supported range of trigger length for the instrument in Seconds. The trigger length specifies the
minimal or maximal pulse length or transition time.
"""
min_length = c_double()
max_length = c_double()
steps = c_double()
dwf.FDwfAnalogInTriggerLengthInfo(self.hdwf, byref(min_length), byref(max_length), byref(steps))
return min_length.value, max_length.value, steps.value
def analog_in_trigger_length_set(self, length):
dwf.FDwfAnalogInTriggerLengthSet(self.hdwf, c_double(length))
def analog_in_trigger_length_get(self):
    length = c_double()
    dwf.FDwfAnalogInTriggerLengthGet(self.hdwf, byref(length))
    return length.value
def digital_out_reset(self):
dwf.FDwfDigitalOutReset(self.hdwf)
def digital_out_configure(self, status):
dwf.FDwfDigitalOutConfigure(self.hdwf, c_int(status))
def digital_out_status(self):
status = c_ubyte()
dwf.FDwfDigitalOutStatus(self.hdwf, byref(status))
return InstrumentState(status)
def digital_out_internal_clock_info(self):
frequency = c_double()
dwf.FDwfDigitalOutInternalClockInfo(self.hdwf, byref(frequency))
return frequency.value
def digital_out_trigger_source_set(self, source):
dwf.FDwfDigitalOutTriggerSourceSet(self.hdwf, source._value)
def digital_out_trigger_source_get(self):
source = c_ubyte()
dwf.FDwfDigitalOutTriggerSourceGet(self.hdwf, byref(source))
return TriggerSource(source)
def digital_out_trigger_slope_set(self, slope):
dwf.FDwfDigitalOutTriggerSlopeSet(self.hdwf, slope._value)
def digital_out_trigger_slope_get(self):
slope = c_int()
dwf.FDwfDigitalOutTriggerSlopeGet(self.hdwf, byref(slope))
return TriggerSlope(slope)
def digital_out_run_info(self):
min_run_len = c_double()
max_run_len = c_double()
dwf.FDwfDigitalOutRunInfo(self.hdwf, byref(min_run_len), byref(max_run_len))
return min_run_len.value, max_run_len.value
def digital_out_run_set(self, run_len):
dwf.FDwfDigitalOutRunSet(self.hdwf, c_double(run_len))
def digital_out_run_get(self):
run_len = c_double()
dwf.FDwfDigitalOutRunGet(self.hdwf, byref(run_len))
return run_len.value
def digital_out_run_status(self):
"""Reads the remaining run length. It returns data from the last :meth:`~digital_out_status` call."""
run_len = c_double()
dwf.FDwfDigitalOutRunStatus(self.hdwf, byref(run_len))
return run_len.value
def digital_out_wait_info(self):
""" Returns the supported wait length range in seconds. The wait length is how long the instrument waits
after being triggered to generate the signal. Default value is zero."""
min_wait_length = c_double()
max_wait_length = c_double()
dwf.FDwfDigitalOutWaitInfo(self.hdwf, byref(min_wait_length), byref(max_wait_length))
return min_wait_length.value, max_wait_length.value
def digital_out_wait_set(self, wait):
dwf.FDwfDigitalOutWaitSet(self.hdwf, c_double(wait))
def digital_out_wait_get(self):
wait = c_double()
dwf.FDwfDigitalOutWaitGet(self.hdwf, byref(wait))
return wait.value
def digital_out_repeat_info(self):
min_repeat = c_uint()
max_repeat = c_uint()
dwf.FDwfDigitalOutRepeatInfo(self.hdwf, byref(min_repeat), byref(max_repeat))
return min_repeat.value, max_repeat.value
def digital_out_repeat_set(self, repeat):
dwf.FDwfDigitalOutRepeatSet(self.hdwf, c_uint(repeat))
def digital_out_repeat_get(self):
repeat = c_uint()
dwf.FDwfDigitalOutRepeatGet(self.hdwf, byref(repeat))
return repeat.value
def digital_out_repeat_status(self):
repeat_counts = c_uint()
dwf.FDwfDigitalOutRepeatStatus(self.hdwf, byref(repeat_counts))
return repeat_counts.value
def digital_out_repeat_trigger_set(self, trigger):
"""Sets the repeat trigger option. To include the trigger in wait-run repeat cycles, set fRepeatTrigger to
TRUE. It is disabled by default."""
dwf.FDwfDigitalOutRepeatTriggerSet(self.hdwf, c_int(trigger))
def digital_out_repeat_trigger_get(self):
trigger = c_int()
dwf.FDwfDigitalOutRepeatTriggerGet(self.hdwf, byref(trigger))
return trigger.value
def digital_out_count(self):
"""Returns the number of Digital Out channels by the device specified by hdwf."""
count = c_int()
dwf.FDwfDigitalOutCount(self.hdwf, byref(count))
return count.value
| |
Period" (2006, Bulletin of the
Seismological Society of America, Volume 96, No. 3, pages
898-913). This class implements the equations for 'Subduction
Interface' (that's why the class name ends with 'SInter'). This
class extends the
:class:`openquake.hazardlib.gsim.zhao_2006.ZhaoEtAl2006Asc`
because the equation for subduction interface is obtained from the
equation for active shallow crust, by removing the faulting style
term and adding a subduction interface term.
"""
#: Supported tectonic region type is subduction interface, this means
#: that factors FR, SS and SSL are assumed 0 in equation 1, p. 901.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.SUBDUCTION_INTERFACE
#: Required rupture parameters are magnitude and focal depth.
REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'hypo_depth'))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS_ASC[imt]
C_SINTER = self.COEFFS_SINTER[imt]
# mean value as given by equation 1, p. 901, without considering the
# faulting style and intraslab terms (that is FR, SS, SSL = 0) and the
# inter and intra event terms, plus the magnitude-squared term
# correction factor (equation 5 p. 909)
mean = self._compute_magnitude_term(C, rup.mag) +\
self._compute_distance_term(C, rup.mag, dists.rrup) +\
self._compute_focal_depth_term(C, rup.hypo_depth) +\
self._compute_site_class_term(C, sites.vs30) + \
self._compute_magnitude_squared_term(P=0.0, M=6.3,
Q=C_SINTER['QI'],
W=C_SINTER['WI'],
mag=rup.mag) +\
C_SINTER['SI']
# convert from cm/s**2 to g
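# (exp(mean) is the median ground motion in cm/s**2; multiplying by 1e-2
# converts it to m/s**2 and dividing by g ~= 9.81 m/s**2 expresses it in
# units of g; the outer log takes it back to natural-log space)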
mean = np.log(np.exp(mean) * 1e-2 / g)
stddevs = self._get_stddevs(C['sigma'], C_SINTER['tauI'], stddev_types,
num_sites=len(sites.vs30))
return mean, stddevs
#: Coefficient table containing subduction interface coefficients,
#: taken from table 4, p. 903 (only column SI), and table 6, p. 907
#: (only columns QI, WI, TauI)
COEFFS_SINTER = CoeffsTable(sa_damping=5, table="""\
IMT SI QI WI tauI
pga 0.000 0.0 0.0 0.308
0.05 0.000 0.0 0.0 0.343
0.10 0.000 0.0 0.0 0.403
0.15 0.000 -0.0138 0.0286 0.367
0.20 0.000 -0.0256 0.0352 0.328
0.25 0.000 -0.0348 0.0403 0.289
0.30 0.000 -0.0423 0.0445 0.280
0.40 -0.041 -0.0541 0.0511 0.271
0.50 -0.053 -0.0632 0.0562 0.277
0.60 -0.103 -0.0707 0.0604 0.296
0.70 -0.146 -0.0771 0.0639 0.313
0.80 -0.164 -0.0825 0.0670 0.329
0.90 -0.206 -0.0874 0.0697 0.324
1.00 -0.239 -0.0917 0.0721 0.328
1.25 -0.256 -0.1009 0.0772 0.339
1.50 -0.306 -0.1083 0.0814 0.352
2.00 -0.321 -0.1202 0.0880 0.360
2.50 -0.337 -0.1293 0.0931 0.356
3.00 -0.331 -0.1368 0.0972 0.338
4.00 -0.390 -0.1486 0.1038 0.307
5.00 -0.498 -0.1578 0.1090 0.272
""")
class ZhaoEtAl2006SSlab(ZhaoEtAl2006Asc):
"""
Implements GMPE developed by John X. Zhao et al. and published as
"Attenuation Relations of Strong Ground Motion in Japan Using Site
Classification Based on Predominant Period" (2006, Bulletin of the
Seismological Society of America, Volume 96, No. 3, pages
898-913). This class implements the equations for 'Subduction
Slab' (that's why the class name ends with 'SSlab'). This class
extends the
:class:`openquake.hazardlib.gsim.zhao_2006.ZhaoEtAl2006Asc`
because the equation for subduction slab is obtained from the
equation for active shallow crust, by removing the faulting style
term and adding subduction slab terms.
"""
#: Supported tectonic region type is subduction intraslab, this means
#: that factors FR and SI are assumed 0 in equation 1, p. 901.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.SUBDUCTION_INTRASLAB
#: Required rupture parameters are magnitude and focal depth.
REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'hypo_depth'))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS_ASC[imt]
C_SSLAB = self.COEFFS_SSLAB[imt]
# to avoid singularity at 0.0 (in the calculation of the
# slab correction term), replace 0 values with 0.1
d = dists.rrup
d[d == 0.0] = 0.1
# mean value as given by equation 1, p. 901, without considering the
# faulting style and intraslab terms (that is FR, SS, SSL = 0) and the
# inter and intra event terms, plus the magnitude-squared term
# correction factor (equation 5 p. 909)
mean = self._compute_magnitude_term(C, rup.mag) +\
self._compute_distance_term(C, rup.mag, d) +\
self._compute_focal_depth_term(C, rup.hypo_depth) +\
self._compute_site_class_term(C, sites.vs30) +\
self._compute_magnitude_squared_term(P=C_SSLAB['PS'], M=6.5,
Q=C_SSLAB['QS'],
W=C_SSLAB['WS'],
mag=rup.mag) +\
C_SSLAB['SS'] + self._compute_slab_correction_term(C_SSLAB, d)
# convert from cm/s**2 to g
mean = np.log(np.exp(mean) * 1e-2 / g)
stddevs = self._get_stddevs(C['sigma'], C_SSLAB['tauS'], stddev_types,
num_sites=len(sites.vs30))
return mean, stddevs
def _compute_slab_correction_term(self, C, rrup):
"""
Compute path modification term for slab events, that is
the 8-th term in equation 1, p. 901.
"""
slab_term = C['SSL'] * np.log(rrup)
return slab_term
#: Coefficient table containing subduction slab coefficients taken from
#: table 4, p. 903 (only columns for SS and SSL), and table 6, p. 907
#: (only columns for PS, QS, WS, TauS)
COEFFS_SSLAB = CoeffsTable(sa_damping=5, table="""\
IMT SS SSL PS QS WS tauS
pga 2.607 -0.528 0.1392 0.1584 -0.0529 0.321
0.05 2.764 -0.551 0.1636 0.1932 -0.0841 0.378
0.10 2.156 -0.420 0.1690 0.2057 -0.0877 0.420
0.15 2.161 -0.431 0.1669 0.1984 -0.0773 0.372
0.20 1.901 -0.372 0.1631 0.1856 -0.0644 0.324
0.25 1.814 -0.360 0.1588 0.1714 -0.0515 0.294
0.30 2.181 -0.450 0.1544 0.1573 -0.0395 0.284
0.40 2.432 -0.506 0.1460 0.1309 -0.0183 0.278
0.50 2.629 -0.554 0.1381 0.1078 -0.0008 0.272
0.60 2.702 -0.575 0.1307 0.0878 0.0136 0.285
0.70 2.654 -0.572 0.1239 0.0705 0.0254 0.290
0.80 2.480 -0.540 0.1176 0.0556 0.0352 0.299
0.90 2.332 -0.522 0.1116 0.0426 0.0432 0.289
1.00 2.233 -0.509 0.1060 0.0314 0.0498 0.286
1.25 2.029 -0.469 0.0933 0.0093 0.0612 0.277
1.50 1.589 -0.379 0.0821 -0.0062 0.0674 0.282
2.00 0.966 -0.248 0.0628 -0.0235 0.0692 0.300
2.50 0.789 -0.221 0.0465 -0.0287 0.0622 0.292
3.00 1.037 -0.263 0.0322 -0.0261 0.0496 0.274
4.00 0.561 -0.169 0.0083 -0.0065 0.0150 0.281
5.00 0.225 -0.120 -0.0117 0.0246 -0.0268 0.296
""")
class ZhaoEtAl2006SInterNSHMP2008(ZhaoEtAl2006SInter):
"""
Extend :class:`ZhaoEtAl2006SInter` and fix hypocentral depth at 20 km
as defined by the National Seismic Hazard Mapping Project for the 2008 US
hazard model.
The calculation of the total standard deviation is done considering the
inter-event standard deviation as defined in table 5, page 903 of Zhao's
paper.
The class implements the equation as coded in ``subroutine zhao`` in the
``hazSUBXnga.f`` Fortran code available at:
http://earthquake.usgs.gov/hazards/products/conterminous/2008/software/
"""
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
Call super class method with hypocentral depth fixed at 20 km
"""
# create new rupture context to avoid changing the original one
new_rup = copy.deepcopy(rup)
new_rup.hypo_depth = 20.
mean, stddevs = super(ZhaoEtAl2006SInterNSHMP2008, self). \
get_mean_and_stddevs(sites, new_rup, dists, imt, stddev_types)
return mean, stddevs
COEFFS_SINTER = CoeffsTable(sa_damping=5, table="""\
IMT SI QI WI tauI
pga 0.000 0.0 0.0 0.3976
0.05 0.000 0.0 0.0 0.4437
0.10 0.000 0.0 0.0 0.4903
0.15 0.000 -0.0138 0.0286 0.4603
0.20 0.000 -0.0256 0.0352 0.4233
0.25 0.000 -0.0348 0.0403 0.3908
0.30 0.000 -0.0423 0.0445 0.3790
0.40 -0.041 -0.0541 0.0511 0.3897
0.50 -0.053 -0.0632 0.0562 0.3890
0.60 -0.103 -0.0707 0.0604 0.4014
0.70 -0.146 -0.0771 0.0639 0.4079
0.80 -0.164 -0.0825 0.0670 0.4183
0.90 -0.206 -0.0874 0.0697 0.4106
1.00 -0.239 -0.0917 0.0721 0.4101
1.25 -0.256 -0.1009 0.0772 0.4021
1.50 -0.306 -0.1083 0.0814 0.4076
2.00 -0.321 -0.1202 0.0880 0.4138
2.50 -0.337 -0.1293 0.0931 0.4108
3.00 -0.331 -0.1368 0.0972 0.3961
4.00 -0.390 -0.1486 0.1038 0.3821
5.00 -0.498 -0.1578 0.1090 0.3766
""")
class ZhaoEtAl2006SSlabNSHMP2014(ZhaoEtAl2006SSlab):
"""
For the 2014 US National Seismic Hazard Maps, the magnitude of Zhao et al.
(2006) for subduction intraslab events is capped at magnitude Mw 7.8.
"""
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS_ASC[imt]
C_SSLAB = self.COEFFS_SSLAB[imt]
# to avoid singularity at 0.0 (in the calculation of the
# slab correction term), replace 0 values with 0.1
d = dists.rrup
d[d == 0.0] = 0.1
if rup.mag > 7.8:
rup_mag = 7.8
else:
rup_mag = rup.mag
# mean value as given by equation 1, p. 901, without considering the
# faulting style and intraslab terms (that is FR, SS, SSL = 0) and the
# inter and intra event terms, plus the magnitude-squared term
# correction factor (equation 5 p. 909)
| |
<filename>third_party/blink/renderer/bindings/scripts/web_idl/idl_type.py
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
from blinkbuild.name_style_converter import NameStyleConverter
from .composition_parts import WithDebugInfo
from .composition_parts import WithExtendedAttributes
from .composition_parts import WithIdentifier
from .extended_attribute import ExtendedAttributes
from .reference import RefById
from .reference import RefByIdFactory
from .typedef import Typedef
from .user_defined_type import UserDefinedType
# The implementation class hierarchy of IdlType
#
# IdlType
# + SimpleType
# + ReferenceType
# + DefinitionType
# + TypedefType
# + _ArrayLikeType
# | + SequenceType
# | + FrozenArrayType
# | + ObservableArrayType
# | + VariadicType
# + RecordType
# + PromiseType
# + UnionType
# + NullableType
_IDL_TYPE_PASS_KEY = object()
class IdlTypeFactory(object):
"""
Creates a group of instances of IdlType, over which you can iterate later.
There are two phases; instantiation phase and iteration phase. The factory
is initially in the instantiation phase and you can create instances of
IdlType. Once it enters the iteration phase (through the first attempt
to iterate), you can no longer create a new instance.
"""
def __init__(self):
self._idl_types = []
# Factory to initialize instances of ReferenceType.
self._ref_by_id_factory = RefByIdFactory(
target_attrs_with_priority=RefById.get_all_attributes(IdlType))
# |_is_frozen| is initially False and you can create new instances of
# IdlType. The first invocation of |for_each| freezes the factory and
# you can no longer create a new instance of IdlType.
self._is_frozen = False
def for_each(self, callback):
"""
Applies |callback| to all the instances of IdlType created by this
factory.
Instantiation of IdlType is no longer possible.
Args:
callback: A callable that takes an IdlType as only the argument.
Return value is not used.
"""
assert callable(callback)
self._is_frozen = True
for idl_type in self._idl_types:
callback(idl_type)
def for_each_reference(self, callback):
"""
Applies |callback| to all the instances of IdlType that is referencing
to another IdlType.
Instantiation of referencing IdlType is no longer possible, but it's
still possible to instantiate other IdlTypes.
Args:
callback: A callable that takes an IdlType as only the argument.
Return value is not used.
"""
self._ref_by_id_factory.for_each(callback)
def simple_type(self, *args, **kwargs):
return self._create(SimpleType, args, kwargs)
def reference_type(self, *args, **kwargs):
assert 'ref_by_id_factory' not in kwargs
kwargs['ref_by_id_factory'] = self._ref_by_id_factory
return self._create(ReferenceType, args, kwargs)
def definition_type(self, *args, **kwargs):
return self._create(DefinitionType, args, kwargs)
def typedef_type(self, *args, **kwargs):
return self._create(TypedefType, args, kwargs)
def sequence_type(self, *args, **kwargs):
return self._create(SequenceType, args, kwargs)
def frozen_array_type(self, *args, **kwargs):
return self._create(FrozenArrayType, args, kwargs)
def observable_array_type(self, *args, **kwargs):
return self._create(ObservableArrayType, args, kwargs)
def variadic_type(self, *args, **kwargs):
return self._create(VariadicType, args, kwargs)
def record_type(self, *args, **kwargs):
return self._create(RecordType, args, kwargs)
def promise_type(self, *args, **kwargs):
return self._create(PromiseType, args, kwargs)
def union_type(self, *args, **kwargs):
return self._create(UnionType, args, kwargs)
def nullable_type(self, *args, **kwargs):
return self._create(NullableType, args, kwargs)
def _create(self, idl_type_concrete_class, args, kwargs):
assert not self._is_frozen
idl_type = idl_type_concrete_class(
*args, pass_key=_IDL_TYPE_PASS_KEY, **kwargs)
self._idl_types.append(idl_type)
return idl_type
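# A rough usage sketch (hypothetical; the keyword names below are assumptions,
# since the concrete type constructors are defined further down): the factory
# is used during the instantiation phase and frozen by the first iteration.
#   factory = IdlTypeFactory()
#   long_type = factory.simple_type(name='long')
#   nullable_long = factory.nullable_type(inner_type=long_type)
#   factory.for_each(lambda t: print(t.type_name))  # freezes the factory
#   # any further factory.simple_type(...) call would now fail the assertion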
class IdlType(WithExtendedAttributes, WithDebugInfo):
"""
Represents a 'type' in Web IDL.
IdlType is an interface of types in Web IDL, and also provides all the
information that is necessary for type conversions. For example, given the
conversion rules of ECMAScript bindings, you can produce a type converter
between Blink types and V8 types with using an IdlType.
Note that IdlType is designed to _not_ include knowledge about a certain
language bindings (such as ECMAScript bindings), thus it's out of scope for
IdlType to tell whether IDL dictionary type accepts ES null value or not.
Nullable type and typedef are implemented as if they're a container type
like record type and promise type.
"""
class Optionality(object):
"""https://webidl.spec.whatwg.org/#dfn-optionality-value"""
class Type(str):
pass
REQUIRED = Type('required')
OPTIONAL = Type('optional')
VARIADIC = Type('variadic')
def __init__(self,
is_optional=False,
extended_attributes=None,
debug_info=None,
pass_key=None):
assert isinstance(is_optional, bool)
assert pass_key is _IDL_TYPE_PASS_KEY
WithExtendedAttributes.__init__(
self, extended_attributes, readonly=True)
WithDebugInfo.__init__(self, debug_info)
self._is_optional = is_optional
def __eq__(self, other):
"""Returns True if |self| and |other| represent the equivalent type."""
return (self.__class__ == other.__class__
and ExtendedAttributes.equals(self.extended_attributes,
other.extended_attributes)
and self.is_optional == other.is_optional)
def __ne__(self, other):
return not self == other
def __hash__(self):
raise NotImplementedError()
def make_copy(self, memo):
return self
@property
def syntactic_form(self):
"""
Returns a text representation of the type in the form of Web IDL syntax.
"""
raise NotImplementedError()
@property
def type_name(self):
"""
Returns the type name.
https://webidl.spec.whatwg.org/#dfn-type-name
Note that a type name is not necessarily unique.
"""
return '{}{}'.format(
self.type_name_without_extended_attributes, ''.join(
sorted(self.effective_annotations.keys())))
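# Illustrative example (not from this file): a 'long' type annotated with
# [Clamp] would yield a type name such as 'LongClamp' -- the base type name
# followed by the sorted annotation keys.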
@property
def type_name_with_extended_attribute_key_values(self):
name_pieces = []
name_pieces.append(self.type_name_without_extended_attributes)
annotations = self.effective_annotations
for key in sorted(annotations.keys()):
name_pieces.append(key)
name_pieces.extend(annotations.values_of(key))
return ''.join(name_pieces)
@property
def type_name_without_extended_attributes(self):
raise NotImplementedError()
@property
def keyword_typename(self):
"""
Returns the keyword name of the type if this is a simple built-in type,
e.g. "any", "boolean", "unsigned long long", "void", etc. Otherwise,
returns None.
"""
return None
def apply_to_all_composing_elements(self, callback):
"""
Applies |callback| to all instances of IdlType of which this IdlType
consists, including |self|.
In case of x.apply_to_all_composing_elements(callback), |callback| will
be recursively called back on x, x.inner_type, x.element_type,
x.result_type.original_type, etc. if any.
If |callback| raises a StopIteration, then this function stops
traversing deeper than this type (inner type, etc.), however, siblings
are still traversed. E.g. For record<K, V>, raising a StopIteration at
K doesn't prevent from traversing V.
"""
try:
callback(self)
except StopIteration:
return
def unwrap(self, nullable=None, typedef=None, variadic=None):
"""
Returns the body part of the actual type, i.e. returns the interesting
part of this type.
Args:
nullable:
typedef:
variadic:
All these arguments take tri-state value: True, False, or None.
True unwraps that type, False stops unwrapping that type. All
of specified arguments' values must be consistent, and mixture
of True and False is not allowed. Unspecified arguments are
automatically set to the opposite value. If no argument is
specified, unwraps all types.
"""
switches = {
'nullable': nullable,
'typedef': typedef,
'variadic': variadic,
}
value_counts = {None: 0, False: 0, True: 0}
for value in switches.values():
assert value is None or isinstance(value, bool)
value_counts[value] += 1
assert value_counts[False] == 0 or value_counts[True] == 0, (
"Specify only True or False arguments. Unspecified arguments are "
"automatically set to the opposite value.")
default = value_counts[True] == 0
for arg, value in switches.items():
if value is None:
switches[arg] = default
return self._unwrap(switches)
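# A short usage sketch of the tri-state arguments (variable name is
# illustrative only):
#   t.unwrap()                    # unwrap nullable, typedef and variadic
#   t.unwrap(typedef=True)        # unwrap typedefs only
#   t.unwrap(nullable=False)      # unwrap everything except nullable
#   t.unwrap(nullable=True, typedef=False)  # asserts: mixed True and False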
@property
def effective_annotations(self):
"""
Returns the extended attributes associated with this IDL type.
https://webidl.spec.whatwg.org/#idl-type-extended-attribute-associated-with
For example, given the following IDL fragments,
typedef [ExtAttr1] long NewLong;
void f([ExtAttr2] NewLong arg);
arg.idl_type.extended_attributes returns [ExtAttr2],
arg.idl_type.unwrap().extended_attributes returns [ExtAttr1], and
arg.idl_type.effective_annotations returns [ExtAttr1, ExtAttr2].
"""
return self.extended_attributes
@property
def does_include_nullable_type(self):
"""
Returns True if this type includes a nullable type.
https://webidl.spec.whatwg.org/#dfn-includes-a-nullable-type
"""
return False
@property
def does_include_nullable_or_dict(self):
"""
Returns True if this type includes a nullable type or a dictionary type.
IdlType's own definition of "includes a dictionary type" just follows
the definition of "includes a nullable type".
"""
return False
@property
def is_numeric(self):
"""
Returns True if this is an integer type or floating point number type.
"""
return False
@property
def is_integer(self):
"""Returns True if this is an integer type."""
return False
@property
def is_floating_point_numeric(self):
"""Returns True if this is a floating point numeric type."""
return False
@property
def is_boolean(self):
"""Returns True if this is boolean."""
return False
@property
def is_string(self):
"""
Returns True if this is one of DOMString, ByteString, or USVString.
"""
return False
@property
def is_buffer_source_type(self):
"""Returns True if this is a buffer source type."""
return False
@property
def is_array_buffer(self):
"""Returns True if this is ArrayBuffer."""
return False
@property
def is_array_buffer_view(self):
"""Returns True if this is ArrayBufferView."""
return False
@property
def is_data_view(self):
"""Returns True if this is DataView."""
return False
@property
def is_typed_array_type(self):
"""Returns True if this is a typed array type."""
return False
@property
def is_object(self):
"""
Returns True if this is exactly type 'object'.
Note that this method doesn't return True for an interface or dictionary
type, or type 'any'.
"""
return False
@property
def is_symbol(self):
"""Returns True if this is type 'symbol'."""
return False
@property
def is_any(self):
"""Returns True if this is type 'any'."""
return False
@property
def is_void(self):
"""Returns True if this is type 'void'."""
return False
@property
def is_interface(self):
"""Returns True if this is an interface type."""
return False
@property
def is_dictionary(self):
"""Returns True if this is | |
# -*- coding: utf-8 -*-
"""
Tests of the neo.core.irregularlysampledsignal.IrregularySampledSignal class
"""
import unittest
import os
import pickle
import warnings
from copy import deepcopy
import numpy as np
import quantities as pq
from numpy.testing import assert_array_equal
from neo.core.dataobject import ArrayDict
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.irregularlysampledsignal import IrregularlySampledSignal
from neo.core import Segment, ChannelIndex
from neo.core.baseneo import MergeError
from neo.test.tools import (assert_arrays_almost_equal, assert_arrays_equal,
assert_neo_object_is_compliant, assert_same_sub_schema,
assert_same_attributes, assert_same_annotations,
assert_same_array_annotations)
from neo.test.generate_datasets import (get_fake_value, get_fake_values, fake_neo,
TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.annotations = dict(
[(str(x), TEST_ANNOTATIONS[x]) for x in range(len(TEST_ANNOTATIONS))])
def test__get_fake_values(self):
self.annotations['seed'] = 0
times = get_fake_value('times', pq.Quantity, seed=0, dim=1)
signal = get_fake_value('signal', pq.Quantity, seed=1, dim=2)
name = get_fake_value('name', str, seed=2, obj=IrregularlySampledSignal)
description = get_fake_value('description', str, seed=3, obj='IrregularlySampledSignal')
file_origin = get_fake_value('file_origin', str)
arr_ann = get_fake_value('array_annotations', dict, seed=5,
obj=IrregularlySampledSignal, n=1)
attrs1 = {'name': name, 'description': description, 'file_origin': file_origin}
attrs2 = attrs1.copy()
attrs2.update(self.annotations)
attrs2['array_annotations'] = arr_ann
res11 = get_fake_values(IrregularlySampledSignal, annotate=False, seed=0)
res12 = get_fake_values('IrregularlySampledSignal', annotate=False, seed=0)
res21 = get_fake_values(IrregularlySampledSignal, annotate=True, seed=0)
res22 = get_fake_values('IrregularlySampledSignal', annotate=True, seed=0)
assert_array_equal(res11.pop('times'), times)
assert_array_equal(res12.pop('times'), times)
assert_array_equal(res21.pop('times'), times)
assert_array_equal(res22.pop('times'), times)
assert_array_equal(res11.pop('signal'), signal)
assert_array_equal(res12.pop('signal'), signal)
assert_array_equal(res21.pop('signal'), signal)
assert_array_equal(res22.pop('signal'), signal)
self.assertEqual(res11, attrs1)
self.assertEqual(res12, attrs1)
# Array annotations need to be compared separately
# because numpy arrays define equality differently
arr_ann_res21 = res21.pop('array_annotations')
arr_ann_attrs2 = attrs2.pop('array_annotations')
self.assertEqual(res21, attrs2)
assert_arrays_equal(arr_ann_res21['valid'], arr_ann_attrs2['valid'])
assert_arrays_equal(arr_ann_res21['number'], arr_ann_attrs2['number'])
arr_ann_res22 = res22.pop('array_annotations')
self.assertEqual(res22, attrs2)
assert_arrays_equal(arr_ann_res22['valid'], arr_ann_attrs2['valid'])
assert_arrays_equal(arr_ann_res22['number'], arr_ann_attrs2['number'])
def test__fake_neo__cascade(self):
self.annotations['seed'] = None
obj_type = IrregularlySampledSignal
cascade = True
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, IrregularlySampledSignal))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
def test__fake_neo__nocascade(self):
self.annotations['seed'] = None
obj_type = 'IrregularlySampledSignal'
cascade = False
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, IrregularlySampledSignal))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
class TestIrregularlySampledSignalConstruction(unittest.TestCase):
def test_IrregularlySampledSignal_creation_times_units_signal_units(self):
params = {'test2': 'y1', 'test3': True}
arr_ann = {'anno1': [23], 'anno2': ['A']}
sig = IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.ms, signal=[20., 40., 60.] * pq.mV,
name='test', description='tester', file_origin='test.file',
test1=1, array_annotations=arr_ann, **params)
sig.annotate(test1=1.1, test0=[1, 2])
assert_neo_object_is_compliant(sig)
assert_array_equal(sig.times, [1.1, 1.5, 1.7] * pq.ms)
assert_array_equal(np.asarray(sig).flatten(), np.array([20., 40., 60.]))
self.assertEqual(sig.units, pq.mV)
self.assertEqual(sig.name, 'test')
self.assertEqual(sig.description, 'tester')
self.assertEqual(sig.file_origin, 'test.file')
self.assertEqual(sig.annotations['test0'], [1, 2])
self.assertEqual(sig.annotations['test1'], 1.1)
self.assertEqual(sig.annotations['test2'], 'y1')
self.assertTrue(sig.annotations['test3'])
assert_arrays_equal(sig.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(sig.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(sig.array_annotations, ArrayDict)
def test_IrregularlySampledSignal_creation_units_arg(self):
params = {'test2': 'y1', 'test3': True}
arr_ann = {'anno1': [23], 'anno2': ['A']}
sig = IrregularlySampledSignal([1.1, 1.5, 1.7], signal=[20., 40., 60.], units=pq.V,
time_units=pq.s, name='test', description='tester',
file_origin='test.file', test1=1,
array_annotations=arr_ann, **params)
sig.annotate(test1=1.1, test0=[1, 2])
assert_neo_object_is_compliant(sig)
assert_array_equal(sig.times, [1.1, 1.5, 1.7] * pq.s)
assert_array_equal(np.asarray(sig).flatten(), np.array([20., 40., 60.]))
self.assertEqual(sig.units, pq.V)
self.assertEqual(sig.name, 'test')
self.assertEqual(sig.description, 'tester')
self.assertEqual(sig.file_origin, 'test.file')
self.assertEqual(sig.annotations['test0'], [1, 2])
self.assertEqual(sig.annotations['test1'], 1.1)
self.assertEqual(sig.annotations['test2'], 'y1')
self.assertTrue(sig.annotations['test3'])
assert_arrays_equal(sig.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(sig.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(sig.array_annotations, ArrayDict)
def test_IrregularlySampledSignal_creation_units_rescale(self):
params = {'test2': 'y1', 'test3': True}
arr_ann = {'anno1': [23], 'anno2': ['A']}
sig = IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.s, signal=[2., 4., 6.] * pq.V,
units=pq.mV, time_units=pq.ms, name='test',
description='tester', file_origin='test.file', test1=1,
array_annotations=arr_ann, **params)
sig.annotate(test1=1.1, test0=[1, 2])
assert_neo_object_is_compliant(sig)
assert_array_equal(sig.times, [1100, 1500, 1700] * pq.ms)
assert_array_equal(np.asarray(sig).flatten(), np.array([2000., 4000., 6000.]))
self.assertEqual(sig.units, pq.mV)
self.assertEqual(sig.name, 'test')
self.assertEqual(sig.description, 'tester')
self.assertEqual(sig.file_origin, 'test.file')
self.assertEqual(sig.annotations['test0'], [1, 2])
self.assertEqual(sig.annotations['test1'], 1.1)
self.assertEqual(sig.annotations['test2'], 'y1')
self.assertTrue(sig.annotations['test3'])
assert_arrays_equal(sig.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(sig.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(sig.array_annotations, ArrayDict)
def test_IrregularlySampledSignal_different_lens_ValueError(self):
times = [1.1, 1.5, 1.7] * pq.ms
signal = [20., 40., 60., 70.] * pq.mV
self.assertRaises(ValueError, IrregularlySampledSignal, times, signal)
def test_IrregularlySampledSignal_no_signal_units_ValueError(self):
times = [1.1, 1.5, 1.7] * pq.ms
signal = [20., 40., 60.]
self.assertRaises(ValueError, IrregularlySampledSignal, times, signal)
def test_IrregularlySampledSignal_no_time_units_ValueError(self):
times = [1.1, 1.5, 1.7]
signal = [20., 40., 60.] * pq.mV
self.assertRaises(ValueError, IrregularlySampledSignal, times, signal)
class TestIrregularlySampledSignalProperties(unittest.TestCase):
def setUp(self):
self.times = [np.arange(10.0) * pq.s, np.arange(-100.0, 100.0, 10.0) * pq.ms,
np.arange(100) * pq.ns]
self.data = [np.arange(10.0) * pq.nA, np.arange(-100.0, 100.0, 10.0) * pq.mV,
np.random.uniform(size=100) * pq.uV]
self.signals = [IrregularlySampledSignal(t, signal=D, testattr='test') for D, t in
zip(self.data, self.times)]
def test__compliant(self):
for signal in self.signals:
assert_neo_object_is_compliant(signal)
def test__t_start_getter(self):
for signal, times in zip(self.signals, self.times):
self.assertAlmostEqual(signal.t_start, times[0], delta=1e-15)
def test__t_stop_getter(self):
for signal, times in zip(self.signals, self.times):
self.assertAlmostEqual(signal.t_stop, times[-1], delta=1e-15)
def test__duration_getter(self):
for signal, times in zip(self.signals, self.times):
self.assertAlmostEqual(signal.duration, times[-1] - times[0], delta=1e-15)
def test__sampling_intervals_getter(self):
for signal, times in zip(self.signals, self.times):
assert_arrays_almost_equal(signal.sampling_intervals, np.diff(times), threshold=1e-15)
def test_IrregularlySampledSignal_repr(self):
sig = IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.s, signal=[2., 4., 6.] * pq.V,
name='test', description='tester', file_origin='test.file',
test1=1)
assert_neo_object_is_compliant(sig)
if np.__version__.split(".")[:2] > ['1', '13']:
# see https://github.com/numpy/numpy/blob/master/doc/release/1.14.0-notes.rst#many
# -changes-to-array-printing-disableable-with-the-new-legacy-printing-mode
targ = (
'<IrregularlySampledSignal(array([[2.],\n [4.],\n [6.]]) * V '
'' + 'at times [1.1 1.5 1.7] s)>')
else:
targ = (
'<IrregularlySampledSignal(array([[ 2.],\n [ 4.],\n [ 6.]]) '
'* V ' + 'at times [ 1.1 1.5 1.7] s)>')
res = repr(sig)
self.assertEqual(targ, res)
class TestIrregularlySampledSignalArrayMethods(unittest.TestCase):
def setUp(self):
self.data1 = np.arange(10.0)
self.data1quant = self.data1 * pq.mV
self.time1 = np.logspace(1, 5, 10)
self.time1quant = self.time1 * pq.ms
self.arr_ann = {'anno1': [23], 'anno2': ['A']}
self.signal1 = IrregularlySampledSignal(self.time1quant, signal=self.data1quant,
name='spam', description='eggs',
file_origin='testfile.txt', arg1='test',
array_annotations=self.arr_ann)
self.signal1.segment = Segment()
self.signal1.channel_index = ChannelIndex([0])
def test__compliant(self):
assert_neo_object_is_compliant(self.signal1)
self.assertEqual(self.signal1.name, 'spam')
self.assertEqual(self.signal1.description, 'eggs')
self.assertEqual(self.signal1.file_origin, 'testfile.txt')
self.assertEqual(self.signal1.annotations, {'arg1': 'test'})
assert_arrays_equal(self.signal1.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(self.signal1.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(self.signal1.array_annotations, ArrayDict)
def test__slice_should_return_IrregularlySampledSignal(self):
result = self.signal1[3:8]
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(result.size, 5)
self.assertEqual(result.t_start, self.time1quant[3])
self.assertEqual(result.t_stop, self.time1quant[7])
assert_array_equal(self.time1quant[3:8], result.times)
assert_array_equal(self.data1[3:8].reshape(-1, 1), result.magnitude)
# Test other attributes were copied over (in this case, defaults)
self.assertEqual(result.file_origin, self.signal1.file_origin)
self.assertEqual(result.name, self.signal1.name)
self.assertEqual(result.description, self.signal1.description)
self.assertEqual(result.annotations, self.signal1.annotations)
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test__getitem_should_return_single_quantity(self):
self.assertEqual(self.signal1[0], 0 * pq.mV)
self.assertEqual(self.signal1[9], 9 * pq.mV)
self.assertRaises(IndexError, self.signal1.__getitem__, 10)
def test__getitem_out_of_bounds_IndexError(self):
self.assertRaises(IndexError, self.signal1.__getitem__, 10)
def test_comparison_operators(self):
assert_array_equal(self.signal1 >= 5 * pq.mV, np.array(
[[False, False, False, False, False, True, True, True, True, True]]).T)
assert_array_equal(self.signal1 == 5 * pq.mV, np.array(
[[False, False, False, False, False, True, False, False, False, False]]).T)
assert_array_equal(self.signal1 == self.signal1, np.array(
[[True, True, True, True, True, True, True, True, True, True]]).T)
def test__comparison_as_indexing_single_trace(self):
self.assertEqual(self.signal1[self.signal1 == 5], [5 * pq.mV])
def test__comparison_as_indexing_multi_trace(self):
signal = IrregularlySampledSignal(self.time1quant, np.arange(20).reshape((-1, 2)) * pq.V)
assert_array_equal(signal[signal < 10],
np.array([[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]]).T * pq.V)
def test__indexing_keeps_order_across_channels(self):
# AnalogSignals with 10 traces each having 5 samples (eg. trace 0 is [0, 10, 20, 30, 40])
data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)])
mask = np.full((5, 10), fill_value=False, dtype=bool)
# selecting one entry per trace
mask[[0, 1, 0, 3, 0, 2, 4, 3, 1, 4], range(10)] = True
signal = IrregularlySampledSignal(np.arange(5) * pq.s, np.array(data) * pq.V)
assert_array_equal(signal[mask], np.array([[0, 11, 2, 33, 4, 25, 46, 37, 18, 49]]) * pq.V)
def test__indexing_keeps_order_across_time(self):
# AnalogSignals with 10 traces each having 5 samples (eg. trace 0 is [0, 10, 20, 30, 40])
data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)])
mask = np.full((5, 10), fill_value=False, dtype=bool)
# selecting two entries per trace
temporal_ids = [0, 1, 0, 3, 1, 2, 4, 2, 1, 4] + [4, 3, 2, 1, 0, 1, 2, 3, 2, 1]
mask[temporal_ids, list(range(10)) + list(range(10))] = True
signal = IrregularlySampledSignal(np.arange(5) * pq.s, np.array(data) * pq.V)
assert_array_equal(signal[mask], np.array([[0, 11, 2, 13, 4, 15, 26, 27, 18, 19],
[40, 31, 22, 33, 14, 25, 46, 37, 28,
49]]) * pq.V)
def test__comparison_with_inconsistent_units_should_raise_Exception(self):
self.assertRaises(ValueError, self.signal1.__gt__, 5 * pq.nA)
def test_simple_statistics(self):
targmean = self.signal1[:-1] * np.diff(self.time1quant).reshape(-1, 1)
targmean = targmean.sum() / (self.time1quant[-1] - self.time1quant[0])
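# The expected mean is duration-weighted: each sample except the last is
# weighted by the interval that follows it, i.e. sum(x_i * dt_i) / (t_N - t_0).
# For example, values [1, 3, 5] at times [0, 1, 3] s give (1*1 + 3*2) / 3 = 7/3.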
self.assertEqual(self.signal1.max(), 9 * pq.mV)
self.assertEqual(self.signal1.min(), 0 * pq.mV)
self.assertEqual(self.signal1.mean(), targmean)
def test_mean_interpolation_NotImplementedError(self):
self.assertRaises(NotImplementedError, self.signal1.mean, True)
def test_resample_NotImplementedError(self):
self.assertRaises(NotImplementedError, self.signal1.resample, True)
def test__rescale_same(self):
result = self.signal1.copy()
result = result.rescale(pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(result.units, 1 * pq.mV)
assert_array_equal(result.magnitude, self.data1.reshape(-1, 1))
assert_array_equal(result.times, self.time1quant)
assert_same_sub_schema(result, self.signal1)
self.assertIsInstance(result.channel_index, ChannelIndex)
self.assertIsInstance(result.segment, Segment)
self.assertIs(result.channel_index, self.signal1.channel_index)
self.assertIs(result.segment, self.signal1.segment)
def test__rescale_new(self):
result = self.signal1.copy()
result = result.rescale(pq.uV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(result.units, 1 * pq.uV)
assert_arrays_almost_equal(np.array(result), self.data1.reshape(-1, 1) * 1000., 1e-10)
assert_array_equal(result.times, self.time1quant)
self.assertIsInstance(result.channel_index, ChannelIndex)
self.assertIsInstance(result.segment, Segment)
self.assertIs(result.channel_index, self.signal1.channel_index)
self.assertIs(result.segment, self.signal1.segment)
def test__rescale_new_incompatible_ValueError(self):
self.assertRaises(ValueError, self.signal1.rescale, pq.nA)
def test_time_slice(self):
targdataquant = [[1.0], [2.0], [3.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[1:4] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = 15
t_stop = 250
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test__time_slice_deepcopy_annotations(self):
params1 = {'test0': 'y1', 'test1': ['deeptest'], 'test2': True}
self.signal1.annotate(**params1)
result = self.signal1.time_slice(None, None)
# Change annotations of original
| |
the nodes weren't removed
_, nodes = c.catalog.nodes()
nodes.remove(current)
assert [x['Node'] for x in nodes] == ['n1', 'n2']
# check n2's s1 service was removed though
_, nodes = c.catalog.service('s1')
assert set([x['Node'] for x in nodes]) == set(['n1'])
# cleanup
assert c.catalog.deregister('n1') is True
assert c.catalog.deregister('n2') is True
_, nodes = c.catalog.nodes()
nodes.remove(current)
assert [x['Node'] for x in nodes] == []
def test_health_service(self, consul_port):
c = consul.Consul(port=consul_port)
# check there are no nodes for the service 'foo'
index, nodes = c.health.service('foo')
assert nodes == []
# register two nodes, one with a long ttl, the other shorter
c.agent.service.register(
'foo',
service_id='foo:1',
check=Check.ttl('10s'),
tags=['tag:foo:1'])
c.agent.service.register(
'foo', service_id='foo:2', check=Check.ttl('100ms'))
time.sleep(40/1000.0)
# check the nodes show for the /health/service endpoint
index, nodes = c.health.service('foo')
assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2']
# but that they aren't passing their health check
index, nodes = c.health.service('foo', passing=True)
assert nodes == []
# ping the two node's health check
c.agent.check.ttl_pass('service:foo:1')
c.agent.check.ttl_pass('service:foo:2')
time.sleep(40/1000.0)
# both nodes are now available
index, nodes = c.health.service('foo', passing=True)
assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2']
# wait until the short ttl node fails
time.sleep(120/1000.0)
# only one node available
index, nodes = c.health.service('foo', passing=True)
assert [node['Service']['ID'] for node in nodes] == ['foo:1']
# ping the failed node's health check
c.agent.check.ttl_pass('service:foo:2')
time.sleep(40/1000.0)
# check both nodes are available
index, nodes = c.health.service('foo', passing=True)
assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2']
# check that tag works
index, nodes = c.health.service('foo', tag='tag:foo:1')
assert [node['Service']['ID'] for node in nodes] == ['foo:1']
# deregister the nodes
c.agent.service.deregister('foo:1')
c.agent.service.deregister('foo:2')
time.sleep(40/1000.0)
index, nodes = c.health.service('foo')
assert nodes == []
def test_health_state(self, consul_port):
c = consul.Consul(port=consul_port)
# The empty string is for the Serf Health Status check, which has an
# empty ServiceID
index, nodes = c.health.state('any')
assert [node['ServiceID'] for node in nodes] == ['']
# register two nodes, one with a long ttl, the other shorter
c.agent.service.register(
'foo', service_id='foo:1', check=Check.ttl('10s'))
c.agent.service.register(
'foo', service_id='foo:2', check=Check.ttl('100ms'))
time.sleep(40/1000.0)
# check the nodes show for the /health/state/any endpoint
index, nodes = c.health.state('any')
assert set([node['ServiceID'] for node in nodes]) == set(
['', 'foo:1', 'foo:2'])
# but that they aren't passing their health check
index, nodes = c.health.state('passing')
assert [node['ServiceID'] for node in nodes] != 'foo'
# ping the two node's health check
c.agent.check.ttl_pass('service:foo:1')
c.agent.check.ttl_pass('service:foo:2')
time.sleep(40/1000.0)
# both nodes are now available
index, nodes = c.health.state('passing')
assert set([node['ServiceID'] for node in nodes]) == set(
['', 'foo:1', 'foo:2'])
# wait until the short ttl node fails
time.sleep(2200/1000.0)
# only one node available
index, nodes = c.health.state('passing')
assert set([node['ServiceID'] for node in nodes]) == set(
['', 'foo:1'])
# ping the failed node's health check
c.agent.check.ttl_pass('service:foo:2')
time.sleep(40/1000.0)
# check both nodes are available
index, nodes = c.health.state('passing')
assert set([node['ServiceID'] for node in nodes]) == set(
['', 'foo:1', 'foo:2'])
# deregister the nodes
c.agent.service.deregister('foo:1')
c.agent.service.deregister('foo:2')
time.sleep(40/1000.0)
index, nodes = c.health.state('any')
assert [node['ServiceID'] for node in nodes] == ['']
def test_health_node(self, consul_port):
c = consul.Consul(port=consul_port)
# grab local node name
node = c.agent.self()['Config']['NodeName']
index, checks = c.health.node(node)
assert node in [check["Node"] for check in checks]
def test_health_checks(self, consul_port):
c = consul.Consul(port=consul_port)
c.agent.service.register(
'foobar', service_id='foobar', check=Check.ttl('10s'))
time.sleep(40/1000.00)
index, checks = c.health.checks('foobar')
assert [check['ServiceID'] for check in checks] == ['foobar']
assert [check['CheckID'] for check in checks] == ['service:foobar']
c.agent.service.deregister('foobar')
time.sleep(40/1000.0)
index, checks = c.health.checks('foobar')
assert len(checks) == 0
def test_session(self, consul_port):
c = consul.Consul(port=consul_port)
# session.create
pytest.raises(consul.ConsulException, c.session.create, node='n2')
pytest.raises(consul.ConsulException, c.session.create, dc='dc2')
session_id = c.session.create('my-session')
# session.list
pytest.raises(consul.ConsulException, c.session.list, dc='dc2')
_, sessions = c.session.list()
assert [x['Name'] for x in sessions] == ['my-session']
# session.info
pytest.raises(
consul.ConsulException, c.session.info, session_id, dc='dc2')
index, session = c.session.info('1'*36)
assert session is None
index, session = c.session.info(session_id)
assert session['Name'] == 'my-session'
# session.node
node = session['Node']
pytest.raises(
consul.ConsulException, c.session.node, node, dc='dc2')
_, sessions = c.session.node(node)
assert [x['Name'] for x in sessions] == ['my-session']
# session.destroy
pytest.raises(
consul.ConsulException, c.session.destroy, session_id, dc='dc2')
assert c.session.destroy(session_id) is True
_, sessions = c.session.list()
assert sessions == []
def test_session_delete_ttl_renew(self, consul_port):
c = consul.Consul(port=consul_port)
s = c.session.create(behavior='delete', ttl=20)
# attempt to renew an unknown session
pytest.raises(consul.NotFound, c.session.renew, '1'*36)
session = c.session.renew(s)
assert session['Behavior'] == 'delete'
assert session['TTL'] == '20s'
# trying out the behavior
assert c.kv.put('foo', '1', acquire=s) is True
index, data = c.kv.get('foo')
assert data['Value'] == six.b('1')
c.session.destroy(s)
index, data = c.kv.get('foo')
assert data is None
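# Background note: behavior='delete' makes Consul delete keys held by the
# session when it is invalidated, which is what the check above exercises;
# with the default behavior='release' the lock is released but the key is
# kept, e.g.:
#   s2 = c.session.create(behavior='release', ttl=20)  # key would survive destroy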
def test_acl_disabled(self, consul_port):
c = consul.Consul(port=consul_port)
pytest.raises(consul.ACLDisabled, c.acl.list)
pytest.raises(consul.ACLDisabled, c.acl.info, '1'*36)
pytest.raises(consul.ACLDisabled, c.acl.create)
pytest.raises(consul.ACLDisabled, c.acl.update, 'foo')
pytest.raises(consul.ACLDisabled, c.acl.clone, 'foo')
pytest.raises(consul.ACLDisabled, c.acl.destroy, 'foo')
def test_acl_permission_denied(self, acl_consul):
c = consul.Consul(port=acl_consul.port)
pytest.raises(consul.ACLPermissionDenied, c.acl.list)
pytest.raises(consul.ACLPermissionDenied, c.acl.create)
pytest.raises(consul.ACLPermissionDenied, c.acl.update, 'anonymous')
pytest.raises(consul.ACLPermissionDenied, c.acl.clone, 'anonymous')
pytest.raises(consul.ACLPermissionDenied, c.acl.destroy, 'anonymous')
def test_acl_explicit_token_use(self, acl_consul):
c = consul.Consul(port=acl_consul.port)
master_token = acl_consul.token
acls = c.acl.list(token=master_token)
assert set([x['ID'] for x in acls]) == \
set(['anonymous', master_token])
assert c.acl.info('1'*36) is None
compare = [c.acl.info(master_token), c.acl.info('anonymous')]
compare.sort(key=operator.itemgetter('ID'))
assert acls == compare
rules = """
key "" {
policy = "read"
}
key "private/" {
policy = "deny"
}
service "foo-" {
policy = "write"
}
service "bar-" {
policy = "read"
}
"""
token = c.acl.create(rules=rules, token=master_token)
assert c.acl.info(token)['Rules'] == rules
token2 = c.acl.clone(token, token=master_token)
assert c.acl.info(token2)['Rules'] == rules
assert c.acl.update(token2, name='Foo', token=master_token) == token2
assert c.acl.info(token2)['Name'] == 'Foo'
assert c.acl.destroy(token2, token=master_token) is True
assert c.acl.info(token2) is None
c.kv.put('foo', 'bar')
c.kv.put('private/foo', 'bar')
assert c.kv.get('foo', token=token)[1]['Value'] == six.b('bar')
pytest.raises(
consul.ACLPermissionDenied, c.kv.put, 'foo', 'bar2', token=token)
pytest.raises(
consul.ACLPermissionDenied, c.kv.delete, 'foo', token=token)
assert c.kv.get('private/foo')[1]['Value'] == six.b('bar')
pytest.raises(
consul.ACLPermissionDenied,
c.kv.get, 'private/foo', token=token)
pytest.raises(
consul.ACLPermissionDenied,
c.kv.put, 'private/foo', 'bar2', token=token)
pytest.raises(
consul.ACLPermissionDenied,
c.kv.delete, 'private/foo', token=token)
# test token pass through for service registration
pytest.raises(
consul.ACLPermissionDenied,
c.agent.service.register, "bar-1", token=token)
c.agent.service.register("foo-1", token=token)
index, data = c.health.service('foo-1', token=token)
assert data[0]['Service']['ID'] == "foo-1"
index, data = c.health.checks('foo-1', token=token)
assert data == []
index, data = c.health.service('bar-1', token=token)
assert not data
# clean up
assert c.agent.service.deregister('foo-1') is True
c.acl.destroy(token, token=master_token)
acls = c.acl.list(token=master_token)
assert set([x['ID'] for x in acls]) == \
set(['anonymous', master_token])
def test_acl_implicit_token_use(self, acl_consul):
# configure client to use the master token by default
c = consul.Consul(port=acl_consul.port, token=acl_consul.token)
master_token = acl_consul.token
acls = c.acl.list()
assert set([x['ID'] for x in acls]) == \
set(['anonymous', master_token])
assert c.acl.info('foo') is None
compare = [c.acl.info(master_token), c.acl.info('anonymous')]
compare.sort(key=operator.itemgetter('ID'))
assert acls == compare
rules = """
key "" {
policy = "read"
}
key "private/" {
policy = "deny"
}
"""
token = c.acl.create(rules=rules)
assert c.acl.info(token)['Rules'] == rules
token2 = c.acl.clone(token)
assert c.acl.info(token2)['Rules'] == rules
assert c.acl.update(token2, name='Foo') == token2
assert c.acl.info(token2)['Name'] == 'Foo'
assert c.acl.destroy(token2) is True
assert c.acl.info(token2) is None
c.kv.put('foo', 'bar')
c.kv.put('private/foo', 'bar')
c_limited = consul.Consul(port=acl_consul.port, token=token)
assert c_limited.kv.get('foo')[1]['Value'] == six.b('bar')
pytest.raises(
consul.ACLPermissionDenied, c_limited.kv.put, 'foo', 'bar2')
pytest.raises(
consul.ACLPermissionDenied, c_limited.kv.delete, 'foo')
assert c.kv.get('private/foo')[1]['Value'] == six.b('bar')
pytest.raises(
consul.ACLPermissionDenied,
c_limited.kv.get, 'private/foo')
pytest.raises(
consul.ACLPermissionDenied,
c_limited.kv.put, 'private/foo', 'bar2')
pytest.raises(
consul.ACLPermissionDenied,
c_limited.kv.delete, 'private/foo')
# check we can override the client's default token
pytest.raises(
consul.ACLPermissionDenied,
c.kv.get, 'private/foo', token=token
)
pytest.raises(
consul.ACLPermissionDenied,
c.kv.put, 'private/foo', 'bar2', token=token)
pytest.raises(
consul.ACLPermissionDenied,
c.kv.delete, 'private/foo', token=token)
# clean up
c.acl.destroy(token)
acls = c.acl.list()
assert set([x['ID'] for x in acls]) == \
set(['anonymous', master_token])
def test_status_leader(self, consul_port):
c = consul.Consul(port=consul_port)
agent_self = c.agent.self()
leader = c.status.leader()
addr_port = agent_self['Stats']['consul']['leader_addr']
assert leader == addr_port, \
"Leader value was {0}, expected value " \
"was {1}".format(leader, addr_port)
def test_status_peers(self, consul_port):
c = consul.Consul(port=consul_port)
agent_self = c.agent.self()
addr_port = agent_self['Stats']['consul']['leader_addr']
peers = c.status.peers()
assert addr_port in peers, \
"Expected value '{0}' " \
"in peer list but it was not present".format(addr_port)
def test_query(self, consul_port):
c = consul.Consul(port=consul_port)
# check that query list is empty
queries = c.query.list()
assert queries == []
# create a new named query
query_service = 'foo'
query_name = 'fooquery'
query = c.query.create(query_service, query_name)
# assert response contains query ID
assert 'ID' in query \
and query['ID'] is not None \
and str(query['ID']) != ''
# retrieve query using id and name
queries = c.query.get(query['ID'])
assert queries != [] \
and len(queries) == 1
assert queries[0]['Name'] == query_name \
and queries[0]['ID'] == query['ID']
# explain query
assert c.query.explain(query_name)['Query']
# delete query
assert c.query.delete(query['ID'])
| |
import glob
import json
import logging
import os
import shutil
import sys
import re
import patoolib
import requests
import DCSLM.Utilities as Utilities
from pprint import pprint
from .DCSUFParser import DCSUFParser, ArchiveExtensions
from .Livery import Livery
class LiveryManager:
def __init__(self):
self.LiveryData = self.make_default_data()
self.Liveries = {}
self.FolderRoot = "DCSLM"
def make_default_data(self):
ld = {
"config": {
"ovgme": False
},
"liveries": {}
}
return ld
def load_data(self):
configPath = os.path.join(os.getcwd(), self.FolderRoot, "dcslm.json")
if os.path.isfile(configPath):
try:
with open(configPath, "r") as configFile:
configData = json.load(configFile)
for id,l in configData['liveries'].items():
self.Liveries[id] = Livery().from_JSON(l)
return configData
except:
raise RuntimeError("Unable to open existing DCSLM config file at \'" + configPath + "\'")
return None
def write_data(self):
configPath = os.path.join(os.getcwd(), self.FolderRoot, "dcslm.json")
try:
with open(configPath, "w") as configFile:
outJson = {}
for k,v in self.LiveryData.items():
outJson[k] = v
json.dump(outJson, configFile)
except:
raise RuntimeError("Unable to write DCSLM config file to \'" + configPath + "\'")
def make_dcslm_dirs(self):
dcslmPath = os.path.join(os.getcwd(), self.FolderRoot)
archivesPath = os.path.join(dcslmPath, "archives")
extractPath = os.path.join(dcslmPath, "extract")
try:
if not os.path.isdir(dcslmPath):
os.mkdir(dcslmPath)
if not os.path.isdir(archivesPath):
os.mkdir(archivesPath)
if not os.path.isdir(extractPath):
os.mkdir(extractPath)
except:
raise RuntimeError("Unable to create DCSLM directories at \'" + dcslmPath + "\\\'")
def get_registered_livery(self, id=None, livery=None, title=None):
# TODO: Search by title
userID = id
if livery:
userID = livery.dcsuf.id
if userID:
if str(userID) in self.Liveries.keys():
return self.Liveries[str(userID)]
return None
def is_livery_registered(self, id=None, livery=None):
if self.get_registered_livery(id, livery):
return True
return False
def register_livery(self, livery):
if livery:
self.LiveryData["liveries"][str(livery.dcsuf.id)] = livery.to_JSON()
self.Liveries[str(livery.dcsuf.id)] = livery
def _remove_installed_livery_directory(self, livery, installPath):
if "Liveries" in installPath:
if os.path.isdir(installPath) and Utilities.validate_remove_path(installPath):
shutil.rmtree(installPath, ignore_errors=True)
else:
print("Warning: Livery uninstall path \'" + installPath + "\' is not a valid directory.")
def remove_installed_livery_directories(self, livery):
for i in livery.installs['liveries'].values():
for p in i['paths']:
fullPath = os.path.join(os.getcwd(), livery.destination, p)
self._remove_installed_livery_directory(livery, fullPath)
livery.installs['liveries'] = {}
return None
def unregister_livery(self, livery):
if livery:
if self.is_livery_registered(livery.dcsuf.id):
del self.Liveries[str(livery.dcsuf.id)]
del self.LiveryData["liveries"][str(livery.dcsuf.id)]
return True
return False
def uninstall_livery(self, livery):
self.remove_installed_livery_directories(livery)
self.unregister_livery(livery)
def load_livery_from_livery_registry_file(self, registryPath):
if os.path.isfile(registryPath):
try:
with open(registryPath, "r") as registryFile:
registryData = json.load(registryFile)
loadedLivery = Livery()
loadedLivery.from_JSON(registryData)
return loadedLivery
except:
raise RuntimeError("Unable to open livery registry file at \'" + registryPath + "\'")
else:
raise RuntimeError("Unable to find livery registry file \'" + registryPath + "\'.")
def write_livery_registry_files(self, livery):
for i, v in livery.installs['liveries'].items():
for p in v['paths']:
installRoot = os.path.join(os.getcwd(), livery.destination, p)
if os.path.isdir(installRoot):
installPath = os.path.join(installRoot, ".dcslm.json")
try:
with open(installPath, "w") as registryFile:
json.dump(livery.to_JSON(), registryFile)
except:
raise RuntimeError("Unable to write livery registry file to \'" + installPath + "\'.")
else:
raise RuntimeError("Unable to write livery registry file to \'" + installRoot + "\\\'. Was the livery folder created correctly?")
def remove_livery_registry_files(self, livery):
for i, v in livery.installs['liveries'].items():
for p in v['paths']:
installRoot = os.path.join(os.getcwd(), livery.destination, p)
if os.path.isdir(installRoot):
installPath = os.path.join(installRoot, ".dcslm.json")
if os.path.isfile(installPath):
try:
Utilities.remove_file(installPath)
#os.remove(installPath)
except:
raise RuntimeError("Unable to remove livery registry file at \'" + installPath + "\'.")
else:
raise RuntimeError("Unable to find livery registry file \'" + installPath + "\'.")
def download_livery_archive(self, livery, dlCallback=None):
if livery:
if livery.dcsuf.download:
archiveType = '.' + str.split(livery.dcsuf.download, '.')[-1]
if archiveType in ArchiveExtensions:
destinationPath = os.path.join(os.getcwd(), self.FolderRoot, "archives")
archiveFilename = str.split(livery.dcsuf.download, '/')[-1]
destinationFilename = os.path.join(destinationPath, archiveFilename)
try:
with requests.get(livery.dcsuf.download, stream=True) as req:
req.raise_for_status()
with open(destinationFilename, 'wb') as f:
if dlCallback:
dlCallback['progress'].start_task(dlCallback['task'])
for chunk in req.iter_content(chunk_size=8192):
f.write(chunk)
if dlCallback:
dlCallback['exec'](livery, dlCallback, len(chunk))
return destinationFilename
except (KeyboardInterrupt, IOError, ConnectionError, FileNotFoundError) as e:
if os.path.isfile(destinationFilename):
Utilities.remove_file(destinationFilename)
#os.remove(destinationFilename)
raise RuntimeError("Failed during download of archive " + livery.dcsuf.download + ": " + str(e))
raise RuntimeError("Unable to get downloaded archive path for livery \'" + livery.dcsuf.title + "\'.")
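# Hedged illustration (added for documentation; not part of the original code).
# download_livery_archive() above expects 'dlCallback', when provided, to be a
# dict shaped roughly like the sketch below. The keys 'progress', 'task' and
# 'exec' are taken from the calls above; wiring them to a rich.progress.Progress
# instance is an assumption about the caller, not something this class enforces.
#
#   progress = rich.progress.Progress()
#   task = progress.add_task("Downloading...", total=archive_size, start=False)
#   def on_chunk(livery, cb, nbytes):
#       cb['progress'].update(cb['task'], advance=nbytes)
#   dlCallback = {'progress': progress, 'task': task, 'exec': on_chunk}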
def get_registered_livery_ids(self):
return self.LiveryData['liveries'].keys()
def _remove_existing_extracted_files(self, livery, extractedRoot):
if os.path.isdir(extractedRoot) and Utilities.validate_remove_path(extractedRoot):
shutil.rmtree(extractedRoot, onerror=Utilities.remove_readonly)
#else:
#raise RuntimeError("Invalid path for removing existing extracted files: " + extractedRoot)
def extract_livery_archive(self, livery):
if livery:
if len(livery.archive):
archivePath = os.path.join(os.getcwd(), self.FolderRoot, "archives", livery.archive)
if os.path.isfile(archivePath):
extractRoot = os.path.join(os.getcwd(), self.FolderRoot, "extract", str(livery.dcsuf.id))
if not os.path.isdir(extractRoot):
os.makedirs(extractRoot, exist_ok=True)
archiveFile = livery.archive
archiveFolder = os.path.splitext(archiveFile)[0].split('\\')[-1]
extractedPath = os.path.join(extractRoot, archiveFolder)
self._remove_existing_extracted_files(livery, extractedPath)
self._extract_archive(livery, archivePath, extractedPath)
self._extract_extracted_archive(livery, extractedPath)
return extractedPath
return None
def _extract_archive(self, livery, archivePath, extractPath):
patoolib.extract_archive(archivePath, 0, extractPath)  # positional args: archive path, verbosity, output directory
def _extract_extracted_archive(self, livery, extractedPath):
extractedFiles = glob.glob(extractedPath + "/**/*", recursive=True)
for f in extractedFiles:
if os.path.splitext(f)[-1][1:] in patoolib.ArchiveFormats:
self._extract_archive(livery, f, extractedPath)
def is_valid_livery_directory(self, fileList):
for f in fileList:
if "description.lua" in f:
return True
return False
def detect_extracted_liveries(self, livery, extractPath, extractedLiveryFiles):
liveryDirectories = []
for root, files in extractedLiveryFiles.items():
liveryName = root
if root != "\\":
liveryName = str.split(root,"\\")[-1]
if len(liveryName):
if self.is_valid_livery_directory(files):
liverySize = self._get_size_of_extracted_livery_files(livery, extractPath, files)
liveryDirectories.append({'name': liveryName, 'size': liverySize})
return liveryDirectories
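# Illustrative sketch of detect_extracted_liveries() output (names are made up):
#   [{'name': 'VFA-101 CAG', 'size': 123456789}, {'name': 'VFA-101 Line', 'size': 98765432}]
# Only extracted directories containing a description.lua are reported, with
# 'size' being the total byte size of their files.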
def does_archive_exist(self, archiveName):
archiveFiles = glob.glob(os.path.join(os.getcwd(), self.FolderRoot, "archives") + "/*.*")
for a in archiveFiles:
if archiveName in a:
return a
return None
def compare_archive_sizes(self, archivePath, archiveURL):
if os.path.isfile(archivePath):
fileSize = os.path.getsize(archivePath)
urlSize = self.request_archive_size(archiveURL)
return fileSize == urlSize
return False
def get_extracted_livery_files(self, livery, extractPath):
extractedFiles = glob.glob(extractPath + "/**/*", recursive=True)
for i in range(0, len(extractedFiles)): # Remove extract root from glob filenames
extractedFiles[i] = extractedFiles[i][len(extractPath):]
if livery:
directoryFiles = {}
for f in extractedFiles:
splitF = os.path.split(f)
if splitF[0] not in directoryFiles:
directoryFiles[splitF[0]] = []
directoryFiles[splitF[0]].append(f)
return directoryFiles
return None
def _get_size_of_extracted_livery_files(self, livery, extractPath, fileList):
totalSize = 0
for f in fileList:
extractedFilepath = os.path.join(extractPath, f[1:])
totalSize += os.path.getsize(extractedFilepath)
return totalSize
def _copy_livery_files(self, livery, extractPath, fileList, installLivery):
badFiles = ['desktop.ini', 'thumbs.db']
installDirectory = os.path.join(os.getcwd(), installLivery)
if not os.path.isdir(installDirectory):
os.makedirs(installDirectory, exist_ok=True)
for f in fileList:
splitPath = os.path.split(f)
fileName = splitPath[1]
if '.' not in fileName:  # skip directories and files without an extension
continue
badFileName = False
for bF in badFiles:
if bF in fileName:
badFileName = True
break
if badFileName:
continue
extractedFilepath = os.path.join(extractPath, f[1:])
destinationFilepath = os.path.join(installDirectory, fileName)
shutil.copy2(extractedFilepath, destinationFilepath)
return True
def copy_detected_liveries(self, livery, extractPath, extractedLiveryFiles, installPaths):
copiedLiveries = []
for install in installPaths:
installPath = os.path.join(os.getcwd(), livery.destination, install)
installLivery = str.split(installPath, "\\")[-1]
for root, files in extractedLiveryFiles.items():
if self.is_valid_livery_directory(files):
rootUnit = livery.dcsuf.title
if root != "\\":
rootUnit = str.split(root, "\\")[-1]
if installLivery == rootUnit:
if self._copy_livery_files(livery, extractPath, files, installPath):
copiedLiveries.append(install)
return copiedLiveries
def remove_extracted_livery_archive(self, livery):
if livery:
extractRoot = os.path.join(os.getcwd(), self.FolderRoot, "extract", str(livery.dcsuf.id))
if Utilities.validate_remove_path(extractRoot):
shutil.rmtree(extractRoot, onerror=Utilities.remove_readonly)
return True
else:
raise RuntimeError("Invalid path provided to remove extracted livery archive: " + extractRoot)
return False
def remove_downloaded_archive(self, livery, downloadPath):
if livery:
archivePath = os.path.join(os.getcwd(), self.FolderRoot, "archives", livery.archive)
if os.path.isfile(archivePath):
Utilities.remove_file(archivePath)
#os.remove(archivePath)
return True
else:
raise RuntimeWarning("Unable to remove archive file \'" + archivePath + "\' as it doesn't exist.")
return False
def generate_livery_destination_path(self, livery):
if self.LiveryData['config']['ovgme']:
return os.path.join(livery.ovgme, "Liveries")
else:
return "Liveries"
def generate_aircraft_livery_install_path(self, livery, unitLiveries):
liveryPaths = []
for unit in unitLiveries:
liveryPaths.append(os.path.join(unit))
return liveryPaths
def generate_livery_install_paths(self, livery, installRoots, detectedLiveries):
installPaths = []
for dl in detectedLiveries:
if dl['name'] == "\\":
dl['name'] = livery.dcsuf.title
livery.installs['liveries'][dl['name']] = {'size': dl['size'], 'paths':[]}
for root in installRoots:
livery.installs['liveries'][dl['name']]['paths'].append(os.path.join(root, dl['name']))
installPaths.append(os.path.join(root, dl['name']))
return installPaths
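# Illustrative sketch of the structure generate_livery_install_paths() builds
# (livery and unit names are made up):
#   livery.installs['liveries'] == {
#       'VFA-101 CAG': {'size': 123456789,
#                       'paths': ['FA-18C_hornet\\VFA-101 CAG', ...]}
#   }
# The returned installPaths list is the flattened union of every 'paths' entry.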
def get_livery_data_from_dcsuf_url(self, url):
if len(url):
l = Livery()
l.dcsuf = DCSUFParser().get_dcsuserfile_from_url(url)
l.ovgme = l.generate_ovgme_folder()
return l
raise RuntimeError("Unable to get livery data from url " + url)
def request_archive_size(self, archiveURL):
if len(archiveURL):
return Utilities.request_file_size(archiveURL)
return 0
def _get_file_lines(self, filePath):
if os.path.isfile(filePath):
with open(filePath, "r", errors="ignore") as readFile:
return readFile.readlines()
return []
def _optimize_get_lua_statements_from_line(self, line, commentStart=None, commentEnd=None):
if not commentStart:
commentStart = len(line) + 1
if not commentEnd:
commentEnd = -1
luaStatements = []
reStatement = re.findall("(.+[;\n])", line)
if len(reStatement):
for rs in reStatement:
luaStatement = str.strip(rs)
splitStatements = str.split(luaStatement, ';')
for s in splitStatements:
s = s[str.find(s, '{'):str.find(s, '}') + 1]
subStrStart = str.find(line, s)
if not (subStrStart > commentStart and subStrStart < commentEnd - 2) and len(s):
luaStatements.append(s)
return luaStatements
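# Illustrative sketch of _optimize_get_lua_statements_from_line() on an assumed
# DCS description.lua line (the input value is made up):
#   line = '{"FUSELAGE", 0 ,"my_texture", false};\n'
#   self._optimize_get_lua_statements_from_line(line)
#   -> ['{"FUSELAGE", 0 ,"my_texture", false}']
# Statements whose '{...}' span starts inside the [commentStart, commentEnd)
# range are skipped, so commented-out lines are left untouched.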
def _optimize_get_py_statements_from_line(self, line, commentStart=None, commentEnd=None):
luaStatements = self._optimize_get_lua_statements_from_line(line, commentStart, commentEnd)
pyStatements = []
for ls in luaStatements:
ps = self._optimize_lua_statement_to_py(ls)
if len(ps) == 4:
pyStatements.append(ps)
return pyStatements
def _optimize_py_statement_to_lua(self, pyStatement, rootLivery = "", rootLiveryPath = "") -> str:
if len(pyStatement) == 4:
if pyStatement[2].startswith("../"):
splitExistingPath = str.split(pyStatement[2], '/')
detectedRootLivery = splitExistingPath[-2] + "/"
if detectedRootLivery != rootLivery and rootLivery:
rootLivery = detectedRootLivery
pyStatement[2] = splitExistingPath[-1]
correctedPath = rootLiveryPath + rootLivery + pyStatement[2]
luaStatement = "{\"" + pyStatement[0] + "\", " + pyStatement[1] + " ,\"" + correctedPath + "\","
if
},
"lang": "java",
},
# duplicates in net.java.dev.jna:jna promoted to 4.5.1
# - com.zaxxer:nuprocess:1.2.4 wanted version 4.5.1
# - org.scala-sbt:io_2.12:1.2.0 wanted version 4.5.0
{
"bind_args": {
"actual": "@scala_annex_net_java_dev_jna_jna",
"name": "jar/scala_annex_net/java/dev/jna/jna",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "fbc9de96a0cc193a125b4008dbc348e9ed54e5e13fc67b8ed40e645d303cc51b",
"jar_urls": [
"http://central.maven.org/maven2/net/java/dev/jna/jna/4.5.1/jna-4.5.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_net_java_dev_jna_jna",
"srcjar_sha256": "74145556f7b10be10303b76e9bfb12a7d0d43934c60788ed006a7a8aed5517f4",
"srcjar_urls": [
"http://central.maven.org/maven2/net/java/dev/jna/jna/4.5.1/jna-4.5.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_net_sourceforge_argparse4j_argparse4j",
"name": "jar/scala_annex_net/sourceforge/argparse4j/argparse4j",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "98cb5468cac609f3bc07856f2e34088f50dc114181237c48d20ca69c3265d044",
"jar_urls": [
"http://central.maven.org/maven2/net/sourceforge/argparse4j/argparse4j/0.8.1/argparse4j-0.8.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_net_sourceforge_argparse4j_argparse4j",
"srcjar_sha256": "6baf8893d69bf3b8cac582de8b6407ebfeac992b1694b11897a9a614fb4b892f",
"srcjar_urls": [
"http://central.maven.org/maven2/net/sourceforge/argparse4j/argparse4j/0.8.1/argparse4j-0.8.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_apache_logging_log4j_log4j_api",
"name": "jar/scala_annex_org/apache/logging/log4j/log4j_api",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "1205ab764b1326f7d96d99baa4a4e12614599bf3d735790947748ee116511fa2",
"jar_urls": [
"http://central.maven.org/maven2/org/apache/logging/log4j/log4j-api/2.8.1/log4j-api-2.8.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_apache_logging_log4j_log4j_api",
"srcjar_sha256": "453201e25c223bacfc58e47262390fa2879dfe095c6d883dc913667917665ceb",
"srcjar_urls": [
"http://central.maven.org/maven2/org/apache/logging/log4j/log4j-api/2.8.1/log4j-api-2.8.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_apache_logging_log4j_log4j_core",
"name": "jar/scala_annex_org/apache/logging/log4j/log4j_core",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "815a73e20e90a413662eefe8594414684df3d5723edcd76070e1a5aee864616e",
"jar_urls": [
"http://central.maven.org/maven2/org/apache/logging/log4j/log4j-core/2.8.1/log4j-core-2.8.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_apache_logging_log4j_log4j_core",
"srcjar_sha256": "efb8bd06659beda231375b72fb38f44d884b7d086f34e050204ffc8efe0cf6c2",
"srcjar_urls": [
"http://central.maven.org/maven2/org/apache/logging/log4j/log4j-core/2.8.1/log4j-core-2.8.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_codehaus_groovy_groovy",
"name": "jar/scala_annex_org/codehaus/groovy/groovy",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "81689da0b150893d509d17c523c5becba97e3d7667e98f965d735505e25ad294",
"jar_urls": [
"http://central.maven.org/maven2/org/codehaus/groovy/groovy/2.4.0/groovy-2.4.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_codehaus_groovy_groovy",
"srcjar_sha256": "0a8193f9bdc5bf579275677afe00fcabf62fda96341a289dac592f140cd5d229",
"srcjar_urls": [
"http://central.maven.org/maven2/org/codehaus/groovy/groovy/2.4.0/groovy-2.4.0-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_codehaus_mojo_animal_sniffer_annotations",
"name": "jar/scala_annex_org/codehaus/mojo/animal_sniffer_annotations",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "2068320bd6bad744c3673ab048f67e30bef8f518996fa380033556600669905d",
"jar_urls": [
"http://central.maven.org/maven2/org/codehaus/mojo/animal-sniffer-annotations/1.14/animal-sniffer-annotations-1.14.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_codehaus_mojo_animal_sniffer_annotations",
"srcjar_sha256": "d821ae1f706db2c1b9c88d4b7b0746b01039dac63762745ef3fe5579967dd16b",
"srcjar_urls": [
"http://central.maven.org/maven2/org/codehaus/mojo/animal-sniffer-annotations/1.14/animal-sniffer-annotations-1.14-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_jctools_jctools_core",
"name": "jar/scala_annex_org/jctools/jctools_core",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "5fba472768fcff372783fad4f8a0b4ffec6a9b632b95885e26f509ba00093b07",
"jar_urls": [
"http://central.maven.org/maven2/org/jctools/jctools-core/2.0.1/jctools-core-2.0.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_jctools_jctools_core",
"srcjar_sha256": "2865cf812ec2bff6fba558034de0d419475096c1884f3be59a87b2f03277ed6a",
"srcjar_urls": [
"http://central.maven.org/maven2/org/jctools/jctools-core/2.0.1/jctools-core-2.0.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_reactivestreams_reactive_streams",
"name": "jar/scala_annex_org/reactivestreams/reactive_streams",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "ef867702a614b96eb6c64fb65a8f5e14bdfcabbc1ae056f78a1643f7b79ca0eb",
"jar_urls": [
"http://central.maven.org/maven2/org/reactivestreams/reactive-streams/1.0.0/reactive-streams-1.0.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_reactivestreams_reactive_streams",
"srcjar_sha256": "7e673b0c8b0ac51bdef8655cacf7804fb9791c47e71161a36c94738d55eefea8",
"srcjar_urls": [
"http://central.maven.org/maven2/org/reactivestreams/reactive-streams/1.0.0/reactive-streams-1.0.0-sources.jar",
],
},
"lang": "java",
},
# duplicates in org.scala-lang.modules:scala-parser-combinators_2.12 promoted to 1.0.5
# - ch.epfl.scala:zinc-compile-core_2.12:1.1.7+62-0f4ad9d5 wanted version 1.0.5
# - org.scala-sbt:zinc-compile-core_2.12:1.2.1 wanted version 1.0.5
# - org.scalatest:scalatest_2.12:3.0.4 wanted version 1.0.4
{
"bind_args": {
"actual": "@scala_annex_org_scala_lang_modules_scala_parser_combinators_2_12",
"name": "jar/scala_annex_org/scala_lang/modules/scala_parser_combinators_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "f1f2f43cfd8042eb8a5d3021dc7ac3fff08ed6565311b6c145f8efe882a58a75",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-lang/modules/scala-parser-combinators_2.12/1.0.5/scala-parser-combinators_2.12-1.0.5.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_lang_modules_scala_parser_combinators_2_12",
"srcjar_sha256": "3c13525e5b80f12cd3def37c2edf1d3ade43d42af0aac59495fbe24339450475",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-lang/modules/scala-parser-combinators_2.12/1.0.5/scala-parser-combinators_2.12-1.0.5-sources.jar",
],
},
"lang": "java",
},
# duplicates in org.scala-lang.modules:scala-xml_2.12 promoted to 1.0.6
# - io.get-coursier:coursier_2.12:1.1.0-M3 wanted version 1.0.6
# - org.scala-sbt:sbinary_2.12:0.4.4 wanted version 1.0.5
# - org.scalatest:scalatest_2.12:3.0.4 wanted version 1.0.5
{
"bind_args": {
"actual": "@scala_annex_org_scala_lang_modules_scala_xml_2_12",
"name": "jar/scala_annex_org/scala_lang/modules/scala_xml_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "7cc3b6ceb56e879cb977e8e043f4bfe2e062f78795efd7efa09f85003cb3230a",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-lang/modules/scala-xml_2.12/1.0.6/scala-xml_2.12-1.0.6.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_lang_modules_scala_xml_2_12",
"srcjar_sha256": "a7e8aac79394df396afda98b35537791809d815ce15ab2224f7d31e50c753922",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-lang/modules/scala-xml_2.12/1.0.6/scala-xml_2.12-1.0.6-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_lang_scala_compiler",
"name": "jar/scala_annex_org/scala_lang/scala_compiler",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "3023b07cc02f2b0217b2c04f8e636b396130b3a8544a8dfad498a19c3e57a863",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-lang/scala-compiler/2.12.6/scala-compiler-2.12.6.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_lang_scala_compiler",
"srcjar_sha256": "d3e9d7cc7b50c89676481959cebbf231275863c9f74102de28250dc92ffd4a6f",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-lang/scala-compiler/2.12.6/scala-compiler-2.12.6-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_ivy_ivy",
"name": "jar/scala_annex_org/scala_sbt/ivy/ivy",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "c48309f41f30b322704984dd851346bd1717568c4ff2a15bba164939764be4d1",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/ivy/ivy/2.3.0-sbt-a3314352b638afbf0dca19f127e8263ed6f898bd/ivy-2.3.0-sbt-a3314352b638afbf0dca19f127e8263ed6f898bd.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_ivy_ivy",
"srcjar_sha256": "00253bb52115873f4fb51dc44af396222f8648378632206abfd69cf1fb03564b",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/ivy/ivy/2.3.0-sbt-a3314352b638afbf0dca19f127e8263ed6f898bd/ivy-2.3.0-sbt-a3314352b638afbf0dca19f127e8263ed6f898bd-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_compiler_interface",
"name": "jar/scala_annex_org/scala_sbt/compiler_interface",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": ["@scala_annex_org_scala_sbt_util_interface"],
"jar_sha256": "25c7fd6171a58775caa1b80170d0a2256ab57b2eb65022123ebcfc4ea564d961",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/compiler-interface/1.2.1/compiler-interface-1.2.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_compiler_interface",
"srcjar_sha256": "bd4153820e556420eda1415df90236ee69662a7490849c0bbaf99019b360c79e",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/compiler-interface/1.2.1/compiler-interface-1.2.1-sources.jar",
],
},
"lang": "java",
},
# duplicates in org.scala-sbt:io_2.12 promoted to 1.2.0
# - ch.epfl.scala:zinc-core_2.12:1.1.7+62-0f4ad9d5 wanted version 1.1.4
# - org.scala-sbt:zinc-core_2.12:1.2.1 wanted version 1.2.0
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_io_2_12",
"name": "jar/scala_annex_org/scala_sbt/io_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": [
"@scala_annex_com_swoval_apple_file_events",
"@scala_annex_net_java_dev_jna_jna",
"@scala_annex_net_java_dev_jna_jna_platform",
],
"jar_sha256": "270b67412cf3e5a81f036bfe26bf098434d68f9ac427414996479847ce50fc31",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/io_2.12/1.2.0/io_2.12-1.2.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_io_2_12",
"srcjar_sha256": "411f890c43658fcd770680a48f084cc4de6a9a98a31381fc5ca8041936459de6",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/io_2.12/1.2.0/io_2.12-1.2.0-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_launcher_interface",
"name": "jar/scala_annex_org/scala_sbt/launcher_interface",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "11ab8f0e2c035c90f019e4f5780ee57de978b7018d34e8f020eb88aa8b14af25",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/launcher-interface/1.0.0/launcher-interface-1.0.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_launcher_interface",
"srcjar_sha256": "ca2de13465aee529ebed512ecc1a214e521f436e9a2219042777b32a3cfcf287",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/launcher-interface/1.0.0/launcher-interface-1.0.0-sources.jar",
],
},
"lang": "java",
},
# duplicates in org.scala-sbt:librarymanagement-core_2.12 promoted to 1.2.0
# - org.scala-sbt:librarymanagement-ivy_2.12:1.0.0 wanted version 1.0.0
# - org.scala-sbt:zinc-ivy-integration_2.12:1.2.1 wanted version 1.2.0
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_librarymanagement_core_2_12",
"name": "jar/scala_annex_org/scala_sbt/librarymanagement_core_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": [
"@scala_annex_com_eed3si9n_gigahorse_okhttp_2_12",
"@scala_annex_com_jcraft_jsch",
"@scala_annex_com_squareup_okhttp3_okhttp_urlconnection",
"@scala_annex_org_scala_sbt_util_cache_2_12",
"@scala_annex_org_scala_sbt_util_position_2_12",
],
"jar_sha256": "c0b5fc0d7a32063a4eb61b9d80c3bf8b60490b620c5aed984d0e041563a13947",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/librarymanagement-core_2.12/1.2.0/librarymanagement-core_2.12-1.2.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_librarymanagement_core_2_12",
"srcjar_sha256": "76257c211485653f4e3b5d59867b8aec5cd7af347b35b176e1d926d127831a62",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/librarymanagement-core_2.12/1.2.0/librarymanagement-core_2.12-1.2.0-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_librarymanagement_ivy_2_12",
"name": "jar/scala_annex_org/scala_sbt/librarymanagement_ivy_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": [
"@scala_annex_com_eed3si9n_sjson_new_core_2_12",
"@scala_annex_org_scala_sbt_ivy_ivy",
"@scala_annex_org_scala_sbt_librarymanagement_core_2_12",
],
"jar_sha256": "0e37e9a4b695b07aacad9e4dcabe725a2511962901dd15b8fa68184af11fab3f",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/librarymanagement-ivy_2.12/1.0.0/librarymanagement-ivy_2.12-1.0.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_librarymanagement_ivy_2_12",
"srcjar_sha256": "16179441e8ca6d0f25ede41c872b795dc7b74616f0da4d0c04225053e2f20d92",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/librarymanagement-ivy_2.12/1.0.0/librarymanagement-ivy_2.12-1.0.0-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_sbinary_2_12",
"name": "jar/scala_annex_org/scala_sbt/sbinary_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": [
"@scala_annex_org_scala_lang_modules_scala_xml_2_12",
],
"jar_sha256": "24a7a488a6992b6ab4d8e78b170f5fbc02ef13eadada88851fd41cb2ccfa802a",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/sbinary_2.12/0.4.4/sbinary_2.12-0.4.4.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_sbinary_2_12",
"srcjar_sha256": "1bace3a75fa2d5d73c0ea7d3be8107eec76fddeedba301af91fc6c99c6a774c9",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/sbinary_2.12/0.4.4/sbinary_2.12-0.4.4-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_test_agent",
"name": "jar/scala_annex_org/scala_sbt/test_agent",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "3c2685c110db34c5611222b62a4e33e039803e8f9a126513616bab62a7cc0041",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/test-agent/1.0.4/test-agent-1.0.4.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_test_agent",
"srcjar_sha256": "b5bcaef40972e6aead9dba0b3a6ffa4a22259f7297e300091802bfa0b4763ed2",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/test-agent/1.0.4/test-agent-1.0.4-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_test_interface",
"name": "jar/scala_annex_org/scala_sbt/test_interface",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "15f70b38bb95f3002fec9aea54030f19bb4ecfbad64c67424b5e5fea09cd749e",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/test-interface/1.0/test-interface-1.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_test_interface",
"srcjar_sha256": "c314491c9df4f0bd9dd125ef1d51228d70bd466ee57848df1cd1b96aea18a5ad",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/test-interface/1.0/test-interface-1.0-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_util_cache_2_12",
"name": "jar/scala_annex_org/scala_sbt/util_cache_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": [
"@scala_annex_com_eed3si9n_sjson_new_murmurhash_2_12",
],
"jar_sha256": "622fd806450b232442172b32ff76bc547f015ae8935950c90d336f8920dae07f",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-cache_2.12/1.2.0/util-cache_2.12-1.2.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_util_cache_2_12",
"srcjar_sha256": "dbb00a2a92d17d5c01e5eaf0e57bdfedf58d7f5b67a974641e6a026b38d14408",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-cache_2.12/1.2.0/util-cache_2.12-1.2.0-sources.jar",
],
},
"lang": "java",
},
# duplicates in org.scala-sbt:util-control_2.12 promoted to 1.2.0
# - ch.epfl.scala:zinc-compile-core_2.12:1.1.7+62-0f4ad9d5 wanted version 1.1.3
# - org.scala-sbt:zinc-compile-core_2.12:1.2.1 wanted version 1.2.0
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_util_control_2_12",
"name": "jar/scala_annex_org/scala_sbt/util_control_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "87f8f8decb351e50415b5fabb7aa11a110e29bf0a31a4ba0e8662987cb9be580",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-control_2.12/1.2.0/util-control_2.12-1.2.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_util_control_2_12",
"srcjar_sha256": "220fc66fb3e7c5c18237e9d308fd3c2f3e988230ee2c4994a46ca09e2cab9597",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-control_2.12/1.2.0/util-control_2.12-1.2.0-sources.jar",
],
},
"lang": "java",
},
# duplicates in org.scala-sbt:util-interface promoted to 1.2.0
# - ch.epfl.scala:compiler-interface:1.1.7+62-0f4ad9d5 wanted version 1.1.3
# - org.scala-sbt:compiler-interface:1.2.1 wanted version 1.2.0
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_util_interface",
"name": "jar/scala_annex_org/scala_sbt/util_interface",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "444bc23ec31e30ed76a34cd7e142c1a7e4fa84b9d838945b46c8f6f780a798c6",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-interface/1.2.0/util-interface-1.2.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_util_interface",
"srcjar_sha256": "94aa85d25647d83e83b31fb55494be70927989542d025608d6eb5650529c738a",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-interface/1.2.0/util-interface-1.2.0-sources.jar",
],
},
"lang": "java",
},
# duplicates in org.scala-sbt:util-logging_2.12 promoted to 1.2.0
# - ch.epfl.scala:zinc-core_2.12:1.1.7+62-0f4ad9d5 wanted version 1.1.3
# - org.scala-sbt:zinc-core_2.12:1.2.1 wanted version 1.2.0
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_util_logging_2_12",
"name": "jar/scala_annex_org/scala_sbt/util_logging_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": [
"@scala_annex_com_eed3si9n_sjson_new_core_2_12",
"@scala_annex_com_eed3si9n_sjson_new_scalajson_2_12",
"@scala_annex_com_lmax_disruptor",
"@scala_annex_jline_jline",
"@scala_annex_org_apache_logging_log4j_log4j_api",
"@scala_annex_org_apache_logging_log4j_log4j_core",
"@scala_compiler_2_12_8//jar",
],
"jar_sha256": "d3eddf8ab0ed3cfa4065b0f2148babbe763141c034a443cdaeddc62d294a5b92",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-logging_2.12/1.2.0/util-logging_2.12-1.2.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_util_logging_2_12",
"srcjar_sha256": "369ce35ccf5dbcb8c24fc097144bb7292c0a7716250b327376a9b291dad8e992",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-logging_2.12/1.2.0/util-logging_2.12-1.2.0-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_util_position_2_12",
"name": "jar/scala_annex_org/scala_sbt/util_position_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "1197b8993602f157b6aea90027b6e579ed7fd5d98ce8a16c089709ed705cf747",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-position_2.12/1.2.0/util-position_2.12-1.2.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_util_position_2_12",
"srcjar_sha256": "8170807414a6fa87f557455ac223d650bf5cf0d672c2c028acd0f42f08ebb702",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-position_2.12/1.2.0/util-position_2.12-1.2.0-sources.jar",
],
},
"lang": "java",
},
# duplicates in org.scala-sbt:util-relation_2.12 promoted to 1.2.0
# - ch.epfl.scala:zinc-core_2.12:1.1.7+62-0f4ad9d5 wanted version 1.1.3
# - org.scala-sbt:zinc-core_2.12:1.2.1 wanted version 1.2.0
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_util_relation_2_12",
"name": "jar/scala_annex_org/scala_sbt/util_relation_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "dd7c1bd57e69032f30c16c4efbd4adcb9cb76374200e37bc39f6b4748cfd6235",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-relation_2.12/1.2.0/util-relation_2.12-1.2.0.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_util_relation_2_12",
"srcjar_sha256": "920f15393ef0869645846b571ebddfc3173b399aa4c45cd528298e886e52222b",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/util-relation_2.12/1.2.0/util-relation_2.12-1.2.0-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_zinc_apiinfo_2_12",
"name": "jar/scala_annex_org/scala_sbt/zinc_apiinfo_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "4248a9ce6ea0f7d217a05fe18407fad4bcbcda5c433cc0c328b9aa46e24e81b2",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-apiinfo_2.12/1.2.1/zinc-apiinfo_2.12-1.2.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_zinc_apiinfo_2_12",
"srcjar_sha256": "9388d0ef0257a2d78acddac5ed43faf1950612fb7f4cbecce6d4b4045d6e5521",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-apiinfo_2.12/1.2.1/zinc-apiinfo_2.12-1.2.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_zinc_classfile_2_12",
"name": "jar/scala_annex_org/scala_sbt/zinc_classfile_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"jar_sha256": "f15787066975b9da2bdca2b57b2c98c93a01e2d760f35ce040f61e5172b9ad3b",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-classfile_2.12/1.2.1/zinc-classfile_2.12-1.2.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_zinc_classfile_2_12",
"srcjar_sha256": "e34d26f7f2f3300eb05402030b165ab50c29bb3a90fd7ec7c2e6b5782319c2cf",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-classfile_2.12/1.2.1/zinc-classfile_2.12-1.2.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_zinc_classpath_2_12",
"name": "jar/scala_annex_org/scala_sbt/zinc_classpath_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": ["@scala_annex_org_scala_lang_scala_compiler"],
"jar_sha256": "f955666b8b579bd0ab4c4c9810a25574aaf376976d3365c8810bac448a2f3e59",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-classpath_2.12/1.2.1/zinc-classpath_2.12-1.2.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_zinc_classpath_2_12",
"srcjar_sha256": "c46f07e58e646914d8aa9cfdb185aca6b6eaf325c8eaffbd6bf779b92589eff7",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-classpath_2.12/1.2.1/zinc-classpath_2.12-1.2.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_zinc_compile_core_2_12",
"name": "jar/scala_annex_org/scala_sbt/zinc_compile_core_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": [
"@scala_annex_org_scala_lang_modules_scala_parser_combinators_2_12",
"@scala_annex_org_scala_sbt_launcher_interface",
"@scala_annex_org_scala_sbt_util_control_2_12",
],
"jar_sha256": "05c9f2b23350420de4f9cf08f36c98fdd1521a03776d32bb585160980a89de07",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-compile-core_2.12/1.2.1/zinc-compile-core_2.12-1.2.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_zinc_compile_core_2_12",
"srcjar_sha256": "32aca2964bf88deaa74a0c5301c63229775057636029d0c30b6755c5cf649678",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-compile-core_2.12/1.2.1/zinc-compile-core_2.12-1.2.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_zinc_core_2_12",
"name": "jar/scala_annex_org/scala_sbt/zinc_core_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": [
"@scala_annex_org_scala_sbt_compiler_interface",
"@scala_annex_org_scala_sbt_io_2_12",
"@scala_annex_org_scala_sbt_util_logging_2_12",
"@scala_annex_org_scala_sbt_util_relation_2_12",
"@scala_annex_org_scala_sbt_zinc_apiinfo_2_12",
"@scala_annex_org_scala_sbt_zinc_classpath_2_12",
],
"jar_sha256": "503c2a362be203769eb117d25be022f83a9f1160644b8db3b43c05b40f829eea",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-core_2.12/1.2.1/zinc-core_2.12-1.2.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_zinc_core_2_12",
"srcjar_sha256": "14fe32caa6e5dc0f5128cc9a525807015f45c9535ed3d11fd090cccc0c0f5ae4",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-core_2.12/1.2.1/zinc-core_2.12-1.2.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_zinc_ivy_integration_2_12",
"name": "jar/scala_annex_org/scala_sbt/zinc_ivy_integration_2_12",
},
"import_args": {
"default_visibility": ["//visibility:public"],
"deps": [
"@scala_annex_org_scala_sbt_librarymanagement_core_2_12",
],
"jar_sha256": "3f3d4997b0f3ffe0cc6e8b775135fefeb5ec3a3c03d1157c80f629ae2149c695",
"jar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-ivy-integration_2.12/1.2.1/zinc-ivy-integration_2.12-1.2.1.jar",
],
"licenses": ["notice"],
"name": "scala_annex_org_scala_sbt_zinc_ivy_integration_2_12",
"srcjar_sha256": "e15b18819da5f9e920b257ee8771d3ea4fcc2e90af26b8254f9708799ce1b69c",
"srcjar_urls": [
"http://central.maven.org/maven2/org/scala-sbt/zinc-ivy-integration_2.12/1.2.1/zinc-ivy-integration_2.12-1.2.1-sources.jar",
],
},
"lang": "java",
},
{
"bind_args": {
"actual": "@scala_annex_org_scala_sbt_zinc_persist_2_12",
"name": "jar/scala_annex_org/scala_sbt/zinc_persist_2_12",
},
| |
is set to log(n)
- ``'verbose'`` (bool, default True)
Controls verbosity of debug output, including progress bars.
If intermediate_sample_info is not provided, the first progress bar reports the inner execution of
the bless algorithm, showing:
- lam: lambda value of the current iteration
- m: current size of the dictionary (number of centers contained)
- m_expected: expected size of the dictionary before sampling
- probs_dist: (mean, max, min) of the approximate rlss at the current iteration
Subsequent progress bars show the execution of each rejection sampling loop (i.e. once per sample generated)
- acc_thresh: latest computed probability of acceptance
- rej_iter: iteration of the rejection sampling loop (i.e. rejections so far)
- ``'max_iter'`` (int, default 1000)
Maximum number of intermediate sample rejections before giving up.
:return:
Sample from a DPP (as a list) and updated intermediate_sample_info
:rtype:
tuple(list, _IntermediateSampleInfoAlphaRescale)
"""
rng = check_random_state(random_state)
if intermediate_sample_info is None:
intermediate_sample_info = alpha_dpp_sampling_precompute_constants(
X_data=X_data,
eval_L=eval_L,
rng=rng,
**params)
r_func = params.get('r_func', lambda r: r)  # maps the estimated effective dimension deff_alpha_L_hat to the r constant (identity by default)
intermediate_sample_info = intermediate_sample_info._replace(r=r_func(intermediate_sample_info.deff_alpha_L_hat))
sampl, rej_count, intermediate_sample_info = alpha_dpp_sampling_do_sampling_loop(X_data,
eval_L,
intermediate_sample_info,
rng,
**params)
return sampl, intermediate_sample_info
##########
# k-DPPs #
##########
def k_dpp_vfx_sampler(size,
intermediate_sample_info,
X_data,
eval_L,
random_state=None,
**params):
""" First pre-compute quantities necessary for the vfx rejection sampling loop, such as the inner Nystrom approximation, and the RLS of all elements in :math:`\\mathbf{L}`.
Then, given the pre-computed information, run a rejection sampling loop to generate DPP samples.
To guarantee that the returned sample has size ``size``, we internally set desired_expected_size=size and
then repeatedly invoke dpp_vfx_sampler until a sample of the correct size is returned,
or exit with an error after a chosen number of rejections is reached.
:param int size: The size of the sample (i.e. the k of k-DPPs)
:param intermediate_sample_info:
If available, the pre-computed information necessary for the vfx rejection sampling loop.
If ``None``, this function will compute and return an ``_IntermediateSampleInfo`` with fields
- ``.alpha_star``: appropriate rescaling such that the expected sample size of :math:`\\operatorname{DPP}(\\alpha^* \\mathbf{L})` is equal to a user-indicated constant ``params['desired_expected_size']``, or 1.0 if no such constant was specified by the user.
- ``.logdet_I_A``: :math:`\\log \\det` of the Nystrom approximation of :math:`\\mathbf{L} + I`
- ``.q``: placeholder q constant used for vfx sampling, to be replaced by the user before the sampling loop
- ``.s`` and ``.z``: approximations of the expected sample size of :math:`\\operatorname{DPP}(\\alpha^* \\mathbf{L})` to be used in the sampling loop. For more details see :cite:`DeCaVa19`
- ``.rls_estimate``: approximations of the RLS of all elements in X (i.e. in :math:`\\mathbf{L}`)
:type intermediate_sample_info:
``_IntermediateSampleInfo`` or ``None``, default ``None``
:param array_like X_data:
dataset such that :math:`\\mathbf{L}=` ``eval_L(X_data)``, out of which we are sampling objects according to a DPP
:param callable eval_L:
Likelihood function.
Given two sets of n points X and m points Y, ``eval_L(X, Y)`` should compute the :math:`n \\times m` matrix containing the likelihood between points.
The function should also accept a single argument X and return ``eval_L(X) = eval_L(X, X)``.
As an example, see the implementation of any of the kernels provided by scikit-learn (e.g. `PairwiseKernel <https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.PairwiseKernel.html>`_).
:param random_state:
random source used for sampling, if None a RandomState is automatically generated
:type random_state:
RandomState or None, default None
:param dict params:
Dictionary including optional parameters:
- ``'rls_oversample_dppvfx'`` (float, default 4.0)
Oversampling parameter used to construct dppvfx's internal Nystrom approximation.
The ``rls_oversample_dppvfx``:math:`\\geq 1` parameter is used to increase the rank of the approximation by a ``rls_oversample_dppvfx`` factor.
This makes each rejection round slower and more memory intensive, but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
Empirically, a small factor ``rls_oversample_dppvfx``:math:`\\in [2,10]` seems to work.
It is suggested to start with a small number and increase if the algorithm fails to terminate.
- ``'rls_oversample_bless'`` (float, default 4.0)
Oversampling parameter used during bless's internal Nystrom approximation.
Note that this is a different Nystrom approximation than the one related to :func:`rls_oversample_dppvfx`, and can be tuned separately.
The ``rls_oversample_bless``:math:`\\geq 1` parameter is used to increase the rank of the approximation by a ``rls_oversample_bless`` factor.
This makes the one-time pre-processing slower and more memory intensive, but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
Empirically, a small factor ``rls_oversample_bless``:math:`\\in [2,10]` seems to work.
It is suggested to start with a small number and increase if the algorithm fails to terminate or is not accurate.
- ``'q_func'`` (function, default x: x*x)
Mapping from the estimated expected size of the DPP to the Poisson intensity used to choose the size of the intermediate sample.
A larger intermediate sample causes less efficient iterations but a higher acceptance probability.
- ``'nb_iter_bless'`` (int or None, default None)
Iterations for inner BLESS execution, if None it is set to log(n)
- ``'verbose'`` (bool, default True)
Controls verbosity of debug output, including progress bars.
If intermediate_sample_info is not provided, the first progress bar reports the inner execution of
the bless algorithm, showing:
- lam: lambda value of the current iteration
- m: current size of the dictionary (number of centers contained)
- m_expected: expected size of the dictionary before sampling
- probs_dist: (mean, max, min) of the approximate rlss at the current iteration
Subsequent progress bars show the execution of each rejection sampling loop (i.e. once per sample generated)
- acc_thresh: latest computed probability of acceptance
- rej_iter: iteration of the rejection sampling loop (i.e. rejections so far)
- ``'max_iter'`` (int, default 1000)
Maximum number of intermediate sample rejections before giving up.
- ``'max_iter_size_rejection'`` (int, default 100)
Maximum number of size-based rejections before giving up.
:return:
Sample from a DPP (as a list) and updated intermediate_sample_info
:rtype:
tuple(list, _IntermediateSampleInfo)
"""
rng = check_random_state(random_state)
if (intermediate_sample_info is None
or not np.isclose(intermediate_sample_info.s, size).item()):
intermediate_sample_info = vfx_sampling_precompute_constants(
X_data=X_data,
eval_L=eval_L,
desired_expected_size=size,
rng=rng,
**params)
q_func = params.get('q_func', lambda s: s * s)  # Poisson intensity for the intermediate sample size (default s**2, see 'q_func' in the docstring)
intermediate_sample_info = intermediate_sample_info._replace(q=q_func(intermediate_sample_info.s))
max_iter_size_rejection = params.get('max_iter_size_rejection', 100)
for size_rejection_iter in range(max_iter_size_rejection):
sampl, rej_count = vfx_sampling_do_sampling_loop(
X_data,
eval_L,
intermediate_sample_info,
rng,
**params)
intermediate_sample_info = intermediate_sample_info._replace(rej_to_first_sample=intermediate_sample_info.rej_to_first_sample + rej_count)
if len(sampl) == size:
break
else:
raise ValueError('The vfx sampler reached the maximum number of rejections allowed '
'for the k-DPP size rejection ({}), try to increase the q factor '
'(see q_func parameter) or the Nystrom approximation accuracy '
'(see rls_oversample_* parameters).'.format(max_iter_size_rejection))
return sampl, intermediate_sample_info
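# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# The RBF kernel, the toy dataset and the sample size below are assumptions;
# only the signature (size, intermediate_sample_info, X_data, eval_L,
# random_state) comes from k_dpp_vfx_sampler defined above. Reusing the
# returned intermediate_sample_info across calls amortizes the one-time
# Nystrom pre-computation.
# ---------------------------------------------------------------------------
def _example_k_dpp_vfx_usage():
    import numpy as np
    from sklearn.gaussian_process.kernels import RBF
    rng = np.random.RandomState(0)
    X = rng.randn(500, 3)           # toy dataset: 500 points in 3 dimensions
    eval_L = RBF(length_scale=1.0)  # eval_L(X, Y) -> likelihood matrix; eval_L(X) == eval_L(X, X)
    info = None                     # no pre-computed constants on the first call
    sample, info = k_dpp_vfx_sampler(10, info, X, eval_L, random_state=rng)
    return sample, info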
def alpha_k_dpp_sampler(size,
intermediate_sample_info,
X_data,
eval_L,
random_state=None,
**params):
""" First pre-compute quantities necessary for the alpha-dpp rejection sampling loop, such as the inner Nystrom
approximation and the initial rescaling alpha_hat for the binary search.
Then, given the pre-computed information, run a rejection sampling loop to generate k-DPP samples.
To guarantee that the returned sample has size ``size``, we internally set desired_expected_size=size and
then repeatedly invoke alpha_dpp_sampler until a sample of the correct size is returned,
or exit with an error after a chosen number of rejections is reached.
:param int size: The size of the sample (i.e. the k of k-DPPs)
:param intermediate_sample_info:
If available, the pre-computed information necessary for the alpha-dpp rejection sampling loop.
If ``None``, this function will compute and return an ``_IntermediateSampleInfoAlphaRescale`` (see :func:`alpha_dpp_sampling_precompute_constants`)
:type intermediate_sample_info:
``_IntermediateSampleInfoAlphaRescale`` or ``None``, default ``None``
:param array_like X_data:
dataset such that :math:`\\mathbf{L}=` ``eval_L(X_data)``, out of which we are sampling objects according to a DPP
:param callable eval_L:
Likelihood function.
Given two sets of n points X and m points Y, ``eval_L(X, Y)`` should compute the :math:`n \\times m` matrix containing the likelihood between points.
The function should also accept a single argument X and return ``eval_L(X) = eval_L(X, X)``.
As an example, see the implementation of any of the kernels provided by scikit-learn (e.g. `PairwiseKernel <https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.PairwiseKernel.html>`_).
:param random_state:
random source used for sampling, if None a RandomState is automatically generated
:type random_state:
RandomState or None, default None
:param dict params:
Dictionary including optional parameters:
- ``'rls_oversample_alphadpp'`` (float, default 4.0)
Oversampling parameter used to construct alphadpp's internal Nystrom approximation.
The ``rls_oversample_alphadpp``:math:`\\geq 1` parameter is used to increase the rank of the approximation by
<reponame>theGreenJedi/grr
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests the access control authorization workflow."""
import re
import time
import urlparse
from grr.gui import runtests_test
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import email_alerts
from grr.lib import flags
from grr.lib import flow
from grr.lib import hunts
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.aff4_objects import cronjobs
from grr.lib.aff4_objects import users as aff4_users
from grr.lib.flows.cron import system as cron_system
from grr.lib.rdfvalues import client as rdf_client
from grr.server import foreman as rdf_foreman
class TestWorkflowWithoutApprovals(test_lib.GRRSeleniumTest):
"""Tests acl policies when approvals system is not used."""
def setUp(self):
super(TestWorkflowWithoutApprovals, self).setUp()
self.UninstallACLChecks()
def testHostInformationDoesNotAskForApproval(self):
self.Open("/#/clients/C.0000000000000001")
# Make sure "Host Information" tab got shown.
self.WaitUntil(self.IsTextPresent, "Last Local Clock")
self.WaitUntil(self.IsTextPresent, "GRR Client Version")
self.WaitUntilNot(self.IsElementPresent,
"css=h3:contains('Create a new approval')")
def testBrowseVirtualFileSystemDoesNotAskForApproval(self):
self.Open("/#/clients/C.0000000000000001")
# Clicking on the navigator link explicitly to make sure it's not disabled.
self.Click("css=a[grrtarget='client.vfs']")
# Make sure "Browse Virtual Filesystem" pane is displayed.
self.WaitUntil(self.IsTextPresent, "stat.st_size")
self.WaitUntilNot(self.IsElementPresent,
"css=h3:contains('Create a new approval')")
def testStartFlowDoesNotAskForApproval(self):
self.Open("/#/clients/C.0000000000000001")
# Clicking on the navigator link explicitly to make sure it's not disabled.
self.Click("css=a[grrtarget='client.launchFlows']")
# Make sure "Start new flows" pane is displayed.
self.WaitUntil(self.IsTextPresent, "Please Select a flow to launch")
self.WaitUntilNot(self.IsElementPresent,
"css=h3:contains('Create a new approval')")
def testManageLaunchedFlowsDoesNotAskForApproval(self):
self.Open("/#/clients/C.0000000000000001")
# Clicking on the navigator link explicitly to make sure it's not disabled.
self.Click("css=a[grrtarget='client.flows']")
# Make sure "Manage launched flows" pane is displayed.
self.WaitUntil(self.IsTextPresent,
"Please select a flow to see its details here.")
self.WaitUntilNot(self.IsElementPresent,
"css=h3:contains('Create a new approval')")
class TestACLWorkflow(test_lib.GRRSeleniumTest):
"""Tests the access control workflow."""
# Using a Unicode string for the test here would be optimal but Selenium
# can't correctly enter Unicode text into forms.
reason = "Felt like it!"
def CreateSampleHunt(self, token=None):
client_rule_set = rdf_foreman.ForemanClientRuleSet(rules=[
rdf_foreman.ForemanClientRule(
rule_type=rdf_foreman.ForemanClientRule.Type.REGEX,
regex=rdf_foreman.ForemanRegexClientRule(
attribute_name="GRR client",
attribute_regex="GRR"))
])
with hunts.GRRHunt.StartHunt(hunt_name="SampleHunt",
client_rule_set=client_rule_set,
token=token or self.token) as hunt:
return hunt.session_id
def WaitForNotification(self, user):
sleep_time = 0.2
iterations = 50  # poll for up to iterations * sleep_time = 10 seconds
for _ in xrange(iterations):
try:
fd = aff4.FACTORY.Open(user,
aff4_users.GRRUser,
mode="r",
ignore_cache=True,
token=self.token)
pending_notifications = fd.Get(fd.Schema.PENDING_NOTIFICATIONS)
if pending_notifications:
return
except IOError:
pass
time.sleep(sleep_time)
self.fail("Notification for user %s never sent." % user)
def testNavigatorLinksDisabledForClientWithoutApproval(self):
self.Open("/#/clients/C.0000000000000001?navigator-test")
self.WaitUntil(self.IsElementPresent,
"css=a[grrtarget='client.vfs'].disabled")
self.WaitUntil(self.IsElementPresent,
"css=a[grrtarget='client.launchFlows'].disabled")
self.WaitUntil(self.IsElementPresent,
"css=a[grrtarget='client.flows'].disabled")
# Only the "Host Information" navigation link should be active.
self.WaitUntil(self.IsElementPresent,
"css=a[grrtarget='client.hostInfo']:not(.disabled)")
def testApprovalNotificationIsShownInHostInfoForUnapprovedClient(self):
self.Open("/#/clients/C.0000000000000001")
self.WaitUntil(self.IsTextPresent,
"You do not have an approval for this client.")
def testClickingOnRequestApprovalShowsApprovalDialog(self):
self.Open("/#/clients/C.0000000000000001")
self.Click("css=button[name=requestApproval]")
self.WaitUntil(self.IsElementPresent,
"css=h3:contains('Create a new approval')")
def testClientACLWorkflow(self):
self.Open("/")
self.Type("client_query", "C.0000000000000001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001", self.GetText,
"css=span[type=subject]")
# Choose client 1
self.Click("css=td:contains('0001')")
# We do not have an approval, so we need to request one.
self.WaitUntil(self.IsElementPresent, "css=div.no-approval")
self.Click("css=button[name=requestApproval]")
self.WaitUntil(self.IsElementPresent,
"css=h3:contains('Create a new approval')")
# This asks the user "test" (which is us) to approve the request.
self.Type("css=input[id=acl_approver]", "test")
self.Type("css=input[id=acl_reason]", self.reason)
self.ClickUntilNotVisible("acl_dialog_submit")
self.WaitForNotification("aff4:/users/test")
# User test logs in as an approver.
self.Open("/")
self.WaitUntilEqual("1", self.GetText, "notification_button")
self.Click("notification_button")
self.ClickUntilNotVisible("css=td:contains('grant access to GRR client')")
self.WaitUntilContains("Grant Access for GRR Use", self.GetText,
"css=h2:contains('Grant')")
self.WaitUntil(self.IsTextPresent, "The user test has requested")
self.Click("css=button:contains('Approve')")
self.WaitUntil(self.IsTextPresent,
"You have granted access for C.0000000000000001 to test")
self.WaitForNotification("aff4:/users/test")
self.Open("/")
# We should be notified that we have an approval
self.WaitUntilEqual("1", self.GetText, "notification_button")
self.Click("notification_button")
self.ClickUntilNotVisible("css=td:contains('has granted you access')")
# This is insufficient - we need 2 approvers.
self.WaitUntilContains("Requires 2 approvers for access.", self.GetText,
"css=div#acl_form")
# Lets add another approver.
token = access_control.ACLToken(username="approver")
flow.GRRFlow.StartFlow(
client_id="C.0000000000000001",
flow_name="GrantClientApprovalFlow",
reason=self.reason,
delegate="test",
subject_urn=rdf_client.ClientURN("C.0000000000000001"),
token=token)
# Check if we see that the approval has already been granted.
self.Open("/")
self.Click("notification_button")
self.ClickUntilNotVisible("css=td:contains('grant access to GRR client')")
self.WaitUntil(self.IsTextPresent,
"This approval has already been granted!")
# Try again:
self.Open("/")
self.Click("notification_button")
self.ClickUntilNotVisible("css=td:contains('has granted you access')")
self.Click("css=span:contains('fs')")
# This is ok - it should work now
self.WaitUntilContains("aff4:/C.0000000000000001/fs", self.GetText,
"css=h3:contains('fs')")
# One email for the original request and one for each approval.
self.assertEqual(len(self.emails_sent), 3)
def testRecentReasonBox(self):
test_reason = u"ástæða"
self.Open("/")
with self.ACLChecksDisabled():
token = access_control.ACLToken(username="test", reason=test_reason)
self.RequestAndGrantClientApproval("C.0000000000000006", token=token)
self.Type("client_query", "C.0000000000000006")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000006", self.GetText,
"css=span[type=subject]")
# Choose client 6
self.Click("css=td:contains('0006')")
self.WaitUntil(self.IsTextPresent, u"Access reason: %s" % test_reason)
# By now we should have a recent reason set, let's see if it shows up in the
# ACL dialog.
self.Type("client_query", "C.0000000000000001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001", self.GetText,
"css=span[type=subject]")
# Choose client 1
self.Click("css=td:contains('0001')")
# We do not have an approval, so check that the hint is shown, that the
# interrogate button is disabled and that the menu is disabled.
self.WaitUntil(self.IsElementPresent, "css=div.no-approval")
self.WaitUntil(self.IsElementPresent,
"css=button:contains('Interrogate')[disabled]")
self.WaitUntil(self.IsElementPresent, "css=a.nav-link.disabled")
# Request an approval.
self.Click("css=button[name=requestApproval]")
self.WaitUntil(self.IsElementPresent,
"css=h3:contains('Create a new approval')")
self.WaitUntilEqual(2, self.GetCssCount,
"css=select[id=acl_recent_reasons] option")
self.assertEqual(
"Enter New Reason...",
self.GetText("css=select[id=acl_recent_reasons] option:nth(0)"))
self.assertEqual(
test_reason,
self.GetText("css=select[id=acl_recent_reasons] option:nth(1)"))
# The reason text box should be there and enabled.
element = self.GetElement("css=input[id=acl_reason]")
self.assertTrue(element.is_enabled())
self.Select("css=select[id=acl_recent_reasons]", test_reason)
# Make sure clicking the recent reason greys out the reason text box.
element = self.GetElement("css=input[id=acl_reason]")
self.assertFalse(element.is_enabled())
# Ok now submit this.
self.Type("css=input[id=acl_approver]", "test")
self.ClickUntilNotVisible("acl_dialog_submit")
# And make sure the approval was created...
fd = aff4.FACTORY.Open("aff4:/ACL/C.0000000000000001/test",
token=self.token)
approvals = list(fd.ListChildren())
self.assertEqual(len(approvals), 1)
# ... using the correct reason.
self.assertEqual(
utils.SmartUnicode(approvals[0].Basename().decode("base64")),
test_reason)
def testHuntACLWorkflow(self):
with self.ACLChecksDisabled():
hunt_id = self.CreateSampleHunt()
# Open up and click on View Hunts.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, "SampleHunt")
# Select a Hunt.
self.Click("css=td:contains('SampleHunt')")
# Click on Run and wait for dialog again.
self.Click("css=button[name=RunHunt]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to run this hunt?")
# Click on "Proceed" and wait for authorization dialog to appear.
self.Click("css=button[name=Proceed]")
# This should be rejected now and a form request is made.
self.WaitUntil(self.IsElementPresent,
"css=h3:contains('Create a new approval')")
# This asks the user "test" (which is us) to approve the request.
self.Type("css=input[id=acl_approver]", "test")
self.Type("css=input[id=acl_reason]", self.reason)
self.Click("acl_dialog_submit")
# "Request Approval" dialog should go away
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
self.WaitForNotification("aff4:/users/test")
self.Open("/")
self.WaitUntilEqual("1", self.GetText, "notification_button")
self.Click("notification_button")
self.ClickUntilNotVisible("css=td:contains('Please grant access to hunt')")
self.WaitUntilContains("Grant Access for GRR Use", self.GetText,
"css=h2:contains('Grant')")
self.WaitUntil(self.IsTextPresent, "The user test has requested")
# Hunt overview should be visible
self.WaitUntil(self.IsTextPresent, "SampleHunt")
self.WaitUntil(self.IsTextPresent, "Hunt ID")
self.WaitUntil(self.IsTextPresent, "Hunt URN")
self.WaitUntil(self.IsTextPresent, "Clients Scheduled")
self.Click("css=button:contains('Approve')")
self.WaitUntil(self.IsTextPresent,
"You have granted access for %s to test" % hunt_id)
self.WaitForNotification("aff4:/users/test")
self.Open("/")
# We should be notified that we have an approval
self.WaitUntilEqual("1", self.GetText, "notification_button")
self.Click("notification_button")
self.WaitUntil(self.GetText,
"css=td:contains('has granted you access to hunt')")
self.ClickUntilNotVisible("css=tr:contains('has granted you access') a")
# Run SampleHunt (it should be selected by default).
self.WaitUntil(self.IsTextPresent, "SampleHunt")
# Click on Run and wait for dialog again.
self.Click("css=button[name=RunHunt]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to run this hunt?")
# Click on "Proceed" and wait for authorization dialog to appear.
self.Click("css=button[name=Proceed]")
# This is insufficient - we need 2 approvers.
self.WaitUntilContains("Requires 2 approvers for access.", self.GetText,
"css=div#acl_form")
    # Let's add another approver.
token = access_control.ACLToken(username="approver")
flow.GRRFlow.StartFlow(flow_name="GrantHuntApprovalFlow",
subject_urn=hunt_id,
reason=self.reason,
delegate="test",
token=token)
self.WaitForNotification("aff4:/users/test")
self.Open("/")
# We should be notified that we have an approval
self.WaitUntilEqual("1", self.GetText, "notification_button")
self.Click("notification_button")
self.ClickUntilNotVisible("css=tr:contains('has granted you access') a")
# Wait for modal backdrop to go away.
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
self.WaitUntil(self.IsTextPresent, "SampleHunt")
# Run SampleHunt (it should be selected by default).
self.Click("css=button[name=RunHunt]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to run this hunt?")
# Click on "Proceed" and wait for authorization dialog to appear.
self.Click("css=button[name=Proceed]")
# This is still insufficient - one of the approvers should have
# "admin" label.
self.WaitUntilContains("At least 1 approver(s) should have 'admin' label.",
self.GetText, "css=div#acl_form")
# Let's make "approver" an admin.
with self.ACLChecksDisabled():
self.CreateAdminUser("approver")
# Check if we see that the approval has already been granted.
self.Open("/")
self.Click("notification_button")
self.ClickUntilNotVisible("css=td:contains('Please grant access to hunt')")
self.WaitUntil(self.IsTextPresent,
"This approval has already been granted!")
# And try again
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, "SampleHunt")
# Select and run SampleHunt.
self.Click("css=td:contains('SampleHunt')")
# Run SampleHunt (it should be selected by default).
self.WaitUntil(self.IsTextPresent, "SampleHunt")
self.Click("css=button[name=RunHunt]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to run this hunt?")
# Click on "Proceed" and wait for the success status message.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Hunt started successfully!")
def Create2HuntsForDifferentUsers(self):
# Create 2 hunts. Hunt1 by "otheruser" and hunt2 by "test".
# Both hunts will be approved by user "approver".
with self.ACLChecksDisabled():
hunt1_id = self.CreateSampleHunt(
token=access_control.ACLToken(username="otheruser"))
hunt2_id = self.CreateSampleHunt(
token=access_control.ACLToken(username="test"))
self.CreateAdminUser("approver")
token = access_control.ACLToken(username="otheruser")
flow.GRRFlow.StartFlow(flow_name="RequestHuntApprovalFlow",
subject_urn=hunt1_id,
reason=self.reason,
approver="approver",
token=token)
token = access_control.ACLToken(username="test")
flow.GRRFlow.StartFlow(flow_name="RequestHuntApprovalFlow",
subject_urn=hunt2_id,
reason=self.reason,
approver="approver",
token=token)
token = access_control.ACLToken(username="approver")
flow.GRRFlow.StartFlow(flow_name="GrantHuntApprovalFlow",
subject_urn=hunt1_id,
reason=self.reason,
delegate="otheruser",
token=token)
token = access_control.ACLToken(username="approver")
flow.GRRFlow.StartFlow(flow_name="GrantHuntApprovalFlow",
subject_urn=hunt2_id,
reason=self.reason,
delegate="test",
token=token)
def testHuntApprovalsArePerHunt(self):
with self.ACLChecksDisabled():
self.Create2HuntsForDifferentUsers()
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsTextPresent, "SampleHunt")
#
# Check that test user can't start/stop/modify hunt1.
#
self.Click("css=tr:contains('SampleHunt') td:contains('otheruser')")
# Run hunt
    # Click
import argparse
import os
import errno
from parseable import ImproperXmlException
from problem import Problem, Document
from subprocess import call
from random import randint
from config import get_problem_root, get_private_types
import xml.etree.ElementTree as ET
from color import *
from pdfbuilder import build, temp_file_remove
def types_imply_private(types):
if types:
for private in get_private_types():
if private in types:
return True
return False
def satisfies(version, settings, used_ins):
if (settings.allowed_topics and
[topic for topic in version.topics
if topic not in settings.allowed_topics]):
return False
if (settings.required_topics and not
[topic for topic in version.topics
if topic in settings.required_topics]):
return False
if (settings.required_types and not
[type for type in version.types
if type in settings.required_types]):
return False
if (settings.written and version.year not in settings.written):
return False
if settings.todo or settings.grep:
lower_sol = version.solution.lower()
lower_rub = version.rubric.lower()
if (settings.todo and lower_sol.find("todo") == -1
and lower_rub.find("todo") == -1):
return False
if settings.grep:
lower_body = version.body.lower()
for word in settings.grep:
word = word.lower()
if (lower_sol.find(word) == -1 and
lower_rub.find(word) == -1 and
lower_body.find(word) == -1):
return False
if settings.used_in or settings.not_used_in:
matches_used = False
for actual in used_ins:
if settings.used_in and actual.year in settings.used_in:
matches_used = True
if settings.not_used_in and actual.year in settings.not_used_in and not (actual.private or types_imply_private(version.types)):
return False
if not used_ins:
if settings.used_in and "none" in settings.used_in:
matches_used = True
if settings.not_used_in and "none" in settings.not_used_in:
return False
if settings.used_in and not matches_used:
return False
if (settings.authors and not
[author for author in version.authors
if author in settings.authors]):
return False
return True
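# Illustrative sketch (not part of the original tool): how `satisfies` can be
# driven directly with a hand-built settings object. The Namespace fields
# mirror the predicate flags registered by add_predicate_flags further below;
# `version` and `used_ins` are assumed to come from Problem.newest_version()
# and Problem.used_in respectively. The topic/keyword values are hypothetical.
def _example_satisfies_usage(version, used_ins):
    example_settings = argparse.Namespace(
        allowed_topics=None,
        required_topics=["graphs"],
        required_types=None,
        written=None,
        todo=False,
        grep=["induction"],
        used_in=None,
        not_used_in=["2015"],
        authors=None,
    )
    return satisfies(version, example_settings, used_ins)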
def build_wrapper(document, filename, settings):
filename = os.path.basename(filename)
if document.versions:
if filename.endswith(".pdf"):
filename = filename[:-4]
assert filename
else:
print_warning("Output will be named '{}.pdf'".format(filename))
resources = set()
for version in document.versions:
for resource in version.resources:
resources.add(resource)
build(document.build(settings.solutions,
settings.rubrics, settings.metadata),
resources,
filename,
settings.keep)
else:
print_error("No problems were added to the build successfully.")
def build_doc(settings):
document = Document(settings.document)
try:
tree = ET.parse(settings.document)
document.parse_tree(tree)
build_wrapper(document, settings.filename, settings)
except (ImproperXmlException, ET.ParseError):
print_error("Could not parse {}".format(settings.document))
def build_each(settings):
document = Document(settings.document)
try:
tree = ET.parse(settings.document)
document.parse_tree(tree)
for i,v in enumerate(document.versions):
problem_document = Document()
problem_document.name = document.name + " Problem " + str(i+1)
problem_document.year = "1901"
problem_document.due = "Grading"
problem_document.blurb = ""
problem_document.versions.append(v)
build_wrapper(problem_document, settings.document[:-4] + "-" + str(i+1) + ".pdf", settings)
except (ImproperXmlException, ET.ParseError):
print_error("Could not parse {}".format(settings.document))
def build_if(settings):
document = Document()
document.name = "".join(settings.title)
# TODO:
document.year = "1901"
document.due = "Never"
document.blurb = ""
if os.path.isdir(settings.directory):
for dirpath, dirnames, filenames in os.walk(settings.directory):
for filename in filenames:
if filename.endswith(".xml"):
filename = os.path.join(dirpath, filename)
try:
tree = ET.parse(filename)
problem = Problem(filename)
problem.parse_tree(tree, validate_versions=False)
currentVersions = []
if settings.all:
everyVersion = problem.get_versions()
currentVersions = [everyVersion[0]] + [v for v in everyVersion[1:] if v.standalone]
else:
currentVersions = [problem.newest_version()]
firstInProblem = settings.all # only use separators if including multiple versions per problem,
for version in currentVersions:
try:
version.validate()
if satisfies(version, settings, problem.used_in):
version.separateFromPrevious = firstInProblem
firstInProblem = False
document.versions.append(version)
if settings.verbose:
print color("Added: ", color_code(GREEN)), filename, "Version {}".format(version.vid)
elif settings.verbose:
print color("Skipped (Predicate): ", color_code(CYAN)), filename, "Version {}".format(version.vid)
except ImproperXmlException:
if settings.verbose:
print color("Error (Validation): ", color_code(YELLOW)), filename, "Version {}".format(version.vid)
except ImproperXmlException:
if settings.verbose:
print color("Error (Problem Validation): ", color_code(YELLOW)), filename
except ET.ParseError:
if settings.verbose:
print color("Error (XML Parsing): ", color_code(RED, bold=True)), filename
except IOError as e:
# Permission errors can be safely skipped
if e.errno != errno.EACCES:
print color("Error (IO): ", color_code(RED)), filename
raise # TODO
elif settings.verbose:
print color("Error (Permissions): ", color_code(MAGENTA)), filename
except Exception:
print_error(filename)
raise
build_wrapper(document, settings.filename, settings)
else:
print_error("The directory '{}' does not exist".format(settings.directory))
def build_list(settings):
if os.path.isdir(get_problem_root()):
for dirpath, dirnames, filenames in os.walk(get_problem_root()):
for filename in filenames:
if filename.endswith(".xml"):
filename = os.path.join(dirpath, filename)
try:
tree = ET.parse(filename)
problem = Problem(filename)
problem.parse_tree(tree, validate_versions=False)
version = problem.newest_version()
version.validate()
if satisfies(version, settings, problem.used_in):
if settings.verbose:
print color("Added: ", color_code(GREEN)), filename
else:
print filename
elif settings.verbose:
print color("Skipped (Predicate): ", color_code(CYAN)), filename
except ImproperXmlException:
if settings.verbose:
print color("Error (Validation): ", color_code(YELLOW)), filename
except ET.ParseError:
if settings.verbose:
print color("Error (XML Parsing): ", color_code(RED, bold=True)), filename
except IOError as e:
# Permission errors can be safely skipped
if e.errno != errno.EACCES:
print color("Error (IO): ", color_code(RED)), filename
raise # TODO
elif settings.verbose:
print color("Error (Permissions): ", color_code(MAGENTA)), filename
except Exception:
print_error(filename)
raise
else:
print_error("The directory '{}' does not exist".format(get_problem_root()))
def build_single(settings):
document = Document("")
document.name = "".join(settings.title)
#TODO
document.year = "1900"
document.due = "Never"
document.blurb = ""
outname = ""
if settings.problem.endswith(".xml"):
outname = settings.problem[:-4] + ".pdf"
assert outname != ".pdf"
else:
print_error("Problem file does not have a .xml extension")
exit(1)
try:
tree = ET.parse(settings.problem)
problem = Problem(settings.problem)
problem.parse_tree(tree, validate_versions=False)
version = problem.newest_version()
version.validate()
document.versions.append(version)
build_wrapper(document, outname, settings)
except (ImproperXmlException, ET.ParseError):
print_warning("Could not parse '{}'".format(settings.problem))
print "Run '22edit validate' to check for common problems."
def build_specific(settings):
document = Document("")
document.name = "".join(settings.title)
#TODO
document.year = "1900"
document.due = " "
for filename in settings.problems:
try:
tree = ET.parse(filename)
problem = Problem(filename)
problem.parse_tree(tree, validate_versions=False)
version = problem.newest_version()
version.validate()
document.versions.append(version)
except (ImproperXmlException, ET.ParseError):
print_warning("Could not parse {}".format(settings.filename))
build_wrapper(document, settings.filename, settings)
def add_common_flags(subparser, title=True):
subparser.add_argument('-k', dest='keep', action='store_true',
default=False, help='Keeps the intermediate .tex file')
subparser.add_argument('-m', dest='metadata', action='store_true',
default=False, help='Builds the problems with attached metadata')
subparser.add_argument('-r', dest='rubrics', action='store_true',
default=False, help='Builds the problems with rubrics')
subparser.add_argument('-s', dest='solutions', action='store_true',
default=False, help='Builds the problems with solutions')
if title:
subparser.add_argument('--title', nargs=1, required=False,
default="Problem", help='Sets the title of the problem build')
def add_verbose_flag(subparser):
subparser.add_argument('--verbose', '-v', action='store_true',
dest='verbose', default=False,
help='Prints a verbose description of the files being considered')
def add_doc_parser(parser):
subparser = parser.add_parser('doc',
help='Builds a particular assignment XML file into a pdf')
subparser.set_defaults(func=build_doc)
subparser.add_argument('document', metavar='D',
help='The assignment XML file to build')
subparser.add_argument('filename', metavar='O',
help='The destination of the rendered PDF')
add_common_flags(subparser, title=False)
def add_predicate_flags(subparser):
subparser.add_argument('--all', required=False,
dest='all', action='store_true', default=False,
help='If present, will include any standalone versions in addition to the most recent ones')
subparser.add_argument('--allowed-topics', required=False,
dest='allowed_topics', nargs='+',
help='If present, will restrict the allowed topics: a problem will not be built if it uses any topic outside of the provided')
subparser.add_argument('--authors', required=False, dest='authors',
nargs='+', help='If present, restricts to problems which were written by any of the given authors')
subparser.add_argument('--grep', required=False, dest='grep', nargs='+',
                        help='If present, restricts to problems whose rubric, solution, or body contains all of the given words. Words are treated separately and matched case-insensitively.')
subparser.add_argument('--not-used-in', required=False, dest='not_used_in',
nargs='+', help='If present, restricts to problems which were used in none of the given years')
subparser.add_argument('--required-topics', required=False,
dest='required_topics', nargs='+',
help='If present, will specify the required topics: a problem will be built only if it uses at least one of the provided')
subparser.add_argument('--required-types', required=False,
dest='required_types', nargs='+',
help='If present, will specify the required types: a problem will be built only if it uses at least one of the provided')
subparser.add_argument('--todo', dest='todo', action='store_true',
default=False, help='If present, restricts to problems that have "todo" in their solution or rubric.')
subparser.add_argument('--used-in', required=False, dest='used_in',
nargs='+', help='If present, restricts to problems which were used in any of the given years')
subparser.add_argument('--written', required=False, dest='written',
                        nargs='+', help='If present, specifies the set of years in which a problem\'s most recent version may have been written (for it to be included)')
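# Example invocations (illustrative only -- the executable name depends on how
# this argparse tree is wired into the CLI entry point, and the paths/years
# shown here are placeholders):
#   <tool> from problems/ graphs.pdf --required-topics graphs --not-used-in 2015 -s
#   <tool> all exam.pdf --written 2014 2015 --todo -v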
def add_each_parser(parser):
subparser = parser.add_parser('each',
help='Builds each problem of a document into separate PDFs, in preparation for grading')
subparser.set_defaults(func=build_each)
subparser.add_argument('document', metavar='D',
help='The assignment XML file where each problem is stored')
add_common_flags(subparser)
def add_from_parser(parser):
subparser = parser.add_parser('from',
help='Builds all problems that satisfy the given predicates within a particular directory')
subparser.set_defaults(func=build_if)
subparser.add_argument('directory',
help='The search directory containing all problems to examine')
subparser.add_argument('filename', metavar='O',
help='The destination of the rendered PDF')
add_common_flags(subparser)
add_predicate_flags(subparser)
add_verbose_flag(subparser)
def add_all_parser(parser):
subparser = parser.add_parser('all',
help='Builds all problems that satisfy the given predicates within the problem root directory')
subparser.set_defaults(func=build_if, directory=get_problem_root())
subparser.add_argument('filename', metavar='O',
help='The destination of the output PDF')
add_common_flags(subparser)
add_predicate_flags(subparser)
add_verbose_flag(subparser)
def add_list_parser(parser):
subparser = parser.add_parser('list',
                        help='Lists all problems that satisfy the given predicates within the problem root directory')
save the figure or not. If not
            the figure and axis are returned for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing. (fig, ax1, ax2)
"""
band_up = Band(
folder=folder,
spin='up',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
band_down = Band(
folder=folder,
spin='down',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
if stack == 'vertical':
fig = plt.figure(figsize=(figsize[0], 2 * figsize[1]), dpi=400)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
_figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
_figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])
elif stack == 'horizontal':
fig = plt.figure(figsize=(2 * figsize[0], figsize[1]), dpi=400)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
_figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
_figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])
bbox = dict(boxstyle="round", fc="white")
ax1.annotate(
'$\\uparrow$',
xy=(0.02, 0.98),
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
)
ax2.annotate(
'$\\downarrow$',
xy=(0.02, 0.98),
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
)
band_up.plot_elements(
ax=ax1,
scale_factor=scale_factor,
elements=elements,
color_list=color_list,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
    band_down.plot_plain(
        ax=ax1,
        color=unprojected_band_color,
        linewidth=0.5,
    )
    band_down.plot_elements(
        ax=ax2,
        scale_factor=scale_factor,
        elements=elements,
        color_list=color_list,
        legend=legend,
        linewidth=linewidth,
        band_color=band_color,
    )
    band_up.plot_plain(
        ax=ax2,
        color=unprojected_band_color,
        linewidth=0.5,
    )
plt.tight_layout(pad=0.2)
if save:
plt.savefig(output)
else:
return fig, ax1, ax2
def band_element_orbital_spin_polarized(
folder,
element_orbital_pairs,
output='band_element_orbital_sp.png',
scale_factor=6,
color_list=None,
legend=True,
linewidth=0.75,
band_color='black',
unprojected_band_color='gray',
unprojected_linewidth=0.6,
fontsize=7,
annotations=['$\\uparrow$ ', '$\\downarrow$ '],
annotation_xy=(0.02, 0.98),
figsize=(4, 3),
erange=[-6, 6],
stack='vertical',
hse=False,
kpath=None,
n=None,
save=True,
):
"""
    This function generates an element orbital spin polarized band structure. It plots two panels
    stacked on top of each other or next to each other. The top or left panel projects onto the
    spin up bands and the bottom or right panel projects onto the spin down bands.
Parameters:
folder (str): This is the folder that contains the VASP files
output (str): File name of the resulting plot.
spin (str): Choose which spin direction to parse. ('up' or 'down')
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
element_orbital_pairs (list[list]): Selected orbitals on selected elements to plot.
This should take the form of [[element index, orbital_index], ...].
color_list (list): List of colors of the same length as the atom_orbital_pairs
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (str): Color of the plain band structure
unprojected_band_color (str): Color of the unprojected band
unprojected_linewidth (float): Line width of the unprojected bands
annotations (list): Annotations to put on the top and bottom (left and right) figures.
By default it will show the spin up and spin down arrows.
        annotation_xy (list / tuple): Fractional (x, y) coordinates of the annotation location
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list / tuple): Range of energy to show in the plot [low, high]
stack (str): Determines how the plots are stacked (vertical or horizontal)
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for HSE calculations this
information is a required input for proper labeling of the figure
for HSE calculations. This information is extracted from the KPOINTS
            files for non-HSE calculations. (G is automatically converted to \\Gamma)
n (int): Number of points between each high symmetry points.
This is also only required for HSE calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
            the figure and axis are returned for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing. (fig, ax1, ax2)
"""
band_up = Band(
folder=folder,
spin='up',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
band_down = Band(
folder=folder,
spin='down',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
if stack == 'vertical':
fig = plt.figure(figsize=(figsize[0], 2 * figsize[1]), dpi=400)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
_figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
_figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])
elif stack == 'horizontal':
fig = plt.figure(figsize=(2 * figsize[0], figsize[1]), dpi=400)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
_figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
_figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])
bbox = dict(boxstyle='round', fc='white',
edgecolor='gray', alpha=0.95, pad=0.3)
ax1.annotate(
annotations[0],
xy=annotation_xy,
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
fontsize=fontsize,
)
ax2.annotate(
annotations[1],
xy=annotation_xy,
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
fontsize=fontsize,
)
band_up.plot_element_orbitals(
ax=ax1,
scale_factor=scale_factor,
element_orbital_pairs=element_orbital_pairs,
color_list=color_list,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
band_down.plot_plain(
ax=ax1,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
band_down.plot_element_orbitals(
ax=ax2,
scale_factor=scale_factor,
element_orbital_pairs=element_orbital_pairs,
color_list=color_list,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
band_up.plot_plain(
ax=ax2,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
plt.tight_layout(pad=0.2)
if save:
plt.savefig(output)
else:
return fig, ax1, ax2
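# Illustrative usage sketch (hypothetical folder path and orbital indices; this
# helper is not part of the original module). It projects two
# [element index, orbital index] pairs onto a vertically stacked spin-polarized
# band structure and returns the figure for further editing.
def _example_band_element_orbital_sp_usage():
    fig, ax_up, ax_down = band_element_orbital_spin_polarized(
        folder='./band_calculation',  # hypothetical VASP output folder
        element_orbital_pairs=[[0, 6], [1, 3]],
        stack='vertical',
        erange=[-4, 4],
        save=False,  # return (fig, ax1, ax2) instead of writing a file
    )
    fig.savefig('band_element_orbital_sp_example.png')
    return fig, ax_up, ax_down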
def band_element_spd_spin_polarized(
folder,
elements,
output='band_elements_spd_sp.png',
scale_factor=2,
order=['s', 'p', 'd'],
color_dict=None,
legend=True,
linewidth=0.75,
band_color='black',
unprojected_band_color='gray',
unprojected_linewidth=0.6,
fontsize=7,
annotations=['$\\uparrow$ ', '$\\downarrow$ '],
annotation_xy=(0.02, 0.98),
figsize=(4, 3),
erange=[-6, 6],
stack='vertical',
hse=False,
kpath=None,
n=None,
save=True,
):
"""
    This function generates an s, p, d spin polarized band structure for specific elements. It plots two panels
    stacked on top of each other or next to each other. The top or left panel projects onto the
    spin up bands and the bottom or right panel projects onto the spin down bands.
Parameters:
folder (str): This is the folder that contains the VASP files
elements (list): Elements to project onto
output (str): File name of the resulting plot.
spin (str): Choose which spin direction to parse. ('up' or 'down')
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
order (list): This determines the order in which the points are plotted on the
graph. This is an option because sometimes certain orbitals can be hidden
under others because they have a larger weight. For example, if the
weights of the d orbitals are greater than that of the s orbitals, it
might be smart to choose ['d', 'p', 's'] as the order so the s orbitals are
plotted over the d orbitals.
color_dict (dict[str][str]): This option allow the colors of the s, p, and d
orbitals to be specified. Should be in the form of:
{'s': <s color>, 'p': <p color>, 'd': <d color>}
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (str): Color of the plain band structure
unprojected_band_color (str): Color of the unprojected band
unprojected_linewidth (float): Line width of the unprojected bands
annotations (list): Annotations to put on the top and bottom (left and right) figures.
By default it will show the spin up and spin down arrows.
        annotation_xy (list / tuple): Fractional (x, y) coordinates of the annotation location
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list / tuple): Range of energy to show in the plot [low, high]
stack (str): Determines how the plots are stacked (vertical or horizontal)
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for HSE calculations this
information is a required input for proper labeling of the figure
for HSE calculations. This information is extracted from the KPOINTS
            files for non-HSE calculations. (G is automatically converted to \\Gamma)
n (int): Number of points between each high symmetry points.
This is also only required for HSE calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
            the figure and axis are returned for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
        the output name. If save == False, the function will return the matplotlib figure
        and axis for further editing. (fig, ax1, ax2)
    """
# Repository: harshendrashah/Spell-Corrector
import pandas as pd
import numpy as np
import tensorflow as tf
import os
from os import listdir
from os.path import isfile, join
from collections import namedtuple
from tensorflow.python.layers.core import Dense
from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
import time
import re
from sklearn.model_selection import train_test_split
import json
import difflib
from parameters import *
def load_book(path):
"""Load a book from its file"""
input_file = os.path.join(path)
with open(input_file) as f:
book = f.read()
return book
def clean_text(text):
#Remove unwanted characters and extra spaces from the text
text = re.sub(r'\n', ' ', text)
text = re.sub(r'[{}@_*>()\\#%+=\[\]]','', text)
text = re.sub('a0','', text)
text = re.sub('\'92t','\'t', text)
text = re.sub('\'92s','\'s', text)
text = re.sub('\'92m','\'m', text)
text = re.sub('\'92ll','\'ll', text)
text = re.sub('\'91','', text)
text = re.sub('\'92','', text)
text = re.sub('\'93','', text)
text = re.sub('\'94','', text)
text = re.sub('\.','. ', text)
text = re.sub('\!','', text)
text = re.sub('\?','', text)
text = re.sub(' +',' ', text)
text = re.sub(',','', text)
text = re.sub('-','', text)
text = re.sub('; ','', text)
text = re.sub(':','', text)
text = re.sub('"','', text)
text = re.sub("'97",'\'', text)
return text
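# Illustrative sketch of what clean_text does to a short, hypothetical string:
# commas and exclamation marks are dropped, '.' gains a trailing space,
# newlines become spaces, and runs of spaces collapse to a single space.
def _example_clean_text_usage():
    cleaned = clean_text("Hello,  world!\nIt's  a test.")
    assert cleaned == "Hello world It's a test. "
    return cleaned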
def noise_maker(sentence, threshold):
'''Relocate, remove, or add characters to create spelling mistakes'''
noisy_sentence = []
i = 0
while i < len(sentence):
random = np.random.uniform(0,1,1)
# Most characters will be correct since the threshold value is high
if random < threshold:
noisy_sentence.append(sentence[i])
else:
new_random = np.random.uniform(0,1,1)
# ~33% chance characters will swap locations
if new_random > 0.67:
if i == (len(sentence) - 1):
# If last character in sentence, it will not be typed
continue
else:
# if any other character, swap order with following character
noisy_sentence.append(sentence[i+1])
noisy_sentence.append(sentence[i])
i += 1
# ~33% chance an extra lower case letter will be added to the sentence
elif new_random < 0.33:
random_letter = np.random.choice(letters, 1)[0]
noisy_sentence.append(vocab_to_int[random_letter])
noisy_sentence.append(sentence[i])
# ~33% chance a character will not be typed
else:
pass
i += 1
return noisy_sentence
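# Illustrative sketch (helper not in the original script): corrupt an encoded
# sentence with a 5% noise rate and decode both versions for inspection.
# `int_to_vocab` is built further below, so this helper must only be called
# after the module has finished loading the vocabulary.
def _example_noise_maker_usage(encoded_sentence):
    noisy = noise_maker(encoded_sentence, threshold=0.95)
    original_text = "".join(int_to_vocab[i] for i in encoded_sentence)
    noisy_text = "".join(int_to_vocab[i] for i in noisy)
    return original_text, noisy_text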
def model_inputs():
    '''Create placeholders for inputs to the model'''
with tf.name_scope('inputs'):
inputs = tf.placeholder(tf.int32, [None, None], name='inputs')
with tf.name_scope('targets'):
targets = tf.placeholder(tf.int32, [None, None], name='targets')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
inputs_length = tf.placeholder(tf.int32, (None,), name='inputs_length')
targets_length = tf.placeholder(tf.int32, (None,), name='targets_length')
max_target_length = tf.reduce_max(targets_length, name='max_target_len')
return inputs, targets, keep_prob, inputs_length, targets_length ,max_target_length
def process_encoding_input(targets, vocab_to_int, batch_size):
    '''Remove the last word id from each batch and concat the <GO> to the beginning of each batch'''
with tf.name_scope("process_encoding"):
ending = tf.strided_slice(targets, [0, 0], [batch_size, -1], [1, 1])
dec_input = tf.concat([tf.fill([batch_size, 1], vocab_to_int['<GO>']), ending], 1)
return dec_input
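# Worked example (conceptual): for a batch of target id sequences
#   [[12,  7, 30, <EOS>],
#    [ 5, 19,  2, <EOS>]]
# the strided slice drops the last column and the concat prepends <GO>, so the
# decoder is fed
#   [[<GO>, 12,  7, 30],
#    [<GO>,  5, 19,  2]]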
def encoding_layer(rnn_size, sequence_length, num_layers, rnn_inputs, keep_prob, direction):
'''Create the encoding layer'''
if direction == 1:
with tf.name_scope("RNN_Encoder_Cell_1D"):
for layer in range(num_layers):
with tf.variable_scope('encoder_{}'.format(layer)):
lstm = tf.contrib.rnn.LSTMCell(rnn_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm,
input_keep_prob = keep_prob)
enc_output, enc_state = tf.nn.dynamic_rnn(drop,
rnn_inputs,
sequence_length,
dtype=tf.float32)
return enc_output, enc_state
if direction == 2:
with tf.name_scope("RNN_Encoder_Cell_2D"):
for layer in range(num_layers):
with tf.variable_scope('encoder_{}'.format(layer)):
cell_fw = tf.contrib.rnn.LSTMCell(rnn_size)
cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw,
input_keep_prob = keep_prob)
cell_bw = tf.contrib.rnn.LSTMCell(rnn_size)
cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw,
input_keep_prob = keep_prob)
enc_output, enc_state = tf.nn.bidirectional_dynamic_rnn(cell_fw,
cell_bw,
rnn_inputs,
sequence_length,
dtype=tf.float32)
# Join outputs since we are using a bidirectional RNN
enc_output = tf.concat(enc_output,2)
# Use only the forward state because the model can't use both states at once
return enc_output, enc_state[0]
def training_decoding_layer(dec_embed_input, targets_length, dec_cell, initial_state, output_layer,
vocab_size):
'''Create the training logits'''
with tf.name_scope("Training_Decoder"):
training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
sequence_length=targets_length,
time_major=False)
training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
training_helper,
initial_state,
output_layer)
training_logits, one,two = tf.contrib.seq2seq.dynamic_decode(training_decoder,
output_time_major=False,
impute_finished=True,
maximum_iterations=tf.reduce_max(targets_length))
return training_logits
def inference_decoding_layer(embeddings, start_token, end_token, dec_cell, initial_state, output_layer,max_target_length,
batch_size,targets_length):
'''Create the inference logits'''
with tf.name_scope("Inference_Decoder"):
start_tokens = tf.tile(tf.constant([start_token], dtype=tf.int32), [batch_size], name='start_tokens')
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embeddings,
start_tokens,
end_token)
inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
inference_helper,
initial_state,
output_layer)
inference_logits, one_in,two_in = tf.contrib.seq2seq.dynamic_decode(inference_decoder,
output_time_major=False,
impute_finished=True,
maximum_iterations=tf.reduce_max(targets_length))
return inference_logits
def decoding_layer(dec_embed_input, embeddings, enc_output, enc_state, vocab_size, inputs_length, targets_length, max_target_length,
rnn_size, vocab_to_int, keep_prob, batch_size, num_layers,direction):
'''Create the decoding cell and attention for the training and inference decoding layers'''
with tf.name_scope("RNN_Decoder_Cell"):
for layer in range(num_layers):
with tf.variable_scope('decoder_{}'.format(layer)):
lstm = tf.contrib.rnn.LSTMCell(rnn_size)
dec_cell = tf.contrib.rnn.DropoutWrapper(lstm,
input_keep_prob = keep_prob)
output_layer = Dense(vocab_size,
kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1))
attn_mech = tf.contrib.seq2seq.BahdanauAttention(rnn_size,
enc_output,
inputs_length,
normalize=False,
name='BahdanauAttention')
with tf.name_scope("Attention_Wrapper"):
dec_cell = tf.contrib.seq2seq.AttentionWrapper(dec_cell,
attn_mech,
rnn_size)
initial_state = dec_cell.zero_state(dtype=tf.float32, batch_size=batch_size).clone(cell_state=enc_state)
with tf.variable_scope("decode"):
training_logits = training_decoding_layer(dec_embed_input,
targets_length,
dec_cell,
initial_state,
output_layer,
vocab_size)
with tf.variable_scope("decode", reuse=True):
inference_logits = inference_decoding_layer(embeddings,
vocab_to_int['<GO>'],
vocab_to_int['<EOS>'],
dec_cell,
initial_state,
output_layer,
max_target_length,
batch_size,
targets_length)
return training_logits, inference_logits
def seq2seq_model(inputs, targets, keep_prob, inputs_length, targets_length,max_target_length,
vocab_size, rnn_size, num_layers, vocab_to_int, batch_size, embedding_size,direction):
'''Use the previous functions to create the training and inference logits'''
enc_embeddings = tf.Variable(tf.random_uniform(shape=[vocab_size, embedding_size], minval = -1, maxval = 1, seed = 0.5))
enc_embed_input = tf.nn.embedding_lookup(enc_embeddings, inputs)
enc_output, enc_state = encoding_layer(rnn_size, inputs_length, num_layers,
enc_embed_input, keep_prob,direction)
dec_embeddings = tf.Variable(tf.random_uniform(shape=[vocab_size, embedding_size],minval=-1,maxval= 1,seed = 0.5))
dec_input = process_encoding_input(targets, vocab_to_int, batch_size)
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
training_logits, inference_logits = decoding_layer(dec_embed_input,
dec_embeddings,
enc_output,
enc_state,
vocab_size,
inputs_length,
targets_length,
max_target_length,
rnn_size,
vocab_to_int,
keep_prob,
batch_size,
num_layers,
direction)
return training_logits, inference_logits
def pad_sentence_batch(sentence_batch):
"""Pad sentences with <PAD> so that each sentence of a batch has the same length"""
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [vocab_to_int['<PAD>']] * (max_sentence - len(sentence)) for sentence in sentence_batch]
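# Worked example (conceptual, with a hypothetical vocab_to_int['<PAD>'] == 80):
#   pad_sentence_batch([[5, 9, 2], [3]]) -> [[5, 9, 2], [3, 80, 80]]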
def get_batches(sentences, batch_size, threshold):
"""Batch sentences, noisy sentences, and the lengths of their sentences together.
With each epoch, sentences will receive new mistakes"""
for batch_i in range(0, len(sentences)//batch_size):
start_i = batch_i * batch_size
sentences_batch = sentences[start_i:start_i + batch_size]
sentences_batch_noisy = []
for sentence in sentences_batch:
sentences_batch_noisy.append(noise_maker(sentence, threshold))
sentences_batch_eos = []
for sentence in sentences_batch:
sentence.append(vocab_to_int['<EOS>'])
sentences_batch_eos.append(sentence)
pad_sentences_batch = np.array(pad_sentence_batch(sentences_batch_eos))
pad_sentences_noisy_batch = np.array(pad_sentence_batch(sentences_batch_noisy))
# Need the lengths for the _lengths parameters
pad_sentences_lengths = []
for sentence in pad_sentences_batch:
pad_sentences_lengths.append(len(sentence))
pad_sentences_noisy_lengths = []
for sentence in pad_sentences_noisy_batch:
pad_sentences_noisy_lengths.append(len(sentence))
yield pad_sentences_noisy_batch, pad_sentences_batch, pad_sentences_noisy_lengths, pad_sentences_lengths
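# Illustrative usage sketch (hypothetical hyper-parameters; `int_sentences` is
# assumed to be a list of integer-encoded sentences): iterate over one epoch
# of (noisy, clean) batches produced by get_batches.
def _example_get_batches_usage(int_sentences):
    for noisy_batch, clean_batch, noisy_lengths, clean_lengths in get_batches(
            int_sentences, batch_size=128, threshold=0.9):
        # Each batch is padded to its own longest sentence, so the noisy and
        # clean batches can have different widths but the same batch size.
        assert noisy_batch.shape[0] == clean_batch.shape[0]
        break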
def build_graph(keep_prob, rnn_size, num_layers, batch_size, learning_rate, embedding_size,direction):
tf.reset_default_graph()
# Load the model inputs
inputs, targets, keep_prob, inputs_length, targets_length, max_target_length = model_inputs()
# Create the training and inference logits
training_logits, inference_logits = seq2seq_model(tf.reverse(inputs, [-1]),
targets,
keep_prob,
inputs_length,
targets_length,
max_target_length,
len(vocab_to_int)+1,
rnn_size,
num_layers,
vocab_to_int,
batch_size,
embedding_size,
direction)
# Create tensors for the training logits and inference logits
training_logits = tf.identity(training_logits.rnn_output, 'logits')
with tf.name_scope('predictions'):
predictions = tf.identity(inference_logits.sample_id, name='predictions')
tf.summary.histogram('predictions', predictions)
# Create the weights for sequence_loss
masks = tf.sequence_mask(targets_length, dtype=tf.float32, name='masks')
with tf.name_scope("cost"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(training_logits,
targets,
masks)
tf.summary.scalar('cost', cost)
with tf.name_scope("optimze"):
optimizer = tf.train.AdamOptimizer(learning_rate)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
# Merge all of the summaries
merged = tf.summary.merge_all()
# Export the nodes
export_nodes = ['inputs', 'targets', 'keep_prob', 'cost', 'inputs_length', 'targets_length',
'predictions', 'merged', 'train_op','optimizer']
Graph = namedtuple('Graph', export_nodes)
local_dict = locals()
graph = Graph(*[local_dict[each] for each in export_nodes])
saver = tf.train.Saver()
return graph, saver
# Train the model with the desired tuning parameters
'''for keep_probability in [0.75]:
for num_layers in [3]:
for threshold in [0.75]:
log_string = 'kp={},nl={},th={}'.format(keep_probability,
num_layers,
threshold)
model, saver = build_graph(keep_probability, rnn_size, num_layers, batch_size,
learning_rate,
embedding_size,
direction)
#train(model, epochs, log_string, saver)'''
def text_to_ints(text):
'''Prepare the text for the model'''
text = clean_text(text)
return [vocab_to_int[word] for word in text]
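# Illustrative sketch: encode a (hypothetical) noisy sentence for inference.
# Every character must already exist in vocab_to_int, otherwise a KeyError is
# raised, so stick to characters that appear in the training books.
def _example_text_to_ints_usage():
    return text_to_ints("The qick brown fox jumps over the lazy dog.")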
path = './books/'
book_files = [f for f in listdir(path) if isfile(join(path, f))]
book_files = book_files[1:]
books = []  # array of book data
for book in book_files:
books.append(load_book(path+book))
# Clean the text of the books
clean_books = []
for book in books:
book.lower()
clean_books.append(clean_text(book))
# Create a dictionary to convert the vocabulary (characters) to integers
vocab_to_int = {}
'''count = 0
for book in clean_books:
for character in book:
if character not in vocab_to_int:
vocab_to_int[character] = count
count += 1'''
with open("./clean_data/vocab_to_int.json", 'r') as f:
vocab_to_int = json.load(f)
count = len(vocab_to_int)
# Add special tokens to vocab_to_int
'''codes = ['<PAD>','<EOS>','<GO>']
for code in codes:
vocab_to_int[code] = count
count += 1'''
# Create another dictionary to convert integers to their respective characters
int_to_vocab = {}
for character, value in vocab_to_int.items():
int_to_vocab[value] = character
# Split the text from the books into sentences.
sentences = []
'''for book in clean_books:
[816, 946, 134, 587, 645, 751, 780, 140, 731,
208, 504, 939, 401, 724, 140, 1000, 575, 15,
966, 719],
[929, 121, 255, 511, 401, 94, 7, 656, 871, 52,
589, 504, 456, 524, 492, 4, 513, 673, 536,
877],
[828, 402, 44, 162, 805, 675, 391, 875, 955,
410, 385, 625, 250, 837, 153, 922, 105, 279,
91, 121]]),
[491, 432, 751, 729, 722, 964, 386, 710, 130, 369, 227,
487, 395, 914, 468, 885, 81, 569, 868, 900, 840, 75,
431, 319, 21, 745, 771, 407, 708, 412, 396, 875, 209,
122, 139, 731, 719, 877, 121, 91, 279, 105, 922, 153,
837, 250, 625, 385, 410, 955, 875, 391, 675, 805, 162,
44, 402, 828, 929, 816, 209, 335, 721, 903, 424, 310,
663, 877, 904, 107, 546, 748, 584, 542, 401, 925, 992,
601, 188, 204, 640, 239, 6, 26, 451, 26, 630, 429,
830, 38, 905, 555, 630, 296, 56, 984, 288, 186, 505,
366, 561, 91, 490, 356, 209, 896, 647, 521, 583, 966,
536, 673, 513, 4, 492, 524, 456, 504, 589, 52, 871,
656, 7, 94, 401, 511, 255, 121, 946, 110, 97, 17, 651,
931, 464, 758, 195, 230, 963, 745, 508, 627, 905, 86,
682, 405, 960, 499, 290, 765, 513, 376, 331, 78, 471,
999, 3, 328, 896, 758, 178, 151, 496, 188, 89, 492,
208, 139, 493, 64, 552, 463, 828, 925, 15, 575, 1000,
140, 724, 401, 939, 504, 208, 731, 140, 780, 751, 645,
587, 134, 325, 438, 825, 952, 222, 204, 330, 809, 474,
507, 446, 826, 404, 880, 788, 546, 879, 658, 836, 787,
912, 968, 988, 98, 461, 973, 469, 371, 751, 181, 123,
797, 940, 746, 559, 290, 989, 916, 493, 800, 590, 756,
801, 819, 452, 863, 653, 573, 457, 133, 897, 379, 706,
990, 636, 638, 356, 6, 408, 359, 714, 400, 886, 346,
682, 160, 875, 721, 409, 163, 30, 127, 499, 300, 869,
690, 69, 260, 534, 650, 417, 87, 229, 834, 655, 101,
260, 82, 299, 748, 283, 505, 547, 597, 679, 316, 998,
681, 329, 568, 691, 647, 67, 801, 996, 64, 921, 162,
449, 70, 215, 89, 186, 418, 386, 474, 42, 389, 599,
872, 544, 578, 437, 637, 802, 253, 208, 745, 518, 113,
618, 210, 692, 203, 75, 844, 95, 971, 99, 347, 352,
67, 362, 749, 321, 807, 863, 996, 605, 427, 845, 182,
932, 282, 276, 983, 468, 88, 731, 76, 839, 657, 200,
147, 620, 203, 868, 895, 450, 18, 409, 585, 277, 597,
90, 576, 101, 818, 394, 542, 405, 55, 191, 85, 925,
971, 283, 444, 353, 528, 167, 169, 4, 826, 638, 294,
520, 617, 172, 50, 833, 758, 753, 815, 660, 995, 949,
383, 11, 688])
def test_snail_031(self):
self.assertEqual(snail(
[[751, 521, 950, 82], [455, 888, 335, 526], [105, 724, 129, 53],
[380, 655, 725, 828]]),
[751, 521, 950, 82, 526, 53, 828, 725, 655, 380, 105,
455, 888, 335, 129, 724])
def test_snail_032(self):
self.assertEqual(snail([[543]]), [543])
def test_snail_033(self):
self.assertEqual(snail([[229, 998, 713, 612, 345, 412, 73, 287, 921, 44,
509, 147, 815, 84],
[202, 726, 739, 170, 976, 345, 944, 506, 848,
942, 98, 297, 75, 807],
[893, 82, 958, 458, 916, 954, 418, 436, 492, 86,
792, 226, 925, 268],
[370, 388, 588, 171, 945, 358, 281, 657, 577,
147, 44, 352, 899, 119],
[63, 834, 521, 924, 276, 174, 483, 414, 999,
932, 97, 492, 833, 363],
[983, 187, 828, 23, 387, 853, 203, 130, 187,
820, 569, 974, 494, 870],
[265, 162, 207, 733, 32, 925, 259, 761, 166,
231, 504, 503, 64, 851],
[434, 330, 43, 791, 846, 790, 566, 474, 702,
462, 693, 826, 682, 881],
[752, 68, 291, 180, 294, 674, 433, 486, 768,
743, 498, 98, 61, 154],
[52, 47, 323, 362, 247, 135, 716, 566, 713, 977,
78, 222, 300, 909],
[265, 17, 534, 221, 142, 430, 935, 948, 600, 79,
898, 229, 949, 656],
[850, 639, 989, 941, 84, 62, 850, 437, 25, 538,
670, 868, 406, 755],
[370, 978, 377, 131, 102, 929, 459, 201, 14,
981, 461, 153, 665, 352],
[374, 581, 593, 665, 922, 259, 899, 586, 405,
812, 645, 820, 321, 535]]),
[229, 998, 713, 612, 345, 412, 73, 287, 921, 44, 509,
147, 815, 84, 807, 268, 119, 363, 870, 851, 881, 154,
909, 656, 755, 352, 535, 321, 820, 645, 812, 405, 586,
899, 259, 922, 665, 593, 581, 374, 370, 850, 265, 52,
752, 434, 265, 983, 63, 370, 893, 202, 726, 739, 170,
976, 345, 944, 506, 848, 942, 98, 297, 75, 925, 899,
833, 494, 64, 682, 61, 300, 949, 406, 665, 153, 461,
981, 14, 201, 459, 929, 102, 131, 377, 978, 639, 17,
47, 68, 330, 162, 187, 834, 388, 82, 958, 458, 916,
954, 418, 436, 492, 86, 792, 226, 352, 492, 974, 503,
826, 98, 222, 229, 868, 670, 538, 25, 437, 850, 62,
84, 941, 989, 534, 323, 291, 43, 207, 828, 521, 588,
171, 945, 358, 281, 657, 577, 147, 44, 97, 569, 504,
693, 498, 78, 898, 79, 600, 948, 935, 430, 142, 221,
362, 180, 791, 733, 23, 924, 276, 174, 483, 414, 999,
932, 820, 231, 462, 743, 977, 713, 566, 716, 135, 247,
294, 846, 32, 387, 853, 203, 130, 187, 166, 702, 768,
486, 433, 674, 790, 925, 259, 761, 474, 566])
def test_snail_034(self):
self.assertEqual(snail([[543, 159, 630, 512, 408, 22, 659, 938, 716,
955, 142, 6, 273, 723],
[899, 659, 592, 655, 57, 191, 321, 795, 226,
317, 372, 190, 368, 804],
[214, 369, 514, 853, 25, 423, 744, 462, 181,
663, 863, 747, 152, 353],
[117, 9, 923, 420, 253, 550, 729, 881, 696, 208,
269, 362, 242, 177],
[625, 547, 37, 512, 130, 542, 853, 646, 551,
801, 257, 306, 206, 361],
[271, 719, 731, 679, 306, 529, 531, 846, 891,
420, 871, 537, 514, 117],
[350, 890, 866, 614, 496, 485, 88, 13, 488, 842,
197, 891, 854, 554],
[278, 713, 485, 671, 556, 687, 246, 19, 293,
906, 1000, 375, 531, 126],
[641, 531, 586, 598, 991, 366, 229, 169, 644,
562, 847, 724, 546, 904],
[859, 329, 116, 455, 986, 255, 334, 156, 188,
438, 112, 409, 283, 653],
[844, 612, 215, 684, 518, 422, 922, 741, 33,
196, 272, 51, 604, 951],
[457, 68, 327, 589, 617, 942, 5, 200, 722, 725,
971, 886, 972, 961],
[817, 172, 829, 438, 738, 639, 453, 565, 270,
683, 405, 829, 664, 749],
[347, 518, 664, 43, 591, 52, 685, 427, 716, 578,
854, 88, 673, 458]]),
[543, 159, 630, 512, 408, 22, 659, 938, 716, 955, 142,
6, 273, 723, 804, 353, 177, 361, 117, 554, 126, 904,
653, 951, 961, 749, 458, 673, 88, 854, 578, 716, 427,
685, 52, 591, 43, 664, 518, 347, 817, 457, 844, 859,
641, 278, 350, 271, 625, 117, 214, 899, 659, 592, 655,
57, 191, 321, 795, 226, 317, 372, 190, 368, 152, 242,
206, 514, 854, 531, 546, 283, 604, 972, 664, 829, 405,
683, 270, 565, 453, 639, 738, 438, 829, 172, 68, 612,
329, 531, 713, 890, 719, 547, 9, 369, 514, 853, 25,
423, 744, 462, 181, 663, 863, 747, 362, 306, 537, 891,
375, 724, 409, 51, 886, 971, 725, 722, 200, 5, 942,
617, 589, 327, 215, 116, 586, 485, 866, 731, 37, 923,
420, 253, 550, 729, 881, 696, 208, 269, 257, 871, 197,
1000, 847, 112, 272, 196, 33, 741, 922, 422, 518, 684,
455, 598, 671, 614, 679, 512, 130, 542, 853, 646, 551,
801, 420, 842, 906, 562, 438, 188, 156, 334, 255, 986,
                              991, 556, 496, 306, 529, 531, 846, 891, 488,
##############################################################################
# NATS-Bench: Benchmarking NAS Algorithms for Architecture Topology and Size #
##############################################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2020.07 #
##############################################################################
# This file is used to train (all) architecture candidate in the size search #
# space in NATS-Bench (sss) with different hyper-parameters. #
# When use mode=new, it will automatically detect whether the checkpoint of #
# a trial exists, if so, it will skip this trial. When use mode=cover, it #
# will ignore the (possible) existing checkpoint, run each trial, and save. #
# (NOTE): the topology for all candidates in sss is fixed as: ######################
# |nor_conv_3x3~0|+|nor_conv_3x3~0|nor_conv_3x3~1|+|skip_connect~0|nor_conv_3x3~1|nor_conv_3x3~2| #
###################################################################################################
# Please use the script of scripts/NATS-Bench/train-shapes.sh to run. #
##############################################################################
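# Example invocation (illustrative; dataset paths, seeds and the srange value
# are placeholders -- see scripts/NATS-Bench/train-shapes.sh for the official
# launcher):
#   python <this-script>.py --mode new --srange 00000-32767 --hyper 12 \
#       --seeds 777 888 999 \
#       --datasets cifar10 cifar10 cifar100 ImageNet16-120 \
#       --xpaths $TORCH_HOME/cifar.python $TORCH_HOME/cifar.python \
#                $TORCH_HOME/cifar.python $TORCH_HOME/ImageNet16 \
#       --splits 1 0 0 0 --workers 4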
import os, sys, time, torch, argparse
from typing import List, Text, Dict, Any
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from copy import deepcopy
from pathlib import Path
from xautodl.config_utils import dict2config, load_config
from xautodl.procedures import bench_evaluate_for_seed
from xautodl.procedures import get_machine_info
from xautodl.datasets import get_datasets
from xautodl.log_utils import Logger, AverageMeter, time_string, convert_secs2time
from xautodl.utils import split_str2indexes
def evaluate_all_datasets(
channels: Text,
datasets: List[Text],
xpaths: List[Text],
splits: List[Text],
config_path: Text,
seed: int,
workers: int,
logger,
):
machine_info = get_machine_info()
all_infos = {"info": machine_info}
all_dataset_keys = []
    # loop over all the datasets
for dataset, xpath, split in zip(datasets, xpaths, splits):
# the train and valid data
train_data, valid_data, xshape, class_num = get_datasets(dataset, xpath, -1)
# load the configuration
if dataset == "cifar10" or dataset == "cifar100":
split_info = load_config(
"configs/nas-benchmark/cifar-split.txt", None, None
)
elif dataset.startswith("ImageNet16"):
split_info = load_config(
"configs/nas-benchmark/{:}-split.txt".format(dataset), None, None
)
else:
raise ValueError("invalid dataset : {:}".format(dataset))
config = load_config(
config_path, dict(class_num=class_num, xshape=xshape), logger
)
        # check whether to use the split validation set
if bool(split):
assert dataset == "cifar10"
ValLoaders = {
"ori-test": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=workers,
pin_memory=True,
)
}
assert len(train_data) == len(split_info.train) + len(
split_info.valid
), "invalid length : {:} vs {:} + {:}".format(
len(train_data), len(split_info.train), len(split_info.valid)
)
train_data_v2 = deepcopy(train_data)
train_data_v2.transform = valid_data.transform
valid_data = train_data_v2
# data loader
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.train),
num_workers=workers,
pin_memory=True,
)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.valid),
num_workers=workers,
pin_memory=True,
)
ValLoaders["x-valid"] = valid_loader
else:
# data loader
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=config.batch_size,
shuffle=True,
num_workers=workers,
pin_memory=True,
)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=workers,
pin_memory=True,
)
if dataset == "cifar10":
ValLoaders = {"ori-test": valid_loader}
elif dataset == "cifar100":
cifar100_splits = load_config(
"configs/nas-benchmark/cifar100-test-split.txt", None, None
)
ValLoaders = {
"ori-test": valid_loader,
"x-valid": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
cifar100_splits.xvalid
),
num_workers=workers,
pin_memory=True,
),
"x-test": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
cifar100_splits.xtest
),
num_workers=workers,
pin_memory=True,
),
}
elif dataset == "ImageNet16-120":
imagenet16_splits = load_config(
"configs/nas-benchmark/imagenet-16-120-test-split.txt", None, None
)
ValLoaders = {
"ori-test": valid_loader,
"x-valid": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
imagenet16_splits.xvalid
),
num_workers=workers,
pin_memory=True,
),
"x-test": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
imagenet16_splits.xtest
),
num_workers=workers,
pin_memory=True,
),
}
else:
raise ValueError("invalid dataset : {:}".format(dataset))
dataset_key = "{:}".format(dataset)
if bool(split):
dataset_key = dataset_key + "-valid"
logger.log(
"Evaluate ||||||| {:10s} ||||||| Train-Num={:}, Valid-Num={:}, Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
dataset_key,
len(train_data),
len(valid_data),
len(train_loader),
len(valid_loader),
config.batch_size,
)
)
logger.log(
"Evaluate ||||||| {:10s} ||||||| Config={:}".format(dataset_key, config)
)
for key, value in ValLoaders.items():
logger.log(
"Evaluate ---->>>> {:10s} with {:} batchs".format(key, len(value))
)
# arch-index= 9930, arch=|nor_conv_3x3~0|+|nor_conv_3x3~0|nor_conv_3x3~1|+|skip_connect~0|nor_conv_3x3~1|nor_conv_3x3~2|
# this genotype is the architecture with the highest accuracy on CIFAR-100 validation set
genotype = "|nor_conv_3x3~0|+|nor_conv_3x3~0|nor_conv_3x3~1|+|skip_connect~0|nor_conv_3x3~1|nor_conv_3x3~2|"
arch_config = dict2config(
dict(
name="infer.shape.tiny",
channels=channels,
genotype=genotype,
num_classes=class_num,
),
None,
)
results = bench_evaluate_for_seed(
arch_config, config, train_loader, ValLoaders, seed, logger
)
all_infos[dataset_key] = results
all_dataset_keys.append(dataset_key)
all_infos["all_dataset_keys"] = all_dataset_keys
return all_infos
def main(
save_dir: Path,
workers: int,
datasets: List[Text],
xpaths: List[Text],
splits: List[int],
seeds: List[int],
nets: List[str],
opt_config: Dict[Text, Any],
to_evaluate_indexes: tuple,
cover_mode: bool,
):
log_dir = save_dir / "logs"
log_dir.mkdir(parents=True, exist_ok=True)
logger = Logger(str(log_dir), os.getpid(), False)
logger.log("xargs : seeds = {:}".format(seeds))
logger.log("xargs : cover_mode = {:}".format(cover_mode))
logger.log("-" * 100)
logger.log(
"Start evaluating range =: {:06d} - {:06d}".format(
min(to_evaluate_indexes), max(to_evaluate_indexes)
)
+ "({:} in total) / {:06d} with cover-mode={:}".format(
len(to_evaluate_indexes), len(nets), cover_mode
)
)
for i, (dataset, xpath, split) in enumerate(zip(datasets, xpaths, splits)):
logger.log(
"--->>> Evaluate {:}/{:} : dataset={:9s}, path={:}, split={:}".format(
i, len(datasets), dataset, xpath, split
)
)
logger.log("--->>> optimization config : {:}".format(opt_config))
start_time, epoch_time = time.time(), AverageMeter()
for i, index in enumerate(to_evaluate_indexes):
channelstr = nets[index]
logger.log(
"\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th arch [seeds={:}] {:}".format(
time_string(),
i,
len(to_evaluate_indexes),
index,
len(nets),
seeds,
"-" * 15,
)
)
logger.log("{:} {:} {:}".format("-" * 15, channelstr, "-" * 15))
# test this arch on different datasets with different seeds
has_continue = False
for seed in seeds:
to_save_name = save_dir / "arch-{:06d}-seed-{:04d}.pth".format(index, seed)
if to_save_name.exists():
if cover_mode:
logger.log(
"Find existing file : {:}, remove it before evaluation".format(
to_save_name
)
)
os.remove(str(to_save_name))
else:
logger.log(
"Find existing file : {:}, skip this evaluation".format(
to_save_name
)
)
has_continue = True
continue
results = evaluate_all_datasets(
channelstr, datasets, xpaths, splits, opt_config, seed, workers, logger
)
torch.save(results, to_save_name)
logger.log(
"\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th arch [seeds={:}] ===>>> {:}".format(
time_string(),
i,
len(to_evaluate_indexes),
index,
len(nets),
seeds,
to_save_name,
)
)
# measure elapsed time
if not has_continue:
epoch_time.update(time.time() - start_time)
start_time = time.time()
need_time = "Time Left: {:}".format(
convert_secs2time(epoch_time.avg * (len(to_evaluate_indexes) - i - 1), True)
)
logger.log(
"This arch costs : {:}".format(convert_secs2time(epoch_time.val, True))
)
logger.log("{:}".format("*" * 100))
logger.log(
"{:} {:74s} {:}".format(
"*" * 10,
"{:06d}/{:06d} ({:06d}/{:06d})-th done, left {:}".format(
i, len(to_evaluate_indexes), index, len(nets), need_time
),
"*" * 10,
)
)
logger.log("{:}".format("*" * 100))
logger.close()
def traverse_net(candidates: List[int], N: int):
nets = [""]
for i in range(N):
new_nets = []
for net in nets:
for C in candidates:
new_nets.append(str(C) if net == "" else "{:}:{:}".format(net, C))
nets = new_nets
return nets
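# A minimal sketch of what traverse_net enumerates (illustrative, derived from
# the loop above, not part of the original script):
#
#   traverse_net([8, 16], 2)  ->  ['8:8', '8:16', '16:8', '16:16']
#
# With the default 8 channel candidates and 5 layers this gives 8**5 = 32768
# strings, which matches the --check_N default used below.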
def filter_indexes(xlist, mode, save_dir, seeds):
all_indexes = []
for index in xlist:
if mode == "cover":
all_indexes.append(index)
else:
for seed in seeds:
temp_path = save_dir / "arch-{:06d}-seed-{:04d}.pth".format(index, seed)
if not temp_path.exists():
all_indexes.append(index)
break
print(
"{:} [FILTER-INDEXES] : there are {:}/{:} architectures in total".format(
time_string(), len(all_indexes), len(xlist)
)
)
SLURM_PROCID, SLURM_NTASKS = "SLURM_PROCID", "SLURM_NTASKS"
if SLURM_PROCID in os.environ and SLURM_NTASKS in os.environ: # run on the slurm
proc_id, ntasks = int(os.environ[SLURM_PROCID]), int(os.environ[SLURM_NTASKS])
assert 0 <= proc_id < ntasks, "invalid proc_id {:} vs ntasks {:}".format(
proc_id, ntasks
)
scales = [int(float(i) / ntasks * len(all_indexes)) for i in range(ntasks)] + [
len(all_indexes)
]
per_job = []
for i in range(ntasks):
xs, xe = min(max(scales[i], 0), len(all_indexes) - 1), min(
max(scales[i + 1] - 1, 0), len(all_indexes) - 1
)
per_job.append((xs, xe))
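# Worked example (illustrative): with 10 filtered indexes and ntasks=3,
# scales == [0, 3, 6, 10] and per_job == [(0, 2), (3, 5), (6, 9)], i.e. each
# SLURM task gets an inclusive (start, end) slice of all_indexes.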
for i, srange in enumerate(per_job):
print(" -->> {:2d}/{:02d} : {:}".format(i, ntasks, srange))
current_range = per_job[proc_id]
all_indexes = [
all_indexes[i] for i in range(current_range[0], current_range[1] + 1)
]
# set the device id
device = proc_id % torch.cuda.device_count()
torch.cuda.set_device(device)
print(" set the device id = {:}".format(device))
print(
"{:} [FILTER-INDEXES] : after filtering there are {:} architectures in total".format(
time_string(), len(all_indexes)
)
)
return all_indexes
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="NATS-Bench (size search space)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--mode",
type=str,
required=True,
choices=["new", "cover"],
help="The script mode.",
)
parser.add_argument(
"--save_dir",
type=str,
default="output/NATS-Bench-size",
help="Folder to save checkpoints and log.",
)
parser.add_argument(
"--candidateC",
type=int,
nargs="+",
default=[8, 16, 24, 32, 40, 48, 56, 64],
help=".",
)
parser.add_argument(
"--num_layers", type=int, default=5, help="The number of layers in a network."
)
parser.add_argument("--check_N", type=int, default=32768, help="For safety.")
# use for train the model
parser.add_argument(
"--workers",
type=int,
default=8,
help="The number of data loading workers (default: 2)",
)
parser.add_argument(
"--srange", type=str, required=True, help="The range of models to be evaluated"
)
parser.add_argument("--datasets", type=str, nargs="+", help="The applied datasets.")
parser.add_argument(
"--xpaths", type=str, nargs="+", help="The root path for this dataset."
)
parser.add_argument(
"--splits", type=int, nargs="+", help="The root path for this dataset."
)
parser.add_argument(
"--hyper",
type=str,
default="12",
choices=["01", "12", "90"],
help="The tag for hyper-parameters.",
)
parser.add_argument(
"--seeds", type=int, nargs="+", help="The range of models to be evaluated"
)
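# Example invocation (hypothetical script name, paths and values; the exact
# --srange format is whatever split_str2indexes expects, assumed "start-end"):
#   python main-sss.py --mode new --save_dir output/NATS-Bench-size \
#       --srange 00000-00099 --hyper 12 --workers 8 \
#       --datasets cifar10 --xpaths ./data/cifar10 --splits 1 --seeds 777 888 999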
args = parser.parse_args()
nets = traverse_net(args.candidateC, args.num_layers)
if len(nets) != args.check_N:
raise ValueError(
"Pre-num-check failed : {:} vs {:}".format(len(nets), args.check_N)
)
opt_config = "./configs/nas-benchmark/hyper-opts/{:}E.config".format(args.hyper)
if not os.path.isfile(opt_config):
raise ValueError("{:} is not a file.".format(opt_config))
save_dir = Path(args.save_dir) / "raw-data-{:}".format(args.hyper)
save_dir.mkdir(parents=True, exist_ok=True)
to_evaluate_indexes = split_str2indexes(args.srange, args.check_N, 5)
if not len(args.seeds):
raise ValueError("invalid length of seeds args: {:}".format(args.seeds))
if not (len(args.datasets) == len(args.xpaths) == len(args.splits)):
raise ValueError(
"invalid infos : {:} vs {:} vs {:}".format(
len(args.datasets), len(args.xpaths), len(args.splits)
)
)
if args.workers
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
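# Hypothetical usage of the helper above: _check_cast(df.astype("float32"),
# "float32") would assert that every column of the cast frame reports the
# dtype name "float32".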
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = OrderedDict
df = DataFrame(
odict(
[
("a", list("abc")),
("b", list(range(1, 4))),
("c", np.arange(3, 6).astype("u1")),
("d", np.arange(4.0, 7.0, dtype="float64")),
("e", [True, False, True]),
("f", pd.date_range("now", periods=3).values),
]
)
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
dict(
A=Timestamp("20130102", tz="US/Eastern"),
B=Timestamp("20130603", tz="CET"),
),
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
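# np.typecodes["AllFloat"] is typically the string "efdg" (float16, float32,
# float64, longdouble), so FLOAT_TYPES is a list of single-character typecodes.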
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_ftypes(self, mixed_float_frame):
frame = mixed_float_frame
expected = Series(
dict(
A="float32:dense",
B="float32:dense",
C="float16:dense",
D="float64:dense",
)
).sort_values()
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
result = frame.ftypes.sort_values()
tm.assert_series_equal(result, expected)
def test_astype_float(self, float_frame):
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
casted = float_frame.astype(np.int32)
expected = DataFrame(
float_frame.values.astype(np.int32),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
float_frame["foo"] = "5"
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
def test_astype_mixed_float(self, mixed_float_frame):
| |
= X_target,
# warm_up = warm_up,
# log_gradient_current_epoch=self._gradient_logger and self._gradient_logger.log_epoch(epoch))
# else:
# args, summaries_lists = self.partial_fit(X,
# warm_up = warm_up,
# log_gradient_current_epoch=self._gradient_logger and self._gradient_logger.log_epoch(epoch))
#
# #TODO XXX
# # TODO YYYY
# #if i==0:
# # pdb.set_trace()
#
# #log_p_x_z = self.test_cost(X)
#
# #TODO-ARGO2 retrieving global_step to pass it to the summaries (will not be needed when using hooks)
# gstep = args[0]
# opt = args[1]
# cost = args[2]
# regularization = args[3]
# latent_loss = args[4]
#
# #this is to have summaries that write on a separate files
# for ks in summaries_lists:
# writers = self.summary_writers[ks]
# summaries = summaries_lists[ks]
# for wr,summ in zip(writers, summaries):
# wr.add_summary(summ, global_step=gstep)
#
#
# '''
# # this should happen independently, just a check for non implemented loggers
# if 0 in [0, -2, 3]:
# z_mean = args[3]
# z_covariance_parameterization = args[4]
# x_mean = args[5]
# if self.binary == 1:
# index = 6
# else:
# x_covariance_parameterization = args[6]
# index = 7
# else:
# index = 3
# '''
# # TODO variable index has never been defined :o
# if self._gradient_logger and self._gradient_logger.log_epoch(epoch):
# if 0==1: #self.k TODO IF 0==1, Really???
# index = index + 1
# # pdb.set_trace()
# grads_and_vars = args[index]
# index = index + 1
# ''' TODOTF to be deleted
# if self._log_tf==1:
# summary = args[index]
# index = index + 1
# '''
# # not used
# if self._check_ops==1:
# numerics_ops = args[index]
# index = index + 1
# ''' TODOTF to be deleted
# if self._log_tf==1:
# self._summary_writer.add_summary(summary,epoch * total_batch_train + i/batch_size)
# '''
# if self._gradient_logger and self._gradient_logger.log_epoch(epoch):
# self._gradient_logger.log(epoch, (i/batch_size), grads_and_vars)
#
# # compute average loss
# avg_cost_train += cost * batch_size
# avg_latent_loss_train += latent_loss * batch_size
#
# # importance sampling estimation of log p
# if self._vae_log_p_importance_sampling_logger and self._vae_log_p_importance_sampling_logger.log_epoch(epoch):
# marg_train = self._vae_log_p_importance_sampling_logger.compute_log_p_batch(self.x, X, self.n_z_samples, self.sess)
# self._vae_log_p_importance_sampling_logger.log_train(marg_train / n_samples_train)
#
# # estimate function logger
# if self._vae_estimate_function_logger and self._vae_estimate_function_logger.log_epoch(epoch):
# function_train = self._vae_estimate_function_logger.compute_estimate_function_batch(self.x, X, self.n_z_samples, self.sess, self.alpha, self.beta_coeff)
# #pdb.set_trace()
# self._vae_estimate_function_logger.log_train(function_train, n_samples_train)
#
# #TODO-ARGO2 what to fix down here?
# # FIX THIS
# if self._gradient_logger and self._gradient_logger.log_epoch(epoch):
# self._gradient_logger.plot()
#
# # compute average loss
# avg_cost_train = - avg_cost_train / n_samples_train
# avg_latent_loss_train = avg_latent_loss_train / n_samples_train
#
# for i in range(0, n_samples_test, batch_size_test):
# #TODO-ARGO2 temporary solution for gradual change, this is not the orthodox way
# if self.sess.should_stop():
# break
#
# '''
# if not isinstance(train_set, np.ndarray):
# raise Exception("not implemented yet, why?")
# X, _ = perturbed_test_set.next_batch(batch_size_test)
# else:
# '''
# if i + batch_size_test > processed_test_set.shape[0] : break
# X = processed_test_set[i:i + batch_size]
# X_target = target_test_set[i:i + batch_size_test]
#
# if self.binary==0 and self.synthetic==0:
# X_target = utils.rescale(X_target, self.rescale)
# X = utils.rescale(X, self.rescale)
#
#
# # Fit test using batch data
# if self.denoising:
# cost, latent_loss = self.test_fit(X, X_target = X_target)
# else:
# cost, latent_loss = self.test_fit(X)
# # Compute average loss
# avg_cost_test += cost * batch_size_test
# avg_latent_loss_test += latent_loss * batch_size_test
#
# # importance sampling estimation of log p
# if self._vae_log_p_importance_sampling_logger and self._vae_log_p_importance_sampling_logger.log_epoch(epoch):
# marg_test = self._vae_log_p_importance_sampling_logger.compute_log_p_batch(self.x, X, self.n_z_samples, self.sess)
# self._vae_log_p_importance_sampling_logger.log_test(marg_test / n_samples_test)
#
# # estimate function logger
# if self._vae_estimate_function_logger and self._vae_estimate_function_logger.log_epoch(epoch):
# function_test = self._vae_estimate_function_logger.compute_estimate_function_batch(self.x, X, self.n_z_samples, self.sess, self.alpha, self.beta_coeff)
# self._vae_estimate_function_logger.log_test(function_test, n_samples_test)
#
#
# # importance sampling estimation of log p
# if self._vae_log_p_importance_sampling_logger and self._vae_log_p_importance_sampling_logger.log_epoch(epoch):
# self._vae_log_p_importance_sampling_logger.log(epoch)
# self._vae_log_p_importance_sampling_logger.plot()
#
# # estimate function
# if self._vae_estimate_function_logger and self._vae_estimate_function_logger.log_epoch(epoch):
# self._vae_estimate_function_logger.log(epoch)
# self._vae_estimate_function_logger.plot()
#
#
# avg_cost_test = - avg_cost_test / n_samples_test
# avg_latent_loss_test = avg_latent_loss_test / n_samples_test
#
# # end timer for TF
# stop_tf = timeit.default_timer()
# time = stop_tf - start_tf
#
# graph_size = self.graph_size
# default_graph_size = len([n.name for n in tf.get_default_graph().as_graph_def().node])
#
# # start timer for loggers (some loggers, not all of them)
# start_log = timeit.default_timer()
#
# if self._vae_latent_vars_model_logger and self._vae_latent_vars_model_logger.log_epoch(epoch):
#
# selected_points = self._vae_latent_vars_model_logger.get_selected_points("train")
# X = perturbed_train_set[selected_points]
# z_mean, z_covariance_parameters, z = self.encode(X)
# for j, p in enumerate(selected_points):
# self._vae_latent_vars_model_logger.log(p,
# "train",
# epoch,
# z_mean[j],
# self._gaussian_model_latent["train"].get_covariance_from_parameters_np(z_covariance_parameters[j]))
# self._vae_latent_vars_model_logger.plot(p,"train")
#
#
# selected_points = self._vae_latent_vars_model_logger.get_selected_points("test")
# X = perturbed_test_set[selected_points]
# z_mean, z_covariance_parameters, z = self.encode(X)
# for j, p in enumerate(selected_points):
# self._vae_latent_vars_model_logger.log(p,
# "test",
# epoch,
# z_mean[j],
# self._gaussian_model_latent["train"].get_covariance_from_parameters_np(z_covariance_parameters[j]))
# self._vae_latent_vars_model_logger.plot(p,"test")
#
#
# if self._vae_visible_vars_model_logger and self._vae_visible_vars_model_logger.log_epoch(epoch):
#
# selected_points = self._vae_visible_vars_model_logger.get_selected_points("train")
# X = perturbed_train_set[selected_points]
# z_mean, z_covariance_parameters, z = self.encode(X)
# x_mean, x_covariance_parameters = self.generate(z)
# for j, p in enumerate(selected_points):
# self._vae_visible_vars_model_logger.log(p,
# "train",
# epoch,
# x_mean[j],
# self._model_visible["train"].get_covariance_from_parameters_np(x_covariance_parameters[j]))
# self._vae_visible_vars_model_logger.plot(p,"train")
#
#
# selected_points = self._vae_visible_vars_model_logger.get_selected_points("test")
# X = perturbed_test_set[selected_points]
# z_mean, z_covariance_parameters, z = self.encode(X)
# x_mean, x_covariance_parameters = self.generate(z)
# for j, p in enumerate(selected_points):
# self._vae_visible_vars_model_logger.log(p,
# "test",
# epoch,
# x_mean[j],
# self._model_visible["train"].get_covariance_from_parameters_np(x_covariance_parameters[j]))
# self._vae_visible_vars_model_logger.plot(p,"test")
#
#
#
# '''
# if self._vae_repeated_reconstructions_logger and self._vae_repeated_reconstructions_logger.log_epoch(epoch):
#
# selected_points = self._vae_repeated_reconstructions_logger.get_selected_points("train")
# X = perturbed_train_set[selected_points]
# for iteration in range(self._vae_repeated_reconstructions_logger.n_reconstructions):
# X, z_mean, z, z_cov_params, cov_params_x = self.reconstruct(X)
# for j, p in enumerate(selected_points):
#
#
# self._vae_visible_vars_model_logger.log(p,
# "train",
# epoch,
# x_mean[j],
# self._model_visible["train"].get_covariance_from_parameters(x_covariance_parameters[j]))
# self._vae_visible_vars_model_logger.plot(p,"train")
#
#
# selected_points = self._vae_visible_vars_model_logger.get_selected_points("test")
# X = perturbed_test_set[selected_points]
# z_mean, z_covariance_parameters, z = self.encode(X)
# x_mean, x_covariance_parameters = self.generate(z)
# for j, p in enumerate(selected_points):
# self._vae_visible_vars_model_logger.log(p,
# "test",
# epoch,
# x_mean[j],
# self._model_visible["train"].get_covariance_from_parameters(x_covariance_parameters[j]))
# self._vae_visible_vars_model_logger.plot(p,"test")
# '''
#
# if self._images_generator and self._images_generator.log_epoch(epoch):
# images, _ = self.generate(z_generate_from_prior)
# #resized_images = [image.reshape(self._image_height, self._image_width) for image in images]
# self._images_generator.save_images(images, epoch)
#
# if self._images_regenerator and self._images_regenerator.log_epoch(epoch):
#
# def regenerate_image(dataset, suffix):
# width = self._images_regenerator.number_images_columns
# height = self._images_regenerator.number_images_rows
# X = dataset[:width*height]
# means, cov, z = self.encode(X)
# images_mu, _ = self.generate(means)
# images_z, _ = self.generate(z)
#
# height = height*3
#
# composite = np.zeros((X.shape[0]*3,X.shape[1]))
# for i in range(0,int(height),3):
# composite[int(i*width):int((i+1)*width)] = X[int(i/3*width):int((i/3+1)*width)]
# composite[int((i+1)*width):int((i+2)*width)] = images_mu[int(i/3*width):int((i/3+1)*width)]
# composite[int((i+2)*width):int((i+3)*width)] = images_z[int(i/3*width):int((i/3+1)*width)]
#
# self._images_regenerator.save_images(composite, epoch, width=width, height=height, fileNameSuffix = suffix, title="1st line: orig image; 2nd line: reconstr mean; 3rd line: reconstr z ")
#
# regenerate_image(perturbed_train_set,"perturbed_train")
# regenerate_image(train_set,"train")
# regenerate_image(perturbed_test_set,"perturbed_test")
# regenerate_image(test_set,"test")
#
# # check if the stats logger is enabled (e.g. pca_logger or corr_logger),
# # in case the answer is yes, there are some common precomputations to be done based on encode()
# if (self._vae_latent_vars_pca_logger and self._vae_latent_vars_pca_logger.log_epoch(epoch)) or (self._vae_latent_vars_corr_logger and self._vae_latent_vars_corr_logger.log_epoch(epoch)) or (self._vae_latent_vars_classification_logger and self._vae_latent_vars_classification_logger.log_epoch(epoch)):
#
# means_train, cov_train, samples_train = self.encode(train_set[perm])
# means_test, cov_test, samples_test = self.encode(perturbed_test_set)
# means = (means_train, means_test)
# samples = (samples_train, samples_test)
#
# #samples_train = samples_train[::self.samples,:]
# #samples_test = samples_test[::self.samples,:]
#
# # Plot the PCA eigenvalues of latent means and samples if the flag is set
# if self._vae_latent_vars_pca_logger and self._vae_latent_vars_pca_logger.log_epoch(epoch):
# self._vae_latent_vars_pca_logger.plot_pca_eigenvalues(means, samples, epoch)
#
# # Plot the correlations plots of latent means and samples if the flag is set
# if self._vae_latent_vars_corr_logger and self._vae_latent_vars_corr_logger.log_epoch(epoch):
# self._vae_latent_vars_corr_logger.corr_heatmap(means, samples, epoch)
# self._vae_latent_vars_corr_logger.corr_pairwise(means[0], epoch)
#
# if self._vae_latent_vars_classification_logger and self._vae_latent_vars_classification_logger.log_epoch(epoch):
# labels_train = ds.train_set_y[perm]
# #pdb.set_trace()
# labels_test = ds.test_set_y
# self._vae_latent_vars_classification_logger.log(train_set[perm], perturbed_test_set, means_train, cov_train, samples_train, labels_train, means_test, cov_test, samples_test, labels_test, epoch)
# self._vae_latent_vars_classification_logger.plot(epoch)
#
# # Plot the generated images obtained from interpolating between two latent means or samples (linear interpolation)
# if self._vae_latent_lin_interpolate and self._vae_latent_lin_interpolate.log_epoch(epoch):
#
# for c in self._vae_latent_lin_interpolate.images_couples:
# x1 = train_set[c[0]].reshape(1,-1)
# x2 = train_set[c[1]].reshape(1,-1)
#
# mean1, cov1, sample1 = self.encode(x1)
# mean2, cov2, sample2 = self.encode(x2)
#
# means_t = self._vae_latent_lin_interpolate.linear_interpolation(mean1, mean2)
# samples_t = self._vae_latent_lin_interpolate.linear_interpolation(sample1[0], sample2[0])
#
# # pdb.set_trace()
# # TODO THIS IS WRONG!!
# cov1_diag = np.exp(cov1[0])
# cov2_diag = np.exp(cov2[0])
#
# means_geo_t,
<gh_stars>10-100
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import pkgutil
import six
from nose.tools import (
assert_equal, assert_not_equal, assert_raises, assert_is_instance,
raises)
import url
from url.url import StringURL, UnicodeURL
def test_bad_port():
def test(example):
assert_raises(ValueError, url.parse, example)
examples = [
'http://www.python.org:65536/',
'http://www.python.org:-20/',
'http://www.python.org:8589934592/',
'http://www.python.org:80hello/'
]
for example in examples:
yield test, example
def test_deparam_sane():
def test(bad, good):
assert_equal(url.parse(bad).strip().deparam(['c']).unicode, good)
examples = [
('?a=1&b=2&c=3&d=4', '?a=1&b=2&d=4'), # Maintains order
('?a=1&&&&&&b=2' , '?a=1&b=2' ), # Removes excess &'s
(';a=1;b=2;c=3;d=4', ';a=1;b=2;d=4'), # Maintains order
(';a=1;;;;;;b=2' , ';a=1;b=2' ), # Removes excess ;'s
(';foo_c=2' , ';foo_c=2' ), # Not overzealous
('?foo_c=2' , '?foo_c=2' ), # ...
('????foo=2' , '?foo=2' ), # Removes leading ?'s
(';foo' , ';foo' ),
('?foo' , '?foo' ),
('' , '' )
]
base = 'http://testing.com/page'
for bad, good in examples:
bad = base + bad
good = base + good
yield test, bad, good
def test_deparam_case_insensitivity():
def test(bad, good):
assert_equal(url.parse(bad).deparam(['HeLlO']).unicode, good)
examples = [
('?hELLo=2', ''),
('?HELLo=2', '')
]
base = 'http://testing.com/page'
for bad, good in examples:
bad = base + bad
good = base + good
yield test, bad, good
def test_filter_params():
def function(name, value):
'''Only keep even-valued parameters.'''
return int(value) % 2
def test(bad, good):
assert_equal(url.parse(bad).filter_params(function).unicode, good)
examples = [
('?a=1&b=2', '?b=2'),
(';a=1;b=2', ';b=2')
]
base = 'http://testing.com/page'
for bad, good in examples:
bad = base + bad
good = base + good
yield test, bad, good
def test_lower():
def test(bad, good):
assert_equal(url.parse(bad).unicode, good)
examples = [
('www.TESTING.coM' , 'www.testing.com/' ),
('WWW.testing.com' , 'www.testing.com/' ),
('WWW.testing.com/FOO', 'www.testing.com/FOO')
]
for bad, good in examples:
bad = 'http://' + bad
good = 'http://' + good
yield test, bad, good
def test_abspath():
def test(bad, good):
assert_equal(url.parse(bad).abspath().unicode, good)
examples = [
('howdy' , 'howdy' ),
('hello//how//are' , 'hello/how/are'),
('hello/../how/are', 'how/are' ),
('hello//..//how/' , 'how/' ),
('a/b/../../c' , 'c' ),
('../../../c' , 'c' ),
('./hello' , 'hello' ),
('./././hello' , 'hello' ),
('a/b/c/' , 'a/b/c/' ),
('a/b/c/..' , 'a/b/' ),
('a/b/.' , 'a/b/' ),
('a/b/./././' , 'a/b/' ),
('a/b/../' , 'a/' ),
('.' , '' ),
('../../..' , '' ),
('////foo' , 'foo' ),
('/foo/../whiz.' , 'whiz.' ),
('/foo/whiz./' , 'foo/whiz./' ),
('/foo/whiz./bar' , 'foo/whiz./bar')
]
base = 'http://testing.com/'
for bad, good in examples:
bad = base + bad
good = base + good
yield test, bad, good
def test_escape():
def test(bad, good):
assert_equal(url.parse(bad).escape().unicode, good)
# Escaping should also be idempotent
assert_equal(url.parse(bad).escape().escape().unicode, good)
examples = [
('hello%20and%20how%20are%20you', 'hello%20and%20how%20are%20you'),
('danny\'s pub' , 'danny\'s%20pub' ),
('danny%27s pub' , 'danny\'s%20pub' ),
('danny\'s pub?foo=bar&yo' , 'danny\'s%20pub?foo=bar&yo' ),
('hello%2c world' , 'hello,%20world' ),
('%3f%23%5b%5d' , '%3F%23%5B%5D' ),
# Thanks to @myronmarston for these test cases
('foo?bar none=foo bar' , 'foo?bar%20none=foo%20bar' ),
('foo;a=1;b=2?a=1&b=2' , 'foo;a=1;b=2?a=1&b=2' ),
('foo?bar=["hello","howdy"]' ,
'foo?bar=%5B%22hello%22,%22howdy%22%5D'),
# Example from the wild
('http://www.balset.com/DE3FJ4Yg/p:h=300&m=2011~07~25~2444705.png&ma=cb&or=1&w=400/2011/10/10/2923710.jpg',
'http://www.balset.com/DE3FJ4Yg/p:h=300&m=2011~07~25~2444705.png&ma=cb&or=1&w=400/2011/10/10/2923710.jpg'),
# Example with userinfo
('http://user%[email protected]/', 'http://user:[email protected]/')
]
base = 'http://testing.com/'
for bad, good in examples:
bad = base + bad
good = base + good
yield test, bad, good
def test_strict_escape():
def test(bad, good):
assert_equal(url.parse(bad).escape(strict=True).unicode, good)
# Escaping should also be idempotent
assert_equal(
url.parse(bad).escape(strict=True).escape(strict=True).unicode, good)
examples = [
('http://testing.com/danny%27s pub',
'http://testing.com/danny%27s%20pub'),
('http://testing.com/this%5Fand%5Fthat',
'http://testing.com/this_and_that'),
('http://user:[email protected]',
'http://user:[email protected]/'),
(u'http://José:no <EMAIL>',
'http://Jos%C3%A9:no%20<EMAIL>/'),
('http://oops!:don%27t@<EMAIL>.com',
'http://oops!:<EMAIL>%2<EMAIL>/'),
(u'española,nm%2cusa.html?gunk=junk+glunk&foo=bar baz',
'espa%C3%B1ola,nm%2Cusa.html?gunk=junk+glunk&foo=bar%20baz'),
('http://foo.com/bar\nbaz.html\n',
'http://foo.com/bar%0Abaz.html%0A'),
('http://foo.com/bar.jsp?param=\n/value%2F',
'http://foo.com/bar.jsp?param=%0A/value%2F'),
('http://user%[email protected]/',
'http://user%[email protected]/')
]
for bad, good in examples:
yield test, bad, good
def test_userinfo():
def test(bad, good):
assert_equal(url.parse(bad).unicode, good)
examples = [
('http://user:[email protected]', 'http://user:[email protected]'),
('http://[email protected]', 'http://[email protected]')
]
suffix = '/page.html'
for bad, good in examples:
bad = bad + suffix
good = good + suffix
yield test, bad, good
def test_not_equal():
def test(first, second):
# None of these examples should evaluate as strictly equal
assert_not_equal(url.parse(first), url.parse(second),
'URL(%s) should not equal URL(%s)' % (first, second))
# Using a string
assert_not_equal(url.parse(first), second,
'URL(%s) should not equal %s' % (first, second))
# Symmetric
assert_not_equal(url.parse(second), url.parse(first),
'URL(%s) should not equal URL(%s)' % (second, first))
# Using a string, symmetric
assert_not_equal(url.parse(second), first,
'URL(%s) should not equal %s' % (second, first))
# Should equal self
assert_equal(url.parse(first), first,
'URL(%s) should equal itself' % first)
assert_equal(url.parse(second), second,
'URL(%s) should equal itself' % second)
# None of these pairs should compare as strictly equal. This includes all the
# examples from the equivalence test as well.
examples = [
('http://foo.com:80' , 'http://foo.com/' ),
('https://foo.com:443' , 'https://foo.com/' ),
('http://foo.com/?b=2&&&&a=1', 'http://foo.com/?a=1&b=2' ),
('http://foo.com/%A2%B3' , 'http://foo.com/%a2%b3' ),
('http://foo.com/a/../b/.' , 'http://foo.com/b/' ),
(u'http://www.kündigen.de/' , 'http://www.xn--kndigen-n2a.de/'),
(u'http://www.kündiGen.DE/' , 'http://www.xn--kndigen-n2a.de/'),
('http://foo.com:' , 'http://foo.co.uk/' ),
('http://foo.com:8080' , 'http://foo.com/' ),
('https://foo.com:4430' , 'https://foo.com/' ),
('http://foo.com?page&foo' , 'http://foo.com/?page' ),
('http://foo.com/?b=2&c&a=1' , 'http://foo.com/?a=1&b=2' ),
('http://foo.com/%A2%B3%C3' , 'http://foo.com/%a2%b3' ),
(u'http://www.kündïgen.de/' , 'http://www.xn--kndigen-n2a.de/'),
('http://user:[email protected]/' , 'http://foo.com/' ),
('http://[email protected]/' , 'http://foo.com/' ),
('http://user:[email protected]/' , 'http://pass:[email protected]/' )
]
for first, second in examples:
yield test, first, second
def test_equiv():
def test(first, second):
# Equiv with another URL object
assert url.parse(first).equiv(url.parse(second))
# Equiv with a string
assert url.parse(first).equiv(second)
# Make sure it's also symmetric
assert url.parse(second).equiv(url.parse(first))
# Symmetric with string arg
assert url.parse(second).equiv(first)
# Should be equivalent to self
assert url.parse(first).equiv(first)
assert url.parse(second).equiv(second)
# Things to consider here are:
#
# - default ports (https://foo.com/ == https://foo.com:443/)
# - capitalization of the hostname
# - capitalization of the escaped characters in the path
examples = [
('http://foo.com:80' , 'http://foo.com/' ),
('https://foo.com:443' , 'https://foo.com/' ),
('http://foo.com/?b=2&&&&a=1', 'http://foo.com/?a=1&b=2' ),
('http://foo.com/%A2%B3' , 'http://foo.com/%a2%b3' ),
('http://foo.com/a/../b/.' , 'http://foo.com/b/' ),
(u'http://www.kündigen.de/' , 'http://www.xn--kndigen-n2a.de/'),
(u'http://www.kündiGen.DE/' , 'http://www.xn--kndigen-n2a.de/'),
('http://user:[email protected]/' , 'http://foo.com/' ),
('http://[email protected]/' , 'http://foo.com/' )
]
for first, second in examples:
yield test, first, second
def test_not_equiv():
def test(first, second):
# Equiv with another URL object
assert not url.parse(first).equiv(url.parse(second))
# Equiv with a string
assert not url.parse(first).equiv(second)
# Make sure it's also symmetric
assert not url.parse(second).equiv(url.parse(first))
# Symmetric with string arg
assert not url.parse(second).equiv(first)
# Should be equivalent to self
assert url.parse(first).equiv(first)
assert url.parse(second).equiv(second)
# None of these examples should evaluate as strictly equal
assert_not_equal(url.parse(first), url.parse(second),
'URL(%s) should not equal URL(%s)' % (first, second))
# Using a string
assert_not_equal(url.parse(first), second,
'URL(%s) should not equal %s' % (first, second))
# Symmetric
assert_not_equal(url.parse(second), url.parse(first),
'URL(%s) should not equal URL(%s)' % (second, first))
# Using a string, symmetric
assert_not_equal(url.parse(second), first,
'URL(%s) should not equal %s' % (second, first))
# Should equal self
assert_equal(url.parse(first), first,
'URL(%s) should equal itself' % first)
assert_equal(url.parse(second), second,
'URL(%s) should equal itself' % second)
# Now some examples that should /not/ pass
examples = [
('http://foo.com:' , 'http://foo.co.uk/' ),
('http://foo.com:8080' , 'http://foo.com/' ),
('https://foo.com:4430' , 'https://foo.com/' ),
('http://foo.com?page&foo' , 'http://foo.com/?page' ),
('http://foo.com/?b=2&c&a=1' , 'http://foo.com/?a=1&b=2' ),
('http://foo.com/%A2%B3%C3' , 'http://foo.com/%a2%b3' ),
(u'http://www.kündïgen.de/' , 'http://www.xn--kndigen-n2a.de/')
]
for first, second in examples:
yield test, first, second
def test_str_repr():
def test(toparse, strng):
assert_equal(str(url.parse(toparse)), strng)
assert_equal(repr(url.parse(toparse)),
'<url.URL object "%s" >' % strng)
examples = [
('http://foo.com/', 'http://foo.com/'),
('http://FOO.com/', 'http://foo.com/')
]
for toparse, strng in examples:
yield test, toparse, strng
def test_canonical():
def test(bad, good):
assert_equal(url.parse(bad).canonical().unicode, good)
examples = [
('?b=2&a=1&c=3', '?a=1&b=2&c=3'),
(';b=2;a=1;c=3', ';a=1;b=2;c=3')
]
base = 'http://testing.com/'
for bad, good in examples:
bad = base + bad
good = base + good
yield test, bad, good
def test_defrag():
def test(bad, good):
assert_equal(url.parse(bad).defrag().unicode, good)
examples = [
('foo#bar', 'foo')
]
base = 'http://testing.com/'
for bad, good in examples:
bad = base + bad
good = base + good
yield test, bad, good
def test_deuserinfo():
def test(bad, good):
assert_equal(url.parse(bad).deuserinfo().unicode, good)
examples = [
('http://user:[email protected]/', 'http://foo.com/'),
('http://[email protected]/', 'http://foo.com/')
]
for bad, good in examples:
yield test, bad, good
def test_punycode():
def test(uni, puny):
assert_equal(url.parse(uni).escape().punycode().unicode, puny)
# Also make sure punycode is idempotent
assert_equal(
url.parse(uni).escape().punycode().punycode().unicode, puny)
# Make sure that we can reverse the procedure correctly
assert_equal(
url.parse(uni).escape().punycode().unpunycode().unescape(),
uni)
# And we get what we'd expect going the opposite direction
assert_equal(
url.parse(puny).unescape().unpunycode().unicode, uni)
examples = [
(u'http://www.kündigen.de/',
'http://www.xn--kndigen-n2a.de/'),
(u'http://россия.иком.museum/',
'http://xn--h1alffa9f.xn--h1aegh.museum/'),
(u'https://t…/',
'https://xn--t-9hn/'),
(u'http://россия.иком.museum/испытание.html',
'http://xn--h1alffa9f.xn--h1aegh.museum/%D0%B8%D1%81%D0%BF%D1%8B%D1%82%D0%B0%D0%BD%D0%B8%D0%B5.html')
]
for uni, puny in examples:
yield test, uni, puny
def test_punycode_relative_urls():
def test(example):
assert_equal(url.parse(example).escape().punycode().unicode, example)
# Also make sure punycode is idempotent
assert_equal(
url.parse(example).escape().punycode().punycode().unicode, example)
# Make sure that we can reverse the procedure correctly
assert_equal(
url.parse(example).escape().punycode().unpunycode().unescape(),
example)
# And we get what we'd expect going the opposite direction
assert_equal(
url.parse(example).unescape().unpunycode().unicode, example)
# Make sure that we can't punycode or unpunycode relative urls
examples = ['foo', '../foo', '/bar/foo']
for relative in examples:
yield test, relative
def test_punycode_encode_errors():
def test(example):
assert_raises(ValueError, url.parse('http://' +
in parsecs and V-band extinction, Av, for a
star.
Returns:
G_bp - G_rp color.
"""
_, _, _, bands = mist.interp_mag([*mag_pars], ["BP", "RP"])
bp, rp = bands
return bp - rp
# def lnprior(params):
# """ logarithmic prior on parameters.
# The (natural log) prior on the parameters. Takes EEP, log10(age) in years,
# metallicity (feh), distance in parsecs and V-band extinction (Av).
# Args:
# params (array-like): An array of EEP, age, feh, distance and
# extinction.
# Returns:
# The prior probability for the parameters.
# """
# # finite_mask = np.isfinite(params)
# # if sum(finite_mask) < len(params):
# # print(params, "non-finite parameter")
# # log Priors over age, metallicity and distance.
# # (The priors in priors.py are not in log)
# age_prior = np.log(priors.age_prior(params[1]))
# feh_prior = np.log(priors.feh_prior(params[2]))
# distance_prior = np.log(priors.distance_prior(params[3]))
# # Uniform prior on extinction.
# mAv = (0 <= params[4]) * (params[4] < 1) # Prior on A_v
# mAv &= np.isfinite(params[4])
# mAv = mAv == 1
# # Uniform prior on EEP
# m = (190 < params[0]) * (params[0] < 500) # Broad bounds on EEP.
# m &= np.isfinite(params[0])
# if mAv and m and np.isfinite(age_prior) and np.isfinite(distance_prior) \
# and np.isfinite(feh_prior):
# return age_prior + feh_prior + distance_prior
# else:
# return -np.inf
# def ptform(u):
# """
# Prior transform for sampling with dynesty.
# Args:
# u (array): The parameter array.
# Returns:
# u' (array): The parameters transformed from the unit cube to the prior
# space.
# """
# x = np.array(u)
# EEP between 100 and 800
# x[0] = 300*x[0] + 600 # x by range and + max
# x[0] = 700*x[0] + 800 # x by range and + max
# # Age between 0 and 13.8
# x[1] = np.log10(x[1]*13.8*1e9)
# # Fe/H between -5 and 5
# x[2] = x[2]*10 - 5
# # Distance uniform in log between 0 and 100 kpc
# x[3] = x[3]*np.log(100*1e3)
# # Av uniform between 0 and 1
# x[4] = x[4]
# return x
def lnprob(lnparams, *args):
""" The ln-probability function.
Calculates the logarithmic posterior probability (likelihood times prior)
of the model given the data.
Args:
lnparams (array): The parameter array containing Equivalent
Evolutionary Point (EEP), age in log10(yrs), metallicity, distance
in ln(pc) and V-band extinction. [EEP, log10(age [yrs]), [Fe/H],
ln(distance [pc]), A_v].
*args:
The arguments -- mod, period, period_err, iso_only, gyro_only,
rossby and model.
mod is the isochrones starmodel object which is set
up in stardate.py. period and period_err are the
rotation period and rotation period uncertainty (in days).
iso_only should be true if you want to use ONLY isochrone fitting
and not gyrochronology.
rossby is true if you want to use the van Saders + (2016) weakened
magnetic braking law. Set to false to turn this off.
model is "angus15" for the Angus + (2015) gyro model or "praesepe"
for the Praesepe gyro model.
Returns:
The log-posterior probability of the model given the data and the
log-prior.
"""
# transform mass and distance back to linear.
params = lnparams*1
params[3] = np.exp(lnparams[3])
# Unpack the args.
mod, period, period_err, iso_only, gyro_only, rossby, model = args
# Put a prior on EEP
if params[0] > 800: #2000:
return -np.inf, -np.inf
# If the prior is -inf, don't even try to calculate the isochronal
# likelihood.
lnpr = mod.lnprior(params)
if not np.isfinite(lnpr):
return -np.inf, -np.inf
like = lnlike(lnparams, *args)
return like + lnpr, lnpr
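# Note: lnprob returns (log-posterior, log-prior); the second value is
# presumably kept as a sampler "blob" (e.g. with emcee) so the prior can be
# recovered from the chain afterwards (an assumption, not stated above).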
def lnlike(lnparams, *args):
""" The log-likelihood function.
Calculates the logarithmic likelihood of the data given the model.
Args:
lnparams (array): The parameter array containing Equivalent
Evolutionary Point (EEP), age in log10(yrs), metallicity, distance
in ln(pc) and V-band extinction. [EEP, log10(age [yrs]), [Fe/H],
ln(distance [pc]), A_v].
*args:
The arguments -- mod, period, period_err, iso_only, gyro_only,
rossby and model.
mod is the isochrones starmodel object which is set
up in stardate.py. period and period_err are the
rotation period and rotation period uncertainty (in days).
iso_only should be true if you want to use ONLY isochrone fitting
and not gyrochronology.
rossby is true if you want to use the van Saders + (2016) weakened
magnetic braking law. Set to false to turn this off.
model is "angus15" for the Angus + (2015) gyro model or "praesepe"
for the Praesepe gyro model.
Returns:
The log-likelihood
"""
# transform mass and distance back to linear.
params = lnparams*1
params[3] = np.exp(lnparams[3])
# Unpack the args.
mod, period, period_err, iso_only, gyro_only, rossby, model = args
# If isochrones only, just return the isochronal lhf.
if iso_only:
return mod.lnlike(params)
# Check that the period is a positive, finite number. It doesn't matter
# too much what the lhf is here, as long as it is constant.
if period is None or not np.isfinite(period) or period <= 0. \
or period_err is None or not np.isfinite(period_err) \
or period_err <= 0.:
gyro_lnlike, sig = -.5*((5/(20.))*2) - np.log(20.), 0
else:
# The gyrochronology lhf.
# The model
# Calculate a period using the gyrochronology model
log10_period_model, sig = gyro_model_rossby(params, rossby=rossby,
model=model)
# The variance model
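# (0.434 is approximately 1/ln(10): a relative period error dP/P corresponds
# to an error of ~0.434 * dP/P in log10(P), since d(log10 P) = dP / (P ln 10).)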
relative_err = period_err/period
var = (relative_err*.434 + sig)**2
# The likelihood
# Calculate the gyrochronology likelihood.
gyro_lnlike = -.5*((log10_period_model - np.log10(period))**2/var) \
- .5*np.log(2*np.pi*var)
if gyro_only:
like = gyro_lnlike
else:
like = mod.lnlike(params) + gyro_lnlike
if not np.isfinite(like):
like = -np.inf
return float(like)
def nll(lnparams, args):
""" The negative ln-probability function.
Calculates the negative logarithmic posterior probability (likelihood times
prior) of the model given the data.
Args:
lnparams (array): The parameter array containing Equivalent
Evolutionary Point (EEP), age in log10(yrs), metallicity, distance
in ln(pc) and V-band extinction. [EEP, log10(age [yrs]), [Fe/H],
ln(distance [pc]), A_v].
*args:
The arguments -- mod, period, period_err, color, mass and iso_only.
mod is the isochrones starmodel object which is set
up in stardate.py. period, period_err, color and mass are the
rotation period and rotation period uncertainty (in days), B-V
color and mass [M_sun]. color and mass should both be None unless
only gyrochronology is being used.
Returns:
The negative log-posterior probability of the model given the data.
"""
lp, prior = lnprob(lnparams, *args)
return -lp
def convective_overturn_time(*args):
"""Estimate the convective overturn time.
Estimate the convective overturn time using equation 11 in Wright et al.
(2011): https://arxiv.org/abs/1109.4634
log tau = 1.16 - 1.49log(M/M⊙) - 0.54log^2(M/M⊙)
Args:
args: EITHER mass (float): Mass in Solar units OR eep (float):
The Equivalent evolutionary point of a star (355 for the Sun),
age (float): The age of a star in log_10(years) and feh (float):
the metallicity of a star.
Returns:
The convective overturn time in days.
"""
if len(args) > 1:
# Convert eep, age and feh to mass (mass will be in Solar mass units)
eep, age, feh = args
M = mist.interp_value([eep, age, feh], ["mass"])
else:
M = args[0]
log_tau = 1.16 - 1.49*np.log10(M) - .54*(np.log10(M))**2
return 10**log_tau
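# Quick sanity check of the Wright et al. (2011) relation above (illustrative
# only): for a 1 M_sun star log10(M) = 0, so log_tau = 1.16 and the overturn
# time is 10**1.16, roughly 14.5 days.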
def sigmoid(k, x0, L, x):
"""
Computes a sigmoid function.
Args:
k (float): The logistic growth rate (steepness).
x0 (float): The location of 1/2 max.
L (float): The maximum value.
x, (array): The x-array.
Returns:
y (array): The logistic function.
"""
return L/(np.exp(-k*(x - x0)) + 1)
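# Illustrative behaviour, derived from the definition above: at the midpoint
# sigmoid(k, x0, L, x0) == L / 2 (e.g. sigmoid(1.0, 0.0, 1.0, 0.0) == 0.5), and
# for k > 0 the value approaches L as x -> +inf and 0 as x -> -inf.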
def sigma(eep, log_age, feh, color, model="praesepe"):
"""
The standard deviation of the rotation period distribution.
Currently comprised of two three logistic functions that 'blow up' the
variance at hot colours, cool colours and large EEPs. The FGK dwarf part
of the model has zero variance.
Args:
eep (float): The equivalent evolutionary point.
log_age (float): The log10(age) in years.
feh (float): The metallicity.
color (float): The G_BP - G_RP colour if model == "praesepe" or the
B-V color if model == "angus15"
"""
kcool, khot, keep = 100, 100, .2
Lcool, Lhot, Leep = .5, .5, 5
x0eep = 454 #454 #100 #454
k_old, x0_old = 100, np.log10(10*1e9)
k_young, x0_young = 20, np.log10(250*1e6)
L_age = .5
# k_feh, L_feh, x0_feh = 5, .5, 3.
k_feh, L_feh, x0_feh = 50, .5, .2
# k_feh, L_feh, x0_feh = 50, .5, .25
if model == "angus15":
x0cool, x0hot = 1.4, .45
if color > 0:
sigma_color = sigmoid(kcool, x0cool, Lcool, color) \
| |
= """
-> MEIMask
---
-> stimulus.StaticImage.Image
img_activation: float # activation at the best masked image
"""
def make(self, key):
readout_key = key['readout_key']
neuron_id = key['neuron_id']
print('Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
_, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
target_scan = (StaticMultiDataset.Member & key).fetch1('KEY')
images, image_ids = (Frame * ConditionTier * stimulus.Frame & target_scan & 'image_class="imagenet"').fetch('frame', 'image_id')
mask = (MEIMask & key).fetch1('mei_mask')
# load the model
models = get_multi_model(key)
adj_model = get_adj_model(models, readout_key, neuron_id, mu_eye=mu_eye)
# activations for each masked image
img_activations = []
for image in tqdm(images):
image = np.atleast_3d(mask_image(image, mask, bias)) # ensure channel dimension exists
image = torch.tensor(process(image, mu=bias, sigma=scale)[None, ...], dtype=torch.float32, requires_grad=True, device='cuda')
# --- Compute the model activation at the masked image
y = adj_model(image)
img_activations.append(y.item())
img_activations = np.array(img_activations)
pos = np.argmax(img_activations)
key['image_class'] = 'imagenet'
key['image_id'] = image_ids[pos]
key['img_activation'] = img_activations[pos]
self.insert1(key)
@schema
class MEIMaskedImageResponse(dj.Computed):
definition = """
-> BestMEIMaskedImageNet
---
masked_image_resp: float # activation on the masked image
original_image_resp: float # activation on the unmasked image
"""
def make(self, key):
readout_key = key['readout_key']
neuron_id = key['neuron_id']
print('Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
_, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
# use the best MEI masked ImageNet image
image = (stimulus.Frame * Frame * ConditionTier * stimulus.StaticImage.Image * StaticMultiDataset.Member & (BestMEIMaskedImageNet & key)).fetch1('frame')
mask = (MEIMask & key).fetch1('mei_mask')
# load the model
models = get_multi_model(key)
adj_model = get_adj_model(models, readout_key, neuron_id, mu_eye=mu_eye)
# activations for each masked image
def evaluate_image(x):
x = np.atleast_3d(x)
x = torch.tensor(process(x, mu=bias, sigma=scale)[None, ...], dtype=torch.float32,
requires_grad=False, device='cuda')
y = adj_model(x)
return y.item()
key['masked_image_resp'] = evaluate_image(mask_image(image, mask, bias))
key['original_image_resp'] = evaluate_image(image)
self.insert1(key)
@schema
class MEIMaskedBestImageResponse(dj.Computed):
"""
Response of masked vs unmasked on **images with best unmasked responses**
"""
definition = """
-> BestImageNet
-> MEIMask
---
masked_image_resp: float # activation on the masked image
original_image_resp: float # activation on the unmasked image
"""
def make(self, key):
readout_key = key['readout_key']
neuron_id = key['neuron_id']
print('Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
_, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
image = (stimulus.Frame * Frame * ConditionTier * stimulus.StaticImage.Image * StaticMultiDataset.Member & (BestImageNet & key)).fetch1('frame')
mask = (MEIMask & key).fetch1('mei_mask')
# load the model
models = get_multi_model(key)
adj_model = get_adj_model(models, readout_key, neuron_id, mu_eye=mu_eye)
# activations for each masked image
def evaluate_image(x):
x = np.atleast_3d(x)
x = torch.tensor(process(x, mu=bias, sigma=scale)[None, ...], dtype=torch.float32,
requires_grad=False, device='cuda')
y = adj_model(x)
return y.item()
key['masked_image_resp'] = evaluate_image(mask_image(image, mask, bias))
key['original_image_resp'] = evaluate_image(image)
self.insert1(key)
@schema
class MEIMaskedAllResponse(dj.Computed):
definition = """
-> MEIMask
---
original_img_activations: longblob # activations on the original unmasked images
masked_img_activations: longblob # activations on the masked images
image_ids: longblob # image ids
"""
def make(self, key):
readout_key = key['readout_key']
neuron_id = key['neuron_id']
print('Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
_, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
target_scan = (StaticMultiDataset.Member & key).fetch1('KEY')
images, image_ids = (Frame * ConditionTier * stimulus.Frame & target_scan & 'image_class="imagenet"').fetch(
'frame', 'image_id')
mask = (MEIMask & key).fetch1('mei_mask')
# load the model
models = get_multi_model(key)
adj_model = get_adj_model(models, readout_key, neuron_id, mu_eye=mu_eye)
# activations for each masked image
def evaluate_image(x):
x = np.atleast_3d(x)
x = torch.tensor(process(x, mu=bias, sigma=scale)[None, ...], dtype=torch.float32,
requires_grad=False, device='cuda')
y = adj_model(x)
return y.item()
original_img_activations = []
masked_img_activations = []
for image in tqdm(images):
original_img_activations.append(evaluate_image(image))
masked_img_activations.append(evaluate_image(mask_image(image, mask, bias)))
key['original_img_activations'] = original_img_activations
key['masked_img_activations'] = masked_img_activations
key['image_ids'] = image_ids
self.insert1(key)
### Below we use the "tight" MEI mask and find the best masked ImageNet image
### for each target neuron.
@schema
class BestTightMaskedImageNet(dj.Computed):
definition = """
-> TightMEIMask
---
-> stimulus.StaticImage.Image
img_activation: float # activation at the best masked image
unmasked_activation: float # activation of the unmasked best image
"""
def make(self, key):
readout_key = key['readout_key']
neuron_id = key['neuron_id']
print('Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
_, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
target_scan = (StaticMultiDataset.Member & key).fetch1('KEY')
images, image_ids = (ProcessedImagenet & target_scan & 'collection_id = 4').fetch('frame', 'image_id')
mask = (TightMEIMask & key).fetch1('tight_mask')
# load the model
models = get_multi_model(key)
adj_model = get_adj_model(models, readout_key, neuron_id, mu_eye=mu_eye)
# activations for each masked image
img_activations = []
def evaluate_image(x):
x = np.atleast_3d(x)
x = torch.tensor(process(x, mu=bias, sigma=scale)[None, ...], dtype=torch.float32,
requires_grad=False, device='cuda')
y = adj_model(x)
return y.item()
# let sigma scale with the square root of the fraction of the image covered by the mask
sigma = scale * np.sqrt(mask.sum() / img_shape[-1] / img_shape[-2])
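# Worked example (illustrative): if the mask covers a quarter of the image,
# mask.sum() / (H * W) = 0.25 and sigma = scale * sqrt(0.25) = 0.5 * scale.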
for image in tqdm(images):
adj_img, _ = adjust_img_stats(image, bias, sigma, mask=mask)
img_activations.append(evaluate_image(adj_img))
img_activations = np.array(img_activations)
pos = np.nanargmax(img_activations)
key['image_class'] = 'imagenet'
key['image_id'] = image_ids[pos]
key['img_activation'] = img_activations[pos]
key['unmasked_activation'] = evaluate_image(images[pos])
self.insert1(key)
@schema
class WrongMEI(dj.Computed):
"""
This is WRONG in the sense that it uses the mean of the image as the standard deviation when
generating the MEI. This reflects the previous bug in Normalizer where the images were standardized
by the mean instead of the std. Hence, when this "wrong" MEI generation is applied to those models,
it actually generates MEI in a way that is consistent with the intensity range that the network
was trained under.
"""
definition = """
-> TargetModel
-> MEIParameter
-> TargetDataset.Unit
---
n_seeds : int # number of distinct seeded models used
mei : longblob # most exciting images
activation : float # activation at the MEI
monotonic : tinyint # does activity increase monotonically with contrast
max_contrast : float # contrast at which maximum activity is achieved
max_activation : float # activation at the maximum contrast
sat_contrast : float # contrast at which image would start saturating
img_mean : float # mean luminance of the image
lim_contrast : float # max reachable contrast without clipping
"""
@property
def key_source(self):
# restriction by CorePlusReadout is needed to link dataconfig with the appropriate model
return TargetModel() * MEIParameter() * TargetDataset.Unit & NetworkConfig.CorePlusReadout
def make(self, key):
readout_key = key['readout_key']
neuron_id = key['neuron_id']
print('Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
# get input statistics
_, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
# !!!!!INTENTIONALLY WRONG OVERRIDE!!!!!!
scale = bias
print('Working with images with mu={}, sigma={}'.format(bias, scale))
models = get_multi_model(key)
adj_model = get_adj_model(models, readout_key, neuron_id, mu_eye=mu_eye)
params = (MEIParameter() & key).fetch1()
blur = bool(params['blur'])
jitter = int(params['jitter'])
precond = float(params['precond'])
step_gain = float(params['step_gain'])
norm = float(params['norm'])
train_norm = float(params['train_norm'])
octaves = [
{
'iter_n': int(params['iter_n']),
'start_sigma': float(params['start_sigma']),
'end_sigma': float(params['end_sigma']),
'start_step_size': float(params['start_step_size']),
'end_step_size': float(params['end_step_size']),
},
]
# prepare initial image
channels, original_h, original_w = img_shape[-3:]
# the background color of the initial image
background_color = np.float32([128] * channels)
# generate initial random image
gen_image = np.random.normal(background_color, 8, (original_h, original_w, channels))
gen_image = np.clip(gen_image, 0, 255)
# generate class visualization via octavewise gradient ascent
gen_image = deepdraw(adj_model, gen_image, octaves, clip=True,
random_crop=False, blur=blur, jitter=jitter,
precond=precond, step_gain=step_gain,
bias=bias, scale=scale, norm=norm, train_norm=train_norm)
mei = gen_image.squeeze()
with torch.no_grad():
img = torch.Tensor(process(gen_image, mu=bias, sigma=scale)[None, ...]).to('cuda')
activation = adj_model(img).data.cpu().numpy()[0]
cont, vals, lim_contrast = contrast_tuning(adj_model, mei, bias, scale)
key['n_seeds'] = len(models)
key['mei'] = mei
key['activation'] = activation
key['monotonic'] = bool(np.all(np.diff(vals) >= 0))
key['max_activation'] = np.max(vals)
key['max_contrast'] = cont[np.argmax(vals)]
key['sat_contrast'] = np.max(cont)
key['img_mean'] = mei.mean()
key['lim_contrast'] = lim_contrast
self.insert1(key)
@schema
class ImageConfig(dj.Lookup):
definition = """
img_config_id: int
---
img_mean: float # image mean to use. -1 would use original image mean.
img_contrast: float # image contrast to use. -1 would use original image contrast.
force_stats: bool # whether to make forcible adjustment on the stats
"""
contents = [
(0, 111.0, 16.0, True),
# (1, 112.0, 6.0, False),
]
@schema
class MEIActivation(dj.Computed):
definition = """
-> MEI
-> ImageConfig
---
mei_activation: float # activation on mei
mei_clipped: bool # whether image was clipped
mei_contrast: float # actual contrast of mei
"""
def make(self, key):
mei = (MEI() & key).fetch1('mei')
target_mean, target_contrast, force_stats = (ImageConfig() & key).fetch1('img_mean', 'img_contrast', 'force_stats')
mei, clipped, actual_contrast = adjust_contrast(mei, target_contrast, mu=target_mean, force=force_stats)
readout_key = key['readout_key']
neuron_id = key['neuron_id']
print('Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
# get input statistics
_, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
models = get_multi_model(key)
adj_model = get_adj_model(models, readout_key, neuron_id, mu_eye=mu_eye)
with torch.no_grad():
img = torch.Tensor(process(mei[..., None], mu=bias, sigma=scale)[None, ...]).to('cuda')
activation = adj_model(img).data.cpu().numpy()[0]
key['mei_activation'] = activation
key['mei_clipped'] = bool(clipped)
key['mei_contrast'] = actual_contrast
self.insert1(key)
@schema
class JitterConfig(dj.Lookup):
definition = """
jitter_config_id: int
---
jitter_size: int
| |
<reponame>faithcomesbyhearing/verse-timing
import pandas as pd,argparse,glob,statistics as stat,matplotlib.pyplot as plt,numpy as np,operator
'''
ex:
python3 upload_code/compare_with_cue_info_chinanteco.py -i /Users/spanta/Desktop/jon_code_test/upload_code/chinanteco_aeneas/lang_code_epo -o /Users/spanta/Desktop/jon_code_test/upload_code/chinanteco_aeneas -book_find_string MRC -synced_silence -write_audition_format -print_chapter 3 -extract_verse_timing
'''
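# Assumed input formats (illustrative, inferred from the parsing below rather than documented):
#   *_cue_info.txt : one tab-separated line per script line, "start<TAB>end" in seconds
#   *_adjusted.txt : one tab-separated line per script line, "start<TAB>end<TAB> text"
#   aeneas.csv     : columns including book, chapter, line_number and verse_number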
#Make sure that, for the chinanteco line comparison, the first line of the cue info file is taken care of
parser = argparse.ArgumentParser(
description='This function gives a quality control metric between cue info and aeneas. Assumes only one book in aeneas.csv, and that each line in the core script is translated '
'to cue info and aeneas; otherwise the script fails')
required_args = parser.add_argument_group('required arguments')
required_args.add_argument(
'-i', required=True, nargs=1, type=str, help='Input dir that contains aeneas.csv *aeneas*.txt/*adjusted.txt and *cue_info*.txt')
required_args.add_argument(
'-o', required=True, nargs=1, type=str, help='Output dir')
required_args.add_argument(
'-book_find_string', required=True, nargs=1, type=str, help='EX: MRK to get MRK1,ESV_MRK_01 etc.')
optional_args=parser.add_argument_group('optional arguments')
optional_args.add_argument('-strict_comparison', type=int,choices=[1,2], help='1 - compare aeneas and qinfo start boundaries; 2 - additionally compare verse durations')
optional_args.add_argument('-extract_verse_timing', action='store_true', help='Compare verse timing info.')
optional_args.add_argument('-synced_silence', action='store_true', help='Indicate whether the output from aeneas timing was fully synced with silence adjustment')
optional_args.add_argument('-synced_qinfo', action='store_true', help='Indicate whether the output from aeneas timing was fully synced with qinfo')
optional_args.add_argument('-print_chapter', nargs=1, type=str,default=['9'],help='Print chapter')
optional_args.add_argument('-write_audition_format', action='store_true', help='Write output to audition tab csv format')
optional_args.add_argument('-check_secs', nargs=1, type=str,default=['3'],help='Mark if above check secs')
args = parser.parse_args()
input_dir=args.i[0]
output_dir=args.o[0]
book_find_string=args.book_find_string[0]
if args.strict_comparison is not None: strict_comparison=args.strict_comparison
else:strict_comparison=0
print_chapter=args.print_chapter[0]
print(args)
input_file='aeneas.csv'
df=(pd.read_csv(input_dir+'/'+input_file, encoding='utf-8')).astype(str)
#Get unique book and chapters
book_list=df['book'].unique()
chapter_list=df['chapter'].unique()
if args.extract_verse_timing: target='verse_number'
else:target='line_number'
if args.synced_silence: aeneas_adjusted_file='*_sync_adjusted.txt'
elif args.synced_qinfo:aeneas_adjusted_file='*_qinfo_adjusted.txt'
else:aeneas_adjusted_file='*_adjusted.txt'
#get column indexes
target_index=df.columns.get_loc(target)
qc_data=dict()
median_dict=dict()
std_dev_dict=dict()
for each_chapter in chapter_list:
target_list=df[target][df['chapter'] == str(each_chapter)]
uniq_target=target_list.unique()
if float(each_chapter)<10:
#Get cue info file
cue_info_chapter_file=glob.glob(input_dir+'/*'+book_find_string+'*'+'0'+str(each_chapter)+'*_cue_info.txt')[0]
#Get aeneas silence adjusted file
aeneas_chapter_file=glob.glob(input_dir+'/*'+book_find_string+'*'+'0'+str(each_chapter)+aeneas_adjusted_file)[0]
else:
#Get cue info file
cue_info_chapter_file=glob.glob(input_dir+'/*'+book_find_string+'*'+str(each_chapter)+'*_cue_info.txt')[0]
#Get aeneas silence adjusted file
aeneas_chapter_file=glob.glob(input_dir+'/*'+book_find_string+'_'+str(each_chapter)+aeneas_adjusted_file)[0]
if args.write_audition_format:
audition_file_name = (aeneas_chapter_file.split('/')[-1]).split('_sync_adjusted.txt')[0] + target.split('_')[0]+'_audition_markers.csv'
#print(aeneas_chapter_file.split('/')[-1],(aeneas_chapter_file.split('/')[-1]).split('_sync_adjusted.txt')[0],audition_file_name)
audition_file = input_dir + '/' + audition_file_name
#Read cue info and aeneas for iteration in the foll. for loop
aeneas_target = open(aeneas_chapter_file).readlines()
cue_target=open(cue_info_chapter_file).readlines()
ind=0
difference_list=list()
with open(audition_file,'w') as aud:
# Since we can't read the last verse of the cue info, we will remove the last element from uniq_target
# uniq_target=uniq_target[:-1]
for each_target in uniq_target:
ind+=1
#
if ind<=len(cue_target):
cue_times=((cue_target[ind-1]).strip('\n')).split('\t')
cue_duration=float(cue_times[1])-float(cue_times[0])
cue_start=float(cue_times[0])
indices=[i for i,val in enumerate(target_list) if val==each_target]
aeneas_duration = 0
counter=0
aud_duration=0
aud_text=''
for each_index in indices:
aeneas_times = ((aeneas_target[each_index]).strip('\n')).split('\t')
aeneas_duration += float(aeneas_times[1]) - float(aeneas_times[0])
aud_duration=aeneas_duration
aud_text+=aeneas_times[-1][1:]
if counter==0:
aeneas_start=float(((aeneas_target[each_index]).strip('\n')).split('\t')[0])
aud_start=aeneas_start
counter+=1
# print(aeneas_times,aeneas_duration,cue_duration)
# print(each_chapter,each_target, each_index,cue_times, aeneas_times)
#if int(each_chapter) == 16:print(cue_duration,aeneas_duration)
difference = (round(abs(cue_duration - aeneas_duration), 1))
#print(difference)
# if each_chapter==print_chapter:
#print('AUD->', each_chapter, each_target, each_index, aud_start, aud_duration, aeneas_times[2],aud_text)
if strict_comparison==1: difference=(round(abs(cue_start - aeneas_start), 1))
elif strict_comparison==2:difference=(round(abs(cue_start - aeneas_start), 1))+(round(abs(cue_duration - aeneas_duration), 1))
# print(difference)
difference_list.append(difference)
# if each_chapter==print_chapter: print(cue_start,aeneas_start)
if difference > float(args.check_secs[0]):
marker_name = 'Check Marker '
else:
marker_name = 'Marker '
if ind==1:
# Write to adobe audition file
aud.write(
'Name' + '\t' + 'Start' + '\t' + 'Duration' + '\t' + 'Time Format' + '\t' + 'Type' + '\t' + 'Description' + '\n')
aud.write(marker_name + '0'+str(each_target) + '\t' + '0:' + str(round(aud_start, 3)) + '\t' + '0:' + str(
round(aud_duration, 3)) + '\t' + 'decimal' + '\t' + 'Cue' + '\t' +aud_text+'\n')
# print('aud text->',aud_text)
else:
if int(each_target)<10:marker_number='0'+str(each_target)
else:marker_number=str(each_target)
aud.write(marker_name + marker_number + '\t' + '0:' + str(round(aud_start, 3)) + '\t' + '0:' + str(
round(aud_duration, 3)) + '\t' + 'decimal' + '\t' + 'Cue' + '\t' +aud_text+'\n')
# print('aud text->', aud_text)
target1='verse_number'
target1_list = df[target1][df['chapter'] == str(each_chapter)]
uniq1_target = target1_list.unique()
if args.write_audition_format:
audition_file_name = (aeneas_chapter_file.split('/')[-1]).split('_sync_adjusted.txt')[0] + target1.split('_')[0]+'_audition_markers.csv'
#print(aeneas_chapter_file.split('/')[-1],(aeneas_chapter_file.split('/')[-1]).split('_sync_adjusted.txt')[0],audition_file_name)
audition_file = input_dir + '/' + audition_file_name
#Read cue info and aeneas for iteration in the foll. for loop
aeneas_target = open(aeneas_chapter_file).readlines()
ind=0
with open(audition_file,'w') as aud:
# Since we can't read the last verse of the cue info, we will remove the last element from uniq_target
# uniq_target=uniq_target[:-1]
for each_target in uniq1_target:
ind+=1
indices=[i for i,val in enumerate(target1_list) if val==each_target]
aeneas_duration = 0
counter=0
aud_duration=0
aud_text=''
for each_index in indices:
aeneas_times = ((aeneas_target[each_index]).strip('\n')).split('\t')
aeneas_duration += float(aeneas_times[1]) - float(aeneas_times[0])
aud_duration=aeneas_duration
aud_text+=aeneas_times[-1][1:]
if counter==0:
aeneas_start=float(((aeneas_target[each_index]).strip('\n')).split('\t')[0])
aud_start=aeneas_start
counter+=1
marker_name = 'Marker '
if ind==1:
# Write to adobe audition file
#print(marker_name + str(each_target) + '\t' + '0:' + str(round(aud_start, 3)) + '\t' + '0:' + str(round(aud_duration, 3)) + '\t' + 'decimal' + '\t' + 'Cue' + '\t' +aud_text+'\n')
aud.write(
'Name' + '\t' + 'Start' + '\t' + 'Duration' + '\t' + 'Time Format' + '\t' + 'Type' + '\t' + 'Description' + '\n')
aud.write(marker_name + '0'+str(each_target) + '\t' + '0:' + str(round(aud_start, 3)) + '\t' + '0:' + str(
round(aud_duration, 3)) + '\t' + 'decimal' + '\t' + 'Cue' + '\t' +aud_text+'\n')
# print('aud text->',aud_text)
else:
if int(each_target) < 10:
marker_number = '0' + str(each_target)
else:
marker_number = str(each_target)
#print(marker_name + str(each_target) + '\t' + '0:' + str(round(aud_start, 3)) + '\t' + '0:' + str(round(aud_duration, 3)) + '\t' + 'decimal' + '\t' + 'Cue' + '\t' +aud_text+'\n')
aud.write(marker_name + marker_number + '\t' + '0:' + str(round(aud_start, 3)) + '\t' + '0:' + str(
round(aud_duration, 3)) + '\t' + 'decimal' + '\t' + 'Cue' + '\t' +aud_text+'\n')
# print('aud text->', aud_text)
qc_data[each_chapter]=difference_list
median_dict[each_chapter]=np.median(difference_list)
std_dev_dict[each_chapter]=np.std(difference_list)
target1='verse_number'
target1_list = df[target1][df['chapter'] == str(each_chapter)]
uniq1_target = target1_list.unique()
target2='line_number'
target2_list = df[target2][df['chapter'] == str(each_chapter)]
uniq2_target = target2_list.unique()
line_cue_target = open(cue_info_chapter_file).readlines()
# if each_chapter==print_chapter:
# print(uniq1_target,uniq2_target)
if args.write_audition_format:
audition_file_name = (aeneas_chapter_file.split('/')[-1]).split('_sync_adjusted.txt')[0] + '_'+target1.split('_')[0]+'_curated_audition_markers.csv'
#print(aeneas_chapter_file.split('/')[-1],(aeneas_chapter_file.split('/')[-1]).split('_sync_adjusted.txt')[0],audition_file_name)
audition_file = input_dir + '/' + audition_file_name
silence_file=glob.glob(input_dir+'/'+(aeneas_chapter_file.split('/')[-1]).split('_sync_adjusted.txt')[0]+'*silence*')[0]
#Read cue info and aeneas for iteration in the foll. for loop
aeneas_target = open(aeneas_chapter_file).readlines()
ind=0
with open(audition_file,'w') as aud:
# Since we can't read the last verse of the cue info, we will remove the last element from uniq_target
# uniq_target=uniq_target[:-1]
for each_target in uniq1_target:
ind+=1
indices=[i for i,val in enumerate(target1_list) if val==each_target]
#Find the next verse index
for k,kval in enumerate(target1_list):
if int(kval)==(int(each_target)+1):
next_verse_index=k
break
# Find the previous verse index
for k, kval in enumerate(target1_list):
if int(kval) == (int(each_target) - 1):
previous_verse_index = k
break
# if each_chapter==print_chapter:
# print(int(each_target)+1,next_verse_index)
aeneas_duration = 0
counter=0
aud_duration=0
aud_text=''
for g,each_index in enumerate(indices):
aeneas_times = ((aeneas_target[each_index]).strip('\n')).split('\t')
aeneas_duration += float(aeneas_times[1]) - float(aeneas_times[0])
aud_duration=aeneas_duration
aud_text=aud_text+' '+aeneas_times[-1][1:]
if counter==0:
aeneas_start=float(((aeneas_target[each_index]).strip('\n')).split('\t')[0])
# aeneas_start_next=float(((aeneas_target[each_index+1]).strip('\n')).split('\t')[0])
aud_start=aeneas_start
if ind>1 and (float(target2_list.iloc[each_index])-float(target2_list.iloc[each_index-1]))==1 :
# if each_chapter==print_chapter:
# print(each_target,target2_list.iloc[each_index])
#Get adjustboundary1
for i,each_line in enumerate(uniq2_target):
if each_line==target2_list.iloc[each_index]:
# if each_chapter==print_chapter:
# print("chapter-{0},verse_num-{1},verse index-{2},line_num-{3}".format(each_chapter,each_target,each_index, float(target2_list.iloc[each_index])))
adjust_boundary1=float((line_cue_target[i]).split('\t')[0])
# if each_chapter==print_chapter:
# print(each_target, each_line,adjust_boundary1)
break
if each_chapter==print_chapter:
aeneas_start_next = float(
((aeneas_target[next_verse_index]).strip('\n')).split('\t')[0])
#Read silence file
silence_split_field=','
with open(silence_file,'r') as s:
for silence_bounds in s:
silence_start=float(silence_bounds.split(silence_split_field)[0])
silence_end=float(silence_bounds.split(silence_split_field)[1])
#if boundary falls inside silence, move it to silence region mid point
if ind<len(aeneas_target):
aeneas_start_next = float(
((aeneas_target[next_verse_index]).strip('\n')).split('\t')[0])
aeneas_start_previous=float(
((aeneas_target[previous_verse_index]).strip('\n')).split('\t')[0])
# if each_chapter==print_chapter and int(each_target)==14:
# print(each_target, aeneas_start, int(each_target) + 1, aeneas_start_next)
# if each_chapter==print_chapter:
# print('adjust_boundary1->{0},next_verse_start_time->{1},each_target_verse->{2}'.format(adjust_boundary1,aeneas_start_next,each_target))
if silence_end - silence_start >= 0.45 and silence_start <= adjust_boundary1 <= silence_end and aeneas_start_previous<adjust_boundary1 < aeneas_start_next :
#if each_chapter == print_chapter:
#print("chapter-{0},verse_num-{1},verse index-{2},line_num-{3}".format(
#each_chapter, each_target, each_index,
#float(target2_list.iloc[each_index])))
#print('hey its chapt 3', each_target, adjust_boundary1, aeneas_start_next)
adjust_boundary1 = (silence_start + silence_end) / 2
aud_start = adjust_boundary1
delta = aud_start - adjust_boundary1
# aud_duration=aud_duration-delta
break
else:
if silence_end - silence_start >= 0.45 and silence_start <= adjust_boundary1 <= silence_end:
#if each_chapter == print_chapter:
#print("chapter-{0},verse_num-{1},verse index-{2},line_num-{3}".format(
#each_chapter, each_target, each_index,
#float(target2_list.iloc[each_index])))
#print('hey its chapt 3', each_target, adjust_boundary1, aeneas_start_next)
adjust_boundary1 = (silence_start + silence_end) / 2
aud_start = adjust_boundary1
delta = aud_start - adjust_boundary1
# aud_duration=aud_duration-delta
break
s.close()
counter += 1
marker_name = 'Marker '
if ind==1:
# Write to adobe audition file
aud.write(
'Name' + '\t' + 'Start' + '\t' + 'Duration' + '\t' + 'Time Format' + '\t' + 'Type' + '\t' + 'Description' + '\n')
aud.write(marker_name + '0'+str(each_target) + '\t' + '0:' + str(round(aud_start, 3)) + '\t' + '0:' + str(
round(aud_duration, 3)) + '\t' + 'decimal' + '\t' + 'Cue' + '\t' +aud_text+'\n')
else:
if int(each_target) < 10:
marker_number = '0' + str(each_target)
else:
marker_number = str(each_target)
# print(aud_start)
aud.write(marker_name + marker_number + '\t' + '0:' + str(round(aud_start, 3)) + '\t' + '0:' + str(
round(aud_duration, 3)) + '\t' + 'decimal' + '\t' + 'Cue' + '\t' +aud_text+'\n')
#Markers have been adjusted, now adjust durations to match the markers
#Load audition verse file to update duration
#print(audition_file)
aud_df=pd.read_csv(audition_file, encoding='utf-8',sep='\t')
verses=open(audition_file).readlines()
conv_string_to_secs = [60, 1]
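# conv_string_to_secs converts an Audition "M:SS.sss" time string to seconds via a dot product,
# e.g. '0:12.345' -> 0*60 + 12.345 = 12.345 (worked example, illustrative only).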
for i,each_verse in enumerate(verses):
if 0<i<len(verses)-1:
verse_num=(each_verse.split('\t')[0])
timestr=(each_verse.split('\t')[1])
current_verse_start_time=sum([a * b for a, b in zip(conv_string_to_secs, map(float, timestr.split(':')))])
timestr = (each_verse.split('\t')[2])
current_verse_duration=sum([a * b for a, b in zip(conv_string_to_secs, map(float, timestr.split(':')))])
current_verse_end_time=current_verse_start_time+current_verse_duration
#Get | |
<reponame>Awenbocc/mvqa-system
import json
from PyQt5.QtWidgets import QApplication, QMainWindow
import sys
import mainwindow
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import qtawesome
import os
from interface import VQAsignal
class MainUi(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
self.setFixedSize(960, 700)
self.main_widget = QtWidgets.QWidget()
self.main_layout = QtWidgets.QGridLayout()
self.main_layout.setSpacing(0)
self.main_widget.setLayout(self.main_layout)
self.left_widget = QtWidgets.QWidget()
self.left_widget.setObjectName('left_widget')
self.left_layout = QtWidgets.QGridLayout()
self.left_widget.setLayout(self.left_layout)
self.left_widget.setStyleSheet('''
QWidget#left_widget{
background:gray;
border-top:1px solid white;
border-bottom:1px solid white;
border-left:1px solid white;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
}
QLabel#left_label{
color: white;
border:none;
font-size:14px;
font-weight:700;
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
}
QPushButton{border:none;color:white;}
QPushButton#left_label{
border:none;
border-bottom:1px solid white;
font-size:18px;
font-weight:700;
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
}
QPushButton#left_button:hover{border-left:4px solid red;font-weight:700;}
QRadioButton{
color: white;
}
''')
self.right_widget = QtWidgets.QWidget()
self.right_widget.setObjectName('right_widget')
self.right_widget.setStyleSheet('''
QWidget#right_widget{
color:#232C51;
background:white;
border-top:1px solid darkGray;
border-bottom:1px solid darkGray;
border-right:1px solid darkGray;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
QLabel#right_lable{
border:none;
font-size:16px;
font-weight:700;
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
}
''')
self.right_layout = QtWidgets.QGridLayout()
self.right_widget.setLayout(self.right_layout)
self.main_layout.addWidget(self.left_widget, 0, 0, 12, 2)
self.main_layout.addWidget(self.right_widget, 0, 2, 12, 10)
self.setCentralWidget(self.main_widget)
#left panel
self.left_close = QtWidgets.QPushButton("")
self.left_visit = QtWidgets.QPushButton("")
self.left_mini = QtWidgets.QPushButton("")
self.left_close.setFixedSize(15, 15)
self.left_visit.setFixedSize(15, 15)
self.left_mini.setFixedSize(15, 15)
self.left_close.clicked.connect(self.close)
self.left_close.setStyleSheet(
'''QPushButton{background:red;border-radius:5px;}QPushButton:hover{background:red;}''')
self.left_visit.setStyleSheet(
'''QPushButton{background:#f0f0f0;border-radius:5px;}QPushButton:hover{background:#f0f0f0;}''')
self.left_mini.setStyleSheet(
'''QPushButton{background:#f0f0f0;border-radius:5px;}QPushButton:hover{background:#f0f0f0;}''')
self.left_label_1 = QtWidgets.QPushButton("Med - VQA")
self.left_label_1.setObjectName('left_label')
#Med-VQA can answer questions you ask about an image
self.left_text_1 = QtWidgets.QLabel(' This system can \nanswer questions you \n ask about a \n radiology image!')
self.left_text_1.setObjectName('left_label')
self.left_label_2 = QtWidgets.QPushButton("Dataset Choice")
self.left_label_2.setObjectName('left_label')
self.radio_button1 = QtWidgets.QRadioButton("VQA-RAD (2018)")
self.radio_button1.setChecked(True)
# self.radio_button2 = QtWidgets.QRadioButton("SLAKE (Ours)")
self.radio_button1.toggled.connect(self.dataset_choice)
# self.radio_button2.toggled.connect(self.dataset_choice)
self.radio_button_group = QtWidgets.QButtonGroup(self)
self.radio_button_group.addButton(self.radio_button1)
# self.radio_button_group.addButton(self.radio_button2)
self.left_label_3 = QtWidgets.QPushButton("Model Choice")
self.left_label_3.setObjectName('left_label')
# self.radio_button3 = QtWidgets.QRadioButton("MEVF (2019)")
self.radio_button4 = QtWidgets.QRadioButton("CPRD (Ours)")
self.radio_button4.setChecked(True)
# self.radio_button3.toggled.connect(self.model_choice)
self.radio_button4.toggled.connect(self.model_choice)
self.radio_button_group2 = QtWidgets.QButtonGroup(self)
# self.radio_button_group2.addButton(self.radio_button3)
self.radio_button_group2.addButton(self.radio_button4)
self.left_team = QtWidgets.QPushButton("Team")
self.left_team.setObjectName('left_label')
#XMLab
# Copyright ©2020 All Rights Reserved The Department of Computing
# The Hong Kong Polytechnic University
self.left_team_1 = QtWidgets.QPushButton(qtawesome.icon('fa.television', color='white'),"XMLAB@Comp")
self.left_team_1.setObjectName('left_button')
self.left_team_2 = QtWidgets.QPushButton(qtawesome.icon('fa.map-marker', color='white'),"PolyU,HK")
self.left_team_2.setObjectName('left_button')
self.left_team_3 = QtWidgets.QPushButton(qtawesome.icon('fa.copyright', color='white'),"Copyright©2020")
self.left_team_3.setObjectName('left_button')
self.left_team_4 = QtWidgets.QPushButton("")
self.left_team_4.setObjectName('left_button')
self.left_team_5 = QtWidgets.QPushButton("")
self.left_team_5.setObjectName('left_button')
self.left_team_6 = QtWidgets.QPushButton("")
self.left_team_6.setObjectName('left_button')
self.left_team_7 = QtWidgets.QPushButton("")
self.left_team_7.setObjectName('left_button')
self.left_team_8 = QtWidgets.QPushButton("")
self.left_team_8.setObjectName('left_button')
self.left_question = QtWidgets.QPushButton("Feedback")
self.left_question.setObjectName('left_label')
self.left_problem_1 = QtWidgets.QPushButton(qtawesome.icon('fa.envelope', color='white'), "Email")
self.left_problem_1.setObjectName('left_button')
self.left_problem_2 = QtWidgets.QPushButton(qtawesome.icon('fa.mobile', color='white'), "Telephone")
self.left_problem_2.setObjectName('left_button')
self.left_layout.addWidget(self.left_close, 0, 0, 1, 1)
# self.left_layout.addWidget(self.left_visit, 0, 1, 1, 1)
# self.left_layout.addWidget(self.left_mini, 0, 2, 1, 1)
self.left_layout.addWidget(self.left_label_1, 1, 0, 1, 3)
self.left_layout.addWidget(self.left_text_1 , 2, 0, 1, 3)
self.left_layout.addWidget(self.left_label_2, 4, 0, 1, 3)
self.left_layout.addWidget(self.radio_button1, 5, 0, 1, 3)
# self.left_layout.addWidget(self.radio_button2, 6, 0, 1, 3)
self.left_layout.addWidget(self.left_label_3, 7, 0, 1, 3)
# self.left_layout.addWidget(self.radio_button3, 8, 0, 1, 3)
self.left_layout.addWidget(self.radio_button4, 9, 0, 1, 3)
self.left_layout.addWidget(self.left_team, 10, 0, 1, 3)
self.left_layout.addWidget(self.left_team_1, 11, 0, 1, 3)
self.left_layout.addWidget(self.left_team_2, 12, 0, 1, 3)
self.left_layout.addWidget(self.left_team_3, 13, 0, 1, 3)
self.left_layout.addWidget(self.left_question,14,0,1,3)
self.left_layout.addWidget(self.left_problem_1, 15, 0, 1, 3)
self.left_layout.addWidget(self.left_problem_2, 16, 0, 1, 3)
self.left_layout.addWidget(self.left_team_4, 17, 0, 1, 3)
self.left_layout.addWidget(self.left_team_5, 18, 0, 1, 3)
self.left_layout.addWidget(self.left_team_6, 19, 0, 1, 3)
self.left_layout.addWidget(self.left_team_7, 20, 0, 1, 3)
# right panel
self.right_gallery_label = QtWidgets.QLabel("Dataset Visual Gallery")
self.right_gallery_label.setObjectName('right_lable')
self.right_gallery_widght = QtWidgets.QWidget()
self.right_gallery_widght.setStyleSheet(
'''
QToolButton{border:none;}
QToolButton:hover{border-bottom:2px solid #F76677;}
QPushButton{border:none;}
''')
self.right_gallery_layout = QtWidgets.QGridLayout()
self.right_gallery_widght.setLayout(self.right_gallery_layout)
self.gallery_pic_1 = QtWidgets.QToolButton()
# self.gallery_pic_1.setIcon(QtGui.QIcon('./images/synpic676.jpg'))
self.gallery_pic_1.setIconSize(QtCore.QSize(100, 100))
self.gallery_pic_1.clicked.connect(lambda: self.tb_action_slot(1))
self.gallery_pic_2 = QtWidgets.QToolButton()
# self.gallery_pic_2.setIcon(QtGui.QIcon('./images/synpic676.jpg'))
self.gallery_pic_2.setIconSize(QtCore.QSize(100, 100))
self.gallery_pic_2.clicked.connect(lambda: self.tb_action_slot(2))
# self.recommend_button_2.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.gallery_pic_3 = QtWidgets.QToolButton()
# self.gallery_pic_3.setIcon(QtGui.QIcon('./images/synpic676.jpg'))
self.gallery_pic_3.setIconSize(QtCore.QSize(100, 100))
self.gallery_pic_3.clicked.connect(lambda: self.tb_action_slot(3))
# self.recommend_button_3.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.gallery_pic_4 = QtWidgets.QToolButton()
# self.gallery_pic_4.setIcon(QtGui.QIcon('./images/synpic676.jpg'))
self.gallery_pic_4.setIconSize(QtCore.QSize(100, 100))
self.gallery_pic_4.clicked.connect(lambda: self.tb_action_slot(4))
# self.recommend_button_4.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.gallery_pic_5 = QtWidgets.QToolButton()
# self.gallery_pic_5.setIcon(QtGui.QIcon('./images/synpic676.jpg'))
self.gallery_pic_5.setIconSize(QtCore.QSize(100, 100))
self.gallery_pic_5.clicked.connect(lambda: self.tb_action_slot(5))
# self.recommend_button_5.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.gallery_pic_6 = QtWidgets.QToolButton()
# self.gallery_pic_6.setIcon(QtGui.QIcon('./images/synpic676.jpg')) # set the button icon
self.gallery_pic_6.setIconSize(QtCore.QSize(100, 100)) # set the icon size
self.gallery_pic_6.clicked.connect(lambda: self.tb_action_slot(6))
# self.recommend_button_6.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) # show the button as icon above text
self.gallery_pic_7 = QtWidgets.QToolButton()
# self.gallery_pic_7.setIcon(QtGui.QIcon('./images/synpic676.jpg'))
self.gallery_pic_7.setIconSize(QtCore.QSize(100, 100))
self.gallery_pic_7.clicked.connect(lambda: self.tb_action_slot(7))
# self.recommend_button_7.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.gallery_pic_8 = QtWidgets.QToolButton()
# self.gallery_pic_8.setIcon(QtGui.QIcon('./images/synpic676.jpg'))
self.gallery_pic_8.setIconSize(QtCore.QSize(100, 100))
self.gallery_pic_8.clicked.connect(lambda: self.tb_action_slot(8))
self.gallery_pic_9 = QtWidgets.QToolButton()
# self.gallery_pic_9.setIcon(QtGui.QIcon('./images/synpic676.jpg'))
self.gallery_pic_9.setIconSize(QtCore.QSize(100, 100))
self.gallery_pic_9.clicked.connect(lambda: self.tb_action_slot(9))
# self.recommend_button_9.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.gallery_pic_10 = QtWidgets.QToolButton()
# self.gallery_pic_10.setIcon(QtGui.QIcon('./images/synpic676.jpg'))
self.gallery_pic_10.setIconSize(QtCore.QSize(100, 100))
self.gallery_pic_10.clicked.connect(lambda: self.tb_action_slot(10))
self.gallery = [self.gallery_pic_1,self.gallery_pic_2,self.gallery_pic_3,self.gallery_pic_4,
self.gallery_pic_5,self.gallery_pic_6,self.gallery_pic_7,self.gallery_pic_8,
self.gallery_pic_9,self.gallery_pic_10]
self.gallery_path = []
self.ptr = 0
# self.recommend_button_10.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.button_1 = QtWidgets.QPushButton(qtawesome.icon('fa.backward', color='#F76677'), "")
self.button_1.clicked.connect(lambda: self.bt_action_slot(1))
self.button_2 = QtWidgets.QPushButton(qtawesome.icon('fa.forward', color='#F76677'), "")
self.button_2.clicked.connect(lambda: self.bt_action_slot(2))
self.right_gallery_layout.addWidget(self.button_1,0,0,2,1)
self.button_1.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.right_gallery_layout.addWidget(self.gallery_pic_1, 0, 1)
self.right_gallery_layout.addWidget(self.gallery_pic_2, 0, 2)
self.right_gallery_layout.addWidget(self.gallery_pic_3, 0, 3)
self.right_gallery_layout.addWidget(self.gallery_pic_4, 0, 4)
self.right_gallery_layout.addWidget(self.gallery_pic_5, 0, 5)
self.right_gallery_layout.addWidget(self.gallery_pic_6, 1, 1)
self.right_gallery_layout.addWidget(self.gallery_pic_7, 1, 2)
self.right_gallery_layout.addWidget(self.gallery_pic_8, 1, 3)
self.right_gallery_layout.addWidget(self.gallery_pic_9, 1, 4)
self.right_gallery_layout.addWidget(self.gallery_pic_10, 1, 5)
self.right_gallery_layout.addWidget(self.button_2, 0, 6, 2, 1)
self.button_2.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.right_layout.addWidget(self.right_gallery_label, 0, 0, 1, 9)
self.right_layout.addWidget(self.right_gallery_widght, 1, 0, 2, 9)
#Selected Radiology Images
self.right_selected_lable = QtWidgets.QLabel("Selected Radiology Images")
self.right_selected_lable.setObjectName('right_lable')
self.right_selected_widget = QtWidgets.QWidget()
self.right_selected_widget.setStyleSheet('''
QPushButton{
border:none;
color:gray;
font-size:12px;
height:40px;
padding-left:5px;
padding-right:10px;
text-align:left;
}
QPushButton:hover{
color:black;
border:1px solid #F3F3F5;
border-radius:10px;
background:LightGray;
}
''')
self.right_selected_layout = QtWidgets.QGridLayout()
self.right_selected_widget.setLayout(self.right_selected_layout)
self.select_image = QtWidgets.QLabel("")
self.select_image_path = None
pic = QtGui.QPixmap('./images/blank.jpg')
self.select_image.setPixmap(pic)
self.select_image.setScaledContents(True) # scale the image to fit the label size
self.right_selected_layout.addWidget(self.select_image, 0, 1)
# select questions
self.right_question_lable = QtWidgets.QLabel("Input Questions")
self.right_question_lable.setObjectName('right_lable')
self.right_question_widget = QtWidgets.QWidget()
self.right_question_layout = QtWidgets.QGridLayout()
self.right_question_widget.setStyleSheet(
'''
QToolButton{border:none;}
QToolButton:hover{border-bottom:2px solid #F76677;}
QProgressBar { border: 1px solid grey; border-radius: 5px; background-color: #FFFFFF;text-align: center;}
QProgressBar::chunk { background-color: #F76677; width: 10px;}
''')
self.right_question_widget.setLayout(self.right_question_layout)
self.input_edit =QtWidgets.QComboBox()
self.input_edit.currentIndexChanged.connect(self.comboChange)
self.input_edit.currentTextChanged.connect(self.comboTextChange)
# self.input_edit.setPlaceholderText("Enter the question ")
self.input_edit.setEditable(True)
# self.input_edit.currentIndexChanged.connect(self.selectionchange)
self.submit = QtWidgets.QPushButton('Submit')
self.submit.clicked.connect(self.submit_slot)
self.question_id = 0
# self.pushButton_3.setGraphicsEffect(op)
self.answer1 = QtWidgets.QLabel('')
self.right_process_bar1 = QtWidgets.QProgressBar()
self.right_process_bar1.setVisible(False)
# self.op1 = QtWidgets.QGraphicsOpacityEffect()
# self.op1.setOpacity(0)
# self.right_process_bar1.setGraphicsEffect(self.op1)
self.answer2 = QtWidgets.QLabel('')
self.right_process_bar2 = QtWidgets.QProgressBar()
self.right_process_bar2.setValue(10)
self.right_process_bar2.setVisible(False)
# op2 = QtWidgets.QGraphicsOpacityEffect()
# op2.setOpacity(0)
# self.right_process_bar2.setGraphicsEffect(op2)
self.answer3 = QtWidgets.QLabel('')
self.right_process_bar3 = QtWidgets.QProgressBar()
self.right_process_bar3.setValue(40)
self.right_process_bar3.setVisible(False)
# op3 = QtWidgets.QGraphicsOpacityEffect()
# op3.setOpacity(0)
# self.right_process_bar3.setGraphicsEffect(op3)
self.answer4 = QtWidgets.QLabel('')
self.right_process_bar4 = QtWidgets.QProgressBar()
self.right_process_bar4.setValue(30)
self.right_process_bar4.setVisible(False)
# op4 = QtWidgets.QGraphicsOpacityEffect()
# op4.setOpacity(0)
# self.right_process_bar4.setGraphicsEffect(op4)
self.answer5 = QtWidgets.QLabel('')
self.right_process_bar5 = QtWidgets.QProgressBar()
self.right_process_bar5.setValue(20)
self.right_process_bar5.setVisible(False)
# op5 = QtWidgets.QGraphicsOpacityEffect()
# op5.setOpacity(0)
# self.right_process_bar5.setGraphicsEffect(op5)
# self.answer6 = QtWidgets.QLabel('Ground Truth:')
self.answer7 = QtWidgets.QLabel('')
# self.right_question_layout.addWidget(self.answer6, 0, 0, 1, 1)
# self.right_question_layout.addWidget(self.answer7, 0, 2, 1, 1)
self.right_question_layout.addWidget(self.input_edit,1,0,1,5)
self.right_question_layout.addWidget(self.answer7,1,5,1,2)
self.right_question_layout.addWidget(self.submit, 1, 7, 1, 1)
# self.right_question_layout.addWidget(self.answer6, 1, 0, 1, 1)
# self.right_question_layout.addWidget(self.answer7, 1, 2, 1, 5)
self.right_question_layout.addWidget(self.answer1, 2, 0, 1, 2)
self.right_question_layout.addWidget(self.right_process_bar1, 2, 2, 1, 6)
self.right_question_layout.addWidget(self.answer2, 3, 0, 1, 2)
self.right_question_layout.addWidget(self.right_process_bar2, 3, 2, 1, 6)
self.right_question_layout.addWidget(self.answer3, 4, 0, 1, 2)
self.right_question_layout.addWidget(self.right_process_bar3, 4, 2, 1, 6)
self.right_question_layout.addWidget(self.answer4, 5, 0, 1, 2)
self.right_question_layout.addWidget(self.right_process_bar4, 5, 2, 1, 6)
self.right_question_layout.addWidget(self.answer5, 6, 0, 1, 2)
self.right_question_layout.addWidget(self.right_process_bar5, 6, 2, 1, 6)
self.right_layout.addWidget(self.right_selected_lable, 4, 0, 1, 3)
self.right_layout.addWidget(self.right_selected_widget, 5, 0, 1, 3)
self.right_layout.addWidget(self.right_question_lable, 4, 3, 1, 7)
self.right_layout.addWidget(self.right_question_widget, 5, 3, 1, 7)
self.setWindowOpacity(1)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setWindowFlag(QtCore.Qt.FramelessWindowHint)
# initial images
self.combo_idx = 0
self.comboTextlabel = 0
self.img_ques_ans = {}
self.show_gallery(0)
def tb_action_slot(self,id):
if id > len(self.gallery_path):
return
# show selected img
pic = QtGui.QPixmap(self.gallery_path[id-1])
self.select_image_path = self.gallery_path[id-1]
self.select_image.setPixmap(pic)
# show the combo-box questions for the selected image
img_name = self.select_image_path.split('/')[-1]
self.input_edit.clear()
self.input_edit.addItems(self.img_ques_ans[img_name][0])
def comboChange(self,id):
self.combo_idx = id
self.comboTextlabel = 0
def set_icon(self,images):
for id,image in enumerate(images):
self.gallery[id].setIcon(QtGui.QIcon(image))
def show_gallery(self,n):
# n == 0: VQA-RAD image set, n == 1: SLAKE image set
image_set = './images/slake' if n else './images/vqa_rad'
questions = json.load(open('./vqa/data/rad/testset.json', 'r')) + json.load(open('./vqa/data/rad/testset.json', 'r'))[:400]
for i in questions:
name = i['image_name']
question = i['question']
answer = i['answer']
if name not in self.img_ques_ans:
self.img_ques_ans[name] = [[],[]]
self.img_ques_ans[name][0].append(question)
self.img_ques_ans[name][1].append(answer)
sorted(self.img_ques_ans.items(), key=lambda d: len(d[1]))
images = [os.path.join(image_set,i) for i in self.img_ques_ans.keys()]
interval = 10
self.gallery_path = images[:interval]
self.set_icon(images[:interval])
self.images = images
def dataset_choice(self):
if self.radio_button1.isChecked():
self.show_gallery(0)
# if self.radio_button2.isChecked():
# self.show_gallery(1)
def model_choice(self):
if self.radio_button4.isChecked():
print('b')
def bt_action_slot(self,n):
self.ptr += -1 if n==1 else 1
start = self.ptr*10
end = start+10 if start < len(self.images)-10 else len(self.images)
self.gallery_path = self.images[start:end]
self.set_icon(self.gallery_path)
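# Illustrative example of the paging arithmetic above: with 35 images and ptr == 3,
# start = 30, 30 < 35 - 10 is False, so end = 35 and the last page shows images[30:35].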
def comboTextChange(self):
self.comboTextlabel = 1
def submit_slot(self):
question = self.input_edit.currentText()
if len(question)==0 or question.isspace():
return
if self.select_image_path == None:
return
# set the ground-truth answer shown next to the question
name = self.select_image_path.split('/')[-1]
if self.comboTextlabel:
show = ''
else:
show = self.img_ques_ans[name][1][self.combo_idx]
if len(show)>12:
show = show[:12]+'..'
self.answer7.setText(show)
self.answer7.repaint()
# create the VQA worker thread for this question/image pair
thread = VQAsignal(self,question=question,image=self.select_image_path)
# connect the result signal so show_result renders the returned answers
thread.breakSignal.connect(self.show_result)
# start asynchronous inference
thread.start()
def show_result(self,answer):
name = list(answer.keys())
for i in range(5):
if len(name[i])>23:
name[i] = name[i][:23]+"..."
acc = list(answer.values())
self.answer1.setText(name[0])
self.right_process_bar1.setMaximum(100 * 100)
value = round(acc[0],2)*100
self.right_process_bar1.setValue(value*100)
self.right_process_bar1.setVisible(True)
self.right_process_bar1.setFormat("%.02f %%" % value)
self.answer2.setText(name[1])
self.right_process_bar2.setMaximum(100 * 100)
value = round(acc[1], 2) * 100
self.right_process_bar2.setValue(value * 100)
self.right_process_bar2.setVisible(True)
self.right_process_bar2.setFormat("%.02f %%" % value)
self.answer3.setText(name[2])
self.right_process_bar3.setMaximum(100 * 100)
value = round(acc[2], 2) * 100
self.right_process_bar3.setValue(value * 100)
self.right_process_bar3.setVisible(True)
self.right_process_bar3.setFormat("%.02f %%" % value)
self.answer4.setText(name[3])
self.right_process_bar4.setMaximum(100 * 100)
value = round(acc[3],2)*100
self.right_process_bar4.setValue(value*100)
self.right_process_bar4.setVisible(True)
self.right_process_bar4.setFormat("%.02f %%" % value)
self.answer5.setText(name[4])
self.right_process_bar5.setMaximum(100 * 100)
value = round(acc[4],2)*100
self.right_process_bar5.setValue(value*100)
self.right_process_bar5.setVisible(True)
self.right_process_bar5.setFormat("%.02f %%" % value)
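# Note (assumption, not taken from interface.VQAsignal itself): show_result expects the emitted
# `answer` to be a dict mapping the top-5 answer strings to confidence scores in [0, 1],
# e.g. {'yes': 0.82, 'no': 0.10, ...}; only the first five entries are displayed.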
class blankDialogue(QtWidgets.QDialog):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
self.setGeometry(300,260,840,420)
self.main_widget = QtWidgets.QWidget()
self.main_layout = QtWidgets.QGridLayout()
self.main_layout.setSpacing(0)
self.main_widget.setLayout(self.main_layout)
palette1 = QtGui.QPalette()
palette1.setColor(self.backgroundRole(), QtGui.QColor(255, 255, 255))
self.setPalette(palette1)
self.setWindowOpacity(1)
label | |
all_params = ['data']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_webhook" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/webhooks'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client. \
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client. \
select_header_content_type([])
# Authentication setting
auth_settings = ['Token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Webhook',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
collection_formats=collection_formats)
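# Usage sketch for the wrappers in this class (ids below are placeholders, not values from this
# file): calling a method without `callback` blocks and returns the deserialized response, while
# passing `callback=` runs the request asynchronously and returns the request thread, e.g.
#   api.destroy_applicant('<applicant_id>')                  # synchronous
#   api.destroy_applicant('<applicant_id>', callback=print)  # asynchronous, returns the thread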
def destroy_applicant(self, applicant_id, **kwargs):
"""
Delete Applicant
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.destroy_applicant(applicant_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str applicant_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.destroy_applicant_with_http_info(applicant_id, **kwargs)
else:
(data) = self.destroy_applicant_with_http_info(applicant_id, **kwargs)
return data
def destroy_applicant_with_http_info(self, applicant_id, **kwargs):
"""
Delete Applicant
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.destroy_applicant_with_http_info(applicant_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str applicant_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['applicant_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method destroy_applicant" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'applicant_id' is set
if ('applicant_id' not in params) or (params['applicant_id'] is None):
raise ValueError("Missing the required parameter `applicant_id` when calling `destroy_applicant`")
collection_formats = {}
resource_path = '/applicants/{applicant_id}'.replace('{format}', 'json')
path_params = {}
if 'applicant_id' in params:
path_params['applicant_id'] = params['applicant_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client. \
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client. \
select_header_content_type([])
# Authentication setting
auth_settings = ['Token']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
collection_formats=collection_formats)
def download_document(self, applicant_id, document_id, **kwargs):
"""
Download a document's raw data
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.download_document(applicant_id, document_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str applicant_id: (required)
:param str document_id: (required)
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.download_document_with_http_info(applicant_id, document_id, **kwargs)
else:
(data) = self.download_document_with_http_info(applicant_id, document_id, **kwargs)
return data
def download_document_with_http_info(self, applicant_id, document_id, **kwargs):
"""
Download a document's raw data
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.download_document_with_http_info(applicant_id, document_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str applicant_id: (required)
:param str document_id: (required)
:return: file
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['applicant_id', 'document_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method download_document" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'applicant_id' is set
if ('applicant_id' not in params) or (params['applicant_id'] is None):
raise ValueError("Missing the required parameter `applicant_id` when calling `download_document`")
# verify the required parameter 'document_id' is set
if ('document_id' not in params) or (params['document_id'] is None):
raise ValueError("Missing the required parameter `document_id` when calling `download_document`")
collection_formats = {}
resource_path = '/applicants/{applicant_id}/documents/{document_id}/download'.replace('{format}', 'json')
path_params = {}
if 'applicant_id' in params:
path_params['applicant_id'] = params['applicant_id']
if 'document_id' in params:
path_params['document_id'] = params['document_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client. \
select_header_accept(['image/png', 'image/jpeg', 'application/pdf'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client. \
select_header_content_type([])
# Authentication setting
auth_settings = ['Token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
collection_formats=collection_formats)
def download_live_photo(self, live_photo_id, **kwargs):
"""
Download live photo
Live photos are downloaded using this endpoint.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.download_live_photo(live_photo_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str live_photo_id: The live photo’s unique identifier. (required)
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.download_live_photo_with_http_info(live_photo_id, **kwargs)
else:
(data) = self.download_live_photo_with_http_info(live_photo_id, **kwargs)
return data
def download_live_photo_with_http_info(self, live_photo_id, **kwargs):
"""
Download live photo
Live photos are downloaded using this endpoint.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.download_live_photo_with_http_info(live_photo_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str live_photo_id: The live photo’s unique identifier. (required)
:return: file
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['live_photo_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method download_live_photo" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'live_photo_id' is set
if ('live_photo_id' not in params) or (params['live_photo_id'] is None):
raise ValueError("Missing the required parameter `live_photo_id` when calling `download_live_photo`")
collection_formats = {}
resource_path = '/live_photos/{live_photo_id}/download'.replace('{format}', 'json')
path_params = {}
if 'live_photo_id' in params:
path_params['live_photo_id'] = params['live_photo_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client. \
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client. \
select_header_content_type([])
# Authentication setting
auth_settings = ['Token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
collection_formats=collection_formats)
def download_live_video(self, live_video_id, **kwargs):
"""
Download live video
Live videos are downloaded using this endpoint.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.download_live_video(live_video_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str live_video_id: The live video’s unique identifier. (required)
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.download_live_video_with_http_info(live_video_id, **kwargs)
else:
(data) = self.download_live_video_with_http_info(live_video_id, **kwargs)
return data
def download_live_video_with_http_info(self, live_video_id, **kwargs):
"""
Download live video
Live videos are downloaded using this endpoint.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.download_live_video_with_http_info(live_video_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str live_video_id: The live video’s unique identifier. (required)
:return: file
If the method | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v3.1.3),
on June 24, 2019, at 16:21
If you publish work using this script please cite the PsychoPy publications:
<NAME> (2007) PsychoPy - Psychophysics software in Python.
Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy.
Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import absolute_import, division
from psychopy import locale_setup, sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
psychopyVersion = '3.1.3'
expName = 'stroop' # from the Builder filename that created this script
expInfo = {'session': '01', 'participant': ''}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data' + os.sep + '%s_%s' % (expInfo['participant'], expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath='C:\\Users\\lpzdb\\pavloviaDemos\\stroop\\stroop.py',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(
size=[1920, 1080], fullscr=True, screen=0,
winType='pyglet', allowGUI=False, allowStencil=False,
monitor='testMonitor', color='black', colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "instruct"
instructClock = core.Clock()
instrText = visual.TextStim(win=win, name='instrText',
text='OK. Ready for the real thing?\n\nRemember, ignore the word itself; press:\nLeft for red LETTERS\nDown for green LETTERS\nRight for blue LETTERS\n(Esc will quit)\n\nPress any key to continue',
font='Arial',
units='height', pos=[0, 0], height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "trial"
trialClock = core.Clock()
word = visual.TextStim(win=win, name='word',
text='default text',
font='Arial',
units='height', pos=[0, 0], height=0.15, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "thanks"
thanksClock = core.Clock()
thanksText = visual.TextStim(win=win, name='thanksText',
text='This is the end of the experiment.\n\nThanks!',
font='Arial',
units='height', pos=[0, 0], height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "instruct"-------
t = 0
instructClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
ready = keyboard.Keyboard()
# keep track of which components have finished
instructComponents = [instrText, ready]
for thisComponent in instructComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "instruct"-------
while continueRoutine:
# get current time
t = instructClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *instrText* updates
if t >= 0 and instrText.status == NOT_STARTED:
# keep track of start time/frame for later
instrText.tStart = t # not accounting for scr refresh
instrText.frameNStart = frameN # exact frame index
win.timeOnFlip(instrText, 'tStartRefresh') # time at next scr refresh
instrText.setAutoDraw(True)
# *ready* updates
waitOnFlip = False
if t >= 0 and ready.status == NOT_STARTED:
# keep track of start time/frame for later
ready.tStart = t # not accounting for scr refresh
ready.frameNStart = frameN # exact frame index
win.timeOnFlip(ready, 'tStartRefresh') # time at next scr refresh
ready.status = STARTED
# keyboard checking is just starting
win.callOnFlip(ready.clearEvents, eventType='keyboard') # clear events on next screen flip
if ready.status == STARTED and not waitOnFlip:
theseKeys = ready.getKeys(keyList=None, waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # at least one key was pressed
# check for quit:
if "escape" == theseKeys:
endExpNow = True
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instructComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "instruct"-------
for thisComponent in instructComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('instrText.started', instrText.tStartRefresh)
thisExp.addData('instrText.stopped', instrText.tStopRefresh)
# the Routine "instruct" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=5, method='random',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('trialTypes.xls'),
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
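# Illustrative note: if trialTypes.xls has columns 'text', 'letterColor' and
# 'corrAns', the exec above creates local variables text, letterColor and
# corrAns holding the first trial's values, so the stimulus components below
# can reference them directly.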
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
# ------Prepare to start Routine "trial"-------
t = 0
trialClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
word.setColor(letterColor, colorSpace='rgb')
word.setText(text)
resp = keyboard.Keyboard()
# keep track of which components have finished
trialComponents = [word, resp]
for thisComponent in trialComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "trial"-------
while continueRoutine:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *word* updates
if t >= 0.5 and word.status == NOT_STARTED:
# keep track of start time/frame for later
word.tStart = t # not accounting for scr refresh
word.frameNStart = frameN # exact frame index
win.timeOnFlip(word, 'tStartRefresh') # time at next scr refresh
word.setAutoDraw(True)
# *resp* updates
waitOnFlip = False
if t >= 0.5 and resp.status == NOT_STARTED:
# keep track of start time/frame for later
resp.tStart = t # not accounting for scr refresh
resp.frameNStart = frameN # exact frame index
win.timeOnFlip(resp, 'tStartRefresh') # time at next scr refresh
resp.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(resp.clock.reset) # t=0 on next screen flip
win.callOnFlip(resp.clearEvents, eventType='keyboard') # clear events on next screen flip
if resp.status == STARTED and not waitOnFlip:
theseKeys = resp.getKeys(keyList=['left', 'down', 'right'], waitRelease=False)
if len(theseKeys):
theseKeys = theseKeys[0] # at least one key was pressed
# check for quit:
if "escape" == theseKeys:
endExpNow = True
resp.keys = theseKeys.name # just the last key pressed
resp.rt = theseKeys.rt
# was this 'correct'?
if (resp.keys == str(corrAns)) or (resp.keys == corrAns):
resp.corr = 1
else:
resp.corr = 0
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
FlightLogDetailInlineFormSet = inlineformset_factory(
AircraftFlightLog, AircraftFlightLogDetail, extra=6, exclude=(
'creator', 'modifier'), can_delete=False, form=details_form)
if request.method == 'POST':
form = AircraftFlightLogForm(data=request.POST, instance=flightlog)
formset = FlightLogDetailInlineFormSet(
request.POST, request.FILES, instance=flightlog)
#formset = FlightLogDetailInlineFormSet(request.POST)
if form.is_valid():
new_aircraftflightlog = form.save(commit=False)
new_aircraftflightlog.creator = request.user
new_aircraftflightlog.modifier = request.user
# print formset
# print 'HHHHHHHHHHHH'
if formset.is_valid():
#instances = formset.save(commit=False)
# for f in formset:
# print 'Datcon' + str(f['datcon'])
return_time_last = 0
counter = 1
error = 0
for f in formset:
# print 'Datcon' + str(f['datcon'])
if error == 0:
datcon_html = str(f['datcon'])
datcon_array = datcon_html.split("\"")
if len(datcon_array) == 11:
datcon = datcon_array[7]
# print 'datcon: ' + datcon
try:
datcon_hour = int(datcon.split(".")[0])
except:
datcon = "0" + datcon
datcon_hour = int(datcon.split(".")[0])
datcon_24h = datcon_hour * 60
try:
datcon_minute = int(datcon.split(".")[1])
except:
datcon_minute = 0
datcon_min = datcon_minute * 6
total_datcon_minutes = datcon_24h + datcon_min
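# Worked example (illustrative, assuming a single decimal digit as the Datcon
# format appears to use): a datcon of "2.5" means 2 hours plus 5 tenths of an
# hour, so total_datcon_minutes = 2*60 + 5*6 = 150. A value of ".7" is first
# padded to "0.7", giving 0*60 + 7*6 = 42 minutes.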
# print 'time Out' + str(f['time_out'])
timeout_html = str(f['time_out'])
timeout_array = timeout_html.split("\"")
# if len(timeout_array) == 13:
timeout_str = timeout_array[5]
if len(timeout_str) == 4:
timeout_hh = int(timeout_str[:2])
timeout_mm = int(timeout_str[2:])
else:
timeout_hh = int(timeout_str[:1])
timeout_mm = int(timeout_str[1:])
#timeout_int = int(timeout_str)
timeout_total_minutes = (
int(timeout_hh) * 60) + int(timeout_mm)
return_time_minutes = total_datcon_minutes + timeout_total_minutes
'''
print 'datcon: ' + str(datcon)
print 'datcon in minutes: ' + str(total_datcon_minutes)
print 'time out: ' + str(timeout_str)
print 'time out in minutes: ' + str(timeout_total_minutes)
print 'return time in minutes: ' + str(return_time_minutes)
print 'return time last: ' + str(return_time_last)
'''
if return_time_last > timeout_total_minutes:
state = 'Warning (Rows ' + str(counter - 1) + ", " + str(
counter) + ') - Aircraft leaving before it has returned. See Datcon and Time Out.'
state_type = 'Warning'
error = 1
return_time_last = return_time_minutes
counter = counter + 1
# f.save()
if error == 0:
new_aircraftflightlog.save()
formset.save()
state = 'Saved'
state_type = 'OK'
formset = FlightLogDetailInlineFormSet(instance=flightlog)
form = form_master
else:
state = 'Warning - Flight Log Details are not valid.'
state_type = 'Warning'
else:
state = 'Warning - Flight Log is not valid.'
state_type = 'Warning'
# return
# render_to_response('aircraftflightlogdetailsadd.html',{"formset":
# formset,"form":form}, context_instance=RequestContext(request))
else:
#state = ''
#state_type = ''
formset = FlightLogDetailInlineFormSet(instance=flightlog)
form = form_master
#form = AircraftFlightLogForm(instance=flightlog)
return render_to_response("aircraftflightlogdetailsadd.html",
{"formset": formset,
"form": form,
"state": state,
"state_type": state_type,
'pagetitle': 'Aircraft Flight Log Details'},
context_instance=RequestContext(request))
# Duty Time
@login_required
@csrf_protect
def dutytimeadd(request):
# print '+++++++++++++++'
time_start = time.time()
state = ''
state_type = ''
pilots = Pilot.objects.filter(
effective_to__exact=None).order_by('last_name')
table = ''
# print 'pilots'
# print pilots
for pilot in pilots:
# print "Pilot Name: " + pilot.first_name
# print "Pilot ID: " + str(pilot.id)
table += '<tr>'
table += '<td>'
try:
table += '<a href="' + \
str(pilot.dutytime_set.all()[0].get_absolute_url()) + '">'
table += '<input type="image" src="/static/img/page_white_edit.png" name="edit" width="24" height="24" alt="Edit">'
table += '</a>'
except IndexError as e:
table += '<img type="image" src="/static/img/cross.png" name="edit" width="24" height="24" alt="No Duty Time Records">'
table += '</td>'
table += '<td>'
try:
table += '<a href="../' + str(pilot.id) + '/hours">'
table += '<input type="image" src="/static/img/page_white_edit.png" name="edit" width="24" height="24" alt="Edit">'
table += '</a>'
except IndexError as e:
table += '<img type="image" src="/static/img/cross.png" name="edit" width="24" height="24" alt="No Duty Time Records">'
table += '</td>'
table += '<td style="text-align:center" >'
table += '<input type="radio" name="rdio" value="' + \
str(pilot.id) + '">'
table += '</td>'
table += '<td>'
table += str(pilot.first_name)
table += '</td>'
table += '<td>'
table += pilot.last_name
table += '</td>'
table += '<td id="date_' + str(pilot.id) + '">'
# print '---------'
try:
dt_date = pilot.dutytime_set.order_by(
'-date')[0].date.strftime("%d/%m/%Y")
# print dt_date
except IndexError as e:
# print pilot.first_name + ' ' + pilot.last_name + ' has no Last
# Date.'
dt_date = ''
table += dt_date
table += '</td>'
table += '</tr>'
if request.method == 'POST':
# Validate Dates
# print '^^^^^^^^^^^^^^^^^^^^^'
# print request.POST['pilot_id']
# Check if pilot id is sent back
if request.POST['pilot_id'] != '':
pilot = Pilot.objects.get(id=int(request.POST['pilot_id']))
# print pilot
# print pilot.id
# Check if both dates have been chosen
if request.POST['date_from'] != '' and request.POST[
'date_to'] != '':
date_from = from_datetime.strptime(
request.POST['date_from'], "%d/%m/%Y")
date_to = from_datetime.strptime(
request.POST['date_to'], "%d/%m/%Y")
# print date_from
# print date_to
# Check date range is valid
if date_to >= date_from:
# Make one day
oneday = datetime.timedelta(days=1)
# While date_change is less than date_to - create day
# records
date_change = date_from
while (date_change <= date_to):
# print date_change
dt = DutyTime(date=date_change, pilot=pilot)
dt.creator = request.user
dt.modifier = request.user
dt.save()
date_change = date_change + oneday
# print date_change
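# Illustrative example: date_from = 01/03/2020 and date_to = 03/03/2020
# creates three DutyTime records (1, 2 and 3 March) because the while
# condition includes both endpoints.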
state = 'Saved'
state_type = 'OK'
return redirect(
reverse(
'dutytimeaddset_saved',
kwargs={
'id': pilot.id}))
else:
# No dates. Send user message.
state = 'Warning - Enter values for both date fields'
state_type = 'Warning'
else:
# No pilot id. Send user message.
state = "Warning - No pilot selected"
state_type = "Warning"
drForm = DateRangeForm()
time_end = time.time()
time_total = time_end - time_start
# print 'Total Time: ' +str(time_total)
return render_to_response("dutytimeadd.html",
{'pagetitle': 'Duty Times',
"drForm": drForm,
'pilots': table,
"state": state,
"state_type": state_type},
context_instance=RequestContext(request))
@login_required
@csrf_protect
def dutytimeaddset(request, id, str_date_to=None,
str_date_from=None, state='', state_type=''):
time_start = time.time()
state = ''
state_type = ''
pilot = Pilot.objects.get(pk=id)
name = pilot.first_name + ' ' + pilot.last_name
# inlineformset
DutyTimeInlineFormSet = inlineformset_factory(Pilot, DutyTime, exclude=(
'creator', 'modifier'), can_delete=False, form=DutyTimeForm, extra=0)
#dt_formset = formset_factory(DutyTimeForm, extra=2)
# Do this if something submitted
if request.method == "POST":
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "enter post: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# if duty times are saved do this
if request.POST['type'] == 'Save':
# print request.POST
formset = DutyTimeInlineFormSet(
request.POST, request.FILES, instance=pilot)
#formset = DutyTimeInlineFormSet(request.POST, request.FILES)
# print formset
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "after formset get: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
if len(formset) == 0:
date_to = pilot.dutytime_set.order_by('-date')[0].date
# get date using last entered date - 14 days
date_from = date_to - timedelta(days=13)
# Create formset
sort = 'A'
state = 'Warning - No Records Submitted To Save'
state_type = 'Warning'
else:
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "Before Date Range: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
formsetstring = str(formset)
formsetdates = re.findall(r"\d{2}/\d{2}/\d{4}", formsetstring)
date_from = from_datetime.strptime("01/01/2050", "%d/%m/%Y")
date_to = from_datetime.strptime("01/01/1900", "%d/%m/%Y")
for formdate in formsetdates:
thedate = from_datetime.strptime(formdate, "%d/%m/%Y")
if thedate > date_to:
date_to = thedate
if thedate < date_from:
date_from = thedate
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "After Date Range: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
try:
if from_datetime.strptime(
formsetdates[0], "%d/%m/%Y") > from_datetime.strptime(formsetdates[1], "%d/%m/%Y"):
sort = 'D'
else:
sort = 'A'
except:
sort = 'A'
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "After Order Calc: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
if formset.is_valid():
error = 0
counter = 0
for f in formset:
counter = counter + 1
ontime = str(f['datetime_on_first'])
offtime = str(f['datetime_off_first'])
thedate = str(f['date'])
# print thedate
day = thedate.split("\"")[3]
ontime_arr = ontime.split("\"")
offtime_arr = offtime.split("\"")
if len(ontime_arr) == 11 and len(offtime_arr) == 11:
ontime = int(ontime_arr[7])
offtime = int(offtime_arr[7])
if ontime >= offtime:
state = 'Warning - Duty Time is not valid (' + \
day + '). Time On must be less than Time Off'
state_type = 'Warning'
error = 1
elif len(ontime_arr) == 11 and len(offtime_arr) == 9:
state = 'Warning - Duty Time is not valid (' + \
day + '). Missing Time Off value.'
state_type = 'Warning'
error = 1
elif len(ontime_arr) == 9 and len(offtime_arr) == 11:
state = 'Warning - Duty Time is not valid (' + \
day + '). Missing Time On value.'
state_type = 'Warning'
error = 1
# print "Counter (rows): " + str(counter)
if error == 0:
formset.save()
state = 'Saved'
state_type = 'OK'
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "After Formset Saved: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
else:
state = 'Warning - Duty Time is not valid.'
state_type = 'Warning'
# if date filter submitted do this
import numpy as np
import scipy.linalg as la
from matplotlib import pyplot as plt
from scipy import sparse as sp
from time import time
import scipy.sparse.linalg as spla
from math import sqrt
import streaming_subclass as stsb
#####################################################################
# Obtain the necessary data and data values
def get_bagX(filename, Acc=True):
'''
Reads in bag of words data and return it as a sparse csr matrix
Inputs:
--------------------------------------------------------------------
filename: str, name of the file containing the bag of words data
Outputs:
-------------------------------------------------------------------
n: int, the number of samples in the dataset (in this case, the number of
documents)
d: int, the number of features in the dataset (in this case, the number of
words)
nnz: int, the number of nonzero values in the dataset
density: float between 0 and 1, the density of the dataset (indicates
sparsity)
SparseX: sparse nxd csr matrix where each row is a document and each column
is a word
norm2: optional output (returns if Acc=True). The frobenius norm squared of
the dataset. Note that if you want to compute the explained variance for
your streaming PCA algorithm, you need the squared frobenius norm of the
dataset.
'''
DWN = np.genfromtxt(filename, max_rows=3)
# D is the number of samples (n), W is the number of words (d) and N
# is the number of nonzero values
n, d, nnz = int(DWN[0]), int(DWN[1]), int(DWN[2])
density = nnz / (n * d)
Data = np.loadtxt(filename, skiprows=3, dtype=int)
SparseX = sp.csr_matrix((Data[:,2], (Data[:,0]-1, Data[:,1]-1)))
if Acc:
norm2 = spla.norm(SparseX, ord='fro')**2
return n, d, nnz, density, SparseX, norm2
else:
return n, d, nnz, density, SparseX
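# --- Illustrative usage sketch. The filename follows the UCI bag-of-words
# --- naming convention and is an assumption; substitute a local file.
def _demo_get_bagX(filename='docword.nips.txt'):
    n, d, nnz, density, SparseX, norm2 = get_bagX(filename, Acc=True)
    print('%d documents, %d words, %d nonzeros (density %.6f)' % (n, d, nnz, density))
    # norm2 is the squared Frobenius norm, needed later for explained variance.
    return SparseX, norm2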
def get_bagXblocks(filename, B, Acc=True, block_total=1000):
'''
Reads in bag of words data and returns the properties as well as a list of
sparse blocks
Inputs:
--------------------------------------------------------------------
filename: str, name of the file containing the bag of words data
B: int, the number of rows in each block
Acc: optional bool, indicates whether or not the accuracy will be measured
for this dataset. If True, returns the norm of the dataset as well.
Outputs:
-------------------------------------------------------------------
n: int, the number of samples in the dataset (in this case, the number of
documents)
d: int, the number of features in the dataset (in this case, the number of
words)
nnz: int, the number of nonzero values in the dataset
density: float between 0 and 1, the density of the dataset (indicates
sparsity)
Xblocks: list of sparse B x d csr matrices, where each row is a document and
each column is a word; stacked in order they make up the dataset
norm2: optional output (returns if Acc=True). The frobenius norm squared of
the dataset
'''
Xblocks=[]
with open(filename, 'r') as f:
n = int(f.readline())
d = int(f.readline())
nnz = int(f.readline())
density = nnz / (n*d)
blocknum = 1
row, col, data = [], [], []
for i in range(nnz):
entry = list(map(int, f.readline().split()))
# if the row num (with zero based indexing)
# is in the current block
if entry[0] - 1 < blocknum * B:
# note bag of words uses 1 based indexing
row.append((entry[0]-1) % B)
col.append(entry[1]-1)
data.append(entry[2])
else:
Xi = sp.csr_matrix((data, (row,col)), shape=(B,d))
Xblocks.append(Xi)
blocknum += 1
if blocknum > block_total:
if Acc:
norm2 = 0
for X in Xblocks:
norm2 += spla.norm(X, ord='fro')**2
return n, d, nnz, density, Xblocks, norm2
else:
return n, d, nnz, density, Xblocks
# Start the new block in the row, col, and data entries.
row, col, data = [(entry[0] - 1) % B], [entry[1] - 1], [entry[2]]
Xi = sp.csr_matrix((data, (row, col)), shape=(B,d))
Xblocks.append(Xi)
if Acc:
norm2 = 0
for X in Xblocks:
norm2 += spla.norm(X, ord='fro')**2
return n, d, nnz, density, Xblocks, norm2
else:
return n, d, nnz, density, Xblocks
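# --- Illustrative usage sketch: stream the file in blocks of B rows and check
# --- that the accumulated squared norm matches the value returned with Acc=True.
# --- The filename is an assumption.
def _demo_get_bagXblocks(filename='docword.nips.txt', B=100):
    n, d, nnz, density, Xblocks, norm2 = get_bagXblocks(filename, B, Acc=True)
    print('%d blocks of up to %d rows each, d = %d' % (len(Xblocks), B, d))
    running_norm2 = sum(spla.norm(X, ord='fro')**2 for X in Xblocks)
    # running_norm2 should equal norm2 up to floating point error.
    return Xblocks, norm2, running_norm2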
#########################################################################################
# Run the dataset simultaneously for multiple algorithms
# Currently: Oja with learning rates c/t and c/sqrt(t), AdaOja, and HPCA
def run_sim_bag(filename, k, methods=['AdaOja', 'HPCA', 'SPM'], tol=.005, b0=1e-5, p=None, B=10, m=1, gamma=.9, beta_1 = 0.9, beta_2 = 0.999, delta=1e-8, eta=1e-3, Sparse=True, Acc=True, X=None, xnorm2=None, num_acc=100, Time=True, bias_correction=False, b0_dim=1):
'''
This runs several streaming PCA algorithms simultaneously on bag of words
data
Inputs:
----------------------------------------------------------------------------
filename: The name of the file containing the bag-of-words data
k: int, the number of top eigenvectors to compute using the streaming PCA
algorithms
b0: optional float > 0, default 1e-5. The initial "guess" for the learning
rate parameter for adagrad
p: optional int, default None (which initializes to k). p >= k, the number
of vectors used in the SPM method.
B: optional int, the batch size for the streaming methods. Default 10.
m: optional int > 0, default 1. The number of convergence iterations per
block for HPCA
Sparse: optional Bool, default True. Indicates whether the samples are
added in as sparse or dense arrays.
Acc: optional Bool, default True. Indicates whether the accuracy, here the
explained variance, is computed at each block step.
X: Nonetype, nxd array, or list of Bval x d blocks Xi s.t. Xi make up the
rows of X (note the last block in X may not be of length Bval, but all
other blocks are assumed to have the same number of rows). X must be
provided if Acc=True.
xnorm2: optional float, the squared frobenius norm of X.
num_acc: optional number of accuracy readings to take out of all possible
block samples. num_acc <= int(n/B).
Time: optional Bool, default True. Indicates whether or not to time the
implementation.
Outputs:
----------------------------------------------------------------------------
spca_objects: list of the streaming PCA objects requested in `methods`
(e.g. AdaOja, HPCA, SPM), each updated with every block of the dataset,
in the order the method names appear in `methods`.
'''
with open(filename, 'r') as f:
n = int(f.readline())
d = int(f.readline())
nnz = int(f.readline())
spca_objects = []
# Initialize the streaming objects
if 'AdaOja' in methods:
adaoja = stsb.AdaOja(d, k, b0=b0, B=B, Sparse=Sparse, Acc=Acc, xnorm2=xnorm2, X=X, num_acc=num_acc, Time=Time, b0_dim=b0_dim)
spca_objects.append(adaoja)
if 'HPCA' in methods:
hpca = stsb.HPCA(d, k, B=B, m=m, Sparse=Sparse, Acc=Acc, xnorm2=xnorm2, X=X, num_acc=num_acc, Time=Time)
spca_objects.append(hpca)
if 'SPM' in methods:
spm = stsb.SPM(d, k, p=p, B=B, Sparse=Sparse, Acc=Acc, X=X, xnorm2=xnorm2, num_acc=num_acc, Time=Time)
spca_objects.append(spm)
if 'RMSProp' in methods:
rmsp = stsb.RMSProp(d, k, gamma=gamma, b0=b0, eta=eta, B=B, Sparse=Sparse, Acc=Acc, X=X, xnorm2=xnorm2, num_acc=num_acc, Time=Time, b0_dim=b0_dim)
spca_objects.append(rmsp)
if 'ADAM' in methods:
adam = stsb.ADAM(d, k, beta_1 = beta_1, beta_2 = beta_2, delta=1e-8, eta=eta, B=B, Sparse=Sparse, Acc=Acc, X=X, xnorm2=xnorm2, num_acc=num_acc, Time=Time, bias_correction=bias_correction, b0_dim=b0_dim)
spca_objects.append(adam)
if 'WindOja' in methods:
woja = stsb.WindOja(d, k, b0=b0, B=B, Sparse=Sparse, Acc=Acc, xnorm2=xnorm2, X=X, num_acc=num_acc, Time=Time, b0_dim = b0_dim, tol=tol)
spca_objects.append(woja)
blocknum = 1
row, col, data = [], [], []
for i in range(nnz):
entry = list(map(int, f.readline().split()))
# if the row num (with zero based indexing)
# is in the current block
if entry[0] - 1 < blocknum * B:
# note bag of words uses 1 based indexing
row.append((entry[0]-1) % B)
col.append(entry[1]-1)
data.append(entry[2])
else:
# Add the current block to the model
if Sparse:
Xi = sp.csr_matrix((data, (row, col)), shape=(B,d))
else:
Xi = np.zeros((B, d))
Xi[row, col] = data
for spca in spca_objects:
spca.add_block(Xi)
# Increase the block number
blocknum += 1
# Start the new block in the row, col, and data entries.
row, col, data = [(entry[0] - 1) % B], [entry[1] - 1], [entry[2]]
# Insert final block
if Sparse:
Xi = sp.csr_matrix((data, (row, col)), shape=(max(row) + 1,d))
else:
Xi = np.zeros((max(row) + 1, d))
Xi[row,col] = data
for spca in spca_objects:
spca.add_block(Xi, final_sample=True)
return spca_objects
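# --- Illustrative usage sketch (filename, k and B are assumptions). Acc=False is
# --- used here because computing explained variance requires passing X and xnorm2.
def _demo_run_sim_bag(filename='docword.nips.txt', k=10, B=100):
    # Run AdaOja and HPCA side by side on the same stream of sparse blocks.
    adaoja, hpca = run_sim_bag(filename, k, methods=['AdaOja', 'HPCA'], B=B,
                               Acc=False, Time=True)
    return adaoja, hpca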
def run_sim_fullX(X, k, methods=['AdaOja', 'HPCA', 'SPM'], tol=.005, b0=1e-5, gamma=.9, beta_1 = 0.9, beta_2 = 0.999, eta=1e-3, delta=1e-8, p=None, B=10, m=1, Sparse=True, Acc=True, xnorm2=None, num_acc=100, Time=True, num_samples=None, bias_correction=False, b0_dim=1):
'''
This runs several streaming PCA algorithms simultaneously on data that is
provided in array X
'''
n, d = X.shape
if num_samples is not None:
num_acc = int(n / num_samples * num_acc)
nblock = int(num_samples / B)
endBsize = num_samples - nblock * B
else:
nblock = int(n / B)
endBsize = n - nblock * B
spca_objects = []
# Initialize the streaming objects
if 'AdaOja' in methods:
adaoja = stsb.AdaOja(d, k, b0=b0, B=B, Sparse=Sparse, Acc=Acc, xnorm2=xnorm2, X=X, num_acc=num_acc, Time=Time, b0_dim=b0_dim)
spca_objects.append(adaoja)
if 'HPCA' in methods:
hpca = stsb.HPCA(d, k, B=B, m=m, Sparse=Sparse, Acc=Acc, xnorm2=xnorm2, X=X, num_acc=num_acc, Time=Time)
spca_objects.append(hpca)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: MenuItem) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class WindowAutomationPeer(FrameworkElementAutomationPeer):
"""
Exposes System.Windows.Window types to UI Automation.
WindowAutomationPeer(owner: Window)
"""
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: Window) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class NavigationWindowAutomationPeer(WindowAutomationPeer):
"""
Exposes System.Windows.Navigation.NavigationWindow types to UI Automation.
NavigationWindowAutomationPeer(owner: NavigationWindow)
"""
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: NavigationWindow) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class TextAutomationPeer(FrameworkElementAutomationPeer):
""" Exposes System.Windows.Automation.TextPattern types to UI Automation. """
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, owner: FrameworkElement) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class PasswordBoxAutomationPeer(TextAutomationPeer, IValueProvider):
"""
Exposes System.Windows.Controls.PasswordBox types to UI Automation.
PasswordBoxAutomationPeer(owner: PasswordBox)
"""
def GetAcceleratorKeyCore(self, *args): #cannot find CLR method
"""
GetAcceleratorKeyCore(self: UIElementAutomationPeer) -> str
Gets the accelerator key for the System.Windows.UIElement that is associated
with this System.Windows.Automation.Peers.UIElementAutomationPeer. This method
is called by System.Windows.Automation.Peers.AutomationPeer.GetAcceleratorKey.
Returns: The System.Windows.Automation.AutomationProperties.AcceleratorKey that is
returned by
System.Windows.Automation.AutomationProperties.GetAcceleratorKey(System.Windows.
DependencyObject).
"""
pass
def GetAccessKeyCore(self, *args): #cannot find CLR method
"""
GetAccessKeyCore(self: UIElementAutomationPeer) -> str
Gets the access key for the System.Windows.UIElement that is associated with
this System.Windows.Automation.Peers.UIElementAutomationPeer.This method is
called by System.Windows.Automation.Peers.AutomationPeer.GetAccessKey.
Returns: The access key for the System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer.
"""
pass
def GetAutomationControlTypeCore(self, *args): #cannot find CLR method
"""
GetAutomationControlTypeCore(self: PasswordBoxAutomationPeer) -> AutomationControlType
Gets the control type for the System.Windows.Controls.PasswordBox that is
associated with this System.Windows.Automation.Peers.PasswordBoxAutomationPeer.
This method is called by
System.Windows.Automation.Peers.AutomationPeer.GetAutomationControlType.
Returns: The System.Windows.Automation.Peers.AutomationControlType.Edit enumeration
value.
"""
pass
def GetAutomationIdCore(self, *args): #cannot find CLR method
"""
GetAutomationIdCore(self: FrameworkElementAutomationPeer) -> str
Gets the string that uniquely identifies the System.Windows.FrameworkElement
that is associated with this
System.Windows.Automation.Peers.FrameworkElementAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetAutomationId.
Returns: The automation identifier for the element associated with the
System.Windows.Automation.Peers.FrameworkElementAutomationPeer, or
System.String.Empty if there isn't an automation identifier.
"""
pass
def GetBoundingRectangleCore(self, *args): #cannot find CLR method
"""
GetBoundingRectangleCore(self: UIElementAutomationPeer) -> Rect
Gets the System.Windows.Rect that represents the bounding rectangle of the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetBoundingRectangle.
Returns: The System.Windows.Rect that contains the coordinates of the element.
Optionally, if the element is not both a System.Windows.Interop.HwndSource and
a System.Windows.PresentationSource, this method returns
System.Windows.Rect.Empty.
"""
pass
def GetChildrenCore(self, *args): #cannot find CLR method
"""
GetChildrenCore(self: UIElementAutomationPeer) -> List[AutomationPeer]
Gets the collection of child elements of the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer.
This method is called by
System.Windows.Automation.Peers.AutomationPeer.GetChildren.
Returns: A list of child System.Windows.Automation.Peers.AutomationPeer elements.
"""
pass
def GetClassNameCore(self, *args): #cannot find CLR method
"""
GetClassNameCore(self: PasswordBoxAutomationPeer) -> str
Gets the name of the System.Windows.Controls.PasswordBox that is associated
with this System.Windows.Automation.Peers.PasswordBoxAutomationPeer. This
method is called by
System.Windows.Automation.Peers.AutomationPeer.GetClassName.
Returns: A string that contains "PasswordBox".
"""
pass
def GetClickablePointCore(self, *args): #cannot find CLR method
"""
GetClickablePointCore(self: UIElementAutomationPeer) -> Point
Gets a System.Windows.Point that represents the clickable space that is on the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetClickablePoint.
Returns: The System.Windows.Point on the element that allows a click. The point values
are (System.Double.NaN, System.Double.NaN) if the element is not both a
System.Windows.Interop.HwndSource and a System.Windows.PresentationSource.
"""
pass
def GetHelpTextCore(self, *args): #cannot find CLR method
"""
GetHelpTextCore(self: FrameworkElementAutomationPeer) -> str
Gets the string that describes the functionality of the
System.Windows.ContentElement that is associated with this
System.Windows.Automation.Peers.ContentElementAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetHelpText.
Returns: The help text, usually from the System.Windows.Controls.ToolTip, or
System.String.Empty if there is no help text.
"""
pass
def GetHostRawElementProviderCore(self, *args): #cannot find CLR method
"""
GetHostRawElementProviderCore(self: AutomationPeer) -> HostedWindowWrapper
Tells UI Automation where in the UI Automation tree to place the hwnd being
hosted by a Windows Presentation Foundation (WPF) element.
Returns: This method returns the hosted hwnd to UI Automation for controls that host
hwnd objects.
"""
pass
def GetItemStatusCore(self, *args): #cannot find CLR method
"""
GetItemStatusCore(self: UIElementAutomationPeer) -> str
Gets a string that communicates the visual status of the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetItemStatus.
Returns: The string that contains the
System.Windows.Automation.AutomationProperties.ItemStatus that is returned by
System.Windows.Automation.AutomationProperties.GetItemStatus(System.Windows.Depe
ndencyObject).
"""
pass
def GetItemTypeCore(self, *args): #cannot find CLR method
"""
GetItemTypeCore(self: UIElementAutomationPeer) -> str
Gets a human-readable string that contains the item type that the
System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer represents. This method
is called by System.Windows.Automation.Peers.AutomationPeer.GetItemType.
Returns: The string that contains the
System.Windows.Automation.AutomationProperties.ItemType that is returned by
System.Windows.Automation.AutomationProperties.GetItemType(System.Windows.Depend
encyObject).
"""
pass
def GetLabeledByCore(self, *args): #cannot find CLR method
"""
GetLabeledByCore(self: UIElementAutomationPeer) -> AutomationPeer
Gets the System.Windows.Automation.Peers.AutomationPeer for the element that is
targeted to the System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetLabeledBy.
Returns: The System.Windows.Automation.Peers.AutomationPeer for the element that is
targeted to the System.Windows.UIElement for this
System.Windows.Automation.Peers.UIElementAutomationPeer.
"""
pass
def GetLocalizedControlTypeCore(self, *args): #cannot find CLR method
"""
GetLocalizedControlTypeCore(self: AutomationPeer) -> str
When overridden in a derived class, is called by
System.Windows.Automation.Peers.AutomationPeer.GetLocalizedControlType.
Returns: The type of the control.
"""
pass
def GetNameCore(self, *args): #cannot find CLR method
"""
GetNameCore(self: TextAutomationPeer) -> str
Gets the text label of the element that is associated with this
System.Windows.Automation.Peers.TextAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetName.
Returns: The value of System.Windows.Automation.AutomationProperties.Name or
System.Windows.Automation.AutomationProperties.LabeledBy if either is set;
otherwise this method returns an empty string.
"""
pass
def GetOrientationCore(self, *args): #cannot find CLR method
"""
GetOrientationCore(self: UIElementAutomationPeer) -> AutomationOrientation
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
laid out in a specific direction. This method is called by
System.Windows.Automation.Peers.AutomationPeer.GetOrientation.
Returns: The System.Windows.Automation.Peers.AutomationOrientation.None enumeration
value.
"""
pass
def GetPattern(self, patternInterface):
"""
GetPattern(self: PasswordBoxAutomationPeer, patternInterface: PatternInterface) -> object
Gets the control pattern for the System.Windows.Controls.PasswordBox that is
associated with this System.Windows.Automation.Peers.PasswordBoxAutomationPeer.
patternInterface: A value in the enumeration.
Returns: The System.Windows.Automation.Peers.PatternInterface.Value enumeration value.
"""
pass
def GetPeerFromPointCore(self, *args): #cannot find CLR method
""" GetPeerFromPointCore(self: AutomationPeer, point: Point) -> AutomationPeer """
pass
def HasKeyboardFocusCore(self, *args): #cannot find CLR method
"""
HasKeyboardFocusCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
currently has keyboard input focus. This method is called by
System.Windows.Automation.Peers.AutomationPeer.HasKeyboardFocus.
Returns: true if the element has keyboard input focus; otherwise, false.
"""
pass
def IsContentElementCore(self, *args): #cannot find CLR method
"""
IsContentElementCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
an element that contains data that is presented to the user. This method is
called by System.Windows.Automation.Peers.AutomationPeer.IsContentElement.
# coding=utf-8
# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Meena model code, partially rewritten from the HuggingFace Bart model implementation.
Original code: https://github.com/huggingface/transformers/blob/master/src/transformers/modeling_bart.py
"""
import warnings
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput
from transformers.modeling_utils import PreTrainedModel
from .configuration_meena import MeenaConfig
def gelu(inputs: torch.Tensor):
cdf = 0.5 * (1.0 + torch.tanh((np.sqrt(2 / np.pi) * (inputs + 0.044715 * torch.pow(inputs, 3)))))
return inputs * cdf
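# The function above is the tanh approximation of GELU:
#   GELU(x) ~ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
# which closely matches the exact form x * Phi(x), where Phi is the standard
# normal CDF.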
def invert_mask(attention_mask):
"""Turns 1->0, 0->1, False->True, True-> False"""
assert attention_mask.dim() == 2
return attention_mask.eq(0)
def _prepare_meena_decoder_inputs(
config,
input_ids,
decoder_input_ids=None,
decoder_padding_mask=None,
causal_mask_dtype=torch.float32,
):
"""Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if
none are provided. This mimics the default behavior in fairseq. To override it pass in masks.
Note: this is not called during generation
"""
pad_token_id = config.pad_token_id
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)
bsz, tgt_len = decoder_input_ids.size()
if decoder_padding_mask is None:
decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)
else:
decoder_padding_mask = invert_mask(decoder_padding_mask)
if decoder_padding_mask is not None and decoder_padding_mask.shape[1] > 1:
# never mask leading token, even if it is pad
decoder_padding_mask[:, 0] = decoder_padding_mask[:, 1]
tmp = fill_with_neg_inf(torch.zeros(tgt_len, tgt_len))
mask = torch.arange(tmp.size(-1))
tmp.masked_fill_(mask < (mask + 1).view(tmp.size(-1), 1), 0)
causal_mask = tmp.to(dtype=causal_mask_dtype, device=decoder_input_ids.device)
return decoder_input_ids, decoder_padding_mask, causal_mask
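# Illustrative example of the causal mask built above for tgt_len = 3
# (zeros on and below the diagonal, -inf above it):
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]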
class PretrainedMeenaModel(PreTrainedModel):
config_class = MeenaConfig
base_model_prefix = "model"
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
def _make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data
return lin_layer
def shift_tokens_right(input_ids, pad_token_id):
"""Shift input ids one token to the right, and wrap the last non pad token (usually <eos>)."""
prev_output_tokens = input_ids.clone()
index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
prev_output_tokens[:, 1:] = input_ids[:, :-1]
return prev_output_tokens
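# Illustrative example: with pad_token_id = 0 and
#   input_ids          = [[5, 6, 7, 2, 0]]   (2 = <eos>, 0 = <pad>; an assumption)
# the last non-pad token (the <eos> at index 3) is wrapped to the front:
#   prev_output_tokens = [[2, 5, 6, 7, 2]]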
def make_padding_mask(input_ids, padding_idx=0):
"""True for pad tokens"""
padding_mask = input_ids.eq(padding_idx)
if not padding_mask.any():
padding_mask = None
return padding_mask
# Helper Modules
class EncoderLayer(nn.Module):
def __init__(self, config: MeenaConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = Attention(
self.embed_dim,
config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = gelu
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def forward(self, x, encoder_padding_mask, output_attentions=False):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
for t_tgt, t_src is excluded (or masked out), =0 means it is
included in attention
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
x, attn_weights = self.self_attn(
query=x,
key=x,
key_padding_mask=encoder_padding_mask,
output_attentions=output_attentions,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.fc2(self.activation_fn(self.fc1(x)))
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.final_layer_norm(x)
return x, attn_weights
class MeenaEncoder(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer
is a :class:`EncoderLayer`.
Args:
config: MeenaConfig
"""
def __init__(
self,
config: MeenaConfig,
embed_tokens: nn.Embedding,
embed_positions: nn.Embedding,
):
super().__init__()
self.dropout = config.dropout
self.embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.embed_tokens = embed_tokens
self.embed_positions = embed_positions
self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)])
def forward(
self,
input_ids,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=False,
):
"""
Args:
input_ids (LongTensor): tokens in the source language of shape
`(batch, src_len)`
attention_mask (torch.LongTensor): indicating which indices are padding tokens.
Returns:
BaseModelOutput or Tuple comprised of:
- **x** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_states** (tuple(torch.FloatTensor)): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *output_hidden_states:* is True.
- **all_attentions** (tuple(torch.FloatTensor)): Attention weights for each layer.
During training might not be of length n_layers because of layer dropout.
"""
# check attention mask and invert
if attention_mask is not None:
attention_mask = invert_mask(attention_mask)
bsz, seq_len = input_ids.shape[:2]
inputs_embeds = self.embed_tokens(input_ids)
inputs_embeds = inputs_embeds * (self.embed_dim ** 0.5)
positions = torch.arange(seq_len, dtype=torch.long, device=input_ids.device)
embed_pos = self.embed_positions(positions)
x = inputs_embeds + embed_pos
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = [] if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states.append(x)
x, attn = encoder_layer(x, attention_mask, output_attentions=output_attentions)
if output_attentions:
all_attentions = all_attentions + (attn,)
if output_hidden_states:
encoder_states.append(x)
# T x B x C -> B x T x C
encoder_states = tuple(hidden_state.transpose(0, 1) for hidden_state in encoder_states)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if not return_dict:
return tuple(v for v in [x, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)
class DecoderLayer(nn.Module):
def __init__(self, config: MeenaConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = Attention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
)
self.dropout = config.dropout
self.activation_fn = gelu
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.encoder_attn = Attention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
encoder_decoder_attention=True,
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def forward(
self,
x,
encoder_hidden_states,
encoder_attn_mask=None,
layer_state=None,
causal_mask=None,
decoder_padding_mask=None,
output_attentions=False,
):
residual = x
if layer_state is None:
layer_state = {}
# Self Attention
x, self_attn_weights = self.self_attn(
query=x,
key=x,
layer_state=layer_state, # adds keys to layer state
key_padding_mask=decoder_padding_mask,
attn_mask=causal_mask,
output_attentions=output_attentions,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.self_attn_layer_norm(x)
# Cross attention
residual = x
assert self.encoder_attn.cache_key != self.self_attn.cache_key
x, _ = self.encoder_attn(
query=x,
key=encoder_hidden_states,
key_padding_mask=encoder_attn_mask,
layer_state=layer_state, # mutates layer state
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.encoder_attn_layer_norm(x)
# Fully Connected
residual = x
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.final_layer_norm(x)
return (
x,
self_attn_weights,
layer_state,
) # just self_attn weights for now, following t5, layer_state = cache for decoding
class MeenaDecoder(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer
is a :class:`DecoderLayer`.
Args:
config: MeenaConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(
self,
config: MeenaConfig,
embed_tokens: nn.Embedding,
embed_positions: nn.Embedding,
):
super().__init__()
self.dropout = config.dropout
self.embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.position_start_idx = config.max_encoder_length
self.embed_tokens = embed_tokens
self.embed_positions = embed_positions
self.layers = nn.ModuleList(
[DecoderLayer(config) for _ in range(config.decoder_layers)]
) # type: List[DecoderLayer]
def forward(
self,
input_ids,
encoder_hidden_states,
encoder_padding_mask,
decoder_padding_mask,
decoder_causal_mask,
past_key_values=None,
use_cache=False,
output_attentions=False,
output_hidden_states=False,
return_dict=False,
**unused,
):
"""
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
input_ids (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_hidden_states: output from the encoder, used for
encoder-side attention
encoder_padding_mask: for ignoring pad tokens
past_key_values (dict or None): dictionary used for storing state during generation
Returns:
BaseModelOutputWithPast or tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- the cache
- hidden states
- attentions
"""
if "decoder_cached_states" in unused:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = unused.pop("decoder_cached_states")
if "decoder_past_key_values" in unused:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = unused.pop("decoder_past_key_values")
# check attention mask and invert
if encoder_padding_mask is not None:
encoder_padding_mask = invert_mask(encoder_padding_mask)
_,
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l2 = Line2D([end-x_extent,end-x_extent],[-y_extent,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l2_top = Line2D([end,end-x_extent],[y_extent,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l2_bottom = Line2D([end,end-x_extent],[-y_extent,-y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(l1)
ax.add_line(l1_top)
ax.add_line(l1_bottom)
ax.add_line(l2)
ax.add_line(l2_top)
ax.add_line(l2_bottom)
if opts != None and 'label' in list(opts.keys()):
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
return final_start, final_end
def sbol_primer_binding_site (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL primer binding site renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
y_extent = 2.0
y_offset = 1.5
x_extent = 8.0
arrowhead_length = 2.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'y_offset' in list(opts.keys()):
y_offset = opts['y_offset']
if 'arrowhead_length' in list(opts.keys()):
arrowhead_length = opts['arrowhead_length']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
direction = 'F'
if start > end:
direction = 'R'
temp_end = end
end = start
start = temp_end
final_end = prev_end
final_start = prev_end
if direction == 'F':
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
else:
final_start = prev_end
end = prev_end+end_pad
start = end+x_extent
final_start = start+start_pad
if direction == 'F':
verts = [(start, y_offset), (end, y_offset), (end-arrowhead_length, y_offset+y_extent)]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO]
path = Path(verts, codes)
patch = PathPatch(path, lw=linewidth, edgecolor=color, facecolor=(1,1,1), zorder=1+zorder_add)
ax.add_patch(patch)
else:
verts = [(start, -y_offset), (end, -y_offset), (end+arrowhead_length, -y_offset-y_extent)]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO]
path = Path(verts, codes)
patch = PathPatch(path, lw=linewidth, edgecolor=color, facecolor=(1,1,1), zorder=1+zorder_add)
ax.add_patch(patch)
if opts != None and 'label' in list(opts.keys()):
if start > end:
write_label(ax, opts['label'], end+((start-end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], start+((end-start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
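# Illustrative sketch (an assumption, not part of the original file): a renderer
# such as sbol_primer_binding_site() is normally driven by dnaplotlib's
# DNARenderer, which passes a per-part options dict; any key present in opts
# overrides the defaults set at the top of the function, e.g.
#
#   opts = {'color': (0.0, 0.0, 0.5), 'label': 'primer_1',
#           'x_extent': 10.0, 'y_offset': 2.0}
#   new_start, new_end = sbol_primer_binding_site(ax, 'PrimerBindingSite', 1,
#                                                 0, 30, 0, 1.0, 1.0, opts)
#
# The part type string and numeric arguments above are illustrative values only.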
def sbol_5_sticky_restriction_site (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL 5' sticky-end restriction site renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
y_extent = 4.0
x_extent = 8.0
end_space = 1.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'end_space' in list(opts.keys()):
end_space = opts['end_space']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Direction is meaningless for this part => start is always < end
if start > end:
temp_end = end
end = start
start = temp_end
# Check direction and add start padding
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+end_space+x_extent+end_space
final_end = end+end_pad
l1 = Line2D([start+end_space,start+end_space+x_extent],[0,0],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l1_top = Line2D([start+end_space,start+end_space],[0,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l1_bottom = Line2D([start+end_space+x_extent,start+end_space+x_extent],[0,-y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(l1)
ax.add_line(l1_top)
ax.add_line(l1_bottom)
# White rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(end, -y_extent),
(end, y_extent)],
edgecolor=(1,1,1), facecolor=(1,1,1), linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
return final_start, final_end
def sbol_3_sticky_restriction_site (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL 3' sticky-end restriction site renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
y_extent = 4.0
x_extent = 8.0
end_space = 1.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'end_space' in list(opts.keys()):
end_space = opts['end_space']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Direction is meaningless for this part => start is always < end
if start > end:
temp_end = end
end = start
start = temp_end
# Check direction and add start padding
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+end_space+x_extent+end_space
final_end = end+end_pad
l1 = Line2D([start+end_space,start+end_space+x_extent],[0,0],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l1_top = Line2D([start+end_space+x_extent,start+end_space+x_extent],[0,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l1_bottom = Line2D([start+end_space,start+end_space],[0,-y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(l1)
ax.add_line(l1_top)
ax.add_line(l1_bottom)
# White rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(end, -y_extent),
(end, y_extent)],
edgecolor=(1,1,1), facecolor=(1,1,1), linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
return final_start, final_end
def sbol_user_defined (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL user-defined element renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 12.0
y_extent = 3.0
linestyle = '-'
fill_color = (1,1,1)
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'fill_color' in list(opts.keys()):
fill_color = opts['fill_color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Check direction and add start padding
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
# White rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(start+x_extent, -y_extent),
(start+x_extent, y_extent)],
edgecolor=color, facecolor=fill_color, linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_signature (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL signature renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 12.0
y_extent = 3.0
linestyle = '-'
fill_color = (1,1,1)
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'fill_color' in list(opts.keys()):
fill_color = opts['fill_color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
direction = 'F'
if start > end:
direction = 'R'
temp_end = end
end = start
start = temp_end
final_end = prev_end
final_start = prev_end
if direction == 'F':
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
else:
final_start = prev_end
end = prev_end+end_pad
start = end+x_extent
final_start = start+start_pad
indent_fac = (y_extent*2.0)*0.3
cross_width = (y_extent*2.0)*0.7
if direction == 'F':
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(start+x_extent, -y_extent),
(start+x_extent, y_extent)],
edgecolor=color, facecolor=fill_color, linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p1)
top1x = start + indent_fac
top1y = y_extent - indent_fac
top2x = start + cross_width
top2y = y_extent - indent_fac
bot1x = start
attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.
If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.
There is an optional argument "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the "input" argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes.
"""
input = kwargs.pop('input', None)
timeout = kwargs.pop('timeout', None)
check = kwargs.pop('check', False)
if input is not None:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE
process = Popen(*popenargs, **kwargs)
try:
process.__enter__() # No-op really; __enter__/__exit__ are called explicitly to emulate a "with" block on Python 2.4
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
raise TimeoutExpired(process.args, timeout, output=stdout,
stderr=stderr)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if check and retcode:
raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
finally:
# None because our context manager __exit__ does not use them.
process.__exit__(None, None, None)
return CompletedProcess(process.args, retcode, stdout, stderr)
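# Illustrative usage sketch (an assumption, not part of the original module):
# run() mirrors the Python 3.5+ subprocess.run() API, so a typical call is
#
#   result = run(['ls', '-l'], stdout=PIPE, stderr=PIPE, timeout=10, check=True)
#   print(result.returncode, result.stdout)
#
# Passing input=b'data' feeds the child's stdin; combining it with an explicit
# stdin= argument raises ValueError, as implemented above.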
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
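# Worked examples of the quoting rules above (outputs derived by hand from the
# rules; shown for illustration only):
#
#   list2cmdline(['a', 'b c'])            ->  a "b c"
#   list2cmdline(['a"b'])                 ->  a\"b
#   list2cmdline(['C:\\Program Files\\']) ->  "C:\Program Files\\"
#
# In the last case the trailing backslash is doubled so it does not escape the
# closing double quotation mark (rule 5).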
_PLATFORM_DEFAULT_CLOSE_FDS = object()
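# _PLATFORM_DEFAULT_CLOSE_FDS is a sentinel default: it lets Popen.__init__
# distinguish "close_fds was not passed by the caller" from an explicit
# True/False, because the appropriate default differs between Windows and POSIX
# (see the branches in __init__ below).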
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
shell=False, cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=()):
"""Create new Popen instance."""
_cleanup()
# Held while anything is calling waitpid before returncode has been
# updated to prevent clobbering returncode if wait() or poll() are
# called from multiple threads at once. After acquiring the lock,
# code must re-check self.returncode to see if another thread just
# finished a waitpid() call.
self._waitpid_lock = threading.Lock()
self._child_created = False
self._input = None
self._communication_started = False
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
any_stdio_set = (stdin is not None or stdout is not None or
stderr is not None)
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
if any_stdio_set:
close_fds = False
else:
close_fds = True
elif close_fds and any_stdio_set:
raise ValueError(
"close_fds is not supported on Windows platforms"
" if you redirect stdin/stdout/stderr")
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if pass_fds and not close_fds:
import warnings
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.args = args
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
if mswindows:
if p2cwrite != -1:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread != -1:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread != -1:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite != -1:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread != -1:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread != -1:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
self._closed_child_pipe_fds = False
exception_cleanup_needed = False
try:
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session)
except:
# The cleanup is performed within the finally block rather
# than simply within this except block before the raise so
# that any exceptions raised and handled within it do not
# clobber the exception context we want to propagate upwards.
# This is only necessary in Python 2.
exception_cleanup_needed = True
raise
finally:
if exception_cleanup_needed:
for f in filter(None, (self.stdin, self.stdout, self.stderr)):
try:
f.close()
except EnvironmentError:
pass # Ignore EBADF or other errors
if not self._closed_child_pipe_fds:
to_close = []
if stdin == PIPE:
to_close.append(p2cread)
if stdout == PIPE:
to_close.append(c2pwrite)
if stderr == PIPE:
to_close.append(errwrite)
if hasattr(self, '_devnull'):
to_close.append(self._devnull)
for fd in to_close:
try:
os.close(fd)
except EnvironmentError:
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
if self.stdin:
self.stdin.close()
# Wait for the process to terminate, to avoid zombies.
self.wait()
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self, _maxint=sys.maxint, _active=_active):
# If __init__ hasn't had a chance to execute (e.g. if it
# was passed an undeclared keyword argument), we don't
# have a _child_created attribute at all.
if not getattr(self, '_child_created', False):
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxint)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _get_devnull(self):
if not hasattr(self, '_devnull'):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
def _stdin_write(self, input):
if input:
try:
self.stdin.write(input)
except EnvironmentError as e:
if e.errno == errno.EPIPE:
# communicate() must ignore broken pipe error
pass
elif e.errno == errno.EINVAL :
# bpo-19612, bpo-30418: On Windows, stdin.write() fails
# with EINVAL if the child process exited or if the child
# process is still running but closed the pipe.
pass
else:
raise
try:
self.stdin.close()
except EnvironmentError as e:
if e.errno in (errno.EPIPE, errno.EINVAL):
pass
else:
raise
def communicate(self, input=None, timeout=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
"""Power series manipulating functions acting on polys.ring.PolyElement()"""
from sympy.polys.domains import QQ
from sympy.polys.rings import ring, PolyElement
from sympy.polys.monomials import monomial_min, monomial_mul
from mpmath.libmp.libintmath import ifac
from sympy.core.numbers import Rational
from sympy.core.compatibility import as_int
from mpmath.libmp.libintmath import giant_steps
import math
def _invert_monoms(p1):
"""
Compute ``x**n * p1(1/x)`` for ``p1`` univariate polynomial.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _invert_monoms
>>> R, x = ring('x', ZZ)
>>> p = x**2 + 2*x + 3
>>> _invert_monoms(p)
3*x**2 + 2*x + 1
See Also
========
sympy.polys.densebasic.dup_reverse
"""
terms = list(p1.items())
terms.sort()
deg = p1.degree()
ring = p1.ring
p = ring.zero
cv = p1.listcoeffs()
mv = p1.listmonoms()
for i in range(len(mv)):
p[(deg - mv[i][0],)] = cv[i]
return p
def _giant_steps(target):
"""
Return the list of precision steps for Newton's method.
"""
res = giant_steps(2, target)
if res[0] != 2:
res = [2] + res
return res
def rs_trunc(p1, x, prec):
"""
truncate the series in the ``x`` variable with precision ``prec``,
that is modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_trunc
>>> R, x = ring('x', QQ)
>>> p = x**10 + x**5 + x + 1
>>> rs_trunc(p, x, 12)
x**10 + x**5 + x + 1
>>> rs_trunc(p, x, 10)
x**5 + x + 1
"""
ring = p1.ring
p = ring.zero
i = ring.gens.index(x)
for exp1 in p1:
if exp1[i] >= prec:
continue
p[exp1] = p1[exp1]
return p
def rs_mul(p1, p2, x, prec):
"""
product of series modulo ``O(x**prec)``
``x`` is the series variable or its position in the generators.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_mul
>>> R, x = ring('x', QQ)
>>> p1 = x**2 + 2*x + 1
>>> p2 = x + 1
>>> rs_mul(p1, p2, x, 3)
3*x**2 + 3*x + 1
"""
ring = p1.ring
p = ring.zero
if not isinstance(p2, PolyElement):
raise ValueError('p2 must be a PolyElement')
if ring.__class__ != p2.ring.__class__ or ring != p2.ring:
raise ValueError('p1 and p2 must have the same ring')
iv = ring.gens.index(x)
if ring == p2.ring:
get = p.get
items2 = list(p2.items())
items2.sort(key=lambda e: e[0][iv])
if ring.ngens == 1:
for exp1, v1 in p1.items():
for exp2, v2 in items2:
exp = exp1[0] + exp2[0]
if exp < prec:
exp = (exp, )
p[exp] = get(exp, 0) + v1*v2
else:
break
else:
monomial_mul = ring.monomial_mul
for exp1, v1 in p1.items():
for exp2, v2 in items2:
if exp1[iv] + exp2[iv] < prec:
exp = monomial_mul(exp1, exp2)
p[exp] = get(exp, 0) + v1*v2
else:
break
p.strip_zero()
return p
def rs_square(p1, x, prec):
"""
square modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_square
>>> R, x = ring('x', QQ)
>>> p = x**2 + 2*x + 1
>>> rs_square(p, x, 3)
6*x**2 + 4*x + 1
"""
ring = p1.ring
p = ring.zero
iv = ring.gens.index(x)
get = p.get
items = list(p1.items())
items.sort(key=lambda e: e[0][iv])
monomial_mul = ring.monomial_mul
for i in range(len(items)):
exp1, v1 = items[i]
for j in range(i):
exp2, v2 = items[j]
if exp1[iv] + exp2[iv] < prec:
exp = monomial_mul(exp1, exp2)
p[exp] = get(exp, 0) + v1*v2
else:
break
p = p.imul_num(2)
get = p.get
for expv, v in p1.items():
if 2*expv[iv] < prec:
e2 = monomial_mul(expv, expv)
p[e2] = get(e2, 0) + v**2
p.strip_zero()
return p
def rs_pow(p1, n, x, prec):
"""
return ``p1**n`` modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_pow
>>> R, x = ring('x', QQ)
>>> p = x + 1
>>> rs_pow(p, 4, x, 3)
6*x**2 + 4*x + 1
"""
R = p1.ring
p = R.zero
if isinstance(n, Rational):
raise NotImplementedError('to be implemented')
n = as_int(n)
if n == 0:
if p1:
return R(1)
else:
raise ValueError('0**0 is undefined')
if n < 0:
p1 = rs_pow(p1, -n, x, prec)
return rs_series_inversion(p1, x, prec)
if n == 1:
return rs_trunc(p1, x, prec)
if n == 2:
return rs_square(p1, x, prec)
if n == 3:
p2 = rs_square(p1, x, prec)
return rs_mul(p1, p2, x, prec)
p = R(1)
while 1:
if n&1:
p = rs_mul(p1, p, x, prec)
n -= 1
if not n:
break
p1 = rs_square(p1, x, prec)
n = n // 2
return p
def _has_constant_term(p, x):
"""
test if ``p`` has a constant term in ``x``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _has_constant_term
>>> R, x = ring('x', QQ)
>>> p = x**2 + x + 1
>>> _has_constant_term(p, x)
True
"""
ring = p.ring
iv = ring.gens.index(x)
zm = ring.zero_monom
a = [0]*ring.ngens
a[iv] = 1
miv = tuple(a)
for expv in p:
if monomial_min(expv, miv) == zm:
return True
return False
def _series_inversion1(p, x, prec):
"""
univariate series inversion ``1/p`` modulo ``O(x**prec)``
The Newton method is used.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _series_inversion1
>>> R, x = ring('x', QQ)
>>> p = x + 1
>>> _series_inversion1(p, x, 4)
-x**3 + x**2 - x + 1
"""
ring = p.ring
zm = ring.zero_monom
if zm not in p:
raise ValueError('no constant term in series')
if _has_constant_term(p - p[zm], x):
raise ValueError('p cannot contain a constant term depending on parameters')
if p[zm] != ring(1):
# TODO add check that it is a unit
p1 = ring(1)/p[zm]
else:
p1 = ring(1)
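# Newton iteration for the reciprocal: p1 <- 2*p1 - p*p1**2, which roughly
# doubles the number of correct terms at each precision step from _giant_steps.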
for precx in _giant_steps(prec):
tmp = p1.square()
tmp = rs_mul(tmp, p, x, precx)
p1 = 2*p1 - tmp
return p1
def rs_series_inversion(p, x, prec):
"""
multivariate series inversion ``1/p`` modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_series_inversion
>>> R, x, y = ring('x, y', QQ)
>>> rs_series_inversion(1 + x*y**2, x, 4)
-x**3*y**6 + x**2*y**4 - x*y**2 + 1
>>> rs_series_inversion(1 + x*y**2, y, 4)
-x*y**2 + 1
"""
ring = p.ring
zm = ring.zero_monom
ii = ring.gens.index(x)
m = min(p, key=lambda k: k[ii])[ii]
if m:
raise NotImplementedError('no constant term in series')
if zm not in p:
raise NotImplementedError('no constant term in series')
if _has_constant_term(p - p[zm], x):
raise NotImplementedError('p - p[0] must not have a constant term in the series variables')
return _series_inversion1(p, x, prec)
def rs_series_from_list(p, c, x, prec, concur=1):
"""
series ``sum c[n]*p**n`` modulo ``O(x**prec)``
reduce the number of multiplications by summing concurrently
``ax = [1, p, p**2, .., p**(J - 1)]``
``s = sum(c[i]*ax[i] for i in range(r, (r + 1)*J))*p**((K - 1)*J)``
with ``K >= (n + 1)/J``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_series_from_list, rs_trunc
>>> R, x = ring('x', QQ)
>>> p = x**2 + x + 1
>>> c = [1, 2, 3]
>>> rs_series_from_list(p, c, x, 4)
6*x**3 + 11*x**2 + 8*x + 6
>>> rs_trunc(1 + 2*p + 3*p**2, x, 4)
6*x**3 + 11*x**2 + 8*x + 6
>>> pc = R.from_list(list(reversed(c)))
>>> rs_trunc(pc.compose(x, p), x, 4)
6*x**3 + 11*x**2 + 8*x + 6
See Also
========
sympy.polys.ring.compose
"""
ring = p.ring
n = len(c)
if not concur:
q = ring(1)
s = c[0]*q
for i in range(1, n):
q = rs_mul(q, p, x, prec)
s += c[i]*q
return s
J = int(math.sqrt(n) + 1)
K, r = divmod(n, J)
if r:
K += 1
ax = [ring(1)]
b = 1
q = ring(1)
if len(p) < 20:
for i in range(1, J):
q = rs_mul(q, p, x, prec)
ax.append(q)
else:
for i in range(1, J):
if i % 2 == 0:
q = rs_square(ax[i//2], x, prec)
else:
q = rs_mul(q, p, x, prec)
ax.append(q)
# optimize using rs_square
pj = rs_mul(ax[-1], p, x, prec)
b = ring(1)
s = ring(0)
for k in range(K
"""
Cosmology routines: A module for various cosmological calculations.
The bulk of the work is within the class :py:class:`Cosmology` which stores a
cosmology and can calculate quantities like distance measures.
"""
from dataclasses import dataclass, asdict
import numpy as np
# Import integration routines
from scipy import integrate as si
# Package imports
from .nputil import FloatArrayLike
from . import units as u
@dataclass
class Cosmology(object):
"""Class to store a cosmology, and compute measures.
Defines a cosmology and allows calculation of a few simple
quantities (notably distance measures and lookback time).
Default params from final Planck params: Planck+TT,TE,EE+lowE+lensing+BAO
(https://arxiv.org/pdf/1807.06209.pdf).
Attributes
----------
units : {'astro', 'cosmo', 'si'}
The unit system to return quantities in. SI is self
explanatory, `astro` has distances in Mpc and time in Myr, and
`cosmo` has distances in Mpc/h and time in Myr. Defaults to 'cosmo'.
omega_b : scalar
Baryon fraction relative to the critical density.
omega_c : scalar
Dark matter fraction relative to the critical density.
omega_l : scalar
Dark energy fraction relative to the critical density (assumes
a cosmological constant).
omega_g : scalar
Fraction of electromagnetic radiation relative to the critical density.
omega_n : scalar
Fraction of massless neutrinos relative to the critical density.
H0 : scalar
The Hubble constant in km/s / Mpc
w_0, w_a : scalars
Dark energy parameters.
omega_k : scalar, readonly
The curvature as a fraction of the critical density.
omega_m : scalar, readonly
The total matter density as a fraction of the critical density.
omega_r : scalar, readonly
The total radiation density as a fraction of the critical density.
"""
# The unit type to use
# TODO: switch to Literal["cosmo", "astro", "si"] on Python 3.8
units: str = "cosmo"
# Standard density parameters
# NOTE: omega_l is adjusted slightly from the Planck value to make Omega_k = 0
omega_b: float = 0.04897
omega_c: float = 0.26067
omega_l: float = 0.69036
# Density parameters more relevant for the early Universe
omega_g: float = 0.0
omega_n: float = 0.0
# H_0 given in km/s / Mpc
H0: float = 67.66
# Dark energy parameters
w_0: float = -1.0
w_a: float = 0.0
@property
def omega_m(self) -> float:
return self.omega_b + self.omega_c
@property
def omega_r(self) -> float:
return self.omega_g + self.omega_n
@property
def omega_k(self) -> float:
return 1.0 - (
self.omega_l + self.omega_b + self.omega_c + self.omega_g + self.omega_n
)
@classmethod
def from_physical(
cls,
ombh2: float = 0.02242,
omch2: float = 0.11933,
H0: float = 67.66,
omk: float = 0.0,
TCMB=2.7255,
nnu=3.046,
) -> "Cosmology":
r"""Initialise a new cosmology from the physical parameters.
This uses the CMB relevant parameterization that is commonly
used. The dark energy density is calculated from the given
parameters.
Parameters
----------
ombh2 : scalar, optional
The relative baryon density times h^2 (h = H_0 / 100).
omch2 : scalar, optional
The fractional dark matter density times h^2 (h = H_0 / 100).
H0 : scalar, optional
The Hubble constant
omk : scalar, optional
The curvature fraction.
TCMB : scalar, optional
The CMB temperature (used to calculate the radiation density).
nnu : scalar, optional
The number of massless neutrinos. Used to set the neutrino density.
Returns
-------
cosmo : instance of Cosmology
"""
h = H0 / 100.0
H_si = H0 * 1000.0 / u.mega_parsec
rhoc = 3.0 * H_si ** 2 * u.c_sl ** 2 / (8.0 * np.pi * u.G_n)
rhorad = u.a_rad * TCMB ** 4
rhonu = nnu * rhorad * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
omkh2 = omk * h ** 2
omega_b = ombh2 / h ** 2
omega_c = omch2 / h ** 2
omega_g = rhorad / rhoc
omega_n = rhonu / rhoc
omega_l = 1.0 - (omkh2 + ombh2 + omch2) / h ** 2 - (omega_g + omega_n)
return cls(
omega_b=omega_b,
omega_c=omega_c,
omega_l=omega_l,
omega_g=omega_g,
omega_n=omega_n,
)
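# Illustrative usage (an assumption, not part of the original module):
#
#   cosmo = Cosmology()                            # Planck 2018 defaults, no radiation
#   cosmo2 = Cosmology.from_physical(ombh2=0.02242, omch2=0.11933, H0=67.66)
#   d = cosmo.comoving_distance(1.0)               # Mpc/h in the default "cosmo" units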
def to_dict(self) -> dict:
return asdict(self)
def H(self, z: FloatArrayLike = 0.0) -> FloatArrayLike:
"""The Hubble parameter at redshift z.
Return the Hubble parameter in SI units (s^-1), regardless of
the value of `self.units`.
Parameters
----------
z
The redshift to calculate the Hubble parameter
at. Defaults to z = 0.
Returns
-------
H
The Hubble parameter.
"""
H = (
self.H0
* (
self.omega_r * (1 + z) ** 4
+ self.omega_m * (1 + z) ** 3
+ self.omega_k * (1 + z) ** 2
+ self.omega_l
* (1 + z) ** (3 * (1 + self.w_0 + self.w_a))
* np.exp(-3 * self.w_a * z / (1 + z))
)
** 0.5
)
# Convert to SI
return H * 1000.0 / u.mega_parsec
def comoving_distance(self, z: FloatArrayLike) -> FloatArrayLike:
r"""The comoving distance to redshift z.
This routine is vectorized.
Parameters
----------
z
The redshift(s) to calculate at.
Returns
-------
dist
The comoving distance to each redshift.
"""
# Calculate the integrand.
def f(z1):
return u.c_sl / self.H(z1)
return _intf_0_z(f, z) / self._unit_distance
def proper_distance(self, z: FloatArrayLike) -> FloatArrayLike:
r"""The proper distance to an event at redshift z.
The proper distance can be ill defined. In this case we mean
the comoving transverse separation between two events at the
same redshift divided by their angular separation. This
routine is vectorized.
Parameters
----------
z
The redshift(s) to calculate at.
Returns
-------
dist
The proper distance to each redshift.
"""
x = self.comoving_distance(z)
om_k = self.omega_k
dhi = np.sqrt(np.fabs(om_k)) * self.H() / u.c_sl * self._unit_distance
if om_k < 0.0:
x = np.sin(x * dhi) / dhi
elif om_k > 0.0:
x = np.sinh(x * dhi) / dhi
return x
def angular_distance(self, z: FloatArrayLike) -> FloatArrayLike:
r"""The angular diameter distance to redshift z.
Not to be confused with the `proper_distance`. This is the
*physical* transverse separation between two events at the
same redshift divided by their angular separation. This
routine is vectorized.
Parameters
----------
z
The redshift(s) to calculate at.
Returns
-------
dist
The angular diameter distance to each redshift.
"""
return self.proper_distance(z) / (1 + z)
def luminosity_distance(self, z: FloatArrayLike) -> FloatArrayLike:
r"""The luminosity distance to redshift z. This
routine is vectorized.
Parameters
----------
z
The redshift(s) to calculate at.
Returns
-------
dist
The luminosity distance to each redshift.
"""
return self.proper_distance(z) * (1 + z)
def lookback_time(self, z: FloatArrayLike) -> FloatArrayLike:
r"""The lookback time out to redshift z.
Parameters
----------
z
The redshift(s) to calculate at.
Returns
-------
time
The lookback time to each redshift.
"""
# Calculate the integrand.
def f(z1):
return 1.0 / (self.H(z1) * (1 + z1))
return _intf_0_z(f, z) / self._unit_time
@property
def _unit_distance(self) -> float:
# Select the appropriate distance unit
if self.units == "astro":
return u.mega_parsec
elif self.units == "cosmo":
return u.mega_parsec / (self.H0 / 100.0)
elif self.units == "si":
return 1.0
raise RuntimeError("Units not known")
@property
def _unit_time(self) -> float:
# Select the appropriate time unit
if self.units == "astro":
return u.mega_year
elif self.units == "cosmo":
return u.mega_year
elif self.units == "si":
return 1.0
raise RuntimeError("Units not known")
def growth_factor(self, z: FloatArrayLike) -> FloatArrayLike:
"""Approximation for the matter growth factor.
Uses a Pade approximation.
Parameters
----------
z
Redshift to calculate at.
Returns
-------
growth_factor
Notes
-----
See [1]_.
References
----------
.. [1] http://arxiv.org/abs/1012.2671
"""
if np.abs(self.omega_k) > 1e-3:
raise RuntimeError(
f"Calculation only valid in a flat universe. Omega_k = {self.omega_k}"
)
x = ((1.0 / self.omega_m) - 1.0) / (1.0 + z) ** 3
num = 1.0 + 1.175 * x + 0.3064 * x ** 2 + 0.005355 * x ** 3
den = 1.0 + 1.857 * x + 1.021 * x ** 2 + 0.1530 * x ** 3
d = (1.0 + x) ** 0.5 / (1.0 + z) * num / den
return d
def growth_rate(self, z: FloatArrayLike) -> FloatArrayLike:
"""Approximation for the matter growth rate.
From explicit differentiation of the Pade approximation for
the growth factor.
Parameters
----------
z
Redshift to calculate at.
Returns
-------
growth_rate
Notes
| |
== "t" :
toGuess = toGuess[:3] + "t" + toGuess[4:]
if word[1] != "T" and word[1] != "t" and word[2] != "T" and word[2] != "t" and word[3] != "T" and word[3] != "t" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "t" + ", "
if guessChar == "U" or guessChar == "u" :
if word[1] == "U" or word[1] == "u" :
toGuess = toGuess[:1] + "u" + toGuess[2:]
if word[2] == "U" or word[2] == "u" :
toGuess = toGuess[:2] + "u" + toGuess[3:]
if word[3] == "U" or word[3] == "u" :
toGuess = toGuess[:3] + "u" + toGuess[4:]
if word[1] != "U" and word[1] != "u" and word[2] != "U" and word[2] != "u" and word[3] != "U" and word[3] != "u" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "u" + ", "
if guessChar == "V" or guessChar == "v" :
if word[1] == "V" or word[1] == "v" :
toGuess = toGuess[:1] + "v" + toGuess[2:]
if word[2] == "V" or word[2] == "v" :
toGuess = toGuess[:2] + "v" + toGuess[3:]
if word[3] == "V" or word[3] == "v" :
toGuess = toGuess[:3] + "v" + toGuess[4:]
if word[1] != "V" and word[1] != "v" and word[2] != "V" and word[2] != "v" and word[3] != "V" and word[3] != "v" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "v" + ", "
if guessChar == "W" or guessChar == "w" :
if word[1] == "W" or word[1] == "w" :
toGuess = toGuess[:1] + "w" + toGuess[2:]
if word[2] == "W" or word[2] == "w" :
toGuess = toGuess[:2] + "w" + toGuess[3:]
if word[3] == "W" or word[3] == "w" :
toGuess = toGuess[:3] + "w" + toGuess[4:]
if word[1] != "W" and word[1] != "w" and word[2] != "W" and word[2] != "w" and word[3] != "W" and word[3] != "w" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "w" + ", "
if guessChar == "X" or guessChar == "x" :
if word[1] == "X" or word[1] == "x" :
toGuess = toGuess[:1] + "x" + toGuess[2:]
if word[2] == "X" or word[2] == "x" :
toGuess = toGuess[:2] + "x" + toGuess[3:]
if word[3] == "X" or word[3] == "x" :
toGuess = toGuess[:3] + "x" + toGuess[4:]
if word[1] != "X" and word[1] != "x" and word[2] != "X" and word[2] != "x" and word[3] != "X" and word[3] != "x" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "x" + ", "
if guessChar == "Y" or guessChar == "y" :
if word[1] == "Y" or word[1] == "y" :
toGuess = toGuess[:1] + "y" + toGuess[2:]
if word[2] == "Y" or word[2] == "y" :
toGuess = toGuess[:2] + "y" + toGuess[3:]
if word[3] == "Y" or word[3] == "y" :
toGuess = toGuess[:3] + "y" + toGuess[4:]
if word[1] != "Y" and word[1] != "y" and word[2] != "Y" and word[2] != "y" and word[3] != "Y" and word[3] != "y" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "y" + ", "
if guessChar == "Z" or guessChar == "z" :
if word[1] == "Z" or word[1] == "z" :
toGuess = toGuess[:1] + "z" + toGuess[2:]
if word[2] == "Z" or word[2] == "z" :
toGuess = toGuess[:2] + "z" + toGuess[3:]
if word[3] == "Z" or word[3] == "z" :
toGuess = toGuess[:3] + "z" + toGuess[4:]
if word[1] != "Z" and word[1] != "z" and word[2] != "Z" and word[2] != "z" and word[3] != "Z" and word[3] != "z" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "z" + ", "
if numberOfErrors == 0 :
print("\t___________")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 1 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 2 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 3 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 4 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| /|\\")
print("\t|")
print("\t|")
print("\t|")
print("\n\tWord: " + toGuess)
print("\tMisses: " + wrongChars)
if "_" in toGuess and not loser :
guessChar = ""
while not guessChar.isalpha() :
guessChar = input("\n---------------------------------\nEnter your letter: ")
_ = os.system('cls' if os.name=='nt' else 'clear')
if guessChar == "A" or guessChar == "a" :
if word[1] == "A" or word[1] == "a" :
toGuess = toGuess[:1] + "a" + toGuess[2:]
if word[2] == "A" or word[2] == "a" :
toGuess = toGuess[:2] + "a" + toGuess[3:]
if word[3] == "A" or word[3] == "a" :
toGuess = toGuess[:3] + "a" + toGuess[4:]
if word[1] != "A" and word[1] != "a" and word[2] != "A" and word[2] != "a" and word[3] != "A" and word[3] != "a" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "a" + ", "
if guessChar == "B" or guessChar == "b" :
if word[1] == "B" or word[1] == "b" :
toGuess = toGuess[:1] + "b" + toGuess[2:]
if word[2] == "B" or word[2] == "b" :
toGuess = toGuess[:2] + "b" + toGuess[3:]
if word[3] == "B" or word[3] == "b" :
toGuess = toGuess[:3] + "b" + toGuess[4:]
if word[1] != "B" and word[1] != "b" and word[2] != "B" and word[2] != "b" and word[3] != "B" and word[3] != "b" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "b" + ", "
if guessChar == "C" or guessChar == "c" :
if word[1] == "C" or word[1] == "c" :
toGuess = toGuess[:1] + "c" + toGuess[2:]
if word[2] == "C" or word[2] == "c" :
toGuess = toGuess[:2] + "c" + toGuess[3:]
if word[3] == "C" or word[3] == "c" :
toGuess = toGuess[:3] + "c" + toGuess[4:]
if word[1] != "C" and word[1] != "c" and word[2] != "C" and word[2] != "c" and word[3] != "C" and word[3] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[3] == "D" or word[3] == "d" :
toGuess = toGuess[:3] + "d" + toGuess[4:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" and word[3] != "D" and word[3] != "d" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "d" + ", "
if guessChar == "E" or guessChar == "e" :
if word[1] == "E" or word[1] == "e" :
toGuess = toGuess[:1] + "e" + toGuess[2:]
if word[2] == "E" or word[2] == "e" :
toGuess = toGuess[:2] + "e" + toGuess[3:]
if word[3] == "E" or word[3] == "e" :
toGuess = toGuess[:3] + "e" + toGuess[4:]
if word[1] != "E" and word[1] != "e" and word[2] != "E" and word[2] != "e" and word[3] != "E" and word[3] != "e" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "e" + ", "
if guessChar == "F" or guessChar == "f" :
if word[1] == "F" or word[1] == "f" :
toGuess = toGuess[:1] + "f" + toGuess[2:]
if word[2] == "F" or word[2] == "f" :
toGuess = toGuess[:2] + "f" + toGuess[3:]
if word[3] == "F" or word[3] == "f" :
toGuess = toGuess[:3] + "f" + toGuess[4:]
if word[1] != "F" and word[1] != "f" and word[2] != "F" and word[2] != "f" and word[3] != "F" and word[3] != "f" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "f" + ", "
if guessChar == "G" or guessChar == "g" :
if word[1] == "G" or word[1] == "g" :
toGuess = toGuess[:1] + "g" + toGuess[2:]
if word[2] == "G" or word[2] == "g" :
toGuess = toGuess[:2] + "g" + toGuess[3:]
if word[3] == "G" or word[3] == "g" :
toGuess = toGuess[:3] + "g" + toGuess[4:]
if word[1] != "G" and word[1] != "g" and word[2] != "G" and word[2] != "g" and word[3] != "G" and word[3] != "g" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "g" + ", "
if guessChar == "H" or guessChar == "h" :
if word[1] == "H" or word[1] == "h" :
toGuess = toGuess[:1] + "h" + toGuess[2:]
if word[2] == "H" or word[2] == "h" :
toGuess = toGuess[:2] + "h" + toGuess[3:]
if word[3] == "H" or word[3] == "h" :
toGuess = toGuess[:3] + "h" + toGuess[4:]
if word[1] != "H" and word[1] != "h" and word[2] != "H" and word[2] != "h" and word[3] != "H" and | |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`585`-compliant type hint utilities.
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype.roar import BeartypeDecorHintPep585Exception
from beartype._cave._cavefast import HintGenericSubscriptedType
from beartype._util.cache.utilcachecall import callable_cached
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_9
from beartype._util.utilobject import Iota
from beartype._data.datatyping import TupleTypes
from typing import Any, Set
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ HINTS }....................
HINT_PEP585_TUPLE_EMPTY = (
tuple[()] if IS_PYTHON_AT_LEAST_3_9 else Iota()) # type: ignore[misc]
'''
:pep:`585`-compliant empty fixed-length tuple type hint if the active Python
interpreter supports at least Python 3.9 and thus :pep:`585` *or* a unique
placeholder object otherwise to guarantee failure when comparing arbitrary
objects against this object via equality tests.
'''
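# Descriptive note (not from the original module): under Python >= 3.9 this
# constant is simply ``tuple[()]``, so equality tests such as
# ``hint == HINT_PEP585_TUPLE_EMPTY`` detect the empty fixed-length tuple hint;
# under older interpreters the Iota() placeholder compares equal only to itself,
# so such tests safely evaluate to False.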
# ....................{ VALIDATORS }....................
def die_unless_hint_pep585_generic(hint: object) -> None:
'''
Raise an exception unless the passed object is a :pep:`585`-compliant
**generic** (i.e., class superficially subclassing at least one subscripted
:pep:`585`-compliant pseudo-superclass).
Parameters
----------
hint : object
Object to be validated.
Raises
----------
BeartypeDecorHintPep585Exception
If this hint is *not* a :pep:`585`-compliant generic.
'''
# If this hint is *NOT* a PEP 585-compliant generic, raise an exception
if not is_hint_pep585_generic(hint):
raise BeartypeDecorHintPep585Exception(
f'Type hint {repr(hint)} not PEP 585 generic.')
# ....................{ TESTERS }....................
# If the active Python interpreter targets at least Python >= 3.9 and thus
# supports PEP 585, correctly declare this function.
if IS_PYTHON_AT_LEAST_3_9:
def is_hint_pep585_builtin(hint: object) -> bool:
# Avoid circular import dependencies.
from beartype._util.hint.pep.proposal.pep484585.utilpep484585generic import (
is_hint_pep484585_generic)
# Return true only if this hint...
return (
# Is either a PEP 484- or -585-compliant subscripted generic or
# PEP 585-compliant builtin *AND*...
isinstance(hint, HintGenericSubscriptedType) and
# Is *NOT* a PEP 484- or -585-compliant subscripted generic.
not is_hint_pep484585_generic(hint)
)
@callable_cached
def is_hint_pep585_generic(hint: object) -> bool:
# Avoid circular import dependencies.
from beartype._util.hint.pep.proposal.pep484585.utilpep484585generic import (
get_hint_pep484585_generic_type_or_none)
# If this hint is *NOT* a type, reduce this hint to the object
# originating this hint if any. See the comparable
# is_hint_pep484_generic() tester for further details.
hint = get_hint_pep484585_generic_type_or_none(hint)
# Tuple of all pseudo-superclasses originally subclassed by the passed
# hint if this hint is a generic *OR* false otherwise.
hint_bases_erased = getattr(hint, '__orig_bases__', False)
# If this hint subclasses *NO* pseudo-superclasses, this hint *CANNOT*
# be a generic. In this case, immediately return false.
if not hint_bases_erased:
return False
# Else, this hint subclasses one or more pseudo-superclasses.
# For each such pseudo-superclass...
#
# Unsurprisingly, PEP 585-compliant generics have absolutely *NO*
# commonality with PEP 484-compliant generics. While the latter are
# trivially detectable as subclassing "typing.Generic" after type
# erasure, the former are *NOT*. The only means of deterministically
# deciding whether or not a hint is a PEP 585-compliant generic is if:
# * That class defines both the __class_getitem__() dunder method *AND*
# the "__orig_bases__" instance variable. Note that this condition in
# and of itself is insufficient to decide PEP 585-compliance as a
# generic. Why? Because these dunder attributes have been
# standardized under various PEPs and may thus be implemented by
# *ANY* arbitrary classes.
# * The "__orig_bases__" instance variable is a non-empty tuple.
# * One or more objects listed in that tuple are PEP 585-compliant
# objects.
#
# Note we could technically also test that this hint defines the
# __class_getitem__() dunder method. Since this condition suffices to
# ensure that this hint is a PEP 585-compliant generic, however, there
# exists little benefit to doing so.
for hint_base_erased in hint_bases_erased: # type: ignore[union-attr]
# If this pseudo-superclass is itself a PEP 585-compliant type
# hint, return true.
if is_hint_pep585_builtin(hint_base_erased):
return True
# Else, this pseudo-superclass is *NOT* PEP 585-compliant. In this
# case, continue to the next pseudo-superclass.
# Since *NO* such pseudo-superclasses are PEP 585-compliant, this hint
# is *NOT* a PEP 585-compliant generic. In this case, return false.
return False
# Else, the active Python interpreter targets at most Python < 3.9 and thus
# fails to support PEP 585. In this case, fallback to declaring this function
# to unconditionally return False.
else:
def is_hint_pep585_builtin(hint: object) -> bool:
return False
def is_hint_pep585_generic(hint: object) -> bool:
return False
# ....................{ TESTERS ~ doc }....................
# Docstring for this function regardless of implementation details.
is_hint_pep585_builtin.__doc__ = '''
``True`` only if the passed object is a C-based :pep:`585`-compliant
**builtin type hint** (i.e., C-based type hint instantiated by subscripting
either a concrete builtin container class like :class:`list` or
:class:`tuple` *or* an abstract base class (ABC) declared by the
:mod:`collections.abc` submodule like :class:`collections.abc.Iterable` or
:class:`collections.abc.Sequence`).
This tester is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation trivially reduces
to an efficient one-liner.
Caveats
----------
**This test returns false for** :pep:`585`-compliant **generics,** which
fail to satisfy the same API as all other :pep:`585`-compliant type hints.
Why? Because :pep:`560`-type erasure erases this API on
:pep:`585`-compliant generics immediately after those generics are
declared, preventing their subsequent detection as :pep:`585`-compliant.
Instead, :pep:`585`-compliant generics are only detectable by calling
either:
* The high-level PEP-agnostic
:func:`beartype._util.hint.pep.utilpeptest.is_hint_pep484585_generic`
tester.
* The low-level :pep:`585`-specific :func:`is_hint_pep585_generic` tester.
Parameters
----------
hint : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is a :pep:`585`-compliant type hint.
'''
is_hint_pep585_generic.__doc__ = '''
``True`` only if the passed object is a :pep:`585`-compliant **generic**
(i.e., object that may *not* actually be a class originally subclassing at
least one subscripted :pep:`585`-compliant pseudo-superclass).
This tester is memoized for efficiency.
Parameters
----------
hint : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is a :pep:`585`-compliant generic.
'''
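# Illustrative behaviour of the two testers under Python >= 3.9 (examples are
# assumptions added for clarity, not from the original module):
#
#   is_hint_pep585_builtin(list[int])        -> True   # C-based builtin subscription
#   is_hint_pep585_builtin(typing.List[int]) -> False  # PEP 484 subscription, not PEP 585
#
#   class IntList(list[int]): pass
#   is_hint_pep585_builtin(IntList)          -> False  # a class, not a subscripted builtin
#   is_hint_pep585_generic(IntList)          -> True   # recovered via __orig_bases__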
# ....................{ GETTERS }....................
def get_hint_pep585_generic_bases_unerased(hint: Any) -> tuple:
'''
Tuple of all unerased :pep:`585`-compliant **pseudo-superclasses** (i.e.,
:mod:`typing` objects originally listed as superclasses prior to their
implicit type erasure under :pep:`560`) of the passed :pep:`585`-compliant
**generic** (i.e., class subclassing at least one non-class
:pep:`585`-compliant object).
This getter is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation trivially reduces
to an efficient one-liner.
Parameters
----------
hint : object
Object to be inspected.
Returns
----------
Tuple[object]
Tuple of the one or more unerased pseudo-superclasses of this
:pep:`585`-compliant generic.
Raises
----------
BeartypeDecorHintPep585Exception
If this hint is *not* a :pep:`585`-compliant generic.
See Also
----------
:func:`beartype._util.hint.pep.proposal.pep484585.utilpep484585generic.get_hint_pep484585_generic_bases_unerased`
Further details.
'''
# Avoid circular import dependencies.
from beartype._util.hint.pep.proposal.pep484585.utilpep484585generic import (
get_hint_pep484585_generic_type_or_none)
# If this hint is *NOT* a class, reduce this hint to the object originating
# this hint if any. See the is_hint_pep484_generic() tester for details.
hint = get_hint_pep484585_generic_type_or_none(hint)
# If this hint is *NOT* a PEP 585-compliant generic, raise an exception.
die_unless_hint_pep585_generic(hint)
# Return the tuple of all unerased pseudo-superclasses of this generic.
# While the "__orig_bases__" dunder instance variable is *NOT* guaranteed
# to exist for PEP 484-compliant generic types, this variable is guaranteed
# to exist for PEP 585-compliant generic types. Thanks for small favours.
return hint.__orig_bases__
@callable_cached
def get_hint_pep585_generic_typevars(hint: object) -> TupleTypes:
'''
Tuple of all **unique type variables** (i.e., subscripted :class:`TypeVar`
instances of the passed :pep:`585`-compliant generic listed by the caller
at hint declaration time ignoring duplicates) if any *or* the empty tuple
otherwise.
This getter is memoized for efficiency.
Motivation
----------
The current implementation of :pep:`585` under at least Python 3.9 is
fundamentally broken with respect to parametrized generics. While
:pep:`484`-compliant generics properly propagate type variables from
pseudo-superclasses to subclasses, :pep:`585` fails to do so. This function
"fills in the gaps" by recovering these type variables from parametrized
:pep:`585`-compliant generics by iteratively constructing a new tuple from
the type variables parametrizing all pseudo-superclasses of this generic.
Parameters
----------
hint : object
Object to be inspected.
Returns
----------
Tuple[TypeVar, ...]
Either:
* If this :pep:`585`-compliant generic defines a ``__parameters__``
dunder attribute, the value of that attribute.
* Else, the empty tuple.
Raises
----------
:exc:`BeartypeDecorHintPep585Exception`
If this hint is *not* a :pep:`585`-compliant generic.
'''
# Avoid circular import dependencies.
from beartype._util.hint.pep.utilpepget import get_hint_pep_typevars
# Tuple of all pseudo-superclasses of this PEP 585-compliant generic.
hint_bases = get_hint_pep585_generic_bases_unerased(hint)
# Set of all type variables parametrizing these
<filename>3.7.0/lldb-3.7.0.src/scripts/Python/finishSwigPythonLLDB.py
""" Python SWIG post process script for each language
--------------------------------------------------------------------------
File: finishSwigPythonLLDB.py
Overview: Python script(s) to post process SWIG Python C++ Script
Bridge wrapper code on the Windows/LINUX/OSX platform.
The Python scripts are equivalent to the shell script (.sh)
files.
For the Python script interpreter (external to liblldb) to
be able to import and use the lldb module, there must be
two files, lldb.py and _lldb.so, that it can find. lldb.py
is generated by SWIG at the same time it generates the C++
file. _lldb.so is actually a symlink file that points to
the LLDB shared library/framework.
The Python script interpreter needs to be able to
automatically find these two files. On Darwin systems it
searches in the LLDB.framework, as well as in all the normal
Python search paths. On non-Darwin systems these files will
need to be put some place where Python will find them.
This shell script creates the _lldb.so symlink in the
appropriate place, and copies the lldb.py (and
embedded_interpreter.py) file to the correct directory.
Gotchas: A debug-compiled Python library (pythonXX_d.lib) is required for SWIG
to build a correct LLDBWrapperPython.cpp in order for Visual
Studio to compile successfully. The release version of the
Python lib will not work (20/12/2013).
LLDB (dir) CMakeLists.txt uses the Windows environment
variables $PYTHON_INCLUDE and $PYTHON_LIB to locate
Python files required for the build.
Copyright: None.
--------------------------------------------------------------------------
"""
# Python modules:
import os # Provide directory and file handling, determine OS information
import sys # System specific parameters and functions
import errno # OS error results
import shutil # High-level operations on files and collections of files
import subprocess # Call external programs
import ctypes # Invoke Windows API for creating symlinks
# Third party modules:
# In-house modules:
import utilsOsType # Determine the OS type this script is running on
import utilsDebug # Debug Python scripts
# User facing text:
strMsgOsVersion = "The current OS is %s";
strMsgPyVersion = "The Python version is %d.%d";
strErrMsgProgFail = "Program failure: ";
strErrMsgLLDBPyFileNotNotFound = "Unable to locate lldb.py at path '%s'";
strMsgCopyLLDBPy = "Copying lldb.py from '%s' to '%s'";
strErrMsgFrameWkPyDirNotExist = "Unable to find the LLDB.framework directory '%s'";
strMsgCreatePyPkgCopyPkgFile = "create_py_pkg: Copied file '%s' to folder '%s'";
strMsgCreatePyPkgInitFile = "create_py_pkg: Creating package init file '%s'";
strMsgCreatePyPkgMkDir = "create_py_pkg: Created folder '%s'";
strMsgConfigBuildDir = "Configuration build directory located at '%s'";
strMsgFoundLldbFrameWkDir = "Found '%s'";
strMsgPyFileLocatedHere = "Python file will be put in '%s'";
strMsgFrameWkPyExists = "Python output folder '%s' already exists";
strMsgFrameWkPyMkDir = "Python output folder '%s' will be created";
strErrMsgCreateFrmWkPyDirFailed = "Unable to create directory '%s' error: %s";
strMsgSymlinkExists = "Symlink for '%s' already exists";
strMsgSymlinkMk = "Creating symlink for %s (%s -> %s)";
strErrMsgCpLldbpy = "copying lldb to lldb package directory";
strErrMsgCreatePyPkgMissingSlash = "Parameter 3 fn create_py_pkg() missing slash";
strErrMsgMkLinkExecute = "Command mklink failed: %s";
strErrMsgMakeSymlink = "creating symbolic link";
strErrMsgUnexpected = "Unexpected error: %s";
def is_debug_interpreter():
return hasattr(sys, 'gettotalrefcount')
#++---------------------------------------------------------------------------
# Details: Copy files needed by lldb/macosx/heap.py to build libheap.dylib.
# Args: vDictArgs - (R) Program input parameters.
# vstrFrameworkPythonDir - (R) Python framework directory.
# Returns: Bool - True = function success, False = failure.
# Str - Error description on task failure.
# Throws: None.
#--
def macosx_copy_file_for_heap( vDictArgs, vstrFrameworkPythonDir ):
dbg = utilsDebug.CDebugFnVerbose( "Python script macosx_copy_file_for_heap()" );
bOk = True;
strMsg = "";
eOSType = utilsOsType.determine_os_type();
if eOSType != utilsOsType.EnumOsType.Darwin:
return (bOk, strMsg);
strHeapDir = os.path.join(vstrFrameworkPythonDir, "macosx", "heap");
strHeapDir = os.path.normcase( strHeapDir );
if (os.path.exists( strHeapDir ) and os.path.isdir( strHeapDir )):
return (bOk, strMsg);
os.makedirs( strHeapDir );
strRoot = os.path.normpath(vDictArgs[ "--srcRoot" ]);
strSrc = os.path.join(strRoot, "examples", "darwin", "heap_find", "heap", "heap_find.cpp");
shutil.copy( strSrc, strHeapDir );
strSrc = os.path.join(strRoot, "examples", "darwin", "heap_find", "heap", "Makefile");
shutil.copy( strSrc, strHeapDir );
return (bOk, strMsg);
#++---------------------------------------------------------------------------
# Details: Create Python packages and Python __init__ files.
# Args: vDictArgs - (R) Program input parameters.
# vstrFrameworkPythonDir - (R) Python framework directory.
# vstrPkgDir - (R) Destination for copied Python files.
# vListPkgFiles - (R) List of source Python files.
# Returns: Bool - True = function success, False = failure.
# Str - Error description on task failure.
# Throws: None.
#--
def create_py_pkg( vDictArgs, vstrFrameworkPythonDir, vstrPkgDir, vListPkgFiles ):
dbg = utilsDebug.CDebugFnVerbose( "Python script create_py_pkg()" );
dbg.dump_object( "Package file(s):", vListPkgFiles );
bDbg = vDictArgs.has_key( "-d" );
bOk = True;
strMsg = "";
if vstrPkgDir.__len__() != 0 and vstrPkgDir[ 0 ] != "/":
bOk = False;
strMsg = strErrMsgCreatePyPkgMissingSlash;
return (bOk, strMsg);
strPkgName = vstrPkgDir;
strPkgName = "lldb" + strPkgName.replace( "/", "." );
strPkgDir = vstrFrameworkPythonDir;
strPkgDir += vstrPkgDir;
strPkgDir = os.path.normcase( strPkgDir );
if not(os.path.exists( strPkgDir ) and os.path.isdir( strPkgDir )):
if bDbg:
print(strMsgCreatePyPkgMkDir % strPkgDir);
os.makedirs( strPkgDir );
for strPkgFile in vListPkgFiles:
if os.path.exists( strPkgFile ) and os.path.isfile( strPkgFile ):
if bDbg:
print(strMsgCreatePyPkgCopyPkgFile % (strPkgFile, strPkgDir));
shutil.copy( strPkgFile, strPkgDir );
# Create a package __init__.py file if there wasn't one
strPkgIniFile = os.path.normpath(os.path.join(strPkgDir, "__init__.py"));
if os.path.exists( strPkgIniFile ) and os.path.isfile( strPkgIniFile ):
return (bOk, strMsg);
strPyScript = "__all__ = [";
strDelimiter = "";
for strPkgFile in vListPkgFiles:
if os.path.exists( strPkgFile ) and os.path.isfile( strPkgFile ):
strBaseName = os.path.basename( strPkgFile );
nPos = strBaseName.find( "." );
if nPos != -1:
strBaseName = strBaseName[ 0 : nPos ];
strPyScript += "%s\"%s\"" % (strDelimiter, strBaseName);
strDelimiter = ",";
strPyScript += "]\n";
strPyScript += "for x in __all__:\n";
strPyScript += "\t__import__('%s.' + x)" % strPkgName;
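# Illustrative example (hypothetical file names): for package files "foo.py"
# and "bar.py" under vstrPkgDir "/utils", the generated __init__.py would be:
#   __all__ = ["foo","bar"]
#   for x in __all__:
#       __import__('lldb.utils.' + x)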
if bDbg:
print(strMsgCreatePyPkgInitFile % strPkgIniFile);
file = open( strPkgIniFile, "w" );
file.write( strPyScript );
file.close();
return (bOk, strMsg);
#++---------------------------------------------------------------------------
# Details: Copy the lldb.py file into the lldb package directory and rename it
# to __init__.py.
# Args: vDictArgs - (R) Program input parameters.
# vstrFrameworkPythonDir - (R) Python framework directory.
# vstrCfgBldDir - (R) Config directory path.
# Returns: Bool - True = function success, False = failure.
# Str - Error description on task failure.
# Throws: None.
#--
def copy_lldbpy_file_to_lldb_pkg_dir( vDictArgs, vstrFrameworkPythonDir, vstrCfgBldDir ):
dbg = utilsDebug.CDebugFnVerbose( "Python script copy_lldbpy_file_to_lldb_pkg_dir()" );
bOk = True;
bDbg = vDictArgs.has_key( "-d" );
strMsg = "";
strSrc = os.path.join(vstrCfgBldDir, "lldb.py");
strSrc = os.path.normcase( strSrc );
strDst = os.path.join(vstrFrameworkPythonDir, "__init__.py");
strDst = os.path.normcase( strDst );
if not os.path.exists( strSrc ):
bOk = False;
strMsg = strErrMsgLLDBPyFileNotNotFound % strSrc;
return (bOk, strMsg);
try:
if bDbg:
print(strMsgCopyLLDBPy % (strSrc, strDst));
shutil.copyfile( strSrc, strDst );
except IOError as e:
bOk = False;
strMsg = "I/O error( %d ): %s %s" % (e.errno, e.strerror, strErrMsgCpLldbpy);
if e.errno == 2:
strMsg += " Src:'%s' Dst:'%s'" % (strSrc, strDst);
except:
bOk = False;
strMsg = strErrMsgUnexpected % sys.exc_info()[ 0 ];
return (bOk, strMsg);
#++---------------------------------------------------------------------------
# Details: Make the symbolic link on a Windows platform.
# Args: vstrSrcFile - (R) Source file name.
# vstrTargetFile - (R) Destination file name.
# Returns: Bool - True = function success, False = failure.
# Str - Error description on task failure.
# Throws: None.
#--
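# Note: despite the name, the Windows implementation below calls
# CreateHardLinkW, so the "symlink" is actually a hard link (presumably to
# avoid the elevated privileges that true symbolic links require on Windows).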
def make_symlink_windows( vstrSrcPath, vstrTargetPath ):
print("Making symlink from %s to %s" % (vstrSrcPath, vstrTargetPath));
dbg = utilsDebug.CDebugFnVerbose( "Python script make_symlink_windows()" );
bOk = True;
strErrMsg = "";
try:
csl = ctypes.windll.kernel32.CreateHardLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
if csl(vstrTargetPath, vstrSrcPath, 0) == 0:
raise ctypes.WinError()
except Exception as e:
if e.errno != 17:
bOk = False;
strErrMsg = "WinError( %d ): %s %s" % (e.errno, e.strerror, strErrMsgMakeSymlink);
strErrMsg += " Src:'%s' Target:'%s'" % (vstrSrcPath, vstrTargetPath);
return (bOk, strErrMsg);
#++---------------------------------------------------------------------------
# Details: Make the symbolic link on a UNIX style platform.
# Args: vstrSrcFile - (R) Source file name.
# vstrTargetFile - (R) Destination file name.
# Returns: Bool - True = function success, False = failure.
# Str - Error description on task failure.
# Throws: None.
#--
def make_symlink_other_platforms( vstrSrcPath, vstrTargetPath ):
dbg = utilsDebug.CDebugFnVerbose( "Python script make_symlink_other_platforms()" );
bOk = True;
strErrMsg = "";
try:
os.symlink( vstrSrcPath, vstrTargetPath );
except OSError as e:
bOk = False;
strErrMsg = "OSError( %d ): %s %s" % (e.errno, e.strerror, strErrMsgMakeSymlink);
strErrMsg += " Src:'%s' Target:'%s'" % (vstrSrcPath, vstrTargetPath);
except:
bOk = False;
strErrMsg = strErrMsgUnexpected % sys.exc_info()[ 0 ];
return (bOk, strErrMsg);
#++---------------------------------------------------------------------------
# Details: Make the symbolic link.
# Args: vDictArgs - (R) Program input parameters.
# vstrFrameworkPythonDir - (R) Python framework directory.
# vstrSrcFile - (R) Source file name.
# vstrTargetFile - (R) Destination file name.
# Returns: Bool - True = function success, False = failure.
# Str - Error description on task failure.
# Throws: None.
#--
def make_symlink( vDictArgs, vstrFrameworkPythonDir, vstrSrcFile, vstrTargetFile ):
dbg = utilsDebug.CDebugFnVerbose( "Python script make_symlink()" );
large reach sets.
order: the order of the sets. Should be one of set_generator.ORDER_XXX.
Returns:
A list of ScenarioConfigs of scenario 4(b) with subset sets.
"""
scenario_config_list = []
for num_large_sets in [1, int(num_sets / 2), num_sets - 1]:
scenario_config_list.append(
ScenarioConfig(
name='-'.join([
'subset',
'universe_size:' + str(universe_size),
'order:' + str(order),
'num_large_sets:' + str(num_large_sets),
'num_small_sets:' + str(num_sets - num_large_sets),
'large_set_size:' + str(large_set_size),
'small_set_size:' + str(small_set_size),
]),
set_generator_factory=(
set_generator.SubSetGenerator
.get_generator_factory_with_num_and_size(
order=order,
universe_size=universe_size,
num_large_sets=num_large_sets,
num_small_sets=num_sets - num_large_sets,
large_set_size=large_set_size,
small_set_size=small_set_size)))
)
return scenario_config_list
def _generate_configs_scenario_5(num_sets, small_set_size,
large_set_size, order, shared_prop_list):
"""Generate configs of Scenario 5.
In this scenario, the sets are sequentially correlated.
See Scenario 5: Sequentially correlated campaigns for more details:
https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#scenario-5-sequentially-correlated-campaigns
Args:
num_sets: the number of sets.
small_set_size: the reach of the small reach sets.
large_set_size: the reach of the large reach sets.
order: the order of the sets. Should be one of set_generator.ORDER_XXX.
shared_prop_list: a sequence of the shared proportion of sequentially
correlated sets.
Returns:
A list of ScenarioConfigs of scenario 5, sequentially correlated sets.
"""
name_to_choices_of_set_size_list = {
**_get_default_name_to_choices_of_set_size_list(
small_set_size, large_set_size, num_sets
),
'large_then_last_small': [large_set_size] * (num_sets - 1) + [
small_set_size],
'all_large_except_middle_small': (
[large_set_size] * int(num_sets / 2) + [small_set_size]
+ [large_set_size] * (num_sets - 1 - int(num_sets / 2))),
'1st_large_then_small': [large_set_size] + [small_set_size] * (
num_sets - 1),
'all_small_except_middle_large': (
[small_set_size] * int(num_sets / 2) + [large_set_size]
+ [small_set_size] * (num_sets - 1 - int(num_sets / 2))),
'1st_half_large_2nd_half_small': (
[large_set_size] * int(num_sets / 2)
+ [small_set_size] * (num_sets - int(num_sets / 2))),
'repeated_small_large': (
[small_set_size, large_set_size] * int(num_sets / 2)
+ ([] if num_sets % 2 == 0 else [small_set_size]))
}
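# Each entry maps a descriptive set-type name to a per-publisher set-size
# list of length num_sets; e.g. with num_sets = 5, 'repeated_small_large'
# yields [small, large, small, large, small].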
scenario_config_list = []
for correlated_sets in (set_generator.CORRELATED_SETS_ONE,
set_generator.CORRELATED_SETS_ALL):
for shared_prop in shared_prop_list:
for set_type, set_size_list in name_to_choices_of_set_size_list.items():
scenario_config_list.append(
ScenarioConfig(
name='-'.join([
'sequentially_correlated',
'order:' + str(order),
'correlated_sets:' + str(correlated_sets),
'shared_prop:' + str(shared_prop),
'set_type:' + str(set_type),
'large_set_size:' + str(large_set_size),
'small_set_size:' + str(small_set_size)
]),
set_generator_factory=(
set_generator.SequentiallyCorrelatedSetGenerator.
get_generator_factory_with_set_size_list(
order=order,
correlated_sets=correlated_sets,
shared_prop=shared_prop,
set_size_list=set_size_list)))
)
return scenario_config_list
def _generate_freq_configs_scenario_1(universe_size, num_sets, set_size):
"""Generate configs of Frequency Scenario 1.
See Frequency Scenario 1: Homogeneous user activities within a publisher for more details:
https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#frequency-scenario-1-homogeneous-user-activities-within-a-publisher-1
Args:
universe_size: the size of the universe from which IDs are drawn.
num_sets: the number of sets.
set_size: the size of each set, assuming they are all equal.
Returns:
A list of ScenarioConfigs of frequency scenario 1, homogeneous user
activities within a publisher.
"""
freq_rate_lists = [0.5, 1, 1.5, 2]
freq_cap_lists = [3, 5, 10]
scenario_config_list = []
for freq_rate, freq_cap in itertools.product(freq_rate_lists, freq_cap_lists):
scenario_config_list.append(
ScenarioConfig(
name='-'.join([
'homogeneous',
'universe_size:' + str(universe_size),
'num_sets:' + str(num_sets),
'freq_rate:' + str(freq_rate),
'freq_cap:' + str(freq_cap),
]),
set_generator_factory=(
frequency_set_generator.HomogeneousMultiSetGenerator.
get_generator_factory_with_num_and_size(
universe_size=universe_size, num_sets=num_sets,
set_size=set_size, freq_rates=[freq_rate]*num_sets,
freq_cap=freq_cap
))),
)
return scenario_config_list
def _generate_freq_configs_scenario_2(universe_size, num_sets, set_size):
"""Generate configs of Frequency Scenario 2.
See Frequency Scenario 2: Heterogeneous user frequency for more details:
https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#frequency-scenario-2-heterogeneous-user-frequency-1
Args:
universe_size: the size of the universe from which IDs are drawn.
num_sets: the number of sets.
set_size: the size of each set, assuming they are all equal.
Returns:
A list of ScenarioConfigs of frequency scenario 2, heterogeneous user frequency.
"""
distribution_rate_lists = [0.5, 1, 1.5, 2]
freq_cap_lists = [3, 5, 10]
scenario_config_list = []
for distribution_rate, freq_cap in itertools.product(
distribution_rate_lists, freq_cap_lists):
scenario_config_list.append(
ScenarioConfig(
name='-'.join([
'heterogeneous',
'universe_size:' + str(universe_size),
'num_sets:' + str(num_sets),
'distribution_rate:' + str(distribution_rate),
'freq_cap:' + str(freq_cap),
]),
set_generator_factory=(
frequency_set_generator.HeterogeneousMultiSetGenerator.
get_generator_factory_with_num_and_size(
universe_size=universe_size, num_sets=num_sets,
set_size=set_size,
gamma_params=[[1,distribution_rate]]*num_sets,
freq_cap=freq_cap
))),
)
return scenario_config_list
def _generate_freq_configs_scenario_3(universe_size, num_sets, set_size):
"""Generate configs of Frequency Scenario 3.
This is a stress test, in which each publisher serves a fixed number of
impressions to every reached id.
See Frequency Scenario 3: Per-publisher frequency capping:
https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#frequency-scenario-3-per-publisher-frequency-capping
Args:
universe_size: the size of the universe from which IDs are drawn.
num_sets: the number of sets.
set_size: the size of each set, assuming they are all equal.
Returns:
A list of ScenarioConfigs of freq scenario 3 per-publisher frequency
capping.
"""
frequency_list = [2, 3, 5, 10]
scenario_config_list = []
for frequency in frequency_list:
scenario_config_list.append(
ScenarioConfig(
name='-'.join([
'publisher_constant_frequency',
'universe_size:' + str(universe_size),
'num_sets:' + str(num_sets),
'frequency:' + str(frequency),
]),
set_generator_factory=(
frequency_set_generator.PublisherConstantFrequencySetGenerator
.get_generator_factory_with_num_and_size(
universe_size=universe_size,
num_sets=num_sets,
set_size=set_size,
frequency=frequency)
)),
)
return scenario_config_list
def _complete_frequency_test_with_selected_parameters(
num_runs=NUM_RUNS_VALUE,
universe_size=FREQ_UNIVERSE_SIZE,
num_sets=NUM_SETS_VALUE_FREQ,
set_size=SET_SIZE_FOR_FREQ):
"""Generate configurations with hand-selected parameters for the scenarios.
This evaluation covers the frequency simulation scenarios.
Args:
num_runs: the number of runs per scenario * parameter setting.
universe_size: the size of the pools from which the IDs will be selected.
num_sets: the number of sets.
set_size: the reach of each publisher, assuming all publishers have the
same reach.
Returns:
An EvaluationConfig.
"""
scenario_config_list = []
# Scenario 1. Homogeneous user activities within a publisher
scenario_config_list += _generate_freq_configs_scenario_1(
universe_size, num_sets, set_size)
# Scenario 2. Heterogeneous user frequency
scenario_config_list += _generate_freq_configs_scenario_2(
universe_size, num_sets, set_size)
# Scenario 3. Per-publisher frequency capping.
scenario_config_list += _generate_freq_configs_scenario_3(
universe_size, num_sets, set_size)
return EvaluationConfig(
name='complete_frequency_test_with_selected_parameters',
num_runs=num_runs,
scenario_config_list=scenario_config_list)
def _complete_test_with_selected_parameters(
num_runs=NUM_RUNS_VALUE,
universe_size=UNIVERSE_SIZE_VALUE,
num_sets=NUM_SETS_VALUE,
order=set_generator.ORDER_RANDOM,
small_set_size_rate=SMALL_REACH_RATE_VALUE,
large_set_size_rate=LARGE_REACH_RATE_VALUE,
remarketing_rate=REMARKETING_RATE_VALUE,
shared_prop_list=SHARED_PROP_LIST_VALUE):
"""Generate configurations with hand-selected parameters for the scenarios.
This evaluation covers the reach simulation scenarios.
Args:
num_runs: the number of runs per scenario * parameter setting.
universe_size: the size of the pools from which the IDs will be selected.
num_sets: the number of sets.
order: the order of the sets. Should be one of set_generator.ORDER_XXX.
small_set_size_rate: the reach percentage of the small reach sets.
large_set_size_rate: the reach percentage of the large reach sets.
shared_prop_list: a sequence of the shared proportion of sequentially
correlated sets.
Returns:
An EvaluationConfig.
"""
scenario_config_list = []
small_set_size = int(small_set_size_rate * universe_size)
large_set_size = int(large_set_size_rate * universe_size)
# Scenario 1. Independent publishers
scenario_config_list += _generate_configs_scenario_1_2(
universe_size, num_sets, small_set_size, large_set_size)
# Scenario 2. publishers independently serve a remarketing list
scenario_config_list += _generate_configs_scenario_1_2(
universe_size, num_sets, small_set_size, large_set_size, remarketing_rate)
# Scenario 3 (a). Exponential bow, independent user behavior.
scenario_config_list += _generate_configs_scenario_3(
universe_size, num_sets, small_set_size, large_set_size,
set_generator.USER_ACTIVITY_ASSOCIATION_INDEPENDENT)
# Scenario 3 (b). Exponential bow, identical user behavior.
scenario_config_list += _generate_configs_scenario_3(
universe_size, num_sets, small_set_size, large_set_size,
set_generator.USER_ACTIVITY_ASSOCIATION_IDENTICAL)
# Scenario 4(a). Fully-overlapped.
scenario_config_list += _generate_configs_scenario_4a(
universe_size, num_sets, small_set_size, large_set_size)
# Scenario 4(b). Subset campaigns.
scenario_config_list += _generate_configs_scenario_4b(
universe_size, num_sets, small_set_size, large_set_size, order)
# Scenario 5. Sequentially correlated campaigns
scenario_config_list += _generate_configs_scenario_5(
num_sets, small_set_size, large_set_size, order,
shared_prop_list)
return EvaluationConfig(
name='complete_test_with_selected_parameters',
num_runs=num_runs,
scenario_config_list=scenario_config_list)
def _stress_test_cardinality_global_dp(universe_size=None,
num_runs=NUM_RUNS_VALUE):
"""Stress test for cardinality estimator under global DP."""
# The universe_size argument is included to conform to the run_evaluation
# module.
_ = universe_size
scenario_config_list = []
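# Each scenario below uses a single set of the given reach (the
# DisjointSetGenerator receives a one-element set_sizes list), so the
# estimator is exercised on one cardinality at a time.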
for scenario_id, reach in enumerate(sorted(REACH_STRESS_TEST)):
scenario_config_list.append(ScenarioConfig(
name=f'{scenario_id}-reach:{reach}',
set_generator_factory=(
set_generator.DisjointSetGenerator
.get_generator_factory_with_set_size_list(
set_sizes=[reach]))))
return EvaluationConfig(
name='global_dp_stress_test',
num_runs=num_runs,
scenario_config_list=scenario_config_list)
def _frequency_end_to_end_test(universe_size=10000, num_runs=NUM_RUNS_VALUE):
"""EvaluationConfig of end-to-end test of frequency evaluation code."""
num_sets = 3
set_size = int(universe_size * REACH_RATE_FREQ_END_TO_END_TEST)
freq_rates = [1, 2, 3]
freq_cap = 5
return EvaluationConfig(
name='frequency_end_to_end_test',
num_runs=num_runs,
scenario_config_list=[
ScenarioConfig(
name='-'.join([
'subset',
'universe_size:' + str(universe_size),
'num_sets:' + str(num_sets)
]),
set_generator_factory=(
frequency_set_generator.HomogeneousMultiSetGenerator
.get_generator_factory_with_num_and_size(
universe_size=universe_size,
num_sets=num_sets,
set_size=set_size,
freq_rates=freq_rates,
freq_cap=freq_cap)))]
)
def _generate_evaluation_configs():
return (
_smoke_test,
_complete_test_with_selected_parameters,
_stress_test_cardinality_global_dp,
_frequency_end_to_end_test,
_frequency_smoke_test,
_complete_frequency_test_with_selected_parameters,
)
def get_evaluation_config(config_name):
"""Returns the evaluation config with the specified config_name."""
configs = _generate_evaluation_configs()
valid_config_names = [c().name for c in configs]
duplicate_configs = []
for i in range(len(valid_config_names)-1):
if valid_config_names[i] in valid_config_names[(i+1):]:
duplicate_configs.append(valid_config_names[i])
if duplicate_configs:
raise ValueError("Duplicate names found in evaluation configs: {}".
format(','.join(duplicate_configs)))
config = [c for c in configs if c().name == config_name]
if not config:
raise ValueError("Invalid evaluation config: {}\n"
"Valid choices are as follows: {}".format(
config_name, ','.join(valid_config_names)))
return config[0]
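# Example usage (a sketch; 'smoke_test' is a hypothetical config name -- valid
# names are the .name values of the registered evaluation configs above):
#   config_factory = get_evaluation_config('smoke_test')
#   evaluation_config = config_factory()  # an EvaluationConfig instance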
def _format_epsilon(dp_type, epsilon=None, decimals=EPSILON_DECIMALS):
"""Format epsilon value to string.
Args:
dp_type: one of LOCAL_DP_STR and GLOBAL_DP_STR.
epsilon: an optional differential private parameter. By default set to None.
decimals: an integer value which set the number of decimal points of the
epsilon to keep. By default, set to EPSILON_DECIMALS.
Returns:
A string representation of epsilon.
Raises:
ValueError: if dp_type is not one of 'local' and 'global'.
"""
if epsilon is None:
if dp_type == GLOBAL_DP_STR:
return NO_GLOBAL_DP_STR
elif dp_type == LOCAL_DP_STR:
return NO_LOCAL_DP_STR
else:
raise ValueError(f'dp_type should be one of "{GLOBAL_DP_STR}" and '
f'"{LOCAL_DP_STR}".')
str_format = dp_type + '_' + '{:0.' + str(decimals) + 'f}'
return str_format.format(float(epsilon))
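# Example (assuming GLOBAL_DP_STR == 'global' and EPSILON_DECIMALS == 4):
#   _format_epsilon('global', epsilon=1)  -> 'global_1.0000'
#   _format_epsilon('global')             -> NO_GLOBAL_DP_STR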
def _format_privacy_parameters(dp_type, epsilon=None, delta=None, num_queries=1,
noise_type=None,
epsilon_decimals=EPSILON_DECIMALS,
delta_decimals=DELTA_DECIMALS):
"""Format privacy parameters to string.
Args:
dp_type: one of LOCAL_DP_STR and GLOBAL_DP_STR.
"""
Personal Reader Emotion Topic model,
extended from TTM
<EMAIL>
"""
import numpy as np
from scipy.sparse import lil_matrix
from scipy.special import gammaln
from datetime import datetime
from datetime import timedelta
from tqdm import tqdm
import cPickle
from functions import probNormalize, multinomial, logfactorial, probNormalizeLog, logfactorialSparse
from dataDUE_generator import dataDUE
np.seterr(divide='raise')
class PRET(object):
def __init__(self, K, G):
"""
:param K: # topics
:param G: # groups
"""
# model hyperparameters #
self.alpha = 0.1 # topic distribution prior
self.beta = 0.01 # topic-word distribution prior
self.gamma = 0.1 # (topic * group)-emotion distribution prior
self.delta = 0.01 # background-vs-topic distribution prior
self.zeta = 0.1 # user-group distribution
# data dimension #
self.E = 0 # number of emotions
self.K = K # number of topics
self.G = G # number of groups
self.D = 0 # number of documents
self.Nd = [] # number of words of documents (varying over docs)
self.Md = [] # number of emotions of documents (varying over docs)
self.V = 0 # size of vocabulary
self.U = 0 # number of users
# model latent variables #
self.theta = None # corpus-level topic distribution [self.K]
self.pi = None # background-vs-topic distribution
self.eta = None # topic-emotion distribution [self.K, self.G, self.E]
self.phiB = None # background word distribution [self.V]
self.phiT = None # topic-word distribution [self.K, self.V]
self.psi = None # user-group distribution [self.U, self.G]
self.z = None # document-level topic [self.D]
self.y = None # word-level background-vs-topic indicator "[self.D, self.Nd]"
self.x = None # emoticon-level group indicator "[self.D, self.Md]"
# intermediate variables for fitting #
self.YI = None # count of background-vs-topic indicator over corpus [2]
self.Y0V = None # count of background word [self.V]
self.Y1TV = None # count of topic-word cooccurrences [self.K, self.V]
self.TI = None # count of topic [self.K]
self.TXE = None # count of topic-group-emotion cooccurrences [self.K, self.G, self.E]
self.UX = None # count of user-group cooccurrences [self.U, self.G]
self.DY1V = None # count of document-level topic-specific word lil_matrix [self.D, self.V]
self.DXE = None # count of document-level group-emotion cooccurrences [self.G, self.E]
# save & restore #
self.checkpoint_file = "ckpt/PRET"
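# Sampler overview (a summary of the collapsed Gibbs updates implemented
# below): each document d gets a topic z[d], each word a background-vs-topic
# flag y[d][n], and each emoticon a group indicator x[d][m]. The count arrays
# YI/Y0V/Y1TV/TI/TXE/UX/DY1V/DXE are the sufficient statistics; every update
# subtracts the current assignment (leave-one-out), samples from the
# resulting conditional, and adds the new assignment back.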
def fit(self, dataDUE, dataW, corpus=None, alpha=0.1, beta=0.01, gamma=0.1, delta=0.01, zeta=0.1, max_iter=500, resume=None):
"""
Collapsed Gibbs sampler
:param dataDUE: data generator for each document id, generate [[reader_id], [emoticon]]
:param dataW: Indexed corpus np.ndarray([self.D, self.V]) scipy.sparse.csr_matrix
"""
self._setHyperparameters(alpha, beta, gamma, delta, zeta)
if corpus is None:
dataToken = self._matrix2corpus(dataW=dataW)
else:
dataToken = corpus
self._setDataDimension(dataDUE=dataDUE, dataW=dataW, dataToken=dataToken)
if resume is None:
self._initialize(dataDUE=dataDUE, dataW=dataW, dataToken=dataToken)
else:
self._restoreCheckPoint(filename=resume)
self._intermediateParameterInitialize(dataDUE=dataDUE, dataW=dataW, dataToken=dataToken)
ppl_initial = self._ppl(dataDUE=dataDUE, dataW=dataW, dataToken=dataToken)
print "before training, ppl: %s" % str(ppl_initial)
## Gibbs Sampling ##
for epoch in range(max_iter):
self._GibbsSamplingLocal(dataDUE=dataDUE, dataW=dataW, dataToken=dataToken, epoch=epoch)
self._estimateGlobal(dataDUE)
ppl = self._ppl(dataDUE=dataDUE, dataW=dataW, dataToken=dataToken)
print "epoch: %d, ppl: %s" % (epoch, str(ppl))
self._saveCheckPoint(epoch, ppl)
def _setHyperparameters(self, alpha, beta, gamma, delta, zeta):
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.delta = delta
self.zeta = zeta
""" copied from ETM """
def _matrix2corpus(self, dataW):
start = datetime.now()
dataToken = []
for d in range(dataW.shape[0]):
docW = dataW.getrow(d)
docToken = []
for w_id in docW.indices:
w_freq = docW[0, w_id]
for i in range(w_freq):
docToken.append(w_id)
dataToken.append(docToken)
duration = datetime.now() - start
print "_matrix2corpus() takes %fs" % duration.total_seconds()
return dataToken
def _setDataDimension(self, dataDUE, dataW, dataToken):
self.E = dataDUE.E
self.U = dataDUE.U
self.Md = dataDUE.Md
self.D = dataW.shape[0]
self.Nd = map(lambda x: len(x), dataToken)
self.V = dataW.shape[1]
def _initialize(self, dataDUE, dataW, dataToken):
start = datetime.now()
self.theta = probNormalize(np.random.random([self.K]))
self.pi = probNormalize(np.random.random([2]))
self.eta = probNormalize(np.random.random([self.K, self.G, self.E]))
self.phiB = probNormalize(np.random.random([self.V]))
self.phiT = probNormalize(np.random.random([self.K, self.V]))
self.psi = probNormalize(np.random.random([self.U, self.G]))
self.z = np.zeros([self.D], dtype=np.int8)
self.y = []
self.x = []
for d in range(self.D):
self.z[d] = multinomial(self.theta)
self.y.append(multinomial(self.pi, self.Nd[d]))
doc_x = []
for m in range(self.Md[d]):
u = np.random.randint(0,self.U)
doc_x.append(multinomial(self.psi[u]))
self.x.append(np.array(doc_x, dtype=np.int8))
duration = datetime.now() - start
print "_initialize() takes %fs" % duration.total_seconds()
def _intermediateParameterInitialize(self, dataDUE, dataW, dataToken):
self.YI = np.zeros([2], dtype=np.int32)
self.Y0V = np.zeros([self.V], dtype=np.int32)
self.Y1TV = np.zeros([self.K, self.V], dtype=np.int32)
self.TI = np.zeros([self.K], dtype=np.int32)
self.TXE = np.zeros([self.K, self.G, self.E], dtype=np.int32)
self.UX = np.zeros([self.U, self.G], dtype=np.int16)
self.DY1V = lil_matrix((self.D, self.V), dtype = np.int8)
self.DXE = np.zeros([self.D, self.G, self.E], dtype = np.int32)
for d, [doc_u, doc_e] in dataDUE.generate(batch_size=1, random_shuffle=False):
self.TI[self.z[d]] += 1
docToken = dataToken[d]
doc_z = self.z[d]
doc_y = self.y[d]
doc_x = self.x[d]
for n in range(self.Nd[d]):
w = docToken[n]
w_y = doc_y[n]
self.YI[w_y] += 1
if w_y == 0:
self.Y0V[w] += 1
else:
self.Y1TV[doc_z, w] += 1
self.DY1V[d, w] += 1
for m in range(self.Md[d]):
u = doc_u[m]
e = doc_e[m]
x = doc_x[m]
self.TXE[doc_z, x, e] += 1
self.UX[u, x] += 1
self.DXE[d, x, e] += 1
def _GibbsSamplingLocal(self, dataDUE, dataW, dataToken, epoch):
"""
Gibbs sampling word-level background-vs-topic
document-level topic
emoticon-level group
"""
pbar = tqdm(dataDUE.generate(batch_size=1, random_shuffle=False),
total = self.D,
desc = '({0:^3})'.format(epoch))
for d, [doc_u, doc_e] in pbar:
docToken = dataToken[d]
docW = dataW[d]
doc_Nd = self.Nd[d]
doc_Md = self.Md[d]
# update document-level topic #
self._doc_z_update(d, doc_u, doc_e, docW, docToken)
# update word-level background-vs-topic #
Y1T = np.sum(self.Y1TV, axis=1)
for n in xrange(doc_Nd):
Y1T = self._y_update(d, n, doc_u, doc_e, docW, docToken, Y1T)
# update emoticon-level group #
TX = np.sum(self.TXE, axis=-1)
for m in xrange(doc_Md):
TX = self._x_update(d, m, doc_u, doc_e, docW, docToken, TX)
def _doc_z_update(self, d, doc_u, doc_e, docW, docToken):
""" update document-level topic """
doc_z = self.z[d]
doc_XE = self.DXE[d]
doc_Y1V = self.DY1V.getrow(d)
doc_Y1V_array = doc_Y1V.toarray().squeeze()
# calculate leave-one out statistics #
TI_no_d, TXE_no_d, Y1TV_no_d = self.TI, self.TXE, self.Y1TV
TI_no_d[doc_z] += -1
TXE_no_d[doc_z,:,:] += - doc_XE
Y1TV_no_d[doc_z,:] += - doc_Y1V_array
# conditional probability #
prob_doc_z = self._prob_doc_z(TI_no_d, TXE_no_d, Y1TV_no_d, doc_XE, doc_Y1V)
# new sampled result #
doc_z_new = multinomial(prob_doc_z)
# update #
self.z[d] = doc_z_new
TI_no_d[doc_z_new] += 1
TXE_no_d[doc_z_new, :, :] += doc_XE
Y1TV_no_d[doc_z_new, :] += doc_Y1V_array
self.TI, self.TXE, self.Y1TV = TI_no_d, TXE_no_d, Y1TV_no_d
def _prob_doc_z(self, TI_no_d, TXE_no_d, Y1TV_no_d, doc_XE, doc_Y1V):
"""
calculate conditional probability for document-level topic doc_z
:param: doc_Y1V: lil_matrix((1, self.V), dtype=int8)
"""
# alpha #
log_prob_alpha = np.log(TI_no_d + self.alpha)
# gamma # (without sparsity, directly calculating log gamma function)
a = TXE_no_d + self.gamma + doc_XE
log_prob_gamma = np.sum(np.sum(gammaln(a), axis=-1) - gammaln(np.sum(a, axis=-1)), axis=-1)
# beta # (with sparsity)
b = Y1TV_no_d + self.beta
log_prob_beta = np.sum(logfactorialSparse(doc_Y1V, b), axis=-1) - logfactorial(doc_Y1V.sum(), np.sum(b, axis=-1))
prob_doc_z = probNormalizeLog(log_prob_alpha + log_prob_gamma + log_prob_beta)
return prob_doc_z
def _y_update(self, d, n, doc_u, doc_e, docW, docToken, Y1T):
"""
update word-level background-vs-topic indicator
"""
w = docToken[n]
w_y = self.y[d][n]
doc_z = self.z[d]
# calculate leave-one out statistics #
YI_no_dn_y, Y0V_no_dn_y, Y1TV_no_dn_y = self.YI, self.Y0V, self.Y1TV
Y1T_no_dn_y = Y1T
YI_no_dn_y[w_y] += -1
if w_y == 0:
Y0V_no_dn_y[w] += -1
else:
Y1TV_no_dn_y[doc_z, w] += -1
Y1T_no_dn_y[doc_z] += -1
self.DY1V[d, w] += -1 # delete w_y == 1 word
# conditional probability #
prob_w_y_unnorm = np.zeros([2], dtype=np.float32)
prob_w_y_unnorm[0] = (self.delta + YI_no_dn_y[0]) * (self.beta + Y0V_no_dn_y[w]) / \
(self.V * self.beta + YI_no_dn_y[0])
prob_w_y_unnorm[1] = (self.delta + YI_no_dn_y[1]) * (self.beta + Y1TV_no_dn_y[doc_z, w]) / \
(self.V * self.beta + Y1T_no_dn_y[doc_z])
prob_w_y = probNormalize(prob_w_y_unnorm)
# new sampled result #
w_y_new = multinomial(prob_w_y)
# update #
self.y[d][n] = w_y_new
YI_no_dn_y[w_y_new] += 1
if w_y_new == 0:
Y0V_no_dn_y[w] += 1
else:
Y1TV_no_dn_y[doc_z, w] += 1
Y1T_no_dn_y[doc_z] += 1
self.DY1V[d, w] += 1 # add back word with w_y_new == 1
self.YI, self.Y0V, self.Y1TV = YI_no_dn_y, Y0V_no_dn_y, Y1TV_no_dn_y
Y1T = Y1T_no_dn_y
return Y1T
def _x_update(self, d, m, doc_u, doc_e, docW, docToken, TX):
""" update emoticon-level group indicator"""
doc_z = self.z[d]
u = doc_u[m]
e = doc_e[m]
x = self.x[d][m]
# calculate leave-one out statistics #
TXE_no_dm, UX_no_dm, TX_no_dm = self.TXE, self.UX, TX
TXE_no_dm[doc_z, x, e] += -1
UX_no_dm[u, x] += -1
TX_no_dm[doc_z, x] += -1
# calculate conditional probability #
prob_gamma = (self.gamma + TXE_no_dm[doc_z,:,e]) / (self.E * self.gamma + TX_no_dm[doc_z, :])
prob_zeta = self.zeta + UX_no_dm[u, :]
prob_x = probNormalize(prob_gamma * prob_zeta)
# new sampled result #
x_new = multinomial(prob_x)
# update #
self.x[d][m] = x_new
TXE_no_dm[doc_z, x_new, e] += 1
UX_no_dm[u, x_new] += 1
else:
calp1 = max(-1.0, x)
salp1 = math.sqrt(1 - Math.sq(calp1))
else:
# Estimate alp1, by solving the astroid problem.
#
# Could estimate alpha1 = theta + pi/2, directly, i.e.,
# calp1 = y/k; salp1 = -x/(1+k); for _f >= 0
# calp1 = x/(1+k); salp1 = -y/k; for _f < 0 (need to check)
#
# However, it's better to estimate omg12 from astroid and use
# spherical formula to compute alp1. This reduces the mean number of
# Newton iterations for astroid cases from 2.24 (min 0, max 6) to 2.12
# (min 0 max 5). The changes in the number of iterations are as
# follows:
#
# change percent
# 1 5
# 0 78
# -1 16
# -2 0.6
# -3 0.04
# -4 0.002
#
# The histogram of iterations is (m = number of iterations estimating
# alp1 directly, n = number of iterations estimating via omg12, total
# number of trials = 148605):
#
# iter m n
# 0 148 186
# 1 13046 13845
# 2 93315 102225
# 3 36189 32341
# 4 5396 7
# 5 455 1
# 6 56 0
#
# Because omg12 is near pi, the estimate works with omg12a = pi - omg12
k = Geodesic.Astroid(x, y)
if self._f >= 0:
omg12a = lamscale * -x * k/(1 + k)
else:
omg12a = lamscale * -y * (1 + k)/k
somg12 = math.sin(omg12a); comg12 = -math.cos(omg12a)
# Update spherical estimate of alp1 using omg12 instead of lam12
salp1 = cbet2 * somg12
calp1 = sbet12a - cbet2 * sbet1 * Math.sq(somg12) / (1 - comg12)
salp1, calp1 = Geodesic.SinCosNorm(salp1, calp1)
return sig12, salp1, calp1, salp2, calp2
# return lam12, salp2, calp2, sig12, ssig1, csig1, ssig2, csig2, eps,
# domg12, dlam12
def Lambda12(self, sbet1, cbet1, sbet2, cbet2, salp1, calp1, diffp,
# Scratch areas of the right size
C1a, C2a, C3a):
if sbet1 == 0 and calp1 == 0:
# Break degeneracy of equatorial line. This case has already been
# handled.
calp1 = -Geodesic.tiny_
# sin(alp1) * cos(bet1) = sin(alp0)
salp0 = salp1 * cbet1
calp0 = math.hypot(calp1, salp1 * sbet1) # calp0 > 0
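# (This is Clairaut's relation: sin(alpha) * cos(beta) is constant along a
# geodesic, and that constant is sin(alp0), the azimuth where the geodesic
# crosses the equator.)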
# real somg1, comg1, somg2, comg2, omg12, lam12
# tan(bet1) = tan(sig1) * cos(alp1)
# tan(omg1) = sin(alp0) * tan(sig1) = tan(omg1)=tan(alp1)*sin(bet1)
ssig1 = sbet1; somg1 = salp0 * sbet1
csig1 = comg1 = calp1 * cbet1
ssig1, csig1 = Geodesic.SinCosNorm(ssig1, csig1)
# SinCosNorm(somg1, comg1); -- don't need to normalize!
# Enforce symmetries in the case abs(bet2) = -bet1. Need to be careful
# about this case, since this can yield singularities in the Newton
# iteration.
# sin(alp2) * cos(bet2) = sin(alp0)
if cbet2 != cbet1:
salp2 = salp0 / cbet2
else:
salp2 = salp1
# calp2 = sqrt(1 - sq(salp2))
# = sqrt(sq(calp0) - sq(sbet2)) / cbet2
# and subst for calp0 and rearrange to give (choose positive sqrt
# to give alp2 in [0, pi/2]).
if cbet2 != cbet1 or abs(sbet2) != -sbet1:
if cbet1 < -sbet1:
calp2 = math.sqrt(Math.sq(calp1 * cbet1) +
(cbet2 - cbet1) * (cbet1 + cbet2)) / cbet2
else:
calp2 = math.sqrt(Math.sq(calp1 * cbet1) +
(sbet1 - sbet2) * (sbet1 + sbet2)) / cbet2
else:
calp2 = abs(calp1)
# tan(bet2) = tan(sig2) * cos(alp2)
# tan(omg2) = sin(alp0) * tan(sig2).
ssig2 = sbet2; somg2 = salp0 * sbet2
csig2 = comg2 = calp2 * cbet2
ssig2, csig2 = Geodesic.SinCosNorm(ssig2, csig2)
# SinCosNorm(somg2, comg2); -- don't need to normalize!
# sig12 = sig2 - sig1, limit to [0, pi]
sig12 = math.atan2(max(csig1 * ssig2 - ssig1 * csig2, 0.0),
csig1 * csig2 + ssig1 * ssig2)
# omg12 = omg2 - omg1, limit to [0, pi]
omg12 = math.atan2(max(comg1 * somg2 - somg1 * comg2, 0.0),
comg1 * comg2 + somg1 * somg2)
# real B312, h0
k2 = Math.sq(calp0) * self._ep2
eps = k2 / (2 * (1 + math.sqrt(1 + k2)) + k2)
self.C3f(eps, C3a)
B312 = (Geodesic.SinCosSeries(True, ssig2, csig2, C3a, Geodesic.nC3_-1) -
Geodesic.SinCosSeries(True, ssig1, csig1, C3a, Geodesic.nC3_-1))
h0 = -self._f * self.A3f(eps)
domg12 = salp0 * h0 * (sig12 + B312)
lam12 = omg12 + domg12
if diffp:
if calp2 == 0:
dlam12 = - 2 * math.sqrt(1 - self._e2 * Math.sq(cbet1)) / sbet1
else:
dummy, dlam12, dummy, dummy, dummy = self.Lengths(
eps, sig12, ssig1, csig1, ssig2, csig2, cbet1, cbet2, False, C1a, C2a)
dlam12 /= calp2 * cbet2
else:
dlam12 = Math.nan
return (lam12, salp2, calp2, sig12, ssig1, csig1, ssig2, csig2, eps,
domg12, dlam12)
# return a12, s12, azi1, azi2, m12, M12, M21, S12
def GenInverse(self, lat1, lon1, lat2, lon2, outmask):
a12 = s12 = azi1 = azi2 = m12 = M12 = M21 = S12 = Math.nan # return vals
outmask &= Geodesic.OUT_ALL
lon1 = Geodesic.AngNormalize(lon1)
lon12 = Geodesic.AngNormalize(Geodesic.AngNormalize(lon2) - lon1)
# If very close to being on the same meridian, then make it so.
# Not sure this is necessary...
lon12 = Geodesic.AngRound(lon12)
# Make longitude difference positive.
if lon12 >= 0:
lonsign = 1
else:
lonsign = -1
lon12 *= lonsign
if lon12 == 180:
lonsign = 1
# If really close to the equator, treat as on equator.
lat1 = Geodesic.AngRound(lat1)
lat2 = Geodesic.AngRound(lat2)
# Swap points so that point with higher (abs) latitude is point 1
if abs(lat1) >= abs(lat2):
swapp = 1
else:
swapp = -1
lonsign *= -1
lat2, lat1 = lat1, lat2
# Make lat1 <= 0
if lat1 < 0:
latsign = 1
else:
latsign = -1
lat1 *= latsign
lat2 *= latsign
# Now we have
#
# 0 <= lon12 <= 180
# -90 <= lat1 <= 0
# lat1 <= lat2 <= -lat1
#
# longsign, swapp, latsign register the transformation to bring the
# coordinates to this canonical form. In all cases, 1 means no change was
# made. We make these transformations so that there are few cases to
# check, e.g., on verifying quadrants in atan2. In addition, this
# enforces some symmetries in the results returned.
# real phi, sbet1, cbet1, sbet2, cbet2, s12x, m12x
phi = lat1 * Math.degree
# Ensure cbet1 = +epsilon at poles
sbet1 = self._f1 * math.sin(phi)
if lat1 == -90:
cbet1 = Geodesic.tiny_
else:
cbet1 = math.cos(phi)
sbet1, cbet1 = Geodesic.SinCosNorm(sbet1, cbet1)
phi = lat2 * Math.degree
# Ensure cbet2 = +epsilon at poles
sbet2 = self._f1 * math.sin(phi)
if abs(lat2) == 90:
cbet2 = Geodesic.tiny_
else:
cbet2 = math.cos(phi)
sbet2, cbet2 = Geodesic.SinCosNorm(sbet2, cbet2)
# If cbet1 < -sbet1, then cbet2 - cbet1 is a sensitive measure of the
# |bet1| - |bet2|. Alternatively (cbet1 >= -sbet1), abs(sbet2) + sbet1 is
# a better measure. This logic is used in assigning calp2 in Lambda12.
# Sometimes these quantities vanish and in that case we force bet2 = +/-
# bet1 exactly. An example where this is necessary is the inverse problem
# 48.522876735459 0 -48.52287673545898293 179.599720456223079643
# which failed with Visual Studio 10 (Release and Debug)
if cbet1 < -sbet1:
if cbet2 == cbet1:
if sbet2 < 0:
sbet2 = sbet1
else:
sbet2 = -sbet1
else:
if abs(sbet2) == -sbet1:
cbet2 = cbet1
lam12 = lon12 * Math.degree
if lon12 == 180:
slam12 = 0.0
else:
slam12 = math.sin(lam12)
clam12 = math.cos(lam12) # lon12 == 90 isn't interesting
# real a12, sig12, calp1, salp1, calp2, salp2
# index zero elements of these arrays are unused
C1a = list(range(Geodesic.nC1_ + 1))
C2a = list(range(Geodesic.nC2_ + 1))
C3a = list(range(Geodesic.nC3_))
meridian = lat1 == -90 or slam12 == 0
if meridian:
# Endpoints are on a single full meridian, so the geodesic might lie on
# a meridian.
calp1 = clam12; salp1 = slam12 # Head to the target longitude
calp2 = 1; salp2 = 0 # At the target we're heading north
# -*- coding: utf-8 -*-
#
# test_tatoeba.py
# cjktools
#
from __future__ import unicode_literals
import os
from .._common import to_unicode_stream, to_string_stream
import unittest
from six import text_type
from functools import partial
from datetime import datetime
from cjktools.resources import tatoeba, auto_format, cjkdata
from cjktools.common import sopen
from nose_parameterized import parameterized
def get_data_loc(key, suffix='_00', extension='.csv'):
script_dir = os.path.dirname(os.path.realpath(__file__))
data_loc = os.path.join(script_dir, 'sample_data/')
base_names = dict(sentences='sentences',
jpn_indices='jpn_indices',
links='links',
sentences_detailed='sentences_detailed',
edict='je_edict')
fname = base_names[key] + suffix + extension
return os.path.join(data_loc, fname)
def get_edict():
if getattr(get_edict, '_cached', None) is not None:
return get_edict._cached
with sopen(get_data_loc('edict', extension=''), mode='r') as edf:
edict = auto_format.load_dictionary(edf)
get_edict._cached = edict
return edict
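# get_edict caches the parsed dictionary on the function object itself (a
# simple memoization), so repeated test cases don't re-parse the sample
# EDICT file.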
class ReaderBaseCase(unittest.TestCase):
def resource_fpath(self):
return get_data_loc(self._resource_name)
def load_file(self, resource=None, **kwargs):
"""
Loads the file into a TatoebaSentenceReader object.
"""
if resource is None:
resource = self.resource_fpath()
reader = self.ReaderClass(resource, **kwargs)
return reader
class FileObjReaderMixin(object):
"""
Mixin to run the existing tests using an already opened file object.
"""
def load_file(self, resource=None, **kwargs):
if resource is None:
resource = self.resource_fpath()
with open(self.resource_fpath(), mode='r') as resource_file:
try:
resource_file.filename = self.resource_fpath()
except AttributeError:
class FileWrapper(object):
def __init__(self, fin, fpath):
self.f = fin
self.filename = fpath
def __getattr__(self, attr):
return getattr(self.f, attr)
def __iter__(self):
return iter(self.f)
resource_file = FileWrapper(resource_file,
self.resource_fpath())
r = super(FileObjReaderMixin, self).load_file(resource_file,
**kwargs)
return r
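# The FileWrapper above only exists to attach a `filename` attribute to a
# plain file object when one can't be set directly; the readers presumably
# use it to report which resource they were loaded from (cf. the repr tests
# in this module).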
###
# Tatoeba Reader base class test
class TatoebaReaderTest(unittest.TestCase):
def test_not_implemented(self):
with self.assertRaises(NotImplementedError):
tatoeba.TatoebaReader()
###
# Tatoeba Sentence Reader tests
class TatoebaSentenceReaderTest(ReaderBaseCase):
_resource_name = 'sentences'
ReaderClass = tatoeba.TatoebaSentenceReader
def test_basic(self):
# Test that all the sentence IDs load properly
sr = self.load_file()
sent_ids = [
6381, 29390, 36809, 46235, 54432, 62093, 68807, 82526, 93620,
109744, 112733, 156245, 192227, 199398, 208975, 224758, 231440, 258289,
290943, 293946, 310087, 321190, 410787, 508870, 723598, 817971,
2031040, 2031042, 2172488
]
self.assertEqual(sorted(sr.keys()), sent_ids)
self.assertEqual(sorted(sr.keys()), sorted(sr.sentence_ids))
def test_language_filter(self):
sr = self.load_file(languages={'fra', 'pol', 'rus'})
sent_ids = [6381, 508870, 2172488]
self.assertEqual(sorted(sr.keys()), sent_ids)
def test_custom_filter_row(self):
class TatoebaSentenceSubsetReader(tatoeba.TatoebaSentenceReader):
def __init__(self, sentences, subset=None, **kwargs):
self._subset = set(subset)
super(TatoebaSentenceSubsetReader, self).__init__(
sentences, **kwargs)
def filter_row(self, row):
return int(row[0]) not in self._subset
sr = TatoebaSentenceSubsetReader(
sentences=self.resource_fpath(),
subset={224758, 6381, 29390},
languages={'eng', 'jpn'})
sent_inds = [29390, 224758]
self.assertEqual(sorted(sr.keys()), sent_inds)
def test_language(self):
sr = self.load_file()
for sent_id in [82526, 93620, 109744, 208975]:
self.assertEqual(sr.language(sent_id), 'jpn')
for sent_id in [36809, 293946, 410787, 2031042]:
self.assertEqual(sr.language(sent_id), 'eng')
def test_language_error(self):
sr = self.load_file()
with self.assertRaises(tatoeba.InvalidIDError):
sr.language(193)
def test_sentence(self):
sr = self.load_file()
sentence_pairs = [
(192227, 'ロールプレイングのテレビゲームは時間を食う。'),
(29390, 'Role-playing video games are time consuming.'),
(208975, 'その上手な運転手は車の列を縫うように車を走らせた。'),
(46235, 'The good driver wove his way through the traffic.')
]
for sent_id, sentence in sentence_pairs:
self.assertEqual(sr[sent_id], sentence)
def test_sentence_error(self):
sr = self.load_file()
with self.assertRaises(tatoeba.InvalidIDError):
sr.sentence(24)
def test_details_error(self):
sr = self.load_file()
# One that is in the data set
with self.assertRaises(tatoeba.MissingDataError):
sr.details(192227)
# One that's not in the data set
with self.assertRaises(tatoeba.MissingDataError):
sr.details(0)
def test_repr(self):
sr = self.load_file()
expected_fmt = "TatoebaSentenceReader(sentences='{}')"
expected = expected_fmt.format(self.resource_fpath())
self.assertEqual(repr(sr), expected)
class TatoebaSentenceReaderDetailedTest(TatoebaSentenceReaderTest):
_resource_name = 'sentences_detailed'
def test_details(self):
sr = self.load_file()
# Note: I modified 410787 in the csv to make the added/modified
# different.
details_pairs = [
(82526, (None, None, None)),
(199398, (None, None, None)),
(258289, ('CK', None, datetime(2010, 10, 7, 15, 55, 17))),
(410787, ('CK',
datetime(2010, 6, 24, 14, 20, 10),
datetime(2010, 6, 24, 14, 20, 28)))
]
for sent_id, details in details_pairs:
self.assertEqual(sr.details(sent_id), details)
def test_details_error(self):
sr = self.load_file()
with self.assertRaises(tatoeba.InvalidIDError):
sr.details(24)
class TatoebaSentenceReaderMiscTests(unittest.TestCase):
@parameterized.expand([
# Too few columns
('too_few', '3444\teng\n3949\tjp\n'),
# Too many for undetailed, too few for detailed
('in_between', '3444\teng\tThe boy ate a tiger\tMB'),
# Too many even for detailed
('too_many', '3444\teng\tThe boy ate a tiger\tMB\t\\N\t\\N\t\\N'),
# In between, but with unicode
('in_between_unicode', '3444\teng\tThe boy ate a 虎\tMB')
])
def test_invalid_file(self, name, rowstr):
invalid = to_string_stream(rowstr)
with self.assertRaises(tatoeba.InvalidFileError):
sr = tatoeba.TatoebaSentenceReader(invalid)
class TatoebaSentenceReaderFObjTest(FileObjReaderMixin,
TatoebaSentenceReaderTest):
pass
class TatoebaSentenceReaderDetailedFObjTest(FileObjReaderMixin,
TatoebaSentenceReaderDetailedTest):
pass
###
# Tatoeba Links Reader tests
class TatoebaLinksReaderTests(ReaderBaseCase):
_resource_name = 'links'
ReaderClass = tatoeba.TatoebaLinksReader
def test_basic(self):
lr = self.load_file()
self.assertEqual(len(lr), 29)
groups = [
(6381, {6381, 156245, 258289, 817971}),
(29390, {29390, 192227}),
(36809, {36809, 54432, 199398, 410787}),
(46235, {46235, 208975}),
(62093, {62093, 224758, 723598, 2031040, 2031042}),
(68807, {68807, 231440}),
(82526, {82526, 321190, 508870}),
(93620, {93620, 310087, 2172488}),
(109744, {109744, 293946}),
(112733, {112733, 290943})
]
for sent_id, group in groups:
self.assertEqual(lr[sent_id], group)
self.assertEqual(lr.group(sent_id), group)
def test_filter_both(self):
subset = {6381, 82526, 258289, 192227, 508870}
lr = self.load_file(sentence_ids=subset, sentence_ids_filter='both')
groups = [
(6381, {6381, 258289}),
(82526, {82526, 508870})
]
for sent_id, group in groups:
self.assertEqual(lr[sent_id], group)
self.assertEqual(lr.sentence_ids_filter, 'both')
def test_filter_sent_id(self):
# Normally there isn't much difference between sent_id and
# trans_id because the links file stores things redundantly, but
# I've removed all but the 6381->817971 edge in the graph, so it
# will show up in sent_id, but not in trans_id
subset = {6381, 82526, 258289, 192227, 508870}
lr = self.load_file(sentence_ids=subset, sentence_ids_filter='sent_id')
groups = [
(6381, {6381, 156245, 258289, 817971}),
(82526, {82526, 321190, 508870}),
(29390, {29390, 192227})
]
for sent_id, group in groups:
self.assertEqual(lr[sent_id], group)
self.assertEqual(lr.sentence_ids_filter, 'sent_id')
def test_filter_trans_id(self):
subset = {6381, 82526, 258289, 192227, 508870}
lr = self.load_file(sentence_ids=subset, sentence_ids_filter='trans_id')
groups = [
(6381, {6381, 156245, 258289}),
(82526, {82526, 321190, 508870}),
(29390, {29390, 192227})
]
for sent_id, group in groups:
self.assertEqual(lr[sent_id], group)
self.assertEqual(lr.sentence_ids_filter, 'trans_id')
def test_filter_error(self):
subset = {6381, 82526, 258289, 192227, 508870}
with self.assertRaises(ValueError):
lr = self.load_file(sentence_ids=subset,
sentence_ids_filter='banana')
def test_group_error(self):
lr = self.load_file()
with self.assertRaises(tatoeba.InvalidIDError):
lr.group(100)
def test_groups(self):
lr = self.load_file(sentence_ids={6381, 156245, 29390, 192227})
groups = [
{6381, 156245},
{29390, 192227}
]
actual = sorted(lr.groups(), key=lambda x: min(x))
self.assertEqual(groups, actual)
def test_links(self):
lr = self.load_file()
self.assertEqual(lr.links, self.resource_fpath())
def test_repr(self):
lr = self.load_file()
expected_fmt = "TatoebaLinksReader(links='{}')"
expected = expected_fmt.format(self.resource_fpath())
self.assertEqual(repr(lr), expected)
class TatoebaLinksReaderMiscTests(unittest.TestCase):
@parameterized.expand([
# Too few columns
('too_few', '3444\n3949\n'),
# Too many columns
('too_many', '3444\teng\tThe boy ate a tiger\tMB'),
# Too many columns, with unicode
('too_many_unicode', '3444\teng\tThe boy ate a tiger (虎)\tMB'),
])
def test_invalid_file(self, name, rowstr):
# Too few columns
invalid = to_string_stream(rowstr)
with self.assertRaises(tatoeba.InvalidFileError):
sr = tatoeba.TatoebaLinksReader(invalid)
###
# Tanaka word tests
class TanakaWordTests(unittest.TestCase):
WordClass = tatoeba.TanakaWord
def _default_args(self, args):
default_args = (None, None, None, None, False)
return args + default_args[len(args):]
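# Illustrative note (not part of the original tests): _default_args pads a
# partial tuple out to the full five-argument WordClass signature
# (headword, reading, sense, display, example). A minimal sketch:
#   args = ('時', 'とき')
#   padded = args + (None, None, None, None, False)[len(args):]
#   # padded == ('時', 'とき', None, None, False)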
@parameterized.expand([
('alone', 'は|1', ('は',)),
('after_reading', '度(ど)|1', ('度', 'ど')),
('before_sense', 'は|1[01]', ('は', None, 1)),
('before_disp', 'ばれる|1{ばれた}', ('ばれる', None, None, 'ばれた')),
('before_example', 'わっと|2~', ('わっと', None, None, None, True))
])
def test_legacy_tag(self, name, tagstr, expected):
exp_word = self.WordClass(*self._default_args(expected))
act = self.WordClass.from_text(tagstr)
self.assertEqual(exp_word, act)
self.assertEqual(exp_word.display, act.display)
@parameterized.expand([
('headword', ('を',), 'を'),
('reading', ('時', 'とき'), '時(とき)'),
('sense', ('が', None, 3), 'が[03]'),
('read_sense', ('大学', 'だいがく', 1), '大学(だいがく)[01]'),
('display', ('である', None, None, 'であった'), 'である{であった}'),
('read_disp', ('為る', 'する', None, 'し'), '為る(する){し}'),
('sense_disp', ('其の', None, 1, 'その'), '其の[01]{その}'),
('read_sense_disp', ('其の', 'その', 1, 'その'), '其の(その)[01]{その}'),
('example', ('ロールプレイング', None, None, None, True),
'ロールプレイング~'),
('read_ex', ('時', 'とき', None, None, True), '時(とき)~'),
('sense_ex', ('食う', None, 7, None, True), '食う[07]~'),
('read_sense_ex', ('彼', 'かれ', 1, None, True), '彼(かれ)[01]~'),
('disp_ex',
('ネイティブアメリカン', None, None, 'ネイティブ・アメリカン', True),
'ネイティブアメリカン{ネイティブ・アメリカン}~'),
('read_disp_ex',
('喝采を送る', 'かっさいをおくる', None, '喝采を送った', True),
'喝采を送る(かっさいをおくる){喝采を送った}~'),
('sense_disp_ex', ('ソフト', None, 1, 'ソフトな', True),
'ソフト[01]{ソフトな}~'),
('read_sense_disp_ex', ('立て', 'たて', 2, 'たて', True),
'立て(たて)[02]{たて}~'),
])
def test_str(self, name, args, expected):
word = self.WordClass(*self._default_args(args))
self.assertEqual(text_type(word), expected)
@parameterized.expand([
('headword', ('を',), ('が',)),
('reading', ('時', 'とき'), ('時', 'じ')),
('sense', ('が', None, 3), ('が', None, 2)),
('display',
('飲ませる', None, None, '飲ませて'),
('飲ませる', None, None, '飲ませない')),
('example',
('ロールプレイング', None, None, None, True),
('ロールプレイング', None, None, None, False)),
])
def test_neq(self, name, arg1, arg2):
w1, w2 = (self.WordClass(*self._default_args(arg))
for arg in (arg1, arg2))
self.assertNotEqual(w1, w2)
def test_req(self):
class StrEq(object):
def __init__(self, base_str):
self.base_str = base_str
def __eq__(self, other):
return text_type(other) == self.base_str
w = self.WordClass('時', 'とき', None, None, False)
self.assertTrue(w == StrEq('時(とき)'))
self.assertTrue(StrEq('時(とき)') == w)
def test_rneq(self):
class StrEq(object):
def __init__(self, base_str):
self.base_str = base_str
def __eq__(self, other):
return text_type(other) == self.base_str
w = self.WordClass('時', 'とき', None, None, False)
self.assertFalse(w != StrEq('時(とき)'))
self.assertFalse(StrEq('時(とき)') != w)
###
# Tatoeba Index Reader tests
class TatoebaIndexReaderTests(ReaderBaseCase):
_resource_name = 'jpn_indices'
ReaderClass = tatoeba.TatoebaIndexReader
WordClass = tatoeba.TanakaWord
@property
def sentences(self):
_sentences = {
109744: [
self.WordClass('彼', 'かれ', 1, None, False),
self.WordClass('は', None, None, None, False),
self.WordClass('英語', None, None, None, False),
self.WordClass('が', None, None, None, False),
self.WordClass('苦手', None, None, None, False),
self.WordClass('だ', None, None, None, False),
self.WordClass('が', None, 3, None, False),
self.WordClass('数学', None, None, None, False),
self.WordClass('で', None, None, None, False),
self.WordClass('は', None, None, None, False),
self.WordClass('誰にも', None, None, None, False),
self.WordClass('劣る', None, None, '劣らない', False)
],
112733: [
self.WordClass('彼', 'かれ', 1, None, False),
# Chamaco326/xraylarch -- larch/io/athena_project.py
#!/usr/bin/env python
"""
Code to read and write Athena Project files
"""
import os
import sys
import time
import json
import platform
from fnmatch import fnmatch
from gzip import GzipFile
from collections import OrderedDict
from glob import glob
import numpy as np
from numpy.random import randint
from larch import Group
from larch import __version__ as larch_version
from larch.utils.strutils import bytes2str, str2bytes, fix_varname
from xraydb import guess_edge
alist2json = str.maketrans("();'\n", "[] \" ")
def plarray2json(text):
return json.loads(text.split('=', 1)[1].strip().translate(alist2json))
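# Rough sketch of what plarray2json does (the input lines below are made-up
# examples of the Perl-style assignments found in Athena project files):
#   plarray2json("$old_group = 'kmnop';")        # -> 'kmnop'
#   plarray2json("@array = ('1.0', '2.0');")     # -> ['1.0', '2.0']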
def parse_arglist(text):
txt = text.split('=', 1)[1].strip()
if txt.endswith(';'):
txt = txt[:-1]
for beg, end in (('[', ']'), ('(', ')'), ('{', '}')):
if txt.startswith(beg) and txt.endswith(end):
txt = txt[1:-1]
txt = txt.translate(alist2json)
words = []
i0 = 0
inparen1 = False
inparen2 = False
def get_word(x):
w = x
for d in ('"', "'"):
if w.startswith(d) and w.endswith(d):
w = w[1:-1]
return w
for i in range(len(txt)):
c = txt[i]
if inparen1:
if c == "'":
inparen1 = False
continue
elif inparen2:
if c == '"':
inparen2 = False
continue
if c == ',':
words.append(get_word(txt[i0:i]))
i0 = i+1
elif c == '"':
inparen2 = True
elif c == "'":
inparen1 = True
words.append(get_word(txt[i0:]))
return words
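# Rough usage sketch (the argument below is a made-up example of the
# Perl-style list lines this parser targets, not taken from a real project):
#   parse_arglist("@args = ('bkg_e0', '7112.0', 'label', 'Fe foil');")
# splits the right-hand side on commas while respecting quoted strings and
# returns the individual values as a list of plain strings.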
def asfloat(x):
"""try to convert value to float, or fail gracefully"""
try:
return float(x)
except (ValueError, TypeError):
return x
ERR_MSG = "Error reading Athena Project File"
def _read_raw_athena(filename):
"""try to read athena project file as plain text,
to determine validity
"""
# try gzip
text = None
try:
fh = GzipFile(filename)
text = bytes2str(fh.read())
except Exception:
errtype, errval, errtb = sys.exc_info()
text = None
if text is None:
# try plain text file
try:
fh = open(filename, 'r')
text = bytes2str(fh.read())
except Exception:
errtype, errval, errtb = sys.exc_info()
text = None
return text
def _test_athena_text(text):
return "Athena project file -- " in text[:500]
def is_athena_project(filename):
"""tests whether file is a valid Athena Project file"""
text = _read_raw_athena(filename)
if text is None:
return False
return _test_athena_text(text)
def make_hashkey(length=5):
"""generate an 'athena hash key': 5 random lower-case letters
"""
return ''.join([chr(randint(97, 122)) for i in range(length)])
def make_athena_args(group, hashkey=None, **kws):
"""make athena args line from a group"""
# start with default args:
from larch.xafs.xafsutils import etok
if hashkey is None:
hashkey = make_hashkey()
args = {}
for k, v in (('annotation', ''),
('beamline', ''),
('beamline_identified', '0'), ('bft_dr', '0.0'),
('bft_rmax', '3'), ('bft_rmin', '1'),
('bft_rwindow', 'hanning'), ('bkg_algorithm', 'autobk'),
('bkg_cl', '0'), ('bkg_clamp1', '0'), ('bkg_clamp2', '24'),
('bkg_delta_eshift', '0'), ('bkg_dk', '1'),
('bkg_e0_fraction', '0.5'), ('bkg_eshift', '0'),
('bkg_fixstep', '0'), ('bkg_flatten', '1'),
('bkg_former_e0', '0'), ('bkg_funnorm', '0'),
('bkg_int', '7.'), ('bkg_kw', '1'),
('bkg_kwindow', 'hanning'), ('bkg_nclamp', '5'),
('bkg_rbkg', '1.0'), ('bkg_slope', '-0.0'),
('bkg_stan', 'None'), ('bkg_tie_e0', '0'),
('bkg_nc0', '0'), ('bkg_nc1', '0'),
('bkg_nc2', '0'), ('bkg_nc3', '0'),
('bkg_rbkg', '1.0'), ('bkg_slope', '0'),
('bkg_pre1', '-150'), ('bkg_pre2', '-30'),
('bkg_nor1', '150'), ('bkg_nor2', '800'),
('bkg_nnorm', '1'),
('prjrecord', 'athena.prj, 1'), ('chi_column', ''),
('chi_string', ''), ('collided', '0'), ('columns', ''),
('daq', ''), ('denominator', '1'), ('display', '0'),
('energy', ''), ('energy_string', ''), ('epsk', ''),
('epsr', ''), ('fft_dk', '4'), ('fft_edge', 'k'),
('fft_kmax', '15.'), ('fft_kmin', '2.00'),
('fft_kwindow', 'kaiser-bessel'), ('fft_pc', '0'),
('fft_pcpathgroup', ''), ('fft_pctype', 'central'),
('forcekey', '0'), ('from_athena', '1'),
('from_yaml', '0'), ('frozen', '0'), ('generated', '0'),
('i0_scale', '1'), ('i0_string', '1'),
('importance', '1'), ('inv', '0'), ('is_col', '1'),
('is_fit', '0'), ('is_kev', '0'), ('is_merge', ''),
('is_nor', '0'), ('is_pixel', '0'), ('is_special', '0'),
('is_xmu', '1'), ('ln', '0'), ('mark', '0'),
('marked', '0'), ('maxk', '15'), ('merge_weight', '1'),
('multiplier', '1'), ('nidp', '5'), ('nknots', '4'),
('numerator', ''), ('plot_scale', '1'),
('plot_yoffset', '0'), ('plotkey', ''),
('plotspaces', 'any'), ('provenance', ''),
('quenched', '0'), ('quickmerge', '0'),
('read_as_raw', '0'), ('rebinned', '0'),
('recommended_kmax', '1'), ('recordtype', 'mu(E)'),
('referencegroup', ''), ('rmax_out', '10'),
('signal_scale', '1'), ('signal_string', '-1'),
('trouble', ''), ('tying', '0'),
('unreadable', '0'), ('update_bft', '1'),
('update_bkg', '1'), ('update_columns', '0'),
('update_data', '0'), ('update_fft', '1'),
('update_norm', '1'), ('xdi_will_be_cloned', '0'),
('xdifile', ''), ('xmu_string', ''),
('valence', ''), ('lasso_yvalue', ''),
('atsym', ''), ('edge', '') ):
args[k] = v
args['datagroup'] = args['tag'] = args['label'] = hashkey
en = getattr(group, 'energy', [])
args['npts'] = len(en)
if len(en) > 0:
args['xmin'] = '%.1f' % min(en)
args['xmax'] = '%.1f' % max(en)
main_map = dict(source='filename', file='filename', label='filename',
bkg_e0='e0', bkg_step='edge_step',
bkg_fitted_step='edge_step', valence='valence',
lasso_yvalue='lasso_yvalue', atsym='atsym',
edge='edge')
for aname, lname in main_map.items():
val = getattr(group, lname, None)
if val is not None:
args[aname] = val
bkg_map = dict(nnorm='nnorm', nor1='norm1', nor2='norm2', pre1='pre1',
pre2='pre2')
if hasattr(group, 'pre_edge_details'):
for aname, lname in bkg_map.items():
val = getattr(group.pre_edge_details, lname, None)
if val is not None:
args['bkg_%s' % aname] = val
emax = max(group.energy) - group.e0
args['bkg_spl1e'] = '0'
args['bkg_spl2e'] = '%.5f' % emax
args['bkg_spl1'] = '0'
args['bkg_spl2'] = '%.5f' % etok(emax)
if hasattr(group, 'fft_params'):
for aname in ('dk', 'kmin', 'kmax', 'kwindow', 'pc', 'edge',
'pc', 'pcpathgroup', 'pctype'):
val = getattr(group.fft_params, aname, None)
if val is not None:
args['fft_%s' % aname] = val
args.update(kws)
return args
def athena_array(group, arrname):
"""convert ndarray to athena representation"""
arr = getattr(group, arrname, None)
if arr is None:
return None
return arr # json.dumps([repr(i) for i in arr])
# return "(%s)" % ','.join(["'%s'" % i for i in arr])
def format_dict(d):
""" format dictionary for Athena Project file"""
o = []
for key in sorted(d.keys()):
o.append("'%s'" % key)
val = d[key]
if val is None: val = ''
o.append("'%s'" % val)
return ','.join(o)
def format_array(arr):
""" format dictionary for Athena Project file"""
o = ["'%s'" % v for v in arr]
return ','.join(o)
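# Minimal illustration with made-up values:
#   format_dict({'e0': 7112.0, 'edge': 'K'})  # -> "'e0','7112.0','edge','K'"
#   format_array([1, 2])                      # -> "'1','2'"
# i.e. the comma-separated, single-quoted form expected in Athena project files.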
def clean_bkg_params(grp):
grp.nnorm = getattr(grp, 'nnorm', 2)
grp.e0 = getattr(grp, 'e0', -1)
grp.rbkg = getattr(grp, 'rbkg', 1)
grp.pre1 = getattr(grp, 'pre1', -150)
grp.pre2 = getattr(grp, 'pre2', -25)
grp.nor1 = getattr(grp, 'nor1', 100)
grp.nor2 = getattr(grp, 'nor2', 1200)
grp.spl1 = getattr(grp, 'spl1', 0)
grp.spl2 = getattr(grp, 'spl2', 30)
grp.kw = getattr(grp, 'kw', 1)
grp.dk = getattr(grp, 'dk', 3)
if getattr(grp, 'kwindow', None) is None:
grp.kwindow = getattr(grp, 'win', 'hanning')
try:
grp.clamp1 = float(grp.clamp1)
except:
grp.clamp1 = 1
try:
grp.clamp2 = float(grp.clamp2)
except:
grp.clamp2 = 1
return grp
def clean_fft_params(grp):
grp.kmin = getattr(grp, 'kmin', 0)
grp.kmax = getattr(grp, 'kmax', 25)
grp.kweight = getattr(grp, 'kweight', 2)
grp.dk = getattr(grp, 'dk', 3)
grp.kwindow = getattr(grp, 'kwindow', 'hanning')
return grp
def parse_perlathena(text, filename):
"""
parse old athena file format to Group of Groups
"""
lines = text.split('\n')
athenagroups = []
raw = {'name':''}
vline = lines.pop(0)
if "Athena project file -- " not in vline:
raise ValueError("%s '%s': invalid Athena File" % (ERR_MSG, filename))
major, minor, fix = '0', '0', '0'
if 'Demeter' in vline:
try:
vs = vline.split("Athena project file -- Demeter version")[1]
major, minor, fix = vs.split('.')
except:
raise ValueError("%s '%s': cannot read version" % (ERR_MSG, filename))
else:
try:
vs = vline.split("Athena project file -- Athena version")[1]
major, minor, fix = vs.split('.')
except:
raise ValueError("%s '%s': cannot read version" % (ERR_MSG, filename))
header = [vline]
journal = ['']
is_header = True
for t in lines:
if t.startswith('#') or len(t) < 2 or 'undef' in t:
if is_header:
header.append(t)
continue
is_header = False
key = t.split(' ')[0].strip()
key = key.replace('$', '').replace('@', '').replace('%', '').strip()
if key == 'old_group':
raw['name'] = plarray2json(t)
elif key == '[record]':
athenagroups.append(raw)
raw = {'name':''}
elif key == 'journal':
journal = parse_arglist(t)
elif key == 'args':
raw['args'] = parse_arglist(t)
elif key == 'xdi':
raw['xdi'] = t
elif key in ('x', 'y', 'i0', 'signal', 'stddev'):
raw[key] = np.array([float(x) for x in plarray2json(t)])
elif key in ('1;', 'indicator', 'lcf_data', 'plot_features'):
pass
else:
print(" do not know what to do with key '%s' at '%s'" % (key, raw['name']))
out = Group()
out.__doc__ = """XAFS Data from Athena Project File %s""" % (filename)
out.journal = '\n'.join(journal)
out.group_names = []
out.header = '\n'.join(header)
for dat in athenagroups:
label = dat.get('name', 'unknown')
this = Group(athena_id=label, energy=dat['x'], mu=dat['y'],
bkg_params=Group(),
fft_params=Group(),
athena_params=Group())
if 'i0' in dat:
this.i0 = dat['i0']
if 'signal' in dat:
this.signal = dat['signal']
if 'stddev' in dat:
this.stddev = dat['stddev']
if 'args' in dat:
for i in range(len(dat['args'])//2):
key = dat['args'][2*i]
val = dat['args'][2*i+1]
if key.startswith('bkg_'):
setattr(this.bkg_params, key[4:], asfloat(val))
elif key.startswith('fft_'):
setattr(this.fft_params, key[4:], asfloat(val))
elif key == 'label':
label = this.label = val
elif key in ('valence', 'lasso_yvalue',
'epsk', 'epsr', 'importance'):
setattr(this, key, asfloat(val))
elif key in ('atsym', 'edge', 'provenance'):
setattr(this, key, val)
else:
setattr(this.athena_params, key, asfloat(val))
this.__doc__ = """Athena Group Name %s (key='%s')""" % (label, dat['name'])
name = fix_varname(label)
if name.startswith('_'):
name = 'd' + name
setattr(out, name, this)
out.group_names.append(name)
# -*- coding: utf-8 -*-
"""All security in windows is handled via Security Principals. These can
be a user (the most common case), a group of users, a computer, or something
else. Security principals are uniquely identified by their SID: a binary code
represented by a string S-a-b-cd-efg... where each of the segments represents
an aspect of the security authorities involved. (A computer, a domain etc.).
Certain of the SIDs are considered well-known such as the AuthenticatedUsers
account on each machine which will always have the same SID.
Most of the access to this module will be via the :func:`principal`
or :func:`me` functions. Although the module is designed to be used
standalone, it is imported directly into the :mod:`security` module's
namespace so its functionality can also be accessed from there.
"""
from __future__ import unicode_literals
import os, sys
import contextlib
import socket
import ntsecuritycon
import pywintypes
import win32con
import win32security
import win32api
import win32cred
import win32event
import win32net
import win32netcon
import winerror
from winsys._compat import *
from winsys import constants, core, exc, utils
from winsys import _advapi32
__all__ = ['LOGON', 'EXTENDED_NAME', 'x_accounts', 'principal', 'Principal', 'User', 'Group', 'me']
LOGON = constants.Constants.from_pattern("LOGON32_*", namespace=win32security)
LOGON.doc("Types of logon used by LogonUser and related APIs")
EXTENDED_NAME = constants.Constants.from_pattern("Name*", namespace=win32con)
EXTENDED_NAME.doc("Extended display formats for usernames")
WELL_KNOWN_SID = constants.Constants.from_pattern("Win*Sid", namespace=win32security)
WELL_KNOWN_SID.doc("Well-known SIDs common to all computers")
USER_PRIV = constants.Constants.from_list(["USER_PRIV_GUEST", "USER_PRIV_USER", "USER_PRIV_ADMIN"], pattern="USER_PRIV_*", namespace=win32netcon)
USER_PRIV.doc("User-types for creating new users")
UF = constants.Constants.from_pattern("UF_*", namespace=win32netcon)
UF.doc("Flags for creating new users")
SID_NAME_USE = constants.Constants.from_pattern("SidType*", namespace=ntsecuritycon)
SID_NAME_USE.doc("Types of accounts for which SIDs exist")
FILTER = constants.Constants.from_pattern("FILTER_*", namespace=win32netcon)
FILTER.doc("Filters when enumerating users")
PySID = pywintypes.SIDType
class x_accounts(exc.x_winsys):
"Base for all accounts-related exceptions"
WINERROR_MAP = {
winerror.ERROR_NONE_MAPPED : exc.x_not_found
}
wrapped = exc.wrapper(WINERROR_MAP, x_accounts)
def _win32net_enum(win32_fn, system_or_domain):
resume = 0
while True:
items, total, resume = wrapped(win32_fn, system_or_domain, 0, resume)
for item in items:
yield item
if resume == 0: break
def principal(principal, cls=core.UNSET):
"""Factory function for the :class:`Principal` class. This is the most
common way to create a :class:`Principal` object::
from winsys import accounts
service_account = accounts.principal (accounts.WELL_KNOWN_SID.Service)
local_admin = accounts.principal ("Administrator")
domain_users = accounts.principal (r"DOMAIN\Domain Users")
:param principal: any of None, a :class:`Principal`, a `PySID`,
a :const:`WELL_KNOWN_SID` or a string
:returns: a :class:`Principal` object corresponding to `principal`
"""
cls = Principal if cls is core.UNSET else cls
if principal is None:
return None
elif type(principal) == PySID:
return cls.from_sid(principal)
elif isinstance(principal, int):
return cls.from_well_known(principal)
elif isinstance(principal, cls):
return principal
else:
return cls.from_string(unicode(principal))
def user(name):
"""If you know you're after a user, use this. Particularly
useful when a system user is defined as an alias type
"""
return principal(name, cls=User)
def group(name):
"""If you know you're after a group, use this. Particularly
useful when a system group is defined as an alias type
"""
return principal(name, cls=Group)
def local_group(name):
"""If you know you're after a local group, use this.
"""
return principal(name, cls=LocalGroup)
def global_group(name):
"""If you know you're after a global group, use this.
"""
return principal(name, cls=GlobalGroup)
def me():
"""Convenience function for the common case of getting the
logged-on user's account.
"""
return Principal.me()
_domain = None
def domain(system=None):
global _domain
if _domain is None:
_domain = wrapped(win32net.NetWkstaGetInfo, system, 100)['langroup']
return _domain
def domain_controller(domain=None):
return wrapped(win32net.NetGetAnyDCName, None, domain)
def users(system=None):
"""Convenience function to yield each of the local users
on a system.
:param system: optional security authority
:returns: yield :class:`User` objects
"""
return iter(_LocalUsers(system))
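# Illustrative usage sketch (assumes a Windows host with pywin32 installed;
# the output obviously depends on the local accounts):
#   from winsys import accounts
#   print(accounts.me())
#   for u in accounts.users():
#       print(u)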
class Principal(core._WinSysObject):
"""Object wrapping a Windows security principal, represented by a SID
and, where possible, a name. :class:`Principal` compares and hashes
by SID so can be sorted and used as a dictionary key, set element, etc.
A :class:`Principal` is its own context manager, impersonating the
corresponding user::
from winsys import accounts
with accounts.principal("python"):
print accounts.me()
Note, though, that this will prompt for a password using the
Win32 password UI. To logon with a password, use the :meth:`impersonate`
context-managed function. TODO: allow password to be set securely.
"""
def __init__(self, sid, system=None):
"""Initialise a Principal from and (optionally) a system name. The sid
must be a PySID and the system name, if present must be a security
authority, eg a machine or a domain.
"""
core._WinSysObject.__init__(self)
self.sid = sid
self.system = system
try:
self.name, self.domain, self.type = wrapped(win32security.LookupAccountSid, self.system, self.sid)
except exc.x_not_found:
self.name = str(self.sid)
self.domain = self.type = None
#~ if self.system is None:
#~ self.system = domain_controller(self.domain)
def __hash__(self):
return hash(str(self.sid))
def __eq__(self, other):
return self.sid == principal(other).sid
def __lt__(self, other):
return self.sid < principal(other).sid
def pyobject(self):
"""Return the internal representation of this object.
:returns: pywin32 SID
"""
return self.sid
def as_string(self):
if self.domain:
return "%s\%s" % (self.domain, self.name)
else:
return self.name or str(self.sid)
def dumped(self, level):
return utils.dumped("user: %s\nsid: %s" % (
self.as_string(),
wrapped(win32security.ConvertSidToStringSid, self.sid)
), level)
def logon(self, password=core.UNSET, logon_type=core.UNSET):
"""Log on as an authenticated user, returning that
user's token. This is used by security.impersonate
which wraps the token in a Token object and manages
its lifetime in a context.
(EXPERIMENTAL) If no password is given, a UI pops up
to ask for a password.
:param password: the password for this account
:param logon_type: one of the :const:`LOGON` values
:returns: a pywin32 handle to a token
"""
if logon_type is core.UNSET:
logon_type = LOGON.LOGON_NETWORK
else:
logon_type = LOGON.constant(logon_type)
#~ if password is core.UNSET:
#~ password = dialogs.get_password(self.name, self.domain)
hUser = wrapped(
win32security.LogonUser,
self.name,
self.domain,
password,
logon_type,
LOGON.PROVIDER_DEFAULT
)
return hUser
@classmethod
def from_string(cls, string, system=None):
r"""Return a :class:`Principal` based on a name and a
security authority. If `string` is blank, the logged-on user is assumed.
:param string: name of an account in the form "domain\name". domain is optional so the simplest form is simply "name"
:param system: name of a security authority (typically a machine or a domain)
:returns: a :class:`Principal` object for `string`
"""
if string == "":
string = wrapped(win32api.GetUserNameEx, win32con.NameSamCompatible)
sid, domain, type = wrapped(
win32security.LookupAccountName,
None if system is None else unicode(system),
unicode(string)
)
cls = cls.SID_NAME_USE_MAP.get(type, cls)
return cls(sid, None if system is None else unicode(system))
@classmethod
def from_sid(cls, sid, system=None):
"""Return a :class:`Principal` based on a sid and a security authority.
:param sid: a PySID
:param system_name: optional name of a security authority
:returns: a :class:`Principal` object for `sid`
"""
try:
name, domain, type = wrapped(
win32security.LookupAccountSid,
None if system is None else unicode(system),
sid
)
except exc.x_not_found:
name = domain = type = core.UNSET
cls = cls.SID_NAME_USE_MAP.get(type, cls)
return cls(sid, None if system is None else unicode(system))
@classmethod
def from_well_known(cls, well_known, domain=None):
"""Return a :class:`Principal` based on one of the :const:`WELL_KNOWN_SID` values.
:param well_known: one of the :const:`WELL_KNOWN_SID`
:param domain: anything accepted by :func:`principal` and corresponding to a domain
"""
return cls.from_sid(wrapped(win32security.CreateWellKnownSid, well_known, principal(domain)))
@classmethod
def me(cls):
"""Convenience factory method for the common case of referring to the
logged-on user
"""
return cls.from_string(wrapped(win32api.GetUserNameEx, EXTENDED_NAME.SamCompatible))
@contextlib.contextmanager
def impersonate(self, password=core.UNSET, logon_type=core.UNSET):
"""Context-managed function to impersonate this user and then
revert::
from winsys import accounts, security
print accounts.me()
python = accounts.principal("python")
with python.impersonate("<PASSWORD>"):
print accounts.me()
open("temp.txt", "w").close()
print accounts.me()
security.security("temp.txt").owner == python
Note that the :class:`Principal` class is also its own
context manager but does not allow the password to be specified.
:param password: <PASSWORD>
:param logon_type: one of the :const:`LOGON` values
"""
hLogon = self.logon(password, logon_type)
wrapped(win32security.ImpersonateLoggedOnUser, hLogon)
yield hLogon
wrapped(win32security.RevertToSelf)
def __enter__(self):
wrapped(win32security.ImpersonateLoggedOnUser, self.logon(logon_type=LOGON.LOGON_INTERACTIVE))
def __exit__(self, *exc_info):
wrapped(win32security.RevertToSelf)
class User(Principal):
@classmethod
def create(cls, username, password, system=None):
"""Create a new user with `username` and `password`. Return
a :class:`User` for the new user.
:param username: username of the new user. Must not already exist on `system`
:param password: password for the new user. Must meet security policy on `system`
:param system: optional system name
:returns: a :class:`User` for `username`
"""
user_info = dict(
name = username,
password = password,
priv = USER_PRIV.USER,
home_dir = None,
comment = None,
flags = UF.SCRIPT,
script_path = None
)
wrapped(win32net.NetUserAdd, system, 1, user_info)
return cls.from_string(username, system)
def delete(self):
"""Delete this user from `system`.
:param system: optional security authority
"""
wrapped(win32net.NetUserDel, self.system, self.name)
def groups(self):
"""Yield the groups this user belongs to
:param system: optional security authority
"""
for group_name, attributes in wrapped(win32net.NetUserGetGroups, self.system, self.name):
yield group(group_name)
for group_name in wrapped(win32net.NetUserGetLocalGroups, self.system, self.name):
yield group(group_name)
def join(self, other_group):
"""Add this user to a group
:param other_group: anything
# Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: joaander / All Developers are free to add commands for new features
r""" Update particle properties.
When an updater is specified, it acts on the particle system each time step to change
it in some way. See the documentation of specific updaters to find out what they do.
"""
from hoomd import _hoomd
from hoomd.md import _md
import hoomd
from hoomd.update import _updater
import math
def _table_eval(r, rmin, rmax, XB, M, L, dr):
i = int(round((r - rmin) / dr))
return (XB[i], M[i], L[i])
class popbd(_updater):
r"""
Args:
group: group of particles to act on
nlist: neighbor list object
seed: rng seed
integrator: integrator object (its timestep ``dt`` is passed to the updater)
table_width: number of rows in the bond table read by set_from_file()
period: period at which to evaluate the updater
"""
def __init__(self, group, nlist, seed, integrator, table_width, period=1):
hoomd.util.print_status_line()
# initialize base class
_updater.__init__(self)
# create the c++ mirror class
self.cpp_updater = _md.PopBD(
hoomd.context.current.system_definition,
group.cpp_group,
nlist.cpp_nlist,
seed,
integrator.dt,
period,
table_width,
)
phase = 0
self.table_width = table_width
self.setupUpdater(period, phase)
def set_params(self, r_cut, bond_type, n_polymer):
self.check_initialization()
self.cpp_updater.setParams(r_cut, bond_type, n_polymer)
# store metadata
# metadata_fields = ['r_cut', 'bond_type', 'prob_form', 'prob_break']
def set_from_file(self, filename):
r""" Set a bond pair interaction from a file.
Args:
filename (str): Name of the file to read
The provided file specifies V and F at equally spaced r values.
Example::
#r XB M L
1.0
1.1
1.2
1.3
1.4 0.0
1.5 -1.0
The first r value sets ``rmin``, the last sets ``rmax``. Any line with # as the first non-whitespace character is treated as a comment. The ``r`` values must monotonically increase and be equally spaced. The table is read directly into the grid points used to evaluate :math:`F_{\mathrm{user}}(r)` and :math:`V_{\mathrm{user}}(r)`.
"""
hoomd.util.print_status_line()
# open the file
f = open(filename)
r_table = []
XB_table = []
M_table = []
L_table = []
# read in lines from the file
for line in f.readlines():
line = line.strip()
# skip comment lines
if line[0] == "#":
continue
# split out the columns
cols = line.split()
values = [float(c) for c in cols]
# validate the input
if len(values) != 4:
hoomd.context.msg.error(
"bond.table: file must have exactly 4 columns\n"
)
raise RuntimeError("Error reading table file")
# append to the tables
r_table.append(values[0])
XB_table.append(values[1])
M_table.append(values[2])
L_table.append(values[3])
# validate input
if self.table_width != len(r_table):
hoomd.context.msg.error(
"bond.table: file must have exactly " + str(self.table_width) + " rows\n"
)
raise RuntimeError("Error reading table file")
# extract rmin and rmax
rmin_table = r_table[0]
rmax_table = r_table[-1]
# check for even spacing
dr = (rmax_table - rmin_table) / float(self.table_width - 1)
for i in range(0, self.table_width):
r = rmin_table + dr * i
if math.fabs(r - r_table[i]) > 1e-3:
hoomd.context.msg.error(
"bond.table: r must be monotonically increasing and evenly spaced\n"
)
raise RuntimeError("Error reading table file")
XB_hoomd_table = _hoomd.std_vector_scalar();
M_hoomd_table = _hoomd.std_vector_scalar();
L_hoomd_table = _hoomd.std_vector_scalar();
# evaluate each point of the function
for i in range(0, self.table_width):
r = rmin_table + dr * i;
(XB, M, L) = _table_eval(r, rmin_table, rmax_table, XB_table, M_table, L_table, dr);
# fill out the tables
XB_hoomd_table.append(XB);
M_hoomd_table.append(M);
L_hoomd_table.append(L);
hoomd.util.quiet_status()
self.cpp_updater.setTable(XB_hoomd_table, M_hoomd_table, L_hoomd_table, rmin_table, rmax_table)
hoomd.util.unquiet_status()
class rescale_temp(_updater):
r""" Rescales particle velocities.
Args:
kT (:py:mod:`hoomd.variant` or :py:obj:`float`): Temperature set point (in energy units)
period (int): Velocities will be rescaled every *period* time steps
phase (int): When -1, start on the current time step. When >= 0, execute on steps where *(step + phase) % period == 0*.
Every *period* time steps, particle velocities and angular momenta are rescaled by equal factors
so that they are consistent with a given temperature in the equipartition theorem
.. math::
\langle 1/2 m v^2 \rangle = k_B T
\langle 1/2 I \omega^2 \rangle = k_B T
.. attention::
:py:class:`rescale_temp` does **not** run on the GPU, and will significantly slow down simulations.
Examples::
update.rescale_temp(kT=1.2)
rescaler = update.rescale_temp(kT=0.5)
update.rescale_temp(period=100, kT=1.03)
update.rescale_temp(period=100, kT=hoomd.variant.linear_interp([(0, 4.0), (1e6, 1.0)]))
"""
def __init__(self, kT, period=1, phase=0):
hoomd.util.print_status_line()
# initialize base class
_updater.__init__(self)
# setup the variant inputs
kT = hoomd.variant._setup_variant_input(kT)
# create the compute thermo
thermo = hoomd.compute._get_unique_thermo(group=hoomd.context.current.group_all)
# create the c++ mirror class
self.cpp_updater = _md.TempRescaleUpdater(
hoomd.context.current.system_definition, thermo.cpp_compute, kT.cpp_variant
)
self.setupUpdater(period, phase)
# store metadata
self.kT = kT
self.period = period
self.metadata_fields = ["kT", "period"]
def set_params(self, kT=None):
r""" Change rescale_temp parameters.
Args:
kT (:py:mod:`hoomd.variant` or :py:obj:`float`): New temperature set point (in energy units)
Examples::
rescaler.set_params(kT=2.0)
"""
hoomd.util.print_status_line()
self.check_initialization()
if kT is not None:
kT = hoomd.variant._setup_variant_input(kT)
self.cpp_updater.setT(kT.cpp_variant)
self.kT = kT
class zero_momentum(_updater):
r""" Zeroes system momentum.
Args:
period (int): Momentum will be zeroed every *period* time steps
phase (int): When -1, start on the current time step. When >= 0, execute on steps where *(step + phase) % period == 0*.
Every *period* time steps, particle velocities are modified such that the total linear
momentum of the system is set to zero.
Examples::
update.zero_momentum()
zeroer= update.zero_momentum(period=10)
"""
def __init__(self, period=1, phase=0):
hoomd.util.print_status_line()
# initialize base class
_updater.__init__(self)
# create the c++ mirror class
self.cpp_updater = _md.ZeroMomentumUpdater(
hoomd.context.current.system_definition
)
self.setupUpdater(period, phase)
# store metadata
self.period = period
self.metadata_fields = ["period"]
class enforce2d(_updater):
r""" Enforces 2D simulation.
Every time step, particle velocities and accelerations are modified so that their z components are 0: forcing
2D simulations when other calculations may cause particles to drift out of the plane. Using enforce2d is only
allowed when the system is specified as having only 2 dimensions.
Examples::
update.enforce2d()
"""
def __init__(self):
hoomd.util.print_status_line()
period = 1
# initialize base class
_updater.__init__(self)
# create the c++ mirror class
if not hoomd.context.exec_conf.isCUDAEnabled():
self.cpp_updater = _md.Enforce2DUpdater(
hoomd.context.current.system_definition
)
else:
self.cpp_updater = _md.Enforce2DUpdaterGPU(
hoomd.context.current.system_definition
)
self.setupUpdater(period)
class constraint_ellipsoid(_updater):
r""" Constrain particles to the surface of a ellipsoid.
Args:
group (:py:mod:`hoomd.group`): Group for which the update will be set
P (tuple): (x,y,z) tuple indicating the position of the center of the ellipsoid (in distance units).
rx (float): radius of an ellipsoid in the X direction (in distance units).
ry (float): radius of an ellipsoid in the Y direction (in distance units).
rz (float): radius of an ellipsoid in the Z direction (in distance units).
r (float): radius of a sphere (in distance units), such that r=rx=ry=rz.
:py:class:`constraint_ellipsoid` specifies that all particles are constrained
to the surface of an ellipsoid. Each time step particles are projected onto the surface of the ellipsoid.
Method from: http://www.geometrictools.com/Documentation/DistancePointEllipseEllipsoid.pdf
.. attention::
For the algorithm to work, we must have :math:`rx >= rz,~ry >= rz,~rz > 0`.
Note:
This method does not properly conserve virial coefficients.
Note:
random thermal forces from the integrator are applied in 3D not 2D, therefore they aren't fully accurate.
Suggested use is therefore only for T=0.
Examples::
update.constraint_ellipsoid(P=(-1,5,0), r=9)
update.constraint_ellipsoid(rx=7, ry=5, rz=3)
"""
def __init__(self, group, r=None, rx=None, ry=None, rz=None, P=(0, 0, 0)):
hoomd.util.print_status_line()
period = 1
# Error out in MPI simulations
if _hoomd.is_MPI_available():
if (
hoomd.context.current.system_definition.getParticleData().getDomainDecomposition()
):
hoomd.context.msg.error(
"constrain.ellipsoid is not supported in multi-processor simulations.\n\n"
)
raise RuntimeError("Error initializing updater.")
# Error out if no radii are set
if r is None and rx is None and ry is None and rz is None:
hoomd.context.msg.error(
"no radii were defined in update.constraint_ellipsoid.\n\n"
)
raise RuntimeError("Error initializing updater.")
# initialize the base class
_updater.__init__(self)
# Set parameters
P = _hoomd.make_scalar3(P[0], P[1], P[2])
if r is not None:
rx = ry = rz = r
# create the c++ mirror class
if not hoomd.context.exec_conf.isCUDAEnabled():
self.cpp_updater = _md.ConstraintEllipsoid(
hoomd.context.current.system_definition, group.cpp_group, P, rx, ry, rz
)
else:
self.cpp_updater = _md.ConstraintEllipsoidGPU(
hoomd.context.current.system_definition, group.cpp_group, P, rx, ry, rz
)
self.setupUpdater(period)
# store metadata
self.group = group
self.P = P
self.rx = rx
self.ry = ry
self.rz = rz
self.metadata_fields = ["group", "P", "rx", "ry", "rz"]
class mueller_plathe_flow(_updater):
r""" Updater class for a shear flow according
to an algorithm published by Mueller Plathe.:
"<NAME>. Reversing the perturbation in nonequilibrium | |
#!/usr/bin/env python
import base64
import wave
import json
import array
import math
from bridges.audio_channel import AudioChannel
class AudioClip(object):
"""
@brief This class provides support for reading, modifying, and playing audio waveforms.
This class provides a way to represent an AudioClip (think of a
WAV file) in Bridges as waveforms.
An AudioClip can be composed of multiple channels: a stereo sound
would be composed of 2 channels (Left and Right), a mono sound
would be composed of a single channel. A 5.1 sound would be
composed of 6 channels. When building an AudioClip from a file, the
number of channels is taken from the file; some constructors have a
num_channels parameter that enables to pass the number of channels
explicitly. If unsure, one can know how many channels are in an
audio clip using get_num_channels().
Each channel is essentially a 1D signal. That is to say, it is an
array of values that represent how far the membrane of a speaker
should be from its resting position. The quality of the sound is
controlled by two parameters: sampling rate and sampling depth.
Sampling rate tells how many positions per second are encoded by
the AudioClip. It is expressed in Hertz. CD quality is 44100Hz;
while walkie-talkies use 8000Hz. It is set automatically if read
from a file; or it can be passed as the sampleRate parameter to
some of the constructors. The sampling rate can be obtained from an
AudioClip using get_sample_rate().
The length of an AudioClip is expressed in number of samples. So if
an AudioClip is composed of 16,000 samples with a sampling rate of
8000Hz, the clip would be 2 seconds long. The number of samples
can obtained with get_sample_count(); it is set from a file or can be
passed as the sampleCount parameter of some of the constructor.
The sampling depth indicates how many different positions the
membrane can take. It is typically expressed in bits with supported
values being 8-bit, 16-bit, 24-bit, and 32-bit. If a clip is
encoded with a depth of 8 bits, the membrane can take 2^8 different
position ranging from -128 to +127, with 0 being the resting
position. The sampling depth is read from files or passed as the
sampleBits parameter of the constructor. The sampling depth of an
existing clip can be obtained with get_sample_bits().
The individual samples are accessed with the get_sample() and
set_sample() functions. The samples are integer values in the
[-2^(get_sample_bits()-1) ; 2^(get_sample_bits()-1)[ range. The
functions allow specifying the channel and the sample index.
@author <NAME>, <NAME>
@date 2020, 1/31/2020, 2021
"""
def __init__(self, filepath: str="", sample_count: int=0, num_channels: int=1, sample_bits: int=32, sample_rate: int=44100) -> None:
"""
AudioBase constructor.
specify either a filepath or all the other parameters.
Args:
(str) filepath: name of the wav file to creat a clip of. If this parameter is used, all the other ones are ignored.
(int) sample_count: The total number of samples in this audio object
(int) num_channels: number of channels (stereo would be 2)
(int) sample_rate: The number of samples in 1 second of audio (default to cd quality: 44100)
(int) sample_bits: Bit depth, that is to say, each sample will be in the [-2^(get_sample_bits()-1) ; 2^(get_sample_bits()-1)[ range
Returns:
None
"""
if filepath != "":
self._from_filepath(filepath)
return
if sample_count > 1000000000:
raise ValueError("Maximum frames exceeded with value %d" % self.get_sample_count())
if sample_bits != 8 and sample_bits != 16 and sample_bits != 24 and sample_bits != 32:
raise ValueError("sample_bits should be 8, 16, 24, or 32")
if num_channels <= 0:
raise ValueError("num_channels should be positive")
if sample_rate <= 0:
raise ValueError("sample_rate should be positive")
self.sample_count = sample_count
self.sample_rate = sample_rate
self.sample_bits = sample_bits
self.num_channels = num_channels
# Initialize the channels
self._channels = []
for i in range(self.num_channels):
self._channels.append(AudioChannel(sample_count=self.sample_count, sample_bits=self.sample_bits))
def _from_filepath(self, filepath: str):
with wave.open(filepath, "r") as f:
self.__init__(sample_count=f.getnframes(), sample_rate=f.getframerate(), num_channels=f.getnchannels(), sample_bits=f.getsampwidth()*8)
self.framebytes = f.readframes(f.getnframes())
framebytes = self.framebytes
channel = 0
count = 0
for i in range(0, len(framebytes), self.get_sample_bytes()):
if self.get_sample_bytes() == 1:
val = int.from_bytes(framebytes[i:i+self.get_sample_bytes()], byteorder='little', signed=False)
val = val - 128
self.set_sample(channel, count, val)
else:
val = int.from_bytes(framebytes[i:i+self.get_sample_bytes()], byteorder='little', signed=True)
self.set_sample(channel, count, val)
channel += 1
if channel >= f.getnchannels():
count += 1
channel = 0
def get_num_channels(self) -> int:
"""
Return the number of channels in this AudioClip. 1 for mono, 2 for stereo, etc.
Returns:
int: The number of channels of audio samples this object holds.
"""
return self.num_channels
def get_channel(self, index: int) -> AudioChannel:
"""
Get the audio channel at index. The index should be less than get_num_channels().
Args:
(int) index: The index of the channel to get. 0 for front-left, 1 for front-right, etc.
Returns:
AudioChannel: The audio channel at index
"""
return self._channels[index]
def get_sample_rate(self) -> int:
"""
Get the sample rate of this audio clip. This is the number of samples that are taken in one second.
Returns:
int: The sample rate or number of samples in 1 second of audio
"""
return self.sample_rate
def get_sample_count(self) -> int:
"""
Get the number of samples in each channel of this audio object.
Each channel will contain this number of samples.
Returns:
int: The total number of samples in this audio object
"""
return self.sample_count
def get_sample(self, channel: int, index: int) -> int:
"""
Get the sample at the index of the sample data from a specific channel.
Args:
(int) channel: The index of the channel to get. 0 for front-left, 1 for front-right, etc.
(int) index: The index of the sample to get. From 0 - get_sample_count()
Returns:
int: The sample in the [-2^(get_sample_bits()-1) ; 2^(get_sample_bits()-1)) range
"""
value = self.get_channel(channel).get_sample(index)
return int(value)
def set_sample(self, channel: int, index: int, value: int) -> None:
"""
Set the sample at the index of the sample data to value
Args:
(int) channel: The index of the channel to get. 0 for front-left, 1 for front-right, etc.
(int) index: The index of sampledata to set which must be less than get_sample_count()
(int) value: The value to set the sample to which must be a valid signed integer with bit length get_sample_bits(). That is to say in the [-2^(get_sample_bits()-1) ; 2^(get_sample_bits()-1)) range).
Returns:
None
"""
if (value < -2**(self.get_sample_bits()-1)) or (value >= 2**(self.get_sample_bits()-1)):
raise ValueError("Audio value Out of Bound. Should be in [-2^(get_sample_bits()-1) ; 2^(get_sample_bits()-1)) range")
self.get_channel(channel).set_sample(index, int(value))
def get_sample_bits(self) -> int:
"""
Get the number of bits for the samples in this audio clip. Will be 8, 16, 24, or 32 bits.
Returns:
int: The number of bits for each sample
"""
return self.sample_bits
def get_sample_bytes(self) -> int:
"""
Get the number of bytes for the samples in this audio clip. Will be 1, 2, 3, or 4 bits.
Returns:
int: The number of bytes for each sample
"""
return self.sample_bits // 8
def _get_type_code(self) -> str:
if self.get_sample_bytes() == 1:
return "b"
elif self.get_sample_bytes() == 2:
return "h"
elif self.get_sample_bytes() == 3:
return "f"
elif self.get_sample_bytes() == 4:
return "l"
else:
raise ValueError("Wave file sample bytes of unsupported length %d, supported lengths are 8, 16, 24, and 32 bit" % (self.get_sample_bytes() * 8))
def get_data_structure_type(self) -> str:
"""
Get the data structure type
Returns:
str : data structure type
"""
return "Audio"
def get_data_structure_representation(self) -> dict:
""" Return a dictionary of the data in this audio file
Returns:
dict: The data of this audio file
"""
json_dict = {}
json_dict["encoding"] = "RAW"
json_dict["numChannels"] = self.num_channels
json_dict["sampleRate"] = self.get_sample_rate()
json_dict["bitsPerSample"] = self.get_sample_bits()
json_dict["numSamples"] = self.get_sample_count()
# Combine all channel data
framedata = []
for i in range(self.sample_count):
for c in range(self.num_channels):
# Go straight to channel sample for correct bit data
framedata.append(self._channels[c].get_sample(i))
if self.get_sample_bytes() == 4:
newarr = []
for val in framedata:
minmax32 = (2 ** 32 / 2.0) - 1
minmax16 = (2 ** 16 / 2.0) - 1
def __init__(self, share_params=True, *args, **kwargs):
super().__init__(*args, **kwargs)
self.share_params = share_params
def __call__(self, results):
"""Call function.
For each dict in results, call the call function of `Resize` to resize
image and corresponding annotations.
Args:
results (list[dict]): List of dict that from
:obj:`mmtrack.CocoVideoDataset`.
Returns:
list[dict]: List of dict that contains resized results,
'img_shape', 'pad_shape', 'scale_factor', 'keep_ratio' keys
are added into result dict.
"""
outs, scale = [], None
for i, _results in enumerate(results):
if self.share_params and i > 0:
_results['scale'] = scale
_results = super().__call__(_results)
if self.share_params and i == 0:
scale = _results['scale']
outs.append(_results)
return outs
@PIPELINES.register_module()
class SeqNormalize(Normalize):
"""Normalize images.
Please refer to `mmdet.datasets.pipelines.transforms.py:Normalize` for
detailed docstring.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, results):
"""Call function.
For each dict in results, call the call function of `Normalize` to
normalize image.
Args:
results (list[dict]): List of dict that from
:obj:`mmtrack.CocoVideoDataset`.
Returns:
list[dict]: List of dict that contains normalized results,
'img_norm_cfg' key is added into result dict.
"""
outs = []
for _results in results:
_results = super().__call__(_results)
outs.append(_results)
return outs
@PIPELINES.register_module()
class SeqRandomFlip(RandomFlip):
"""Randomly flip for images.
Please refer to `mmdet.datasets.pipelines.transforms.py:RandomFlip` for
detailed docstring.
Args:
share_params (bool): If True, share the flip parameters for all images.
Defaults to True.
"""
def __init__(self, share_params, *args, **kwargs):
super().__init__(*args, **kwargs)
self.share_params = share_params
def __call__(self, results):
"""Call function.
For each dict in results, call `RandomFlip` to randomly flip image.
Args:
results (list[dict]): List of dict that from
:obj:`mmtrack.CocoVideoDataset`.
Returns:
list[dict]: List of dict that contains flipped results, 'flip',
'flip_direction' keys are added into the dict.
"""
if self.share_params:
if isinstance(self.direction, list):
# None means non-flip
direction_list = self.direction + [None]
else:
# None means non-flip
direction_list = [self.direction, None]
if isinstance(self.flip_ratio, list):
non_flip_ratio = 1 - sum(self.flip_ratio)
flip_ratio_list = self.flip_ratio + [non_flip_ratio]
else:
non_flip_ratio = 1 - self.flip_ratio
# exclude non-flip
single_ratio = self.flip_ratio / (len(direction_list) - 1)
flip_ratio_list = [single_ratio] * (len(direction_list) -
1) + [non_flip_ratio]
cur_dir = np.random.choice(direction_list, p=flip_ratio_list)
flip = cur_dir is not None
flip_direction = cur_dir
for _results in results:
_results['flip'] = flip
_results['flip_direction'] = flip_direction
outs = []
for _results in results:
_results = super().__call__(_results)
outs.append(_results)
return outs
@PIPELINES.register_module()
class SeqPad(Pad):
"""Pad images.
Please refer to `mmdet.datasets.pipelines.transforms.py:Pad` for detailed
docstring.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, results):
"""Call function.
For each dict in results, call the call function of `Pad` to pad image.
Args:
results (list[dict]): List of dict that from
:obj:`mmtrack.CocoVideoDataset`.
Returns:
list[dict]: List of dict that contains padding results,
'pad_shape', 'pad_fixed_size' and 'pad_size_divisor' keys are
added into the dict.
"""
outs = []
for _results in results:
_results = super().__call__(_results)
outs.append(_results)
return outs
@PIPELINES.register_module()
class SeqRandomCrop(object):
"""Sequentially random crop the images & bboxes & masks.
The absolute `crop_size` is sampled based on `crop_type` and `image_size`,
then the cropped results are generated.
Args:
crop_size (tuple): The relative ratio or absolute pixels of
height and width.
allow_negative_crop (bool, optional): Whether to allow a crop that does
not contain any bbox area. Default False.
share_params (bool, optional): Whether share the cropping parameters
for the images.
bbox_clip_border (bool, optional): Whether clip the objects outside
the border of the image. Defaults to True.
Note:
- If the image is smaller than the absolute crop size, return the
original image.
- The keys for bboxes, labels and masks must be aligned. That is,
`gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and
`gt_bboxes_ignore` corresponds to `gt_labels_ignore` and
`gt_masks_ignore`.
- If the crop does not contain any gt-bbox region and
`allow_negative_crop` is set to False, skip this image.
"""
def __init__(self,
crop_size,
allow_negative_crop=False,
share_params=False,
bbox_clip_border=False):
assert crop_size[0] > 0 and crop_size[1] > 0
self.crop_size = crop_size
self.allow_negative_crop = allow_negative_crop
self.share_params = share_params
self.bbox_clip_border = bbox_clip_border
# The key correspondence from bboxes to labels and masks.
self.bbox2label = {
'gt_bboxes': ['gt_labels', 'gt_instance_ids'],
'gt_bboxes_ignore': ['gt_labels_ignore', 'gt_instance_ids_ignore']
}
self.bbox2mask = {
'gt_bboxes': 'gt_masks',
'gt_bboxes_ignore': 'gt_masks_ignore'
}
def get_offsets(self, img):
"""Random generate the offsets for cropping."""
margin_h = max(img.shape[0] - self.crop_size[0], 0)
margin_w = max(img.shape[1] - self.crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
return offset_h, offset_w
def random_crop(self, results, offsets=None):
"""Call function to randomly crop images, bounding boxes, masks,
semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
offsets (tuple, optional): Pre-defined offsets for cropping.
Default to None.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
for key in results.get('img_fields', ['img']):
img = results[key]
if offsets is not None:
offset_h, offset_w = offsets
else:
offset_h, offset_w = self.get_offsets(img)
results['img_info']['crop_offsets'] = (offset_h, offset_w)
crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
# crop the image
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
img_shape = img.shape
results[key] = img
results['img_shape'] = img_shape
# crop bboxes accordingly and clip to the image boundary
for key in results.get('bbox_fields', []):
# e.g. gt_bboxes and gt_bboxes_ignore
bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
dtype=np.float32)
bboxes = results[key] - bbox_offset
if self.bbox_clip_border:
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
bboxes[:, 3] > bboxes[:, 1])
# If the crop does not contain any gt-bbox area and
# self.allow_negative_crop is False, skip this image.
if (key == 'gt_bboxes' and not valid_inds.any()
and not self.allow_negative_crop):
return None
results[key] = bboxes[valid_inds, :]
# label fields. e.g. gt_labels and gt_labels_ignore
label_keys = self.bbox2label.get(key)
for label_key in label_keys:
if label_key in results:
results[label_key] = results[label_key][valid_inds]
# mask fields, e.g. gt_masks and gt_masks_ignore
mask_key = self.bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][
valid_inds.nonzero()[0]].crop(
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
# crop semantic seg
for key in results.get('seg_fields', []):
results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
return results
def __call__(self, results):
"""Call function to sequentially randomly crop images, bounding boxes,
masks, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
if self.share_params:
offsets = self.get_offsets(results[0]['img'])
else:
offsets = None
outs = []
for _results in results:
_results = self.random_crop(_results, offsets)
if _results is None:
return None
outs.append(_results)
return outs
@PIPELINES.register_module()
class SeqPhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
8. randomly swap channels
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
"""
def __init__(self,
share_params=True,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
self.share_params = share_params
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def get_params(self):
"""Generate parameters."""
params = dict()
# delta
if np.random.randint(2):
params['delta'] = np.random.uniform(-self.brightness_delta,
self.brightness_delta)
else:
params['delta'] = None
# mode
mode = np.random.randint(2)
params['contrast_first'] = True if mode == 1 else False
# alpha
if np.random.randint(2):
params['alpha'] = np.random.uniform(self.contrast_lower,
self.contrast_upper)
else:
params['alpha'] = None
# saturation
if np.random.randint(2):
params['saturation'] = np.random.uniform(self.saturation_lower,
self.saturation_upper)
else:
params['saturation'] = None
# hue
if np.random.randint(2):
params['hue'] = np.random.uniform(-self.hue_delta, self.hue_delta)
else:
params['hue'] = None
# swap
if np.random.randint(2):
params['permutation'] = np.random.permutation(3)
else:
params['permutation'] = None
return params
def photo_metric_distortion(self, results, params=None):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
params (dict, optional): Pre-defined parameters. Default to None.
Returns:
dict: Result dict with images distorted.
"""
if params is None:
params = self.get_params()
results['img_info']['color_jitter'] = params
if 'img_fields' in results:
assert results['img_fields'] == ['img'], \
'Only single img_fields is allowed'
img = results['img']
assert img.dtype == np.float32, \
'PhotoMetricDistortion needs the input image of dtype np.float32,'\
' please set "to_float32=True" in "LoadImageFromFile" pipeline'
# random brightness
if params['delta'] is not None:
# Novartis/EQP-QM -- computeGeneLengths.py
#!/usr/bin/env python
## Copyright 2015 Novartis Institutes for BioMedical Research
## Inc.Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing,
## software distributed under the License is distributed on an "AS IS"
## BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
## implied. See the License for the specific language governing
## permissions and limitations under the License.
################################################################################
##
## Script to compute the length of genes and transcripts based on GTF file
##
## Assumptions: GTF file entries of the same transcript are sorted by ascending
## coordinates for transcripts on the plus strand and descending
## coordinates for transcripts on the minus strand; GTF filr
## contains exonNumber entries for each exon.
##
## Issues:
##
## Genes and transcripts can map to different chromosomes and strands or
## to multiple locations on the same strand of a chromosome. We use
## the exonNumber of the GTF file to distinguish between different locations.
##
################################################################################
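## Example of a GTF exon entry this script expects (illustrative, not taken from a real file):
## chr1  refseq  exon  11874  12227  .  +  .  gene_id "GENE1"; transcript_id "TRANS1"; exon_number "1";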
import sys
import argparse
import re
import os.path
import numpy
## execfile(os.path.join(os.environ["HOME"], "ngs", "pipelines", "exon-pipeline", "bin", "pipeline-setup-scripts", "util-scripts", "computeGeneLengths.py"))
################################################################################
##
## Define command line options
##
################################################################################
parser = argparse.ArgumentParser(description='Compute the length of genes and transcripts based on a GTF file')
parser.add_argument('-d', type=int, default=0, dest="debugLevel", metavar="INT", help='debug level [0, no debug output]')
parser.add_argument('-g', dest="gtfFilename", metavar="<GTF file>", help='GTF file with the exon coordinates')
parser.add_argument('-o', dest="geneLengthFile", default="", metavar="<Gene len. file>", help='File with the lengths of genes')
parser.add_argument('-w', dest="warningsOn", action='store_true', help='Flag to set warnings on')
if len(sys.argv) > 1:
args = parser.parse_args(sys.argv[1:])
else:
organism = "human"
organism_short="hs"
# organism = "mouse"
# organism_short="mm"
# gene_model="ensembl"
gene_model="refseq"
inputArgs = []
inputArgs.append("-g")
inputArgs.append(os.path.join(projectDir, "gtf-files", gene_model + "_rna_" + organism_short + ".gtf"))
inputArgs.append("-o")
inputArgs.append(os.path.join(projectDir, "gtf-files", gene_model + "_rna_" + organism_short + "-gene-lengths.txt"))
args = parser.parse_args(inputArgs)
debugLevel = args.debugLevel
gtfFilename = args.gtfFilename
geneLengthFile = args.geneLengthFile
warningsOn = args.warningsOn
################################################################################
##
## head
##
################################################################################
def head (x, n=10):
if len(x) == 1 or isinstance(x, str):
return x
if isinstance(x, dict):
L = {}
else:
L = []
for y in sorted(x)[0:n]:
if isinstance(x, dict):
L[y] = x[y]
else:
L.append(y)
return L
################################################################################
##
## Check for overlap of an interval with a list of intervals
##
################################################################################
def overlaps (interval1, interval2):
if (interval1[0] <= interval2[0] and interval2[0] <= interval1[1]) or (interval2[0] <= interval1[0] and interval1[0] <= interval2[1]):
return True
return False
################################################################################
##
## union
##
################################################################################
def union (interval1, interval2):
return [min (interval1[0], interval2[0]), max (interval1[1], interval2[1])]
################################################################################
##
## contains
##
################################################################################
def contains (interval1, interval2):
return (interval1[0] <= interval2[0]) and (interval1[1] >= interval2[1])
################################################################################
##
## Intersection
##
################################################################################
def intersection (interval1, interval2):
leftEndPoint = max (interval1[0], interval2[0])
rightEndPoint = min (interval1[1], interval2[1])
if leftEndPoint <= rightEndPoint:
return rightEndPoint - leftEndPoint + 1
return 0
################################################################################
##
## unique
##
################################################################################
def unique (seq, idfun=None):
# order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun (item)
# in old Python versions:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
################################################################################
##
## getOverlapLength
##
################################################################################
def getOverlapLength (interval1, interval2):
leftEndPoint = max([min(interval1), min(interval2)])
rightEndPoint = min([max(interval1), max(interval2)])
length1 = max(interval1) - min(interval1) + 1
length2 = max(interval2) - min(interval2) + 1
maxDist = max (length1/2, length2/2)
## We consider intervals that are less than half their lengths
## apart also overlapping
return rightEndPoint - leftEndPoint + 1 + maxDist
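## Worked example (illustrative): interval1 = [100, 200], interval2 = [260, 300]
##   leftEndPoint = 260, rightEndPoint = 200, maxDist = max(101/2, 41/2) = 50.5
##   return value = 200 - 260 + 1 + 50.5 = -8.5 (< 0, so the intervals do not count as overlapping)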
################################################################################
##
## intervalListOverlap
##
################################################################################
def intervalListsOverlap (intervalList1, intervalList2):
interval1 = [min(min(intervalList1)), max(max(intervalList1))]
interval2 = [min(min(intervalList2)), max(max(intervalList2))]
return getOverlapLength (interval1, interval2) >= 0
################################################################################
##
## readGtfFile
##
## Use the exonNumber fields of each transcript to infer how many alignments a
## transcript has. This is captured by the variable alignmentNumber.
##
## Each of the three transcript output variables has an entry for each transcript
## and each alignmentNumber of each transcript.
##
################################################################################
def readGtfFile (gtfFilename):
transcriptExonList = {}
transcriptChromosomeStrand = {}
geneTranscriptMap = {}
transcriptAlignmentExonNumbers = {}
lineNumChunkSize = 50000
try:
gtfFile = open(gtfFilename)
except IOError as e:
raise Exception(gtfFilename + " cannot be opened ... skipping\n" +
"Unix error code and message: " + str(e[0]) + " " + str(e[1]))
print("Opening file: " + gtfFilename, file=sys.stderr)
lineNum = 0
oldTranscriptId = ""
alignmentNumber = 1
for line in gtfFile:
line = line.rstrip ()
gtfEntries = line.split("\t")
chromosome = gtfEntries[0]
source = gtfEntries[1]
featureType = gtfEntries[2]
interval = [int(gtfEntries[3]), int(gtfEntries[4])]
strand = gtfEntries[6]
frame = gtfEntries[7]
score = 0
if gtfEntries[5] != ".":
score = float(gtfEntries[5])
exonId = "/".join(map(str, [chromosome] + interval + [strand]))
if featureType == "exon":
annotationEntries = gtfEntries[8].split(";")
geneId = ""
transcriptId = ""
exonNumber = -1
for annotationEntry in annotationEntries:
if annotationEntry != "":
annotationType, annotationValue, annotationEmpty = annotationEntry.strip().split('"')
if annotationEmpty != "":
raise Exception ("Unknown format of annotation entry: " + annotationEntry)
annotationType = annotationType.strip()
if annotationType == "gene_id":
geneId = annotationValue
elif annotationType == "transcript_id":
transcriptId = annotationValue
elif annotationType == "exon_number":
exonNumber = int(annotationValue)
if oldTranscriptId != transcriptId:
alignmentNumber = 1
oldTranscriptId = transcriptId
if exonNumber == -1:
raise Exception ("WARNING: no exon number for exon " + exonId + " of transcript " + transcriptId)
if not transcriptId in transcriptExonList:
transcriptExonList [transcriptId] = {}
transcriptChromosomeStrand [transcriptId] = {}
transcriptAlignmentExonNumbers[transcriptId] = {}
## Increment the alignment number if the current exon number already exists in the current alignment
## Note that GTF entries of other transcripts may occur between two alignments of the same transcript
while alignmentNumber in transcriptAlignmentExonNumbers[transcriptId] and \
exonNumber in transcriptAlignmentExonNumbers[transcriptId][alignmentNumber]:
alignmentNumber += 1
if not alignmentNumber in transcriptAlignmentExonNumbers[transcriptId]:
transcriptAlignmentExonNumbers[transcriptId][alignmentNumber] = [exonNumber]
else:
transcriptAlignmentExonNumbers[transcriptId][alignmentNumber].append(exonNumber)
if not alignmentNumber in transcriptChromosomeStrand[transcriptId]:
#print "transcriptChromosomeStrand of " + transcriptId + "." + str(alignmentNumber) + " set to " + chromosome + strand
transcriptChromosomeStrand[transcriptId][alignmentNumber] = chromosome + strand
elif transcriptChromosomeStrand[transcriptId][alignmentNumber] != chromosome + strand:
print("WARNING: Exon number " + str(exonNumber) + " of transcript " + transcriptId + " on chromosome/strand " + chromosome + strand + \
" is assigned to alignment " + str(alignmentNumber) + " on chromosome/strand " + transcriptChromosomeStrand[transcriptId][alignmentNumber], file=sys.stderr)
if alignmentNumber in transcriptExonList[transcriptId]:
transcriptExonList[transcriptId][alignmentNumber].append(interval)
else:
transcriptExonList[transcriptId][alignmentNumber] = [interval]
if exonNumber in transcriptExonList[transcriptId][alignmentNumber]:
        print("Exon number: " + str(exonNumber) + " already stored for alignment " + str(alignmentNumber) + " of " + transcriptId, file=sys.stderr)
sys.exit(1)
if geneId != "" and transcriptId != "":
if geneId in geneTranscriptMap:
if not transcriptId in geneTranscriptMap[geneId]:
geneTranscriptMap[geneId].append(transcriptId)
else:
geneTranscriptMap[geneId] = [transcriptId]
lineNum = lineNum + 1
if lineNum % lineNumChunkSize == 0:
sys.stdout.write(".")
sys.stdout.flush()
if lineNum > lineNumChunkSize:
sys.stdout.write("\n")
gtfFile.close()
return transcriptChromosomeStrand, transcriptExonList, geneTranscriptMap
################################################################################
##
## computeIntervalListLength
##
################################################################################
def computeIntervalListLength (intervalList):
intervalList.sort(key=lambda tup: tup[0])
oldInterval = [-1,-1]
combinedList = []
for interval in sorted(intervalList):
if oldInterval[1] < interval[0]:
combinedList.append(oldInterval)
oldInterval = interval
else:
oldInterval = [oldInterval[0], max(oldInterval[1], interval[1])]
combinedList.append(oldInterval)
length = 0
for interval in combinedList[1:]:
#print length
#print (interval[1] - interval[0] + 1)
length = length + interval[1] - interval[0] + 1
return length
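## Worked example (illustrative): [[5, 20], [1, 10], [30, 40]] is sorted and merged to
## [[1, 20], [30, 40]], giving a length of (20 - 1 + 1) + (40 - 30 + 1) = 31.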
################################################################################
##
## sumIntervals
##
################################################################################
def computeSumIntervalLengths (countObjects):
sumIntervalLengthMap = {}
for countObjectId in countObjects:
if not countObjectId in sumIntervalLengthMap:
sumIntervalLengthMap[countObjectId] = {}
countObject = countObjects[countObjectId]
for alignmentNumber in countObject:
sumIntervalLength = 0
for interval in countObjects[countObjectId][alignmentNumber]:
sumIntervalLength = sumIntervalLength + interval[1] - interval[0] + 1
sumIntervalLengthMap[countObjectId][alignmentNumber] = sumIntervalLength
return sumIntervalLengthMap
################################################################################
##
## findAlignmentMatches
##
## For a given query interval I on a chromosome/strand and a list of gene intervals
## look for gene intervals with a non-negative overlap on the same chromosome/strand.
##
################################################################################
def findAlignmentMatches (interval, chromosomeStrand, geneIntervals, geneChromosomeStrand, assignedGeneAlignmentNumbers):
alignmentNumbers = []
for alignmentNumber in [alignNum for alignNum in geneIntervals if not alignNum in assignedGeneAlignmentNumbers]:
if chromosomeStrand == geneChromosomeStrand[alignmentNumber]:
geneInterval = geneIntervals[alignmentNumber]
overlapLength = getOverlapLength (interval, geneInterval)
if overlapLength >= 0:
alignmentNumbers.append(alignmentNumber)
return alignmentNumbers
################################################################################
##
## assignTranscriptAlignments
##
## Assign the alignments of the transcripts to the alignments of the gene. This
## is done by finding the best match (via findBestAlignmentMatch) of each transcript
## alignment.
##
################################################################################
def assignTranscriptAlignments (geneId, geneTranscriptList, transcriptIntervals, transcriptExonList, transcriptChromosomeStrand):
geneIntervals = {}
geneExonList = {}
geneChromosomeStrand = {}
geneAlignmentTranscripts = {}
maxGeneAlignmentNumber = 0
for transcriptId in geneTranscriptList:
assignedGeneAlignmentNumbers = []
if len(geneIntervals) == | |
"""
input_to_store = []
print(self._prompt)
list_index = 1
user_input = self._get_and_validate_user_input(prompt=f"{list_index}.")
while user_input:
input_to_store.append(user_input)
list_index += 1
user_input = self._get_and_validate_user_input(prompt=f"{list_index}.")
if not input_to_store:
input_to_store = self._default
return input_to_store
def _get_and_validate_user_input(self, prompt=None):
"""
Prompts user for input from stdin.
Args:
prompt (basestring, optional): The text to prompt the user with.
Default: None (prompts with self._prompt)
Returns:
(`basestring`) the data input by the user. None if user inputs nothing.
"""
if prompt is None:
prompt = self._prompt
if not re.match(r'.*\s$', prompt):
# If the prompt doesn't end with a whitespace character, add a space for padding
prompt += " "
while True:
user_input = input(prompt)
if user_input:
if MongoFederationConfig.validate_input(self._name, user_input):
return user_input
else:
print(f"Input did not pass validation. Try again or skip the value.")
else:
return None
class MongoTestSuite(TestSuite):
"""
Test suite for SAML responses for comparison against known patterns and comparison values.
Attributes:
VALID_NAME_ID_FORMATS (`set` of `basestring`): acceptable formats for Name ID for MongoDB Cloud
REQUIRED_CLAIMS (`set` of `basestring`): claim attribute names that are required in SAML response
"""
VALID_NAME_ID_FORMATS = {
'urn:oasis:names:tc:SAML:1.0:nameid-format:unspecified',
'urn:oasis:names:tc:SAML:1.0:nameid-format:emailAddress',
'urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress',
'urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified'
}
REQUIRED_CLAIMS = {
'firstName',
'lastName',
'email'
}
def __init__(self, saml, comparison_values=None):
"""
Create test suite with supplied SAML and comparison data.
Args:
saml (BaseSamlParser): parsed SAML data
comparison_values (MongoFederationConfig, optional): comparison values to
compare with data in SAML response. Default: None (no comparison
tests will be performed)
"""
super().__init__()
self.set_context({
'saml': saml,
'comparison_values': comparison_values or MongoFederationConfig()
})
self._tests = self._get_tests()
@staticmethod
def _get_tests():
"""
Builds test objects for testing SAML data for patterns specific to MongoDB Cloud
and against supplied comparison values.
Any future tests should be added to this function in an order which makes logical sense
with the tests around it.
Returns:
(`list` of `TestDefinition`) test objects
"""
tests = [
# Name ID and Name ID format tests
TestDefinition("exists_name_id", MongoTestSuite.verify_name_id_exists,
required_context=['saml']),
TestDefinition("regex_name_id", MongoTestSuite.verify_name_id_pattern,
dependencies=['exists_name_id'],
required_context=['saml']),
TestDefinition("exists_name_id_format", MongoTestSuite.verify_name_id_format_exists,
dependencies=['exists_name_id'],
required_context=['saml']),
TestDefinition("regex_name_id_format", MongoTestSuite.verify_name_id_format,
dependencies=['exists_name_id_format'],
required_context=['saml']),
# Claim attribute tests
TestDefinition("exists_all_required_attributes", MongoTestSuite.verify_all_required_attributes_exist,
required_context=['saml']),
TestDefinition("exists_first_name", MongoTestSuite.verify_first_name_exists,
required_context=['saml']),
TestDefinition("regex_first_name", MongoTestSuite.verify_first_name_pattern,
dependencies=['exists_first_name'],
required_context=['saml']),
TestDefinition("exists_last_name", MongoTestSuite.verify_last_name_exists,
required_context=['saml']),
TestDefinition("regex_last_name", MongoTestSuite.verify_last_name_pattern,
dependencies=['exists_last_name'],
required_context=['saml']),
TestDefinition("exists_email", MongoTestSuite.verify_email_exists,
required_context=['saml']),
TestDefinition("regex_email", MongoTestSuite.verify_email_pattern,
dependencies=['exists_email'],
required_context=['saml']),
TestDefinition("exists_member_of", MongoTestSuite.verify_member_of_exists,
required_context=['saml']),
TestDefinition("member_of_not_empty", MongoTestSuite.verify_member_of_not_empty,
dependencies=['exists_member_of'],
required_context=['saml']),
TestDefinition("regex_member_of", MongoTestSuite.verify_member_of_pattern,
dependencies=['member_of_not_empty'],
required_context=['saml']),
# Claim attribute comparison tests
TestDefinition("exists_comparison_first_name", MongoTestSuite.verify_first_name_comparison_exists,
dependencies=['regex_first_name'],
required_context=['comparison_values']),
TestDefinition("compare_first_name", MongoTestSuite.verify_first_name,
dependencies=['exists_comparison_first_name'],
required_context=['saml', 'comparison_values']),
TestDefinition("exists_comparison_last_name", MongoTestSuite.verify_last_name_comparison_exists,
dependencies=['regex_last_name'],
required_context=['comparison_values']),
TestDefinition("compare_last_name", MongoTestSuite.verify_last_name,
dependencies=['exists_comparison_last_name'],
required_context=['saml', 'comparison_values']),
TestDefinition("exists_comparison_email", MongoTestSuite.verify_email_comparison_exists,
dependencies=['regex_email'],
required_context=['comparison_values']),
TestDefinition("compare_email", MongoTestSuite.verify_email,
dependencies=['exists_comparison_email'],
required_context=['saml', 'comparison_values']),
TestDefinition("member_of_is_expected", MongoTestSuite.verify_member_of_is_expected,
dependencies=[('exists_member_of', TEST_FAIL)],
required_context=['comparison_values']),
TestDefinition("exists_comparison_member_of", MongoTestSuite.verify_member_of_comparison_exists,
dependencies=['regex_member_of'],
required_context=['comparison_values']),
TestDefinition("compare_member_of", MongoTestSuite.verify_member_of,
dependencies=['exists_comparison_member_of'],
required_context=['saml', 'comparison_values']),
# Federated domain tests
TestDefinition("exists_comparison_domain", MongoTestSuite.verify_domain_comparison_exists,
required_context=['comparison_values']),
TestDefinition("compare_domain_email", MongoTestSuite.verify_domain_in_email,
dependencies=['regex_email', 'exists_comparison_domain'],
required_context=['saml', 'comparison_values']),
TestDefinition("compare_domain_comparison_email", MongoTestSuite.verify_domain_in_comparison_email,
dependencies=['exists_comparison_email', 'exists_comparison_domain'],
required_context=['comparison_values']),
TestDefinition("compare_domain_name_id", MongoTestSuite.verify_domain_in_name_id,
dependencies=['regex_name_id', 'exists_comparison_domain'],
required_context=['saml', 'comparison_values']),
# Email and Name ID comparison tests
TestDefinition("compare_email_name_id", MongoTestSuite.verify_name_id,
dependencies=['regex_name_id', 'exists_comparison_email'],
required_context=['saml', 'comparison_values']),
TestDefinition("match_name_id_email_in_saml", MongoTestSuite.verify_name_id_and_email_are_the_same,
dependencies=['regex_email', 'regex_name_id'],
required_context=['saml']),
# Issuer URI tests
TestDefinition("exists_issuer", MongoTestSuite.verify_issuer_exists,
required_context=['saml']),
TestDefinition("regex_issuer", MongoTestSuite.verify_issuer_pattern,
dependencies=['exists_issuer'],
required_context=['saml']),
TestDefinition("exists_comparison_issuer", MongoTestSuite.verify_issuer_comparison_exists,
dependencies=['regex_issuer'],
required_context=['comparison_values']),
TestDefinition("match_issuer", MongoTestSuite.verify_issuer,
dependencies=['exists_comparison_issuer'],
required_context=['saml', 'comparison_values']),
# Audience URL tests
TestDefinition("exists_audience", MongoTestSuite.verify_audience_url_exists,
required_context=['saml']),
TestDefinition("regex_audience", MongoTestSuite.verify_audience_url_pattern,
dependencies=['exists_audience'],
required_context=['saml']),
TestDefinition("exists_comparison_audience", MongoTestSuite.verify_audience_comparison_exists,
dependencies=['regex_audience'],
required_context=['comparison_values']),
TestDefinition("match_audience", MongoTestSuite.verify_audience_url,
dependencies=['exists_comparison_audience'],
required_context=['saml', 'comparison_values']),
# ACS URL tests
TestDefinition("exists_acs", MongoTestSuite.verify_assertion_consumer_service_url_exists,
required_context=['saml']),
TestDefinition("regex_acs", MongoTestSuite.verify_assertion_consumer_service_url_pattern,
dependencies=['exists_acs'],
required_context=['saml']),
TestDefinition("exists_comparison_acs",
MongoTestSuite.verify_assertion_consumer_service_url_comparison_exists,
dependencies=['regex_acs'],
required_context=['comparison_values']),
TestDefinition("match_acs", MongoTestSuite.verify_assertion_consumer_service_url,
dependencies=['exists_comparison_acs'],
required_context=['saml', 'comparison_values']),
# Encryption algorithm tests
TestDefinition("exists_encryption", MongoTestSuite.verify_encryption_algorithm_exists,
required_context=['saml']),
TestDefinition("regex_encryption", MongoTestSuite.verify_encryption_algorithm_pattern,
dependencies=['exists_encryption'],
required_context=['saml']),
TestDefinition("exists_comparison_encryption",
MongoTestSuite.verify_encryption_algorithm_comparison_exists,
dependencies=['regex_encryption'],
required_context=['comparison_values']),
TestDefinition("match_encryption", MongoTestSuite.verify_encryption_algorithm,
dependencies=['exists_comparison_encryption'],
required_context=['saml', 'comparison_values']),
]
return tests
def get_list_of_mongo_tests(self):
"""
Get name of tests in order listed. Useful for compiling reports.
Returns:
(`list` of `basestring`) test titles in order
"""
return [test.title for test in self._tests]
@staticmethod
def _matches_regex(regex, value):
"""
Checks if a string matches a given regular expression
Args:
regex (basestring): regex string
value (basestring): string to check against regex
Returns:
(bool) True if `value` matches pattern `regex`, False otherwise
"""
matcher = re.compile(regex)
if matcher.fullmatch(value):
return True
return False
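    # For example, _matches_regex(r'SHA\d+', 'SHA256') returns True, while 'SHA256-extra'
    # would not match, since fullmatch (rather than search) is used.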
# Issuer URI tests
@staticmethod
def verify_issuer_exists(context):
"""
Checks if Issuer URI was found in the SAML response.
Args:
context (dict): test context dictionary
Returns:
(bool) True if found, False otherwise
"""
return context.get('saml').get_issuer_uri() is not None
@staticmethod
def verify_issuer_comparison_exists(context):
"""
Checks if there is a comparison value for the Issuer URI.
Args:
context (dict): test context dictionary
Returns:
(bool) True if a comparison value exists, False otherwise
"""
return context.get('comparison_values').get_value('issuer') is not None
@staticmethod
def verify_issuer(context):
"""
Checks Issuer URI against expected value
Args:
context (dict): test context dictionary
Returns:
(bool) True if they match, False otherwise
"""
return context.get('saml').get_issuer_uri() == context.get('comparison_values').get_value('issuer')
@staticmethod
def verify_issuer_pattern(context):
"""
Checks if Issuer URI matches the expected regular expression
Args:
context (dict): test context dictionary
Returns:
(bool) True if matches the regex, False otherwise
"""
return MongoTestSuite._matches_regex(VALIDATION_REGEX_BY_ATTRIB['issuer'],
context.get('saml').get_issuer_uri())
# Audience URL tests
@staticmethod
def verify_audience_url_exists(context):
"""
Checks if Audience URL was found in the SAML response.
Args:
context (dict): test context dictionary
Returns:
(bool) True if found, False otherwise
"""
return context.get('saml').get_audience_url() is not None
@staticmethod
def verify_audience_comparison_exists(context):
"""
Checks if there is a comparison value for the Audience URL.
Args:
context (dict): test context dictionary
Returns:
(bool) True if a comparison value exists, False otherwise
"""
return context.get('comparison_values').get_value('audience') is not None
@staticmethod
def verify_audience_url(context):
"""
Checks Audience URL against expected value
Args:
context (dict): test context dictionary
Returns:
(bool) True if they match, False otherwise
"""
return context.get('saml').get_audience_url() == \
context.get('comparison_values').get_value('audience')
@staticmethod
def verify_audience_url_pattern(context):
"""
Checks if Audience URL matches the expected regular expression
Args:
context (dict): test context dictionary
Returns:
(bool) True if matches the regex, False otherwise
"""
return MongoTestSuite._matches_regex(VALIDATION_REGEX_BY_ATTRIB['audience'],
context.get('saml').get_audience_url())
# Assertion Consumer Service URL tests
@staticmethod
def verify_assertion_consumer_service_url_exists(context):
"""
Checks if Assertion Consumer Service URL was found in the SAML response.
Args:
context (dict): test context dictionary
Returns:
(bool) True if found, False otherwise
"""
return context.get('saml').get_assertion_consumer_service_url() is not None
@staticmethod
def verify_assertion_consumer_service_url_comparison_exists(context):
"""
Checks if there is a comparison value for the Assertion Consumer Service URL.
Args:
context (dict): test context dictionary
Returns:
(bool) True if a comparison value exists, False otherwise
"""
return context.get('comparison_values').get_value('acs') is not None
@staticmethod
def verify_assertion_consumer_service_url(context):
"""
Checks Assertion Consumer Service URL against expected value
Args:
context (dict): test context dictionary
Returns:
(bool) True if they match, False otherwise
"""
return context.get('saml').get_assertion_consumer_service_url() == \
context.get('comparison_values').get_value('acs')
@staticmethod
def verify_assertion_consumer_service_url_pattern(context):
"""
Checks if Assertion Consumer Service URL matches the expected regular expression
Args:
context (dict): test context dictionary
Returns:
(bool) True if matches the regex, False otherwise
"""
return MongoTestSuite._matches_regex(VALIDATION_REGEX_BY_ATTRIB['acs'],
context.get('saml').get_assertion_consumer_service_url())
# Encryption algorithm tests
@staticmethod
def verify_encryption_algorithm_exists(context):
"""
Checks if encryption algorithm was found in the SAML response.
Args:
context (dict): test context dictionary
Returns:
(bool) True if found, False otherwise
"""
return context.get('saml').get_encryption_algorithm() is not None
@staticmethod
def verify_encryption_algorithm_comparison_exists(context):
"""
        Checks if there is a comparison value for the encryption algorithm.
Args:
context (dict): test context dictionary
Returns:
(bool) True if a comparison value exists, False otherwise
"""
return context.get('comparison_values').get_value('encryption') is not None
@staticmethod
def verify_encryption_algorithm(context):
"""
Checks encryption algorithm against expected value
Args:
context (dict): test context dictionary
Returns:
(bool) True if they match, False otherwise
"""
        # the expected encryption algorithm format is "SHA1" or "SHA256"
return context.get('saml').get_encryption_algorithm() == \
context.get('comparison_values').get_value('encryption')
@staticmethod
def verify_encryption_algorithm_pattern(context):
"""
Checks if encryption algorithm matches the expected regular expression
Args:
context (dict): test context dictionary
Returns:
(bool) True if matches the regex, False otherwise
"""
return MongoTestSuite._matches_regex(VALIDATION_REGEX_BY_ATTRIB['encryption'],
context.get('saml').get_encryption_algorithm())
# Name ID and format tests
@staticmethod
def verify_name_id(context):
"""
Checks Name ID against expected value (case-insensitive)
Args:
context (dict): test context dictionary
Returns:
(bool) True if they match, False otherwise
"""
return context.get('saml').get_subject_name_id().lower() == \
context.get('comparison_values').get_value('email').lower()
@staticmethod
def verify_name_id_exists(context):
"""
Checks if Name ID exists in the SAML response
Args:
context (dict): test context dictionary
Returns:
(bool) True if present, False otherwise
"""
return context.get('saml').get_subject_name_id() is not None
@staticmethod
def verify_name_id_pattern(context):
"""
Checks if Name ID matches the expected regular expression
Args:
context (dict): test context dictionary
Returns:
(bool) True if matches the regex, False otherwise
"""
return MongoTestSuite._matches_regex(EMAIL_REGEX_MATCH,
context.get('saml').get_subject_name_id())
@staticmethod
def verify_name_id_format_exists(context):
"""
Checks if Name ID Format was found in the SAML response.
Returns:
(bool) | |
simulation_pipe in simulation_pipes ]
collapsed_pipe = Pipe.join(simulation_source, [ForeachFilter(simulation_filters)])
written_simulations = []
try:
for simulation_id, learner_id, learner, pipe, simulation in zip(simulation_ids, learner_ids, learners, simulation_pipes, collapsed_pipe.read()):
batches = simulation.interaction_batches
interactions = list(chain.from_iterable(batches))
if simulation_id not in written_simulations:
written_simulations.append(simulation_id)
yield Transaction.simulation(simulation_id,
source = pipe.source_description,
filters = pipe.filter_descriptions,
interaction_count = len(interactions),
batch_count = len(batches),
context_size = int(median(self._context_sizes(interactions))),
action_count = int(median(self._action_counts(interactions))))
learner = deepcopy(learner)
learner.init()
if len(batches) > 0:
Ns, Rs = zip(*[ self._process_batch(batch, simulation.reward, learner) for batch in batches ])
yield Transaction.batch(simulation_id, learner_id, N=list(Ns), reward=list(Rs))
except KeyboardInterrupt:
raise
except Exception as e:
ExecutionContext.Logger.log_exception(e, "unhandled exception:")
if not self._ignore_raise: raise e
def _process_batch(self, batch, reward, learner) -> Tuple[int, float]:
keys = []
contexts = []
choices = []
actions = []
probs = []
for interaction in batch:
choice, prob = learner.choose(interaction.key, interaction.context, interaction.actions)
assert choice in range(len(interaction.actions)), "An invalid action was chosen by the learner"
keys .append(interaction.key)
contexts.append(interaction.context)
choices .append(choice)
probs .append(prob)
actions .append(interaction.actions[choice])
rewards = reward(list(zip(keys, choices)))
for (key,context,action,reward,prob) in zip(keys,contexts,actions,rewards, probs):
learner.learn(key,context,action,reward,prob)
return len(rewards), round(mean(rewards),5)
def _context_sizes(self, interactions) -> Iterable[int]:
if len(interactions) == 0:
yield 0
for context in [i.context for i in interactions]:
yield 0 if context is None else len(context) if isinstance(context,tuple) else 1
def _action_counts(self, interactions) -> Iterable[int]:
if len(interactions) == 0:
yield 0
for actions in [i.actions for i in interactions]:
yield len(actions)
class TransactionPromote(Filter):
CurrentVersion = 2
def filter(self, items: Iterable[Any]) -> Iterable[Any]:
items_iter = iter(items)
items_peek = next(items_iter)
items_iter = itertools.chain([items_peek], items_iter)
version = 0 if items_peek[0] != 'version' else items_peek[1]
if version == TransactionPromote.CurrentVersion:
raise StopPipe()
while version != TransactionPromote.CurrentVersion:
if version == 0:
promoted_items = [["version",1]]
for transaction in items:
if transaction[0] == "L":
index = transaction[1][1]['learner_id']
values = transaction[1][1]
del values['learner_id']
promoted_items.append([transaction[0], index, values])
if transaction[0] == "S":
index = transaction[1][1]['simulation_id']
values = transaction[1][1]
del values['simulation_id']
promoted_items.append([transaction[0], index, values])
if transaction[0] == "B":
key_columns = ['learner_id', 'simulation_id', 'seed', 'batch_index']
index = [ transaction[1][1][k] for k in key_columns ]
values = transaction[1][1]
for key_column in key_columns: del values[key_column]
if 'reward' in values:
values['reward'] = values['reward'].estimate
if 'mean_reward' in values:
values['reward'] = values['mean_reward'].estimate
del values['mean_reward']
                        values['reward'] = round(values['reward'], 5)
promoted_items.append([transaction[0], index, values])
items = promoted_items
version = 1
if version == 1:
n_seeds : Optional[int] = None
S_transactions: Dict[int, Any] = {}
S_seeds : Dict[int, List[Optional[int]]] = collections.defaultdict(list)
B_rows: Dict[Tuple[int,int], Dict[str, List[float]] ] = {}
B_cnts: Dict[int, int ] = {}
promoted_items = [["version",2]]
for transaction in items:
if transaction[0] == "benchmark":
n_seeds = transaction[1].get('n_seeds', None)
del transaction[1]['n_seeds']
del transaction[1]['batcher']
del transaction[1]['ignore_first']
promoted_items.append(transaction)
if transaction[0] == "L":
promoted_items.append(transaction)
if transaction[0] == "S":
                        S_transactions[transaction[1]] = transaction
if transaction[0] == "B":
S_id = transaction[1][1]
seed = transaction[1][2]
L_id = transaction[1][0]
B_id = transaction[1][3]
if n_seeds is None:
raise StopPipe("We are unable to promote logs from version 1 to version 2")
if seed not in S_seeds[S_id]:
S_seeds[S_id].append(seed)
new_S_id = n_seeds * S_id + S_seeds[S_id].index(seed)
                        new_dict = S_transactions[S_id][2].copy()
new_dict["source"] = str(S_id)
new_dict["filters"] = f'[{{"Shuffle":{seed}}}]'
B_cnts[S_id] = new_dict['batch_count']
promoted_items.append(["S", new_S_id, new_dict])
if B_id == 0: B_rows[(S_id, L_id)] = {"N":[], "reward":[]}
B_rows[(S_id, L_id)]["N" ].append(transaction[2]["N"])
B_rows[(S_id, L_id)]["reward"].append(transaction[2]["reward"])
if len(B_rows[(S_id, L_id)]["N"]) == B_cnts[S_id]:
promoted_items.append(["B", [S_id, L_id], B_rows[(S_id, L_id)]])
del B_rows[(S_id, L_id)]
items = promoted_items
version = 2
return items
class TransactionIsNew(Filter):
def __init__(self, existing: Result):
self._existing = existing
def filter(self, items: Iterable[Any]) -> Iterable[Any]:
for item in items:
tipe = item[0]
if tipe == "version" and self._existing.version is not None:
continue
if tipe == "benchmark" and len(self._existing.benchmark) != 0:
continue
if tipe == "B" and item[1] in self._existing.batches:
continue
if tipe == "S" and item[1] in self._existing.simulations:
continue
if tipe == "L" and item[1] in self._existing.learners:
continue
yield item
class TransactionSink(Sink):
def __init__(self, transaction_log: Optional[str], restored: Result) -> None:
self._sink = Pipe.join([JsonEncode()], DiskSink(transaction_log)) if transaction_log else MemorySink()
self._sink = Pipe.join([TransactionIsNew(restored)], self._sink)
def write(self, items: Sequence[Any]) -> None:
self._sink.write(items)
@property
def result(self) -> Result:
if isinstance(self._sink, Pipe.FiltersSink):
final_sink = self._sink.final_sink()
else:
final_sink = self._sink
if isinstance(final_sink, MemorySink):
return Result.from_transactions(cast(Iterable[Any], final_sink.items))
if isinstance(final_sink, DiskSink):
return Result.from_transaction_log(final_sink.filename)
raise Exception("Transactions were written to an unrecognized sink.")
class BenchmarkLearner:
@property
def family(self) -> str:
try:
return self._learner.family
except AttributeError:
return self._learner.__class__.__name__
@property
def params(self) -> Dict[str, Any]:
try:
return self._learner.params
except AttributeError:
return {}
@property
def full_name(self) -> str:
if len(self.params) > 0:
return f"{self.family}({','.join(f'{k}={v}' for k,v in self.params.items())})"
else:
return self.family
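    # e.g. a learner with family "epsilon_greedy" and params {"epsilon": 0.1} yields the
    # full_name "epsilon_greedy(epsilon=0.1)" (illustrative values).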
def __init__(self, learner: Learner[Context,Action], seed: Optional[int]) -> None:
self._learner = learner
self._random = CobaRandom(seed)
def init(self) -> None:
try:
self._learner.init()
except AttributeError:
pass
def choose(self, key: Key, context: Context, actions: Sequence[Action]) -> Tuple[Choice, float]:
p = self._learner.predict(key, context, actions)
c = self._random.choice(list(range(len(actions))), p)
return c, p[c]
def learn(self, key: Key, context: Context, action: Action, reward: Reward, probability: float) -> None:
self._learner.learn(key, context, action, reward, probability)
class BenchmarkSimulation(Source[Simulation[_C,_A]]):
def __init__(self,
source : Source[Simulation],
filters : Sequence[Filter[Simulation,Union[Simulation, BatchedSimulation]]],
source_description :str = "",
filter_descriptions:Sequence[str] = []) -> None:
if isinstance(source, BenchmarkSimulation):
self._source = source._source #type: ignore
self._filter = Pipe.FiltersFilter(list(source._filter._filters)+list(filters)) #type: ignore
self.source_description = source.source_description or source_description #type: ignore
self.filter_descriptions = list(source.filter_descriptions) + list(filter_descriptions) #type: ignore
else:
self._source = source
self._filter = Pipe.FiltersFilter(filters)
self.source_description = source_description
self.filter_descriptions = list(filter_descriptions)
def read(self) -> Simulation[_C,_A]:
return self._filter.filter(self._source.read())
class Benchmark(Generic[_C,_A]):
"""An on-policy Benchmark using samples drawn from simulations to estimate performance statistics."""
@staticmethod
def from_file(filename:str) -> 'Benchmark[Context,Action]':
"""Instantiate a Benchmark from a config file."""
suffix = Path(filename).suffix
if suffix == ".json":
return Benchmark.from_json(Path(filename).read_text())
raise Exception(f"The provided file type ('{suffix}') is not a valid format for benchmark configuration")
@staticmethod
def from_json(json_val:Union[str, Dict[str,Any]]) -> 'Benchmark[Context,Action]':
"""Create a UniversalBenchmark from json text or object.
Args:
json_val: Either a json string or the decoded json object.
Returns:
The UniversalBenchmark representation of the given JSON string or object.
"""
if isinstance(json_val, str):
config = cast(Dict[str,Any], json.loads(json_val))
else:
config = json_val
config = ExecutionContext.Templating.parse(config)
if not isinstance(config["simulations"], collections.Sequence):
config["simulations"] = [ config["simulations"] ]
batch_config = config.get('batches', {'size':1})
kwargs: Dict[str, Any] = {}
kwargs['ignore_raise'] = config.get("ignore_raise", True)
kwargs['seeds'] = config.get('shuffle', [None])
if 'min' in batch_config:
kwargs['take'] = batch_config['min']
if 'max' in batch_config:
            kwargs['take'] = batch_config['max']
for batch_rule in ['size', 'count', 'sizes']:
if batch_rule in batch_config:
kwargs[f"batch_{batch_rule}"] = batch_config[batch_rule]
simulations: List[BenchmarkSimulation] = []
for sim_config in config["simulations"]:
if sim_config["type"] != "classification":
raise Exception("We were unable to recognize the provided simulation type.")
if sim_config["from"]["format"] != "openml":
raise Exception("We were unable to recognize the provided data format.")
source = OpenmlSimulation(sim_config["from"]["id"], sim_config["from"].get("md5_checksum", None))
source_description = f'{{"OpenmlSimulation":{sim_config["from"]["id"]}}}'
filters: List[Filter[Simulation,Simulation]] = []
filter_descriptions = []
if "pca" in sim_config and sim_config["pca"] == True:
filters.append(PCA())
filter_descriptions.append("PCA")
if "sort" in sim_config:
filters.append(Sort(sim_config["sort"]))
filter_descriptions.append(f'{{"Sort":{sim_config["sort"]}}}')
simulations.append(BenchmarkSimulation(source, filters, source_description, filter_descriptions))
return Benchmark(simulations, **kwargs)
@overload
def __init__(self,
simulations : Sequence[Source[Simulation[_C,_A]]],
*,
batch_size : int = 1,
take : int = None,
seeds : Sequence[Optional[int]] = [None],
ignore_raise : bool = True,
processes : int = None,
maxtasksperchild: int = None) -> None: ...
@overload
def __init__(self,
simulations : Sequence[Source[Simulation[_C,_A]]],
*,
batch_count : int,
take : int = None,
seeds : Sequence[Optional[int]] = [None],
ignore_raise : bool = True,
processes : int = None,
maxtasksperchild: int = None) -> None: ...
@overload
def __init__(self,
simulations : Sequence[Source[Simulation[_C,_A]]],
*,
batch_sizes : Sequence[int],
seeds : Sequence[Optional[int]] = [None],
ignore_raise : bool = True,
processes : int = None,
maxtasksperchild: int = None) -> None: ...
def __init__(self,*args, **kwargs) -> None:
"""Instantiate a UniversalBenchmark.
Args:
simulations: The sequence of simulations to benchmark against.
batcher: How each simulation is broken into evaluation batches.
ignore_raise: Should exceptions be raised or logged during evaluation.
shuffle_seeds: A sequence of seeds for interaction shuffling. None means no shuffle.
processes: The number of process to spawn during evalution (overrides coba config).
maxtasksperchild: The number of tasks each process will perform before a refresh.
See the overloads for more information.
"""
simulations = cast(Sequence[Source[Simulation[_C,_A]]], args[0])
shufflers = [ Shuffle(seed) for seed in kwargs.get('seeds', [None]) ]
taker = Take(kwargs.get('take', None))
if 'batch_count' in kwargs:
batcher = Batch(count=kwargs['batch_count'])
elif 'batch_sizes' |