fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect('equal')
# save a list of min. and max. X, Y, Z coordinates
x_min, y_min, z_min = [1e10] * 3
x_max, y_max, z_max = [0] * 3
# plot the surface:
for (surf3d_idx, s) in enumerate(self.models[model_type].surface_3d):
verts = s['verts']
centroid = s['centroid']
patch_out_of_limits = False
# find min and max data points
for v in verts:
if np.max(v[:, 0]) > x_max:
x_max = np.max(v[:, 0])
if np.min(v[:, 0]) < x_min:
x_min = np.min(v[:, 0])
if np.max(v[:, 1]) > y_max:
y_max = np.max(v[:, 1])
if np.min(v[:, 1]) < y_min:
y_min = np.min(v[:, 1])
if np.max(v[:, 2]) > z_max:
z_max = np.max(v[:, 2])
if np.min(v[:, 2]) < z_min:
z_min = np.min(v[:, 2])
if x_max_limit is not None and np.min(v[:, 0]) > x_max_limit:
patch_out_of_limits = True
if x_min_limit is not None and np.max(v[:, 0]) < x_min_limit:
patch_out_of_limits = True
if y_max_limit is not None and np.min(v[:, 1]) > y_max_limit:
patch_out_of_limits = True
if y_min_limit is not None and np.max(v[:, 1]) < y_min_limit:
patch_out_of_limits = True
if z_max_limit is not None and np.min(v[:, 2]) > z_max_limit:
patch_out_of_limits = True
if z_min_limit is not None and np.max(v[:, 2]) < z_min_limit:
patch_out_of_limits = True
if patch_out_of_limits:
continue
surf = Poly3DCollection(verts)
# colour the patches which are endpoints
if show_endpoints:
endpoints_abdomen = np.array(self.models['trunk'].endpoints_abdomen)
if Coordinate(centroid) in self.models['trunk'].endpoints_abdomen:
surf.set_facecolor(endpoint_color)
else:
surf.set_facecolor(default_face_color)
# colour the endpoint-patches depending on their cluster
elif show_clusters:
endpoints_abdomen = np.array(self.models['trunk'].endpoints_abdomen)
if centroid.tolist() in endpoints_abdomen.tolist():
center = Coordinate(centroid)
index_center = self.models['trunk'].endpoints_abdomen.index(center)
cluster = str(self.models['trunk'].endpoints_abdomen_clustering[index_center])
surf.set_facecolor(cluster_colour[cluster])
else:
surf.set_facecolor(default_face_color)
else:
surf.set_facecolor(default_face_color)
if colored_endpoint_indices is not None:
found_one = False
for (color, hatch, endpoints) in colored_endpoints:
if Coordinate(centroid) in endpoints:
hatch_temp = surf.get_hatch()
hatch_temp = hatch if hatch_temp is None else hatch_temp + hatch
surf.set_facecolor(color)
surf.set_alpha(1)
surf.set_hatch(hatch_temp)
print('set color to %s at endpoint %s' % (str(color), str(Coordinate(centroid))))
found_one = True
if not found_one:
surf.set_facecolor(default_face_color)
if colored_surf3d_indices is not None:
found = 0
color_list = []
for (color, hatch, endpoints) in colored_surf3d:
if Coordinate(centroid) in endpoints:
hatch_temp = surf.get_hatch()
hatch_temp = hatch if hatch_temp is None else hatch_temp + hatch
surf.set_facecolor(color)
surf.set_alpha(1)
surf.set_hatch(hatch_temp)
print('set color to %s at endpoint %s' % (str(color), str(Coordinate(centroid))))
found += 1
color_list.append(color)
if found == 2:
verts = [verts[0][((1, 0, 3),)], verts[0][1:4]]
surf = Poly3DCollection(verts)
surf.set_facecolor(color_list)
elif found > 2:
logging.warning("More than 2 colors per patch are currently not supported.")
if show_surf3d_indices:
c = surf.get_facecolor()
c[0][3] = 0.5
surf.set_facecolor(c)
ax.text(centroid[0], centroid[1], centroid[2], "%d" % surf3d_idx, None, horizontalalignment='center',
fontsize='x-small')
surf.set_edgecolor('k')
ax.add_collection3d(surf)
ax.plot(np.array([centroid[0]]), np.array([centroid[1]]), np.array([centroid[2]]), '.',
color=[1, 1, 1, 0.5])
ax.set_zlim(self.models[model_type].mask['z'].start * self.scaling.z,
max(self.models[model_type].mask['z']) * self.scaling.z)
ax.set_xlim(0, max(self.models['trunk'].mask['x']) * self.scaling.x)
ax.set_ylim(0, max(self.models['trunk'].mask['y']) * self.scaling.y)
set_axes_equal(ax)
ax.elev = 19
ax.azim = 10
ax.dist = 6
ax.set_axis_off()
return fig, ax
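# Hypothetical usage of the plotting method above (its name and keyword arguments are assumptions,
# since the method signature is cut off in this snippet):
#
#     fig, ax = human_model.plot_surface_3d('trunk', show_endpoints=True)
#     fig.savefig('trunk_surface.png', dpi=300)  # or plt.show()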
def create_3d_model(self, model_type: str, patch_size: Tuple[float, float], patches: Optional[List] = None,
coordinates: Optional[np.ndarray] = None):
"""
Create a 3D discretized surface model of the given model_type. This should be a model that contains only one
main structure, e.g. a trunk model, the head or a single(!) leg.
:param model_type: name of the model type to use, e.g. 'trunk'
:param patch_size: (width, height) of the desired discretized patches in mm
:param patches: [Optional] list of patches, each given as a list of indices into ``coordinates`` (as produced by determine_patches()); if None, the patches are determined first
:param coordinates: [Optional] array containing all the outside coordinates the patches refer to
:return: list of dicts, one per patch, with the keys 'verts' (corner vertices) and 'centroid' (a short usage sketch follows this docstring)
"""
if patches is None:
coordinates, patches = self.determine_patches(model_type, patch_size, coordinates)
discrete_surface = []
logging.info("\nCreating the 3D surface from the patches..")
if self.show_progress_bar:
ProgBar.progress_bar.max_value = len(patches)
ProgBar.progress_bar.start()
# iterate over all patches and determine the centroid and the vertices
for (i, p) in enumerate(patches):
if self.show_progress_bar:
ProgBar.progress_bar.update(i + 1)
# all coordinates of that patch
c = coordinates[tuple(p), :]
# determine upper and lower bound of patch
z_min = np.min(c[:, 2])
c_min = c[c[:, 2] == z_min, :]
z_max = np.max(c[:, 2])
c_max = c[c[:, 2] == z_max, :]
# calculate distance between all minimum and maximum points to determine the upper and lower corners of
# the patch
d_min = ssdist.cdist(c_min, c_min, 'euclidean')
min_indices = np.unravel_index(np.argmax(d_min), d_min.shape)
lower_corners = c_min[min_indices, :]
d_max = ssdist.cdist(c_max, c_max, 'euclidean')
max_indices = np.unravel_index(np.argmax(d_max), d_max.shape)
upper_corners = c_max[max_indices, :]
# all patches should touch each other, therefore, add half a scaling.z in each direction
upper_corners += np.tile([0, 0, self.scaling.z/2], (2, 1))
lower_corners -= np.tile([0, 0, self.scaling.z / 2], (2, 1))
# determine the correct ordering for a closed patch of the upper and lower corners
d_corner = ssdist.cdist(upper_corners, lower_corners, 'euclidean')
# if the first coordinates are closer together we need to swap the lower corners to make a nearly
# rectangular patch (otherwise the lines will cross)
if d_corner[0, 0] < d_corner[1, 0]:
verts = [np.vstack((upper_corners, lower_corners[::-1]))]
else:
verts = [np.vstack((upper_corners, lower_corners))]
# approximately determine the centroid:
# first the coordinate half way between the left and right upper corner:
max_dist = np.max(d_min)
d_lower_half = ssdist.cdist(lower_corners, c_min, 'euclidean')
# get coordinate that is closest to halfway between the left and right corner
index_lower_half = np.unravel_index(np.argmin(abs(d_lower_half - max_dist / 2)), d_lower_half.shape)
lower_half = c_min[index_lower_half[1], :]
max_dist = np.max(d_max)
d_upper_half = ssdist.cdist(upper_corners, c_max, 'euclidean')
# get coordinate that is closest to halfway between the left and right corner
index_upper_half = np.unravel_index(np.argmin(abs(d_upper_half - max_dist / 2)), d_upper_half.shape)
upper_half = c_max[index_upper_half[1], :]
# centroid is approximately on the half way between the two points upper_half and lower_half
direction = (upper_half - lower_half)
centroid_approx = lower_half + 0.5*direction
centroid_approx.shape = (-1, 3)
# determine the distance to all points in p
d_centroid = ssdist.cdist(centroid_approx, c, 'euclidean')
# choose the one with the minimum distance as centroid
centroid_approx_min_index = np.argmin(d_centroid)
centroid = c[centroid_approx_min_index]
discrete_surface.append({'verts': verts, 'centroid': centroid})
if self.show_progress_bar:
ProgBar.progress_bar.finish()
return discrete_surface
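# For reference, each entry of the returned list has the form
#     {'verts': [one 4x3 ndarray of the patch corner points], 'centroid': ndarray of shape (3,)}
# as assembled in the loop above.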
def determine_patches(self, model_type: str,
patch_size: Tuple[float, float], coordinates: Optional[np.ndarray] = None)\
-> Tuple[np.ndarray, List]:
"""
Map all the coordinates to unique patches of the given size in mm. It is assumed that the coordinates
are already sorted by nearest neighbor for each slice, i.e. the coordinates start in the lowest slice,
follow the trajectory around the body, and once the first slice is finished the next coordinate is the
first one of the second slice (see the illustrative sketch right after this docstring).
The algorithm iterates over these coordinates. For each coordinate the already existing patch with the closest
distance is searched for. The beginning of a patch is always determined by the every_nth_row count, which
depends on the height of the patch. To speed up this process, all already finished patches are stored
separately in Patches, whereas the currently active patches are stored in ActivePatches. The coordinates
from the current slice are stored in ActiveSlice and are appended to ActivePatches every time a new slice
starts (this is done for the get_nearest_patch_index() function to work properly).
:param model_type: type of the model to use
:param patch_size: Tuple(width, height) of the patch dimensions
:param coordinates: [Optional] an array containing all the outside coordinates
:return: Tuple of (coordinates array, list of patches); each patch is a list of indices into the coordinates array
"""
# These two functions are just needed to find the matching patches
def get_nearest_patch_index(patches: List, coordinate: np.ndarray) -> int:
"""
Determine the closest patch for the given coordinate. An index into the patches list is returned.
:param patches: List of indices to the coordinates array of the outer function
:param coordinate: the coordinate that is currently investigated
:return: index of the closest patch in ``patches``
"""
global_min_dist = float('inf')
k_min = 0
# the smallest possible distance from one voxel to the next; once the current minimum drops below it, the search can stop early
largest_distance_to_next_voxel = self.scaling.z
for (k, p) in enumerate(patches): # each patch has a list of indices to the coordinates array
t = tuple(p) # tuple of all indices, for fancy indexing with numpy
# distance of the given coordinate to all existing ones in that patch:
coordinate.shape = (-1, 3)
dist = ssdist.cdist(coordinate, coordinates[t, :], 'euclidean')
# check for the minimum
dist_min = np.min(dist)
if dist_min < global_min_dist:
global_min_dist = dist_min
k_min = k
# the global minimum cannot be smaller than this value
if global_min_dist < largest_distance_to_next_voxel:
break
return k_min
def add_active_slice_to_active_patches():
"""
Append the coordinates of the last ActiveSlice to the ActivePatches.
:return:
#!/usr/bin/env python
'''
map display module
<NAME>
June 2012
'''
import sys, os, math
import functools
import time
from MAVProxy.modules.mavproxy_map import mp_elevation
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.lib import mp_settings
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib.mp_menu import *
from pymavlink import mavutil
class MapModule(mp_module.MPModule):
def __init__(self, mpstate):
super(MapModule, self).__init__(mpstate, "map", "map display", public = True)
self.lat = None
self.lon = None
self.heading = 0
self.wp_change_time = 0
self.fence_change_time = 0
self.rally_change_time = 0
self.have_simstate = False
self.have_vehicle = {}
self.move_wp = -1
self.moving_wp = None
self.moving_fencepoint = None
self.moving_rally = None
self.mission_list = None
self.icon_counter = 0
self.click_position = None
self.click_time = 0
self.draw_line = None
self.draw_callback = None
self.have_global_position = False
self.vehicle_type_name = 'plane'
self.ElevationMap = mp_elevation.ElevationModel()
self.last_unload_check_time = time.time()
self.unload_check_interval = 0.1 # seconds
self.map_settings = mp_settings.MPSettings(
[ ('showgpspos', int, 0),
('showgps2pos', int, 1),
('showsimpos', int, 0),
('showahrs2pos', int, 0),
('showahrs3pos', int, 0),
('brightness', float, 1),
('rallycircle', bool, False),
('loitercircle',bool, False)])
service = 'OviHybrid'
if 'MAP_SERVICE' in os.environ:
service = os.environ['MAP_SERVICE']
import platform
from MAVProxy.modules.mavproxy_map import mp_slipmap
mpstate.map = mp_slipmap.MPSlipMap(service=service, elevation=True, title='Map')
mpstate.map_functions = { 'draw_lines' : self.draw_lines }
mpstate.map.add_callback(functools.partial(self.map_callback))
self.add_command('map', self.cmd_map, "map control", ['icon',
'set (MAPSETTING)'])
self.add_completion_function('(MAPSETTING)', self.map_settings.completion)
self.default_popup = MPMenuSubMenu('Popup', items=[])
self.add_menu(MPMenuItem('Fly To', 'Fly To', '# guided ',
handler=MPMenuCallTextDialog(title='Altitude (m)', default=100)))
self.add_menu(MPMenuItem('Set Home', 'Set Home', '# map sethome '))
self.add_menu(MPMenuItem('Terrain Check', 'Terrain Check', '# terrain check'))
self.add_menu(MPMenuItem('Show Position', 'Show Position', 'showPosition'))
def add_menu(self, menu):
'''add to the default popup menu'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
self.default_popup.add(menu)
self.mpstate.map.add_object(mp_slipmap.SlipDefaultPopup(self.default_popup, combine=True))
def show_position(self):
'''show map position click information'''
pos = self.click_position
dms = (mp_util.degrees_to_dms(pos[0]), mp_util.degrees_to_dms(pos[1]))
msg = "Coordinates in WGS84\n"
msg += "Decimal: %.6f %.6f\n" % (pos[0], pos[1])
msg += "DMS: %s %s\n" % (dms[0], dms[1])
msg += "Grid: %s\n" % mp_util.latlon_to_grid(pos)
if self.logdir:
logf = open(os.path.join(self.logdir, "positions.txt"), "a")
logf.write("Position: %.6f %.6f at %s\n" % (pos[0], pos[1], time.ctime()))
logf.close()
posbox = MPMenuChildMessageDialog('Position', msg, font_size=32)
posbox.show()
def cmd_map(self, args):
'''map commands'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if args[0] == "icon":
if len(args) < 3:
print("Usage: map icon <lat> <lon> <icon>")
else:
lat = args[1]
lon = args[2]
flag = 'flag.png'
if len(args) > 3:
flag = args[3] + '.png'
icon = self.mpstate.map.icon(flag)
self.mpstate.map.add_object(mp_slipmap.SlipIcon('icon - %s [%u]' % (str(flag),self.icon_counter),
(float(lat),float(lon)),
icon, layer=3, rotation=0, follow=False))
self.icon_counter += 1
elif args[0] == "set":
self.map_settings.command(args[1:])
self.mpstate.map.add_object(mp_slipmap.SlipBrightness(self.map_settings.brightness))
elif args[0] == "sethome":
self.cmd_set_home(args)
else:
print("usage: map <icon|set>")
def display_waypoints(self):
'''display the waypoints'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
self.mission_list = self.module('wp').wploader.view_list()
polygons = self.module('wp').wploader.polygon_list()
self.mpstate.map.add_object(mp_slipmap.SlipClearLayer('Mission'))
for i in range(len(polygons)):
p = polygons[i]
if len(p) > 1:
popup = MPMenuSubMenu('Popup',
items=[MPMenuItem('Set', returnkey='popupMissionSet'),
MPMenuItem('WP Remove', returnkey='popupMissionRemove'),
MPMenuItem('WP Move', returnkey='popupMissionMove')])
self.mpstate.map.add_object(mp_slipmap.SlipPolygon('mission %u' % i, p,
layer='Mission', linewidth=2, colour=(255,255,255),
popup_menu=popup))
loiter_rad = self.get_mav_param('WP_LOITER_RAD')
labeled_wps = {}
self.mpstate.map.add_object(mp_slipmap.SlipClearLayer('LoiterCircles'))
for i in range(len(self.mission_list)):
next_list = self.mission_list[i]
for j in range(len(next_list)):
#label already printed for this wp?
if (next_list[j] not in labeled_wps):
self.mpstate.map.add_object(mp_slipmap.SlipLabel(
'miss_cmd %u/%u' % (i,j), polygons[i][j], str(next_list[j]), 'Mission', colour=(0,255,255)))
if (self.map_settings.loitercircle and
self.module('wp').wploader.wp_is_loiter(next_list[j])):
self.mpstate.map.add_object(mp_slipmap.SlipCircle('Loiter Circle %u' % (next_list[j] + 1), 'LoiterCircles', polygons[i][j], abs(loiter_rad), (255, 255, 255), 2))
labeled_wps[next_list[j]] = (i,j)
def display_fence(self):
'''display the fence'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
self.fence_change_time = self.module('fence').fenceloader.last_change
points = self.module('fence').fenceloader.polygon()
self.mpstate.map.add_object(mp_slipmap.SlipClearLayer('Fence'))
if len(points) > 1:
popup = MPMenuSubMenu('Popup',
items=[MPMenuItem('FencePoint Remove', returnkey='popupFenceRemove'),
MPMenuItem('FencePoint Move', returnkey='popupFenceMove')])
self.mpstate.map.add_object(mp_slipmap.SlipPolygon('Fence', points, layer=1,
linewidth=2, colour=(0,255,0), popup_menu=popup))
def closest_waypoint(self, latlon):
'''find closest waypoint to a position'''
(lat, lon) = latlon
best_distance = -1
closest = -1
for i in range(self.module('wp').wploader.count()):
w = self.module('wp').wploader.wp(i)
distance = mp_util.gps_distance(lat, lon, w.x, w.y)
if best_distance == -1 or distance < best_distance:
best_distance = distance
closest = i
if best_distance < 20:
return closest
else:
return -1
def remove_rally(self, key):
'''remove a rally point'''
a = key.split(' ')
if a[0] != 'Rally' or len(a) != 2:
print("Bad rally object %s" % key)
return
i = int(a[1])
self.mpstate.functions.process_stdin('rally remove %u' % i)
def move_rally(self, key):
'''move a rally point'''
a = key.split(' ')
if a[0] != 'Rally' or len(a) != 2:
print("Bad rally object %s" % key)
return
i = int(a[1])
self.moving_rally = i
def selection_index_to_idx(self, key, selection_index):
'''return a mission idx from a selection_index'''
a = key.split(' ')
if a[0] != 'mission' or len(a) != 2:
print("Bad mission object %s" % key)
return None
midx = int(a[1])
if midx < 0 or midx >= len(self.mission_list):
print("Bad mission index %s" % key)
return None
mlist = self.mission_list[midx]
if selection_index < 0 or selection_index >= len(mlist):
print("Bad mission polygon %s" % selection_index)
return None
idx = mlist[selection_index]
return idx
def move_mission(self, key, selection_index):
'''move a mission point'''
idx = self.selection_index_to_idx(key, selection_index)
self.moving_wp = idx
print("Moving wp %u" % idx)
def remove_mission(self, key, selection_index):
'''remove a mission point'''
idx = self.selection_index_to_idx(key, selection_index)
self.mpstate.functions.process_stdin('wp remove %u' % idx)
def remove_fencepoint(self, key, selection_index):
'''remove a fence point'''
self.mpstate.functions.process_stdin('fence remove %u' % (selection_index+1))
def move_fencepoint(self, key, selection_index):
'''move a fence point'''
self.moving_fencepoint = selection_index
print("Moving fence point %u" % selection_index)
def set_mission(self, key, selection_index):
'''set a mission point'''
idx = self.selection_index_to_idx(key, selection_index)
self.mpstate.functions.process_stdin('wp set %u' % idx)
def handle_menu_event(self, obj):
'''handle a popup menu event from the map'''
menuitem = obj.menuitem
if menuitem.returnkey.startswith('# '):
cmd = menuitem.returnkey[2:]
if menuitem.handler is not None:
if menuitem.handler_result is None:
return
cmd += menuitem.handler_result
self.mpstate.functions.process_stdin(cmd)
elif menuitem.returnkey == 'popupRallyRemove':
self.remove_rally(obj.selected[0].objkey)
elif menuitem.returnkey == 'popupRallyMove':
self.move_rally(obj.selected[0].objkey)
elif menuitem.returnkey == 'popupMissionSet':
self.set_mission(obj.selected[0].objkey, obj.selected[0].extra_info)
elif menuitem.returnkey == 'popupMissionRemove':
self.remove_mission(obj.selected[0].objkey, obj.selected[0].extra_info)
elif menuitem.returnkey == 'popupMissionMove':
self.move_mission(obj.selected[0].objkey, obj.selected[0].extra_info)
elif menuitem.returnkey == 'popupFenceRemove':
self.remove_fencepoint(obj.selected[0].objkey, obj.selected[0].extra_info)
elif menuitem.returnkey == 'popupFenceMove':
self.move_fencepoint(obj.selected[0].objkey, obj.selected[0].extra_info)
elif menuitem.returnkey == 'showPosition':
self.show_position()
def map_callback(self, obj):
'''called when an event happens on the slipmap'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if isinstance(obj, mp_slipmap.SlipMenuEvent):
self.handle_menu_event(obj)
return
if not isinstance(obj, mp_slipmap.SlipMouseEvent):
return
if obj.event.m_leftDown and self.moving_rally is not None:
self.click_position = obj.latlon
self.click_time = time.time()
self.mpstate.functions.process_stdin("rally move %u" % self.moving_rally)
self.moving_rally = None
return
if obj.event.m_rightDown and self.moving_rally is not None:
print("Cancelled rally move")
self.moving_rally = None
return
if obj.event.m_leftDown and self.moving_wp is not None:
self.click_position = obj.latlon
self.click_time = time.time()
self.mpstate.functions.process_stdin("wp move %u" % self.moving_wp)
self.moving_wp = None
return
if obj.event.m_leftDown and self.moving_fencepoint is not None:
self.click_position = obj.latlon
self.click_time = time.time()
self.mpstate.functions.process_stdin("fence move %u" % (self.moving_fencepoint+1))
self.moving_fencepoint = None
return
if obj.event.m_rightDown and self.moving_wp is not None:
print("Cancelled wp move")
self.moving_wp = None
return
if obj.event.m_rightDown and self.moving_fencepoint is not None:
print("Cancelled fence move")
self.moving_fencepoint = None
return
elif obj.event.m_leftDown:
if time.time() - self.click_time > 0.1:
self.click_position = obj.latlon
self.click_time = time.time()
self.drawing_update()
if self.module('misseditor') is not None:
self.module('misseditor').update_map_click_position(self.click_position)
if obj.event.m_rightDown:
if self.draw_callback is not None:
self.drawing_end()
return
if time.time() - self.click_time > 0.1:
self.click_position = obj.latlon
self.click_time = time.time()
def unload(self):
'''unload module'''
self.mpstate.map.close()
self.mpstate.map = None
self.mpstate.map_functions = {}
def idle_task(self):
now = time.time()
if self.last_unload_check_time + self.unload_check_interval < now:
self.last_unload_check_time = now
if not self.mpstate.map.is_alive():
self.needs_unloading = True
def create_vehicle_icon(self, name, colour, follow=False, vehicle_type=None):
'''add a vehicle to the map'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if vehicle_type is None:
vehicle_type = self.vehicle_type_name
if name in self.have_vehicle and self.have_vehicle[name] == vehicle_type:
return
self.have_vehicle[name] = vehicle_type
icon = self.mpstate.map.icon(colour + vehicle_type + '.png')
self.mpstate.map.add_object(mp_slipmap.SlipIcon(name, (0,0), icon, layer=3, rotation=0, follow=follow,
trail=mp_slipmap.SlipTrail()))
def drawing_update(self):
'''update line drawing'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if self.draw_callback is None:
return
self.draw_line.append(self.click_position)
if len(self.draw_line) > 1:
self.mpstate.map.add_object(mp_slipmap.SlipPolygon('drawing', self.draw_line,
layer='Drawing', linewidth=2, colour=(128,128,255)))
def drawing_end(self):
'''end line drawing'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if self.draw_callback is None:
return
self.draw_callback(self.draw_line)
self.draw_callback = None
self.mpstate.map.add_object(mp_slipmap.SlipDefaultPopup(self.default_popup, combine=True))
self.mpstate.map.add_object(mp_slipmap.SlipClearLayer('Drawing'))
def draw_lines(self, callback):
'''draw a series of connected lines on the map, calling callback when done'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
self.draw_callback = callback
self.draw_line = []
self.mpstate.map.add_object(mp_slipmap.SlipDefaultPopup(None))
def cmd_set_home(self, args):
'''called when user selects "Set Home" on map'''
(lat, lon) = (self.click_position[0], self.click_position[1])
alt = self.ElevationMap.GetElevation(lat, lon)
print("Setting home to: ", lat, lon, alt)
self.master.mav.command_long_send(
self.settings.target_system, self.settings.target_component,
mavutil.mavlink.MAV_CMD_DO_SET_HOME,
1, # set position
0, # param1
0, # param2
0, # param3
0, # param4
lat, # lat
lon, # lon
alt) # param7
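# Note on the positional arguments above: pymavlink's generated command_long_send() signature is
# (target_system, target_component, command, confirmation, param1..param7), so the literal 1 fills
# the confirmation field and lat/lon/alt end up in param5..param7 of MAV_CMD_DO_SET_HOME.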
def mavlink_packet(self, m):
'''handle an incoming mavlink packet'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if m.get_type() == "HEARTBEAT":
if m.type
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__partition = t
if hasattr(self, '_set'):
self._set()
def _unset_partition(self):
self.__partition = YANGDynClass(unique=True, base=TypedListType(allowed_type=six.text_type), is_leaf=False, yang_name="partition", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
id = __builtin__.property(_get_id, _set_id)
alarmId = __builtin__.property(_get_alarmId, _set_alarmId)
nodeId = __builtin__.property(_get_nodeId, _set_nodeId)
raAlarmId = __builtin__.property(_get_raAlarmId, _set_raAlarmId)
nodeType = __builtin__.property(_get_nodeType, _set_nodeType)
state = __builtin__.property(_get_state, _set_state)
resourceId = __builtin__.property(_get_resourceId, _set_resourceId)
nativeConditionType = __builtin__.property(_get_nativeConditionType, _set_nativeConditionType)
conditionSeverity = __builtin__.property(_get_conditionSeverity, _set_conditionSeverity)
serviceAffecting = __builtin__.property(_get_serviceAffecting, _set_serviceAffecting)
manualClearable = __builtin__.property(_get_manualClearable, _set_manualClearable)
additionalText = __builtin__.property(_get_additionalText, _set_additionalText)
firstRaiseTime = __builtin__.property(_get_firstRaiseTime, _set_firstRaiseTime)
lastRaiseTime = __builtin__.property(_get_lastRaiseTime, _set_lastRaiseTime)
numberOfOccurrences = __builtin__.property(_get_numberOfOccurrences, _set_numberOfOccurrences)
acknowledgeState = __builtin__.property(_get_acknowledgeState, _set_acknowledgeState)
deviceId = __builtin__.property(_get_deviceId, _set_deviceId)
deviceName = __builtin__.property(_get_deviceName, _set_deviceName)
deviceLongName = __builtin__.property(_get_deviceLongName, _set_deviceLongName)
ipAddress = __builtin__.property(_get_ipAddress, _set_ipAddress)
macAddress = __builtin__.property(_get_macAddress, _set_macAddress)
cardType = __builtin__.property(_get_cardType, _set_cardType)
fic = __builtin__.property(_get_fic, _set_fic)
partition = __builtin__.property(_get_partition, _set_partition)
_pyangbind_elements = OrderedDict([('id', id), ('alarmId', alarmId), ('nodeId', nodeId), ('raAlarmId', raAlarmId), ('nodeType', nodeType), ('state', state), ('resourceId', resourceId), ('nativeConditionType', nativeConditionType), ('conditionSeverity', conditionSeverity), ('serviceAffecting', serviceAffecting), ('manualClearable', manualClearable), ('additionalText', additionalText), ('firstRaiseTime', firstRaiseTime), ('lastRaiseTime', lastRaiseTime), ('numberOfOccurrences', numberOfOccurrences), ('acknowledgeState', acknowledgeState), ('deviceId', deviceId), ('deviceName', deviceName), ('deviceLongName', deviceLongName), ('ipAddress', ipAddress), ('macAddress', macAddress), ('cardType', cardType), ('fic', fic), ('partition', partition), ])
class yc_alarm_bnc_filtered_alarms__bncFilteredAlarm_alarm(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module bnc-filtered-alarms - based on the path /bncFilteredAlarm/alarm. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_path_helper', '_extmethods', '__id','__type','__attributes',)
_yang_name = 'alarm'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
self.__type = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
self.__attributes = YANGDynClass(base=yc_attributes_bnc_filtered_alarms__bncFilteredAlarm_alarm_attributes, is_container='container', yang_name="attributes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['bncFilteredAlarm', 'alarm']
def _get_id(self):
"""
Getter method for id, mapped from YANG variable /bncFilteredAlarm/alarm/id (string)
"""
return self.__id
def _set_id(self, v, load=False):
"""
Setter method for id, mapped from YANG variable /bncFilteredAlarm/alarm/id (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_id() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """id must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__id = t
if hasattr(self, '_set'):
self._set()
def _unset_id(self):
self.__id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /bncFilteredAlarm/alarm/type (string)
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /bncFilteredAlarm/alarm/type (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """type must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)""",
})
self.__type = t
if hasattr(self, '_set'):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='string', is_config=True)
def _get_attributes(self):
"""
Getter method for attributes, mapped from YANG variable /bncFilteredAlarm/alarm/attributes (container)
"""
return self.__attributes
def _set_attributes(self, v, load=False):
"""
Setter method for attributes, mapped from YANG variable /bncFilteredAlarm/alarm/attributes (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_attributes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_attributes() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_attributes_bnc_filtered_alarms__bncFilteredAlarm_alarm_attributes, is_container='container', yang_name="attributes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """attributes must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=yc_attributes_bnc_filtered_alarms__bncFilteredAlarm_alarm_attributes, is_container='container', yang_name="attributes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='container', is_config=True)""",
})
self.__attributes = t
if hasattr(self, '_set'):
self._set()
def _unset_attributes(self):
self.__attributes = YANGDynClass(base=yc_attributes_bnc_filtered_alarms__bncFilteredAlarm_alarm_attributes, is_container='container', yang_name="attributes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='container', is_config=True)
id = __builtin__.property(_get_id, _set_id)
type = __builtin__.property(_get_type, _set_type)
attributes = __builtin__.property(_get_attributes, _set_attributes)
_pyangbind_elements = OrderedDict([('id', id), ('type', type), ('attributes', attributes), ])
class yc_bncFilteredAlarm_bnc_filtered_alarms__bncFilteredAlarm(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module bnc-filtered-alarms - based on the path /bncFilteredAlarm. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_path_helper', '_extmethods', '__alarm',)
_yang_name = 'bncFilteredAlarm'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__alarm = YANGDynClass(base=YANGListType("id",yc_alarm_bnc_filtered_alarms__bncFilteredAlarm_alarm, yang_name="alarm", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="alarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['bncFilteredAlarm']
def _get_alarm(self):
"""
Getter method for alarm, mapped from YANG variable /bncFilteredAlarm/alarm (list)
"""
return self.__alarm
def _set_alarm(self, v, load=False):
"""
Setter method for alarm, mapped from YANG variable /bncFilteredAlarm/alarm (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_alarm is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alarm() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("id",yc_alarm_bnc_filtered_alarms__bncFilteredAlarm_alarm, yang_name="alarm", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="alarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """alarm must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("id",yc_alarm_bnc_filtered_alarms__bncFilteredAlarm_alarm, yang_name="alarm", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="alarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='list', is_config=True)""",
})
self.__alarm = t
if hasattr(self, '_set'):
self._set()
def _unset_alarm(self):
self.__alarm = YANGDynClass(base=YANGListType("id",yc_alarm_bnc_filtered_alarms__bncFilteredAlarm_alarm, yang_name="alarm", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="alarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='list', is_config=True)
alarm = __builtin__.property(_get_alarm, _set_alarm)
_pyangbind_elements = OrderedDict([('alarm', alarm), ])
class bnc_filtered_alarms(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module bnc-filtered-alarms - based on the path /bnc-filtered-alarms. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_path_helper', '_extmethods', '__bncFilteredAlarm',)
_yang_name = 'bnc-filtered-alarms'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__bncFilteredAlarm = YANGDynClass(base=yc_bncFilteredAlarm_bnc_filtered_alarms__bncFilteredAlarm, is_container='container', yang_name="bncFilteredAlarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='https://rmt.verizon.com/bnc-filtered-alarms', defining_module='bnc-filtered-alarms', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr
# handshake returns True if it succeeded
if not (await self.handshake()):
return
try:
await self.handler(self)
except BaseException as err:
await self.loop.render_exc_async(
err,
before=[
'Unhandled exception occurred at',
self.__class__.__name__,
'.lifetime_handler meanwhile running: ',
repr(self.handler),
'\n',
],
)
return
await self.close()
except:
# We will let Task.__del__ to render the exception...
transport = self.transport
if transport is None:
raise
transport.close()
transport.abort()
raise
finally:
self.handler_task = None
self.server.unregister(self)
async def handshake(self):
"""
Handles a received websocket connect request.
This method is a coroutine.
Returns
-------
handshake_succeeded : `bool`
If the websocket handshake succeeded and its handler can be started, returns `True`.
"""
try:
self.request = request = await self.set_payload_reader(
self._read_http_request()
)
request_headers = request.headers
if self.server.is_serving():
path = request.path
request_processor = self.request_processor
if request_processor is None:
early_response = None
else:
early_response = request_processor(path, request_headers)
if is_coroutine(early_response):
early_response = await early_response
if early_response is not None:
raise AbortHandshake(*early_response)
else:
raise AbortHandshake(SERVICE_UNAVAILABLE, 'Server is shutting down.')
connections = []
connection_headers = request_headers.get_all(CONNECTION)
if connection_headers is not None:
for connection_header in connection_headers:
connections.extend(parse_connections(connection_header))
if not any(value.lower() == 'upgrade' for value in connections):
raise InvalidUpgrade(
f'Invalid connection, no upgrade found, got {connections!r}.'
)
upgrade = []
upgrade_headers = request_headers.get_all(UPGRADE)
if upgrade_headers is not None:
for upgrade_header in upgrade_headers:
upgrade.extend(parse_upgrades(upgrade_header))
if len(upgrade) != 1 or upgrade[0].lower() != 'websocket': # ignore case
raise InvalidUpgrade(
f'Expected \'WebSocket\' for \'Upgrade\', but got {upgrade!r}.'
)
received_keys = request_headers.get_all(SEC_WEBSOCKET_KEY)
if received_keys is None:
raise InvalidHandshake(f'Missing {SEC_WEBSOCKET_KEY!r} from headers')
if len(received_keys) > 1:
raise InvalidHandshake(
f'Multiple {SEC_WEBSOCKET_KEY!r} values at headers'
)
key = received_keys[0]
try:
raw_key = b64decode(key.encode(), validate=True)
except BinasciiError:
raise InvalidHandshake(f'Invalid {SEC_WEBSOCKET_KEY!r}: {key!r}.')
if len(raw_key) != 16:
raise InvalidHandshake(
f'Invalid {SEC_WEBSOCKET_KEY!r}, should be length 16; {key!r}.'
)
sw_version = request_headers.get_all(SEC_WEBSOCKET_VERSION)
if sw_version is None:
raise InvalidHandshake(
f'Missing {SEC_WEBSOCKET_VERSION!r} values at headers.'
)
if len(sw_version) > 1:
raise InvalidHandshake(
f'Multiple {SEC_WEBSOCKET_VERSION!r} values at headers.'
)
sw_version = sw_version[0]
if sw_version != '13':
raise InvalidHandshake(
f'Invalid {SEC_WEBSOCKET_VERSION!r}: {sw_version!r}.'
)
while True:
# compare the received origin against the configured expected origin
expected_origin = self.origin
if expected_origin is None:
origin = None
break
origin_headers = request_headers.get_all(ORIGIN)
if origin_headers is None:
raise InvalidOrigin('No origin at header.')
if len(origin_headers) > 1:
raise InvalidOrigin('More than 1 origin at header.')
origin = origin_headers[0]
if origin == expected_origin:
break
raise InvalidOrigin(origin)
self.origin = origin
while True:
accepted_extensions = []
available_extensions = self.available_extensions
if available_extensions is None:
extension_header = None
break
extension_headers_ = request_headers.get_all(SEC_WEBSOCKET_EXTENSIONS)
if extension_headers_ is None:
extension_header = None
break
extension_headers = []
parsed_extension_values = []
for extension_header_ in extension_headers_:
parsed_extension_values.extend(parse_extensions(extension_header_))
for name, params in parsed_extension_values:
for extension in available_extensions:
# do names and params match?
if extension.name == name and extension.are_valid_params(
params, accepted_extensions
):
accepted_extensions.append(extension)
extension_headers.append((name, params))
break
else:
# no matching extension
raise InvalidHandshake(
f'Unsupported extension: name={name!r}, params={params!r}.'
)
# If we didn't break from the loop, no extension in our list matched what the client sent. The
# extension is declined.
# Serialize extension header.
if extension_headers:
extension_header = build_extensions(extension_headers)
break
extension_header = None
break
self.extensions = accepted_extensions
while True:
available_subprotocols = self.available_subprotocols
if available_subprotocols is None:
selected_subprotocol = None
break
protocol_headers = request_headers.get_all(SEC_WEBSOCKET_PROTOCOL)
if protocol_headers is None:
selected_subprotocol = None
break
parsed_header_subprotocols = []
for protocol_header in protocol_headers:
parsed_header_subprotocols.extend(
parse_subprotocols(protocol_header)
)
subprotocol_selector = self.subprotocol_selector
if subprotocol_selector is not None:
selected_subprotocol = subprotocol_selector(
parsed_header_subprotocols, available_subprotocols
)
break
subprotocols = set(parsed_header_subprotocols)
subprotocols.intersection_update(available_subprotocols)
if not subprotocols:
selected_subprotocol = None
break
selected_subprotocol = sorted(
subprotocols,
key=lambda priority: (
parsed_header_subprotocols.index(priority)
+ available_subprotocols.index(priority)
),
)[0]
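# Worked example of the ranking above: if the client offered ['v1', 'v2'] and the server also
# supports ['v1', 'v2'], then 'v1' scores 0 + 0 = 0 and 'v2' scores 1 + 1 = 2, so the subprotocol
# preferred by both sides (lowest combined index) is selected.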
break
self.subprotocol = selected_subprotocol
response_headers = imultidict()
response_headers[UPGRADE] = 'websocket'
response_headers[CONNECTION] = 'Upgrade'
response_headers[SEC_WEBSOCKET_ACCEPT] = b64encode(
hashlib.sha1((key + WS_KEY).encode()).digest()
).decode()
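# Standard RFC 6455 accept computation; assuming WS_KEY is the fixed GUID
# '258EAFA5-E914-47DA-95CA-C5AB0DC85B11', the RFC's sample key 'dGhlIHNhbXBsZSBub25jZQ=='
# yields 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='.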
if extension_header is not None:
response_headers[SEC_WEBSOCKET_EXTENSIONS] = extension_header
if selected_subprotocol is not None:
response_headers[SEC_WEBSOCKET_PROTOCOL] = selected_subprotocol
extra_response_headers = self.extra_response_headers
if extra_response_headers is not None:
for key, value in extra_response_headers.items():
response_headers[key] = value
response_headers.setdefault(DATE, formatdate(usegmt=True))
response_headers.setdefault(SERVER, '')
self.response_headers = response_headers
self.write_http_response(SWITCHING_PROTOCOLS, response_headers)
self.connection_open()
except (CancelledError, ConnectionError) as err:
await self.loop.render_exc_async(
err,
before=[
'Unhandled exception occurred at ',
self.__class__.__name__,
'.handshake, when handshaking:\n',
],
)
return False
except BaseException as err:
if isinstance(err, AbortHandshake):
status = err.code
headers = err.headers
if headers is None:
headers = imultidict()
body = err.message
if not body.endswith(b'\n'):
body = body + b'\n'
elif isinstance(err, InvalidOrigin):
status = FORBIDDEN
headers = imultidict()
body = f'Failed to open a WebSocket connection: {err}.\n'.encode()
elif isinstance(err, InvalidUpgrade):
status = UPGRADE_REQUIRED
headers = imultidict()
headers[UPGRADE] = 'websocket'
body = (
f'Failed to open a WebSocket connection: {err}.\n\n'
f'You cannot access a WebSocket server directly with a browser. You need a WebSocket client.\n'
).encode()
elif isinstance(err, InvalidHandshake):
status = BAD_REQUEST
headers = imultidict()
body = f'Failed to open a WebSocket connection: {err}.\n'.encode()
elif isinstance(err, PayloadError):
status = BAD_REQUEST
headers = imultidict()
body = f'Invalid request body: {err}.\n'.encode()
else:
status = INTERNAL_SERVER_ERROR
headers = imultidict()
body = b'Failed to open a WebSocket connection.\n'
headers.setdefault(DATE, formatdate(usegmt=True))
headers.setdefault(SERVER, '')
headers.setdefault(CONTENT_LENGTH, repr(len(body)))
headers.setdefault(CONTENT_TYPE, 'text/plain')
headers.setdefault(CONNECTION, 'close')
try:
self.write_http_response(status, headers, body=body)
self.fail_connection()
await self.wait_for_connection_lost()
except BaseException as err2:
await self.loop.render_exc_async(
err2,
before=[
'Unhandled exception occurred at ',
self.__class__.__name__,
'.handshake, when handling an other exception;',
repr(err),
':',
],
)
return False
return True
class WSServer:
"""
Asynchronous websocket server implementation.
Attributes
----------
loop : ``EventThread``
The event loop to what the websocket server is bound to.
websockets : `set` of (``WSServerProtocol`` or `Any`)
Active server side asynchronous websocket protocol implementations.
close_connection_task : `None` or ``Task`` of ``_close``
Close connection task, what's result is set, when closing of the websocket is done.
Should not be cancelled.
Set, when ``.close`` is called.
handler : `async-callable`
An asynchronous callable, what will handle a websocket connection.
Should be given as an `async-callable` accepting `1` parameter: the respective asynchronous server side
websocket protocol implementation.
server : `None` or ``Server``
Asynchronous server instance. Set meanwhile the websocket server is running.
protocol_parameters : `tuple` of `Any`
Websocket protocol parameters.
Contains the following elements:
- `handler` : `async-callable` Same as ``.handler``.
- `host` : `None` or `str`, `iterable` of (`None` or `str`). To what network interfaces the server be bound.
- `port` : `None` or `int`. The port used by the `host`(s).
- `is_ssl` : `bool`
Whether the server is secure.
- `origin` : `None` or `str`. Value of the Origin header.
- `available_extensions` : `None` or (`list` of `Any`). Available websocket extensions.
Each websocket extension should have the following `4` attributes / methods:
- `name`, type `str`. The extension's name.
- `request_params` : `list` of `tuple` (`str`, `str`). Additional header parameters of the extension.
- `decode` : `callable`. Decoder method, what processes a received websocket frame. Should accept `2`
parameters: The respective websocket ``Frame``, and the `max_size` as `int`, what describes the
maximal size of a received frame. If it is exceeded, ``PayloadError`` is raised.
- `encode` : `callable`. Encoder method, what processes the websocket frames to send. Should accept `1`
parameter, the respective websocket ``Frame``.
- `available_subprotocols` : `None` or (`list` of `str`). A list of supported subprotocols in order of
decreasing preference.
- `extra_response_headers` : `None` or (``imultidict``, `dict-like`) of (`str`, `str`) items. Extra
headers to send with the http response.
- `request_processor` : `None` or `callable`. An optionally asynchronous callable, what processes the
initial requests from the potential clients. Should accept the following parameters:
- `path` : `str`. The requested path.
- `request_headers` : ``imultidict`` of (`str`, `str`). The request's headers.
The `request_processor` on accepted request should return `None`, otherwise a `tuple` of
``AbortHandshake`` parameters.
- `subprotocol_selector` : `None` or `callable`. User hook to select subprotocols. Should accept the
following parameters:
- `parsed_header_subprotocols` : `list` of `str`. The subprotocols supported by the client.
- `available_subprotocols` : `list` of `str`. The subprotocols supported by the server.
- `websocket_kwargs` : `dict` of (`str`, `Any`). Extra parameters for creating the websocket protocol.
Can have any of the following items:
- `close_timeout` : `float`. The maximal duration in seconds what is waited for response after close
frame is sent. Defaults to `10.0`.
- `max_size` : `int`. Max
from typing import Optional, List, NoReturn
import copy
from easydict import EasyDict
from ding.utils import find_free_port, find_free_port_slurm, node_to_partition, node_to_host, pretty_print, \
DEFAULT_K8S_COLLECTOR_PORT, DEFAULT_K8S_LEARNER_PORT, DEFAULT_K8S_COORDINATOR_PORT
from dizoo.classic_control.cartpole.config.parallel import cartpole_dqn_config
default_host = '0.0.0.0'
default_port = 22270
def set_host_port(cfg: EasyDict, coordinator_host: str, learner_host: str, collector_host: str) -> EasyDict:
cfg.coordinator.host = coordinator_host
if cfg.coordinator.port == 'auto':
cfg.coordinator.port = find_free_port(coordinator_host)
learner_count = 0
collector_count = 0
for k in cfg.keys():
if k == 'learner_aggregator':
raise NotImplementedError
if k.startswith('learner'):
if cfg[k].host == 'auto':
if isinstance(learner_host, list):
cfg[k].host = learner_host[learner_count]
learner_count += 1
elif isinstance(learner_host, str):
cfg[k].host = learner_host
else:
raise TypeError("not support learner_host type: {}".format(learner_host))
if cfg[k].port == 'auto':
cfg[k].port = find_free_port(cfg[k].host)
cfg[k].aggregator = False
if k.startswith('collector'):
if cfg[k].host == 'auto':
if isinstance(collector_host, list):
cfg[k].host = collector_host[collector_count]
collector_count += 1
elif isinstance(collector_host, str):
cfg[k].host = collector_host
else:
raise TypeError("not support collector_host type: {}".format(collector_host))
if cfg[k].port == 'auto':
cfg[k].port = find_free_port(cfg[k].host)
return cfg
def set_host_port_slurm(cfg: EasyDict, coordinator_host: str, learner_node: list, collector_node: list) -> EasyDict:
cfg.coordinator.host = coordinator_host
if cfg.coordinator.port == 'auto':
cfg.coordinator.port = find_free_port(coordinator_host)
if isinstance(learner_node, str):
learner_node = [learner_node]
if isinstance(collector_node, str):
collector_node = [collector_node]
learner_count, collector_count = 0, 0
learner_multi = {}
for k in cfg.keys():
if learner_node is not None and k.startswith('learner'):
node = learner_node[learner_count % len(learner_node)]
cfg[k].node = node
cfg[k].partition = node_to_partition(node)
gpu_num = cfg[k].gpu_num
if cfg[k].host == 'auto':
cfg[k].host = node_to_host(node)
if cfg[k].port == 'auto':
if gpu_num == 1:
cfg[k].port = find_free_port_slurm(node)
learner_multi[k] = False
else:
cfg[k].port = [find_free_port_slurm(node) for _ in range(gpu_num)]
learner_multi[k] = True
learner_count += 1
if collector_node is not None and k.startswith('collector'):
node = collector_node[collector_count % len(collector_node)]
cfg[k].node = node
cfg[k].partition = node_to_partition(node)
if cfg[k].host == 'auto':
cfg[k].host = node_to_host(node)
if cfg[k].port == 'auto':
cfg[k].port = find_free_port_slurm(node)
collector_count += 1
for k, flag in learner_multi.items():
if flag:
host = cfg[k].host
learner_interaction_cfg = {str(i): [str(i), host, p] for i, p in enumerate(cfg[k].port)}
aggregator_cfg = dict(
master=dict(
host=host,
port=find_free_port_slurm(cfg[k].node),
),
slave=dict(
host=host,
port=find_free_port_slurm(cfg[k].node),
),
learner=learner_interaction_cfg,
node=cfg[k].node,
partition=cfg[k].partition,
)
cfg[k].aggregator = True
cfg['learner_aggregator' + k[7:]] = aggregator_cfg
else:
cfg[k].aggregator = False
return cfg
def set_host_port_k8s(cfg: EasyDict, coordinator_port: int, learner_port: int, collector_port: int) -> EasyDict:
cfg.coordinator.host = default_host
cfg.coordinator.port = coordinator_port if coordinator_port is not None else DEFAULT_K8S_COORDINATOR_PORT
base_learner_cfg = None
base_collector_cfg = None
if learner_port is None:
learner_port = DEFAULT_K8S_LEARNER_PORT
if collector_port is None:
collector_port = DEFAULT_K8S_COLLECTOR_PORT
for k in cfg.keys():
if k.startswith('learner'):
# create the base learner config
if base_learner_cfg is None:
base_learner_cfg = copy.deepcopy(cfg[k])
base_learner_cfg.host = default_host
base_learner_cfg.port = learner_port
cfg[k].port = learner_port
elif k.startswith('collector'):
# create the base collector config
if base_collector_cfg is None:
base_collector_cfg = copy.deepcopy(cfg[k])
base_collector_cfg.host = default_host
base_collector_cfg.port = collector_port
cfg[k].port = collector_port
cfg['learner'] = base_learner_cfg
cfg['collector'] = base_collector_cfg
return cfg
def set_learner_interaction_for_coordinator(cfg: EasyDict) -> EasyDict:
cfg.coordinator.learner = {}
for k in cfg.keys():
if k.startswith('learner') and not k.startswith('learner_aggregator'):
if cfg[k].aggregator:
dst_k = 'learner_aggregator' + k[7:]
cfg.coordinator.learner[k] = [k, cfg[dst_k].slave.host, cfg[dst_k].slave.port]
else:
dst_k = k
cfg.coordinator.learner[k] = [k, cfg[dst_k].host, cfg[dst_k].port]
return cfg
def set_collector_interaction_for_coordinator(cfg: EasyDict) -> EasyDict:
cfg.coordinator.collector = {}
for k in cfg.keys():
if k.startswith('collector'):
cfg.coordinator.collector[k] = [k, cfg[k].host, cfg[k].port]
return cfg
def set_system_cfg(cfg: EasyDict) -> EasyDict:
learner_num = cfg.main.policy.learn.learner.learner_num
collector_num = cfg.main.policy.collect.collector.collector_num
path_data = cfg.system.path_data
path_policy = cfg.system.path_policy
coordinator_cfg = cfg.system.coordinator
communication_mode = cfg.system.communication_mode
assert communication_mode in ['auto'], communication_mode
learner_gpu_num = cfg.system.learner_gpu_num
learner_multi_gpu = learner_gpu_num > 1
new_cfg = dict(coordinator=dict(
host='auto',
port='auto',
))
new_cfg['coordinator'].update(coordinator_cfg)
for i in range(learner_num):
new_cfg[f'learner{i}'] = dict(
type=cfg.system.comm_learner.type,
import_names=cfg.system.comm_learner.import_names,
host='auto',
port='auto',
path_data=path_data,
path_policy=path_policy,
multi_gpu=learner_multi_gpu,
gpu_num=learner_gpu_num,
)
for i in range(collector_num):
new_cfg[f'collector{i}'] = dict(
type=cfg.system.comm_collector.type,
import_names=cfg.system.comm_collector.import_names,
host='auto',
port='auto',
path_data=path_data,
path_policy=path_policy,
)
return EasyDict(new_cfg)
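# For example (sizes are illustrative), learner_num=2 and collector_num=1 produce a system
# config with keys 'coordinator', 'learner0', 'learner1' and 'collector0', where host and
# port default to 'auto' (unless overridden via cfg.system.coordinator) and are resolved
# later by the set_host_port* helpers.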
def parallel_transform(
cfg: dict,
coordinator_host: Optional[str] = None,
learner_host: Optional[List[str]] = None,
collector_host: Optional[List[str]] = None
) -> EasyDict:
coordinator_host = default_host if coordinator_host is None else coordinator_host
collector_host = default_host if collector_host is None else collector_host
learner_host = default_host if learner_host is None else learner_host
cfg = EasyDict(cfg)
cfg.system = set_system_cfg(cfg)
cfg.system = set_host_port(cfg.system, coordinator_host, learner_host, collector_host)
cfg.system = set_learner_interaction_for_coordinator(cfg.system)
cfg.system = set_collector_interaction_for_coordinator(cfg.system)
return cfg
def parallel_transform_slurm(
cfg: dict,
coordinator_host: Optional[str] = None,
learner_node: Optional[List[str]] = None,
collector_node: Optional[List[str]] = None
) -> EasyDict:
cfg = EasyDict(cfg)
cfg.system = set_system_cfg(cfg)
cfg.system = set_host_port_slurm(cfg.system, coordinator_host, learner_node, collector_node)
cfg.system = set_learner_interaction_for_coordinator(cfg.system)
cfg.system = set_collector_interaction_for_coordinator(cfg.system)
pretty_print(cfg)
return cfg
def parallel_transform_k8s(
cfg: dict,
coordinator_port: Optional[int] = None,
learner_port: Optional[int] = None,
collector_port: Optional[int] = None
) -> EasyDict:
cfg = EasyDict(cfg)
cfg.system = set_system_cfg(cfg)
cfg.system = set_host_port_k8s(cfg.system, coordinator_port, learner_port, collector_port)
    # learner/collector is created by the operator, so the following fields are placeholders
cfg.system.coordinator.collector = {}
cfg.system.coordinator.learner = {}
pretty_print(cfg)
return cfg
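# Minimal usage sketch (assumes `main_cfg` is a complete parallel config dict containing the
# `main` and `system` sections these helpers expect; the hosts/nodes shown are hypothetical):
#
#   cfg = parallel_transform_slurm(main_cfg, coordinator_host='10.0.0.1',
#                                  learner_node=['node-1'], collector_node=['node-2'])
#
# All three transforms share the same pipeline: build the system config, resolve hosts and
# ports for the target platform, then fill the coordinator's learner/collector tables
# (left empty for k8s, where the operator creates those endpoints).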
def save_config_formatted(config_: dict, path: str = 'formatted_total_config.py') -> NoReturn:
"""
Overview:
        Save formatted configuration to a python file that can be read by ``serial_pipeline`` directly.
Arguments:
        - config_ (:obj:`dict`): Config dict.
- path (:obj:`str`): Path of python file
"""
with open(path, "w") as f:
f.write('from easydict import EasyDict\n\n')
f.write('main_config = dict(\n')
f.write(" exp_name='{}',\n".format(config_.exp_name))
for k, v in config_.items():
if (k == 'env'):
f.write(' env=dict(\n')
for k2, v2 in v.items():
if (k2 != 'type' and k2 != 'import_names' and k2 != 'manager'):
if (isinstance(v2, str)):
f.write(" {}='{}',\n".format(k2, v2))
else:
f.write(" {}={},\n".format(k2, v2))
if (k2 == 'manager'):
f.write(" manager=dict(\n")
for k3, v3 in v2.items():
                            if (k3 != 'cfg_type' and k3 != 'type'):
if (isinstance(v3, str)):
f.write(" {}='{}',\n".format(k3, v3))
elif v3 == float('inf'):
f.write(" {}=float('{}'),\n".format(k3, v3))
else:
f.write(" {}={},\n".format(k3, v3))
f.write(" ),\n")
f.write(" ),\n")
if (k == 'policy'):
f.write(' policy=dict(\n')
for k2, v2 in v.items():
if (k2 != 'type' and k2 != 'learn' and k2 != 'collect' and k2 != 'eval' and k2 != 'other'
and k2 != 'model'):
if (isinstance(v2, str)):
f.write(" {}='{}',\n".format(k2, v2))
else:
f.write(" {}={},\n".format(k2, v2))
elif (k2 == 'learn'):
f.write(" learn=dict(\n")
for k3, v3 in v2.items():
if (k3 != 'learner'):
if (isinstance(v3, str)):
f.write(" {}='{}',\n".format(k3, v3))
else:
f.write(" {}={},\n".format(k3, v3))
if (k3 == 'learner'):
f.write(" learner=dict(\n")
for k4, v4 in v3.items():
if (k4 != 'dataloader' and k4 != 'hook'):
if (isinstance(v4, str)):
f.write(" {}='{}',\n".format(k4, v4))
else:
f.write(" {}={},\n".format(k4, v4))
else:
if (k4 == 'dataloader'):
f.write(" dataloader=dict(\n")
for k5, v5 in v4.items():
if (isinstance(v5, str)):
f.write(" {}='{}',\n".format(k5, v5))
else:
f.write(" {}={},\n".format(k5, v5))
f.write(" ),\n")
if (k4 == 'hook'):
f.write(" hook=dict(\n")
for k5, v5 in v4.items():
if (isinstance(v5, str)):
f.write(" {}='{}',\n".format(k5, v5))
else:
f.write(" {}={},\n".format(k5, v5))
f.write(" ),\n")
f.write(" ),\n")
f.write(" ),\n")
elif (k2 == 'collect'):
f.write(" collect=dict(\n")
for k3, v3 in v2.items():
if (k3 != 'collector'):
if (isinstance(v3, str)):
f.write(" {}='{}',\n".format(k3, v3))
else:
f.write(" {}={},\n".format(k3, v3))
if (k3 == 'collector'):
f.write(" collector=dict(\n")
for k4, v4 in v3.items():
if (isinstance(v4, str)):
f.write(" {}='{}',\n".format(k4, v4))
else:
f.write(" {}={},\n".format(k4, v4))
f.write(" ),\n")
f.write(" ),\n")
elif (k2 == 'model'):
f.write(" model=dict(\n")
for k3, v3 in v2.items():
if (isinstance(v3, str)):
f.write(" {}='{}',\n".format(k3, v3))
else:
f.write(" {}={},\n".format(k3, v3))
f.write(" ),\n")
elif (k2 == 'other'):
f.write(" other=dict(\n")
for k3, v3 in v2.items():
if (k3 == 'replay_buffer'):
f.write(" replay_buffer=dict(\n")
for k4, v4 in v3.items():
if (k4 != 'monitor' and k4 != 'thruput_controller'):
if (isinstance(v4, dict)):
f.write(" {}=dict(\n".format(k4))
for k5, v5 in v4.items():
if (isinstance(v5, str)):
f.write(" {}='{}',\n".format(k5, v5))
elif v5 == float('inf'):
f.write(" {}=float('{}'),\n".format(k5, v5))
elif (isinstance(v5, dict)):
f.write(" {}=dict(\n".format(k5))
for k6, v6 in v5.items():
if (isinstance(v6, str)):
f.write(" {}='{}',\n".format(k6, v6))
elif v6 == float('inf'):
f.write(
" {}=float('{}'),\n".format(
k6, v6
)
)
elif (isinstance(v6, dict)):
f.write(" {}=dict(\n".format(k6))
for k7, v7 in v6.items():
if (isinstance(v7, str)):
f.write(
" {}='{}',\n".format(
k7, v7
)
)
elif v7 == float('inf'):
f.write(
" {}=float('{}'),\n".
format(k7, v7)
)
else:
f.write(
" {}={},\n".format(
k7, v7
)
)
f.write(" ),\n")
else:
f.write(" {}={},\n".format(k6, v6))
f.write(" ),\n")
else:
f.write(" {}={},\n".format(k5, v5))
f.write(" ),\n")
else:
if (isinstance(v4, str)):
f.write(" {}='{}',\n".format(k4, v4))
elif v4 == float('inf'):
f.write(" {}=float('{}'),\n".format(k4, v4))
else:
f.write(" {}={},\n".format(k4, v4))
else:
if (k4 == 'monitor'):
f.write(" monitor=dict(\n")
for k5, v5 in v4.items():
if (k5 == 'log_path'):
if (isinstance(v5, str)):
f.write(" {}='{}',\n".format(k5, v5))
else:
f.write(" {}={},\n".format(k5, v5))
else:
f.write(" {}=dict(\n".format(k5))
for k6, v6 in v5.items():
if (isinstance(v6, str)):
f.write(" {}='{}',\n".format(k6, v6))
else:
f.write(" {}={},\n".format(k6, v6))
f.write(" ),\n")
f.write(" ),\n")
if (k4 == 'thruput_controller'):
f.write(" thruput_controller=dict(\n")
for k5, v5 in v4.items():
if (isinstance(v5, dict)):
f.write(" {}=dict(\n".format(k5))
for k6, | |
        d = self.distance_all_pairs()
for u in self:
for v in self:
if d[u].get(v, Infinity) in distances:
D.add_edge(u, v)
return D
def girth(self, certificate=False):
"""
Return the girth of the graph.
The girth is the length of the shortest cycle in the graph
(directed cycle if the graph is directed). Graphs without
(directed) cycles have infinite girth.
INPUT:
- ``certificate`` -- boolean (default: ``False``); whether to return
``(g, c)``, where ``g`` is the girth and ``c`` is a list
of vertices of a (directed) cycle of length ``g`` in the graph,
thus providing a certificate that the girth is at most ``g``,
          or ``None`` if ``g`` is infinite
EXAMPLES::
sage: graphs.TetrahedralGraph().girth()
3
sage: graphs.CubeGraph(3).girth()
4
sage: graphs.PetersenGraph().girth(certificate=True) # random
(5, [4, 3, 2, 1, 0])
sage: graphs.HeawoodGraph().girth()
6
sage: next(graphs.trees(9)).girth()
+Infinity
.. SEEALSO::
* :meth:`~GenericGraph.odd_girth` -- return the odd girth
of the graph.
TESTS:
Prior to :trac:`12243`, the girth computation assumed vertices were
integers (and failed). The example below tests the computation for
graphs with vertices that are not integers. In this example the vertices
are sets::
sage: G = graphs.OddGraph(3)
sage: type(G.vertices()[0])
<class 'sage.sets.set.Set_object_enumerated_with_category'>
sage: G.girth()
5
Ticket :trac:`12355`::
sage: H=Graph([(0, 1), (0, 3), (0, 4), (0, 5), (1, 2), (1, 3), (1, 4), (1, 6), (2, 5), (3, 4), (5, 6)])
sage: H.girth()
3
Girth < 3 (see :trac:`12355`)::
sage: g = graphs.PetersenGraph()
sage: g.allow_multiple_edges(True)
sage: g.allow_loops(True)
sage: g.girth()
5
sage: g.add_edge(0,0)
sage: g.girth()
1
sage: g.delete_edge(0,0)
sage: g.add_edge(0,1)
sage: g.girth()
2
sage: g.delete_edge(0,1)
sage: g.girth()
5
sage: g = DiGraph(g)
sage: g.girth()
2
Directed graphs (see :trac:`28142`)::
sage: g = digraphs.Circuit(6)
sage: g.girth()
6
sage: g = digraphs.RandomDirectedGNC(10)
sage: g.girth()
+Infinity
sage: g = DiGraph([(0, 1), (1, 2), (1, 3), (2, 3), (3, 4), (4, 0)])
sage: g.girth()
4
sage: Graph(g).girth()
3
"""
# Cases where girth <= 2
if self.allows_loops():
for u in self:
if self.has_edge(u, u):
return (1, [u]) if certificate else 1
if self.is_directed():
for u, v in self.edge_iterator(labels=False):
if self.has_edge(v, u):
return (2, [u, v]) if certificate else 2
elif self.allows_multiple_edges():
edges = set()
for e in self.edge_iterator(labels=False):
if e in edges:
return (2, list(e)) if certificate else 2
edges.add(e)
return self._girth_bfs(odd=False, certificate=certificate)
def odd_girth(self, algorithm="bfs", certificate=False):
r"""
Return the odd girth of the graph.
The odd girth is the length of the shortest cycle of odd length
in the graph (directed cycle if the graph is directed).
Bipartite graphs have infinite odd girth.
INPUT:
- ``algorithm`` -- string (default: ``"bfs"``); the algorithm to use:
- ``"bfs"`` -- BFS-based algorithm
- any algorithm accepted by
:meth:`~sage.matrix.matrix_integer_dense.Matrix_integer_dense.charpoly`
for computation from the characteristic polynomial (see
[Har1962]_ and [Big1993]_, p. 45)
- ``certificate`` -- boolean (default: ``False``); whether to return
``(g, c)``, where ``g`` is the odd girth and ``c`` is a list of
vertices of a (directed) cycle of length ``g`` in the graph, thus
providing a certificate that the odd girth is at most ``g``, or
``None`` if ``g`` is infinite. So far, this parameter is accepted only
when ``algorithm = "bfs"``.
EXAMPLES:
The McGee graph has girth 7 and therefore its odd girth is 7 as well::
sage: G = graphs.McGeeGraph()
sage: G.girth()
7
sage: G.odd_girth()
7
Any complete (directed) graph on more than 2 vertices contains
a (directed) triangle and has thus odd girth 3::
sage: G = graphs.CompleteGraph(5)
sage: G.odd_girth(certificate=True) # random
(3, [2, 1, 0])
sage: G = digraphs.Complete(5)
sage: G.odd_girth(certificate=True) # random
(3, [1, 2, 0])
Bipartite graphs have no odd cycle and consequently have
infinite odd girth::
sage: G = graphs.RandomBipartite(6, 6, .5)
sage: G.odd_girth()
+Infinity
sage: G = graphs.Grid2dGraph(3, 4)
sage: G.odd_girth()
+Infinity
The odd girth of a (directed) graph with loops is 1::
sage: G = graphs.RandomGNP(10, .5)
sage: G.allow_loops(True)
sage: G.add_edge(0, 0)
sage: G.odd_girth()
1
sage: G = digraphs.RandomDirectedGNP(10, .5)
sage: G.allow_loops(True)
sage: G.add_edge(0, 0)
sage: G.odd_girth()
1
.. SEEALSO::
* :meth:`~GenericGraph.girth` -- return the girth of the graph.
TESTS:
Odd girth of odd cycles::
sage: [graphs.CycleGraph(i).odd_girth() for i in range(3, 12, 2)]
[3, 5, 7, 9, 11]
Directed graphs (see :trac:`28142`)::
sage: g = digraphs.Circuit(7)
sage: g.odd_girth()
7
sage: g = graphs.CompleteBipartiteGraph(10, 10).random_orientation()
sage: g.odd_girth()
+Infinity
sage: g = DiGraph([(0, 1), (1, 2), (1, 3), (2, 3), (3, 4), (4, 0)])
sage: g.odd_girth()
5
sage: Graph(g).odd_girth()
3
Small cases::
sage: [graphs.CompleteGraph(i).odd_girth() for i in range(5)]
[+Infinity, +Infinity, +Infinity, 3, 3]
sage: [digraphs.Complete(i).odd_girth() for i in range(5)]
[+Infinity, +Infinity, +Infinity, 3, 3]
"""
# Case where odd girth is 1
if self.allows_loops():
for u in self:
if self.has_edge(u, u):
return (1, [u]) if certificate else 1
if self.is_bipartite():
from sage.rings.infinity import Infinity
return (Infinity, None) if certificate else Infinity
if algorithm == "bfs":
return self._girth_bfs(odd=True, certificate=certificate)
if certificate:
raise ValueError("certificate is only supported with algorithm='bfs'")
ch = self.am().charpoly(algorithm=algorithm).coefficients(sparse=False)
n = self.order()
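        # The coefficient of x^(n - k) in the characteristic polynomial of the
        # adjacency matrix can be nonzero for odd k only if the graph contains
        # an odd (directed) cycle on at most k vertices, so scanning
        # k = 1, 3, 5, ... returns the odd girth (see [Har1962] and [Big1993]).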
for i in range(n-1, -1, -2):
if ch[i]:
return n - i
def _girth_bfs(self, odd=False, certificate=False):
r"""
Return the girth of the graph using breadth-first search.
Loops and parallel edges are ignored,
so the returned value is at least 3.
INPUT:
- ``odd`` -- boolean (default: ``False``); whether to compute the odd
          girth instead of the girth
- ``certificate`` -- boolean (default: ``False``); whether to return
``(g, c)``, where ``g`` is the (odd) girth and ``c`` is a list
of vertices of a cycle of length ``g`` in the graph,
thus providing a certificate that the (odd) girth is at most ``g``,
          or ``None`` if ``g`` is infinite
EXAMPLES:
The 5-prism has girth 4 and odd girth 5::
sage: G = graphs.CycleGraph(5).cartesian_product(graphs.CompleteGraph(2))
sage: G._girth_bfs(certificate=True) # random
(4, [(2, 0), (1, 0), (1, 1), (2, 1)])
sage: G._girth_bfs(odd=True)
5
.. SEEALSO::
* :meth:`~GenericGraph.girth` -- return the girth of the graph.
* :meth:`~GenericGraph.odd_girth` -- return the odd girth of the graph.
"""
n = self.num_verts()
best = n + 1
seen = set()
for w in self:
seen.add(w)
span = {w: None}
depth = 1
thisList = set([w])
while 2 * depth <= best:
nextList = set()
for v in thisList:
for u in self.neighbor_iterator(v):
if u in seen:
continue
if u not in span:
span[u] = v
nextList.add(u)
else:
if u in thisList:
best = depth * 2 - 1
ends = (u, v)
bestSpan = span
break
if not odd and u in nextList:
best = depth * 2
ends = (u, v)
bestSpan = span
if best == 2 * depth - 1:
break
if best <= 3:
break
thisList = nextList
depth += 1
if best == n + 1:
from sage.rings.infinity import Infinity
return (Infinity, None) if certificate else Infinity
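        # Reconstruct the certificate: walk from both endpoints of the closing
        # edge back to the BFS root along the parent map ``bestSpan`` and
        # concatenate the two paths to obtain the cycle.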
if certificate:
cycles = {}
for x in ends:
cycles[x] = []
y = x
while bestSpan[y] is not None:
cycles[x].append(y)
y = bestSpan[y]
cycles[x].append(y)
u, v = ends
return (best, list(reversed(cycles[u])) + cycles[v])
else:
return best
### Centrality
def centrality_betweenness(self, k=None, normalized=True, weight=None,
endpoints=False, seed=None, exact=False,
algorithm=None):
r"""
Return the betweenness centrality.
        The betweenness centrality of a vertex is the fraction of the shortest
        paths between pairs of other vertices that pass through that vertex.
        The betweenness is normalized by default to be in the range (0,1).
        Measures of the centrality of a vertex within a graph determine the
        relative importance of that vertex to its graph. Vertices that occur on
        more shortest paths between other vertices have higher betweenness than
        vertices that occur on fewer.
INPUT:
- ``normalized`` -- boolean (default: ``True``); if set to ``False``,
result is not normalized.
- ``k`` -- integer (default: ``None``); if set to an integer, use ``k``
node samples to estimate betweenness. Higher values give better
approximations. Not available when ``algorithm="Sage"``.
- ``weight`` -- string (default: ``None``); if set to a string, use that
attribute of the nodes as weight. ``weight = True`` is equivalent to
``weight = "weight"``. Not available when ``algorithm="Sage"``.
- ``endpoints`` -- boolean (default: ``False``); if set to | |
#!/usr/bin/env python2
# ExtinctionRecallTask/ExtinctionRecallAndVasTask.py
"""
ExtinctionRecallAndVasTask.py
Display images from a specified folder and present them to the subject, rating the images on given scales.
Also have rest and "dummy" runs and present mood ratings to the subject in between.
Created 8/21/18 by DJ.
Updated 8/29/18 by DJ - added parallel port triggers, opening prompt, baseline period
Updated 11/14/18 by DJ - changed port ID for OP4, changed parallel port codes: 0-5 for image in block type 1,
6-10 in block type 2, image code + 10 for face rating, 31 and 32 for baseline and mood rating VAS.
Updated 11/28/18 by DJ - Debugged DoRun function, added RunMoodVas function and PreFinalVasMsg parameter.
Updated 12/3/18 by DJ - added moodVasScreenColor, vasMarkerSize, and vasLabelYDist as parameters
Updated 12/19/18 by DJ - split mood VASs into multiple independent files, changed screen colors & spaces, changed fORP keys.
Updated 1/8/19 by DJ - added support for "versions" 5-8, added end block/group/run messages, named VASs more descriptively in log
Updated 1/10/19 by DJ - added year to datestring, incorporated log parsing
Updated 1/11/19 by DJ - fixed "versions" check, RunMoodVas end delays, comments
Updated 1/22/19 by DJ - modified "range" calls to make compatible with python3
Updated 2/21/19 by DJ - changed timing, added MSI, removed dummy run, moved stimuli, 15 groups/run, randomize Q order for each run but not each group
Updated 2/25/19 by DJ - switched to 3 runs, 5 groups/run, changed timing, added visible tick marks to VAS, changed final VAS name to PostRun3.
Updated 3/25/19 by GF - added sound check VAS, second sound check & VAS, second break
Updated 4/12/19 by DJ - no processing at end of task, changed log filename, renamed sound check VASs
Updated 4/25/19 by DJ - added tPreStartup parameter for added fix cross time before Run 1's instructions, added startAtRun option to GUI
Updated 4/26/19 by DJ - renamed tPreStartup->tGetReady and tStartup->tRestInstructions, added corresponding Msg parameters, removed duplicate fixCrossDur
Updated 6/3/19 by GF - added reminder prompt after sound VAS
"""
# Import packages
from psychopy import visual, core, gui, data, event, logging, sound
from psychopy.tools.filetools import fromFile, toFile # saving and loading parameter files
import time as ts, numpy as np # for timing and array operations
import os, glob # for file manipulation
import BasicPromptTools # for loading/presenting prompts and questions
import random # for randomization of trials
import RatingScales # for VAS sliding scale
# ====================== #
# ===== PARAMETERS ===== #
# ====================== #
# Save the parameters declared below?
saveParams = True;
newParamsFilename = 'ExtinctionRecallParams.psydat'
# Declare primary task parameters.
params = {
# Declare experiment flow parameters
'nTrialsPerBlock': 5, # number of trials in a block
'nBlocksPerGroup': 2, # number of blocks in a group (should match number of face VAS questions)
'nGroupsPerRun': 5, # number times this "group" pattern should repeat
# Declare timing parameters
'tGetReady': 20., # 2., # time displaying GetReadyMessage before rest instructions at start of each run
'tRestInstructions': 5.,# 2., # time displaying rest instructions while waiting for scanner to reach steady-state
'tBaseline': 30., # 2., # rest time displaying fixation cross before starting first stimulus
'tPreBlockPrompt': 5., # duration of prompt before each block
'tStimMin': 2., # min duration of stimulus (in seconds)
'tStimMax': 4., # max duration of stimulus (in seconds)
'questionDur': 2.5, # duration of the image rating (in seconds)
'tMsiMin': 0.5, # min time between when one stimulus disappears and the next appears (in seconds)
'tMsiMax': 3.5, # max time between when one stimulus disappears and the next appears (in seconds)
'tIsiMin': 0.5, # min time between when one stimulus disappears and the next appears (in seconds)
'tIsiMax': 7., # max time between when one stimulus disappears and the next appears (in seconds)
'tBreak': 60, # duration of break between runs
# Declare stimulus and response parameters
'preppedKey': 'y', # key from experimenter that says scanner is ready
'triggerKey': '5', # key from scanner that says scan is starting
'imageDir': 'Faces/', # directory containing image stimluli
'imageNames': ['R0_B100.jpg','R25_B75.jpg','R50_B50.jpg','R75_B25.jpg','R100_B0.jpg'], # images will be selected randomly (without replacement) from this list of files in imageDir.
    # Corresponding Port codes will be 1-len(imageNames) for versions 2 & 4, len(imageNames)-1 for versions 1 & 3 (to match increasing CSplus level).
# declare prompts
'skipPrompts': False, # go right to the scanner-wait page
'promptFile': 'Prompts/ExtinctionRecallPrompts.txt', # Name of text file containing prompts
'GetReadyMsg': 'Get Ready...', # Text displayed at start of each run
'RestInstructionsMsg': 'For the next minute or so, stare at the cross and rest.', # Text displayed before fixation cross at start of each run
'PreSoundCheckFile': "Prompts/ExtinctionRecallSoundCheckPrompts.txt", # Text FILE containing prompts shown before the sound check
'PreVasMsg': "Let's do some rating scales.", # Text shown BEFORE each VAS except the final one
'BreakMsg': "You can rest now.", # Text shown during break between runs
'BetweenRunsReminderMsg': "Please remember, in this next part, the faces might scream, so be prepared for that.", # text file shown after rest before following run
'PreFinalVasMsg': "Before we continue, let's do some more rating scales", # Text shown before final VAS
# declare VAS info
'faceQuestionFile': 'Questions/ERFaceRatingScales.txt', # Name of text file containing image Q&As
'moodQuestionFile1': 'Questions/ERVas1RatingScales.txt', # Name of text file containing mood Q&As presented before sound check
'moodQuestionFile2': 'Questions/ERVasRatingScales.txt', # Name of text file containing mood Q&As presented after 1st run
'moodQuestionFile3': 'Questions/ERVasRatingScales.txt', # Name of text file containing mood Q&As presented after 2nd run
'moodQuestionFile4': 'Questions/ERVas4RatingScales.txt', # Name of text file containing mood Q&As presented after 3rd run
'PostSoundCheckFile': 'Questions/PostSoundCheckFile.txt', # Name of text file containing rating scale of volume post sound
'questionDownKey': '4', # red on fORP
'questionUpKey':'2', # yellow on fORP
'questionSelectKey':'3', # green on fORP
'questionSelectAdvances': False, # will locking in an answer advance past an image rating?
# sound info
'badSoundFile': "media/tone_noise_rany.wav",
# parallel port parameters
'sendPortEvents': True, # send event markers to biopac computer via parallel port
'portAddress': 0xE050, # 0xE050, 0x0378, address of parallel port
'codeBaseline': 31, # parallel port code for baseline period (make sure it's greater than nBlocks*2*len(imageNames)!)
'codeVas': 32, # parallel port code for mood ratings (make sure it's greater than nBlocks*2*len(imageNames)!)
# declare display parameters
'fullScreen': True, # run in full screen mode?
'screenToShow': 0, # display on primary screen (0) or secondary (1)?
'fixCrossSize': 50, # size of cross, in pixels
'fixCrossPos': [0,0], # (x,y) pos of fixation cross displayed before each stimulus (for gaze drift correction)
'faceHeight': 2., # in norm units: 2 = height of screen
'screenColor':(120,120,120), # in rgb255 space: (r,g,b) all between 0 and 255
'textColor': (-1,-1,-1), # color of text outside of VAS
'moodVasScreenColor': (110,110,200), # background behind mood VAS and its pre-VAS prompt. Ideally luminance-matched to screen color via luminance meter/app, else keep in mind gamma correction Y = 0.2126 * R + 0.7152 * G + 0.0722 * B
'vasTextColor': (-1,-1,-1), # color of text in both VAS types (-1,-1,-1) = black
'vasMarkerSize': 0.1, # in norm units (2 = whole screen)
'vasLabelYDist': 0.1, # distance below line that VAS label/option text should be, in norm units
'screenRes': (1024,768) # screen resolution (hard-coded because AppKit isn't available on PCs)
}
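# Note: with the defaults above, each run presents nTrialsPerBlock * nBlocksPerGroup *
# nGroupsPerRun = 5 * 2 * 5 = 50 image trials per run (assuming every block shows
# nTrialsPerBlock stimuli).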
# save parameters
if saveParams:
dlgResult = gui.fileSaveDlg(prompt='Save Params...',initFilePath = os.getcwd() + '/Params', initFileName = newParamsFilename,
allowed="PSYDAT files (*.psydat);;All files (*.*)")
newParamsFilename = dlgResult
if newParamsFilename is None: # keep going, but don't save
saveParams = False
else:
toFile(newParamsFilename, params) # save it!
# ========================== #
# ===== SET UP LOGGING ===== #
# ========================== #
scriptName = os.path.basename(__file__)
scriptName = os.path.splitext(scriptName)[0] # remove extension
try: # try to get a previous parameters file
expInfo = fromFile('%s-lastExpInfo.psydat'%scriptName)
expInfo['session'] +=1 # automatically increment session number
expInfo['version'] = ['1','2','3','4','5','6','7','8']
expInfo['startAtRun'] = ['1','2','3']
expInfo['paramsFile'] = [expInfo['paramsFile'],'Load...']
except: # if not there then use a default set
expInfo = {
'subject':'1',
'session': 1,
'version': ['1','2','3','4','5','6','7','8'], # group determining which stim is CS+
'skipPrompts':False,
'sendPortEvents': True,
'startAtRun': ['1','2','3'],
'paramsFile':['DEFAULT','Load...']}
# overwrite params struct if you just saved a new parameter set
if saveParams:
expInfo['paramsFile'] = [newParamsFilename,'Load...']
#present a dialogue to change select params
dlg = | |
],
'Cisco-IOS-XR-clns-isis-cfg',
'mpls-ldp-global',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Mpls.RouterId' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Mpls.RouterId',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address to be used as a router ID.
Precisely one of Address and Interface
must be specified.
''',
'address',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface with designated stable IP
address to be used as a router ID. This
must be a Loopback interface. Precisely
one of Address and Interface must be
specified.
''',
'interface_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'router-id',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Mpls' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Mpls',
False,
[
_MetaInfoClassMember('igp-intact', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Install TE and non-TE nexthops in the RIB
''',
'igp_intact',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisConfigurableLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisConfigurableLevelsEnum',
[], [],
''' Enable MPLS for an IS-IS at the given
levels
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('multicast-intact', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Install non-TE nexthops in the RIB for use
by multicast
''',
'multicast_intact',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('router-id', REFERENCE_CLASS, 'RouterId' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Mpls.RouterId',
[], [],
''' Traffic Engineering stable IP address for
system
''',
'router_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'mpls',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric.MetricEnum' : _MetaInfoEnum('MetricEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'maximum':'maximum',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('metric', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False, [
_MetaInfoClassMember('metric', REFERENCE_ENUM_CLASS, 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum',
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
]),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Metrics',
False,
[
_MetaInfoClassMember('metric', REFERENCE_LIST, 'Metric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric',
[], [],
''' Metric configuration. Legal value depends on
the metric-style specified for the topology. If
the metric-style defined is narrow, then only a
value between <1-63> is allowed and if the
metric-style is defined as wide, then a value
between <1-16777215> is allowed as the metric
value. All routers exclude links with the
maximum wide metric (16777215) from their SPF
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Weights.Weight' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Weights.Weight',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('weight', ATTRIBUTE, 'int' , None, None,
[('1', '16777214')], [],
''' Weight to be configured under interface for
Load Balancing. Allowed weight: <1-16777215>
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weight',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName.Weights' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName.Weights',
False,
[
_MetaInfoClassMember('weight', REFERENCE_LIST, 'Weight' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Weights.Weight',
[], [],
''' Weight configuration under interface for load
balancing
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weights',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Afs.Af.TopologyName' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Afs.Af.TopologyName',
False,
[
_MetaInfoClassMember('topology-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Topology Name
''',
'topology_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('adjacency-check', REFERENCE_ENUM_CLASS, 'IsisAdjCheckEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAdjCheckEnum',
[], [],
''' Suppress check for consistent AF support on
received IIHs
''',
'adjacency_check',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('admin-distances', REFERENCE_CLASS, 'AdminDistances' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances',
[], [],
''' Per-route administrative
                    distance configuration
''',
'admin_distances',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('advertise-link-attributes', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, advertise additional link
attributes in our LSP
''',
'advertise_link_attributes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('advertise-passive-only', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' If enabled, advertise prefixes of passive
interfaces only
''',
'advertise_passive_only',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('apply-weight', REFERENCE_ENUM_CLASS, 'IsisApplyWeightEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisApplyWeightEnum',
[], [],
''' Apply weights to UCMP or ECMP only
''',
'apply_weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('attached-bit', REFERENCE_ENUM_CLASS, 'IsisAttachedBitEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisAttachedBitEnum',
[], [],
''' Set the attached bit in this router's level
1 System LSP
''',
'attached_bit',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('default-admin-distance', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Default IS-IS administrative distance
configuration.
''',
'default_admin_distance',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('default-information', REFERENCE_CLASS, 'DefaultInformation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.DefaultInformation',
[], [],
''' Control origination of a default route with
the option of using a policy. If no policy
is specified the default route is
advertised with zero cost in level 2 only.
''',
'default_information',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-table', REFERENCE_CLASS, 'FrrTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable',
[], [],
''' Fast-ReRoute configuration
''',
'frr_table',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ignore-attached-bit', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, Ignore other routers attached bit
''',
'ignore_attached_bit',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ispf', REFERENCE_CLASS, 'Ispf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Ispf',
[], [],
''' ISPF configuration
''',
'ispf',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('max-redist-prefixes', REFERENCE_CLASS, 'MaxRedistPrefixes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes',
[], [],
''' Maximum number of redistributed
                    prefixes configuration
''',
'max_redist_prefixes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('maximum-paths', ATTRIBUTE, 'int' , None, None,
[('1', '64')], [],
''' Maximum number of active parallel paths per
route
''',
'maximum_paths',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric-styles', REFERENCE_CLASS, 'MetricStyles' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles',
[], [],
''' Metric-style configuration
''',
'metric_styles',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metrics', REFERENCE_CLASS, 'Metrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Metrics',
[], [],
''' Metric configuration
''',
'metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('micro-loop-avoidance', REFERENCE_CLASS, 'MicroLoopAvoidance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MicroLoopAvoidance',
[], [],
''' Micro Loop Avoidance configuration
''',
'micro_loop_avoidance',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('monitor-convergence', REFERENCE_CLASS, 'MonitorConvergence' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MonitorConvergence',
[], [],
''' Enable convergence monitoring
''',
'monitor_convergence',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mpls', REFERENCE_CLASS, 'Mpls' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Mpls',
[], [],
''' MPLS configuration. MPLS configuration will
only be applied for the IPv4-unicast
address-family.
''',
'mpls',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mpls-ldp-global', REFERENCE_CLASS, 'MplsLdpGlobal' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.MplsLdpGlobal',
[], [],
''' MPLS LDP configuration. MPLS LDP
configuration will only be applied for the
IPv4-unicast address-family.
''',
'mpls_ldp_global',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('propagations', REFERENCE_CLASS, 'Propagations' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Propagations',
[], [],
''' Route propagation configuration
''',
'propagations',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('redistributions', REFERENCE_CLASS, 'Redistributions' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions',
[], [],
''' Protocol redistribution configuration
''',
'redistributions',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('route-source-first-hop', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, routes will be installed with the
IP address of the first-hop node as the
source instead of the originating node
''',
'route_source_first_hop',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('router-id', REFERENCE_CLASS, 'RouterId' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.RouterId',
[], [],
''' Stable IP address for system. Will only be
applied for the unicast sub-address-family.
''',
'router_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('segment-routing', REFERENCE_CLASS, 'SegmentRouting' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting',
[], [],
''' Enable Segment Routing configuration
''',
'segment_routing',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('single-topology', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Run IPv6 Unicast using the standard (IPv4
Unicast) topology
''',
'single_topology',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('spf-intervals', REFERENCE_CLASS, 'SpfIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals',
[], [],
''' SPF-interval configuration
''',
'spf_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('spf-periodic-intervals', REFERENCE_CLASS, 'SpfPeriodicIntervals' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals',
[], [],
                    ''' Periodic SPF configuration
''',
'spf_periodic_intervals',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('spf-prefix-priorities', REFERENCE_CLASS, 'SpfPrefixPriorities' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities',
[], [],
''' SPF Prefix Priority configuration
''',
'spf_prefix_priorities',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('summary-prefixes', REFERENCE_CLASS, 'SummaryPrefixes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes',
[], [],
''' Summary-prefix configuration
''',
'summary_prefixes',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('topology-id', ATTRIBUTE, 'int' , None, None,
[('6', '4095')], [],
''' Set the topology ID for a named
(non-default) topology. This object must be
set before any other configuration is
supplied for a named (non-default) topology
, and must be the last configuration object
to be removed. This item should not be
supplied for the non-named default
topologies.
''',
'topology_id',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('ucmp', REFERENCE_CLASS, 'Ucmp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp',
[], [],
''' UCMP (UnEqual Cost MultiPath) configuration
''',
'ucmp',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('weights', REFERENCE_CLASS, 'Weights' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Afs.Af.TopologyName.Weights',
[], | |
fdmnulllist=[]
for i in range(numperm):
allflowvec=flowveclist1copy+flowveclist2copy
flowgrp=common.randgroup(allflowvec,2)
for flow in flowgrp[0]:
if sum(flow)<0.96:
flow[0]=0.95
flow[-1]=0.0
for flow in flowgrp[1]:
if sum(flow)<0.96:
flow[0]=0.0
flow[-1]=0.95
flowgrp1=self._flowveclist2flowvec(flowgrp[0])
flowgrp2=self._flowveclist2flowvec(flowgrp[1])
fdmnulllist.append(self._flowpair2fdm(flowgrp1,flowgrp2))
#print 'before and after removing zero sum vectors',len(flowveclist1), len(flowveclist2), len(flowgrp[0]), len(flowgrp[1])
pvalue=distr2pvalue(fdmnulllist,infdm)
return (infdm,pvalue,fdmnulllist)
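    # The tuple returned above is (observed FDM, permutation p-value, list of FDM values
    # obtained from numperm random regroupings of the pooled flow vectors).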
def compute_fast_fdm(self,flowprefix,ffastprefix,tspair):
#ffast: no p-values
flowfile='%s/project/%s/flows/%s_ALL_flows.txt'%(self.cfg.root_dir,self.cfg.project_name,flowprefix)
self.ffast_dict={}
lncnt=0
for lntxt in open(flowfile):
lncnt+=1
ln=lntxt.rstrip('\n').split('\t')
if lncnt==1:
hdict=common.findheaderdict(ln)
tslist=hdict.keys()
tslist.sort()
ffastfile='%s/project/%s/ffast/%s_ALL_fdm.txt'%(self.cfg.root_dir,self.cfg.project_name,ffastprefix)
ffastpck='%s/project/%s/ffast/%s_ALL_fdm.pck'%(self.cfg.root_dir,self.cfg.project_name,ffastprefix)
fout=open(ffastfile,'w')
fout.write('\t'.join(ln[0:7]))
tsi='_'.join(tspair[0])
tsj='_'.join(tspair[1])
if tsi<tsj:
fout.write('\t%s__%s:minwt\t%s__%s:fdm'%(tsi,tsj,tsi,tsj))
else:
fout.write('\t%s__%s:minwt\t%s__%s:fdm'%(tsj,tsi,tsj,tsi))
fout.write('\n')
else:
fout.write('\t'.join(ln[0:7]))
gene=ln[2]; pos=int(ln[1])
if gene not in self.ffast_dict:
self.ffast_dict[gene]={}
if pos not in self.ffast_dict[gene]:
self.ffast_dict[gene][pos]={}
wtlist=[]
flowlist1=[]; flowlist2=[]
for ts in tspair[0]:
wtlist+=[float(ln[hdict[ts][0]]),float(ln[hdict[ts][1]])]
flow=common.str2fl(ln[hdict[ts][2]])
weightedflow=[x*wtlist[-1] for x in flow]
flowlist1.append(weightedflow)
for ts in tspair[1]:
wtlist+=[float(ln[hdict[ts][0]]),float(ln[hdict[ts][1]])]
flow=common.str2fl(ln[hdict[ts][2]])
weightedflow=[x*wtlist[-1] for x in flow]
flowlist2.append(weightedflow)
tsi='_'.join(tspair[0])
tsj='_'.join(tspair[1])
if tsi<tsj:
tskey='%s__%s'%(tsi,tsj)
else:
tskey='%s__%s'%(tsj,tsi)
minwt=min(wtlist)
flow1=common.normalize_vector([sum(x) for x in zip(*flowlist1)])
flow2=common.normalize_vector([sum(x) for x in zip(*flowlist2)])
fdm=self._flowpair2fdm(flow1,flow2)
self.ffast_dict[gene][pos][tskey]=[minwt,fdm]
fout.write('\t%8.2f\t%6.4f'%(minwt,fdm))
fout.write('\n')
cPickle.dump(self.ffast_dict,open(ffastpck,'w'))
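    # compute_fast_fdm fills self.ffast_dict as {gene: {pos: {'<group1>__<group2>': [minwt, fdm]}}},
    # where minwt is the smallest weight seen across both groups at that position and fdm is
    # computed on the weight-averaged, renormalized flow vectors.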
def merge_fast_fdm(self):
folder='%s/project/%s/ffast'%(self.cfg.root_dir,self.cfg.project_name)
prefix=self.cfg.project_name
dirfiles = ['%s/%s'%(folder,f) for f in os.listdir(folder) if os.path.isfile('%s/%s'%(folder,f)) and f.startswith(prefix) and f.split('.')[1]=='txt' ]
dirfiles.sort()
fout=open('%s/project/%s/ffast/%s_ALL_fdm.txt'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name),'w')
common.mergefilelist(dirfiles,range(7,9),fout,range(7))
fout.close()
dictfiles = ['%s/%s'%(folder,f) for f in os.listdir(folder) if os.path.isfile('%s/%s'%(folder,f)) and f.startswith(prefix) and f.split('.')[1]=='pck' ]
dictlist=[cPickle.load(open(f)) for f in dictfiles]
fout=open('%s/project/%s/ffast/%s_ALL_fdm.pck'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name),'w')
mergeddict=common.mergedictlist(dictlist)
cPickle.dump(mergeddict,fout)
fout.close()
def filter_fast_fdm(self):
ffastall='%s/project/%s/ffast/%s_ALL_fdm.txt'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name)
fout=open('%s/project/%s/ffast/%s_FIL_fdm.txt'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name),'w')
lncnt=0
for lntxt in open(ffastall):
lncnt+=1
ln=lntxt.rstrip('\n').split('\t')
if lncnt==1:
fout.write(lntxt)
hdict=common.findheaderdict(ln,['minwt','fdm'])
tskeylist=hdict.keys()
if self.cfg.run_type==3:
project_group_keys=['_'.join(y) for y in self.cfg.project_groups]
project_group_keys.sort()
fdmtskeylist=['__'.join(x) for x in itertools.combinations(project_group_keys,2)]
else:
fdmtskeylist=hdict.keys()
#print fdmtskeylist
else:
minwt =min([float(ln[hdict[tskey][0]]) for tskey in tskeylist])
maxfdm=max([float(ln[hdict[tskey][1]]) for tskey in fdmtskeylist])
if minwt>self.cfg.ffast_min_cov and maxfdm>self.cfg.ffast_min_fdm:
fout.write(lntxt)
def splitgenelist(self):
fdmfil='%s/project/%s/ffast/%s_FIL_fdm.txt'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name)
if os.path.exists(fdmfil):
genelist=list(set([lntxt.split('\t')[2] for lntxt in open(fdmfil) if lntxt.split('\t')[2]!='gene']))
else:
genelist=[]
splitgenedir='%s/project/%s/list/splitgene'%(self.cfg.root_dir,self.cfg.project_name)
os.system('mkdir -p %s'%splitgenedir)
numfiles=int(len(genelist)/self.cfg.ffull_genesplit_size)+1
for i in range(numfiles):
gfile=open('%s/G%04d'%(splitgenedir,i+1),'w')
sgenelist=genelist[i*self.cfg.ffull_genesplit_size:(i+1)*self.cfg.ffull_genesplit_size]
for gene in sgenelist:
gfile.write('%s\n'%gene)
gfile.close()
def extractFEF(self):
flowfile='%s/project/%s/flows/%s_ALL_flows.txt'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name)
FEFfile1='%s/project/%s/flows/%s_ALL_FEFs.txt'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name)
FEFfile2='%s/project/%s/flows/%s_ALL_FEF_processed.txt'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name)
fout1=open(FEFfile1,'w')
fout2=open(FEFfile2,'w')
lncnt=0
for lntxt in open(flowfile):
lncnt+=1
ln=lntxt.rstrip('\n').split('\t')
if lncnt==1:
hdict=common.findheaderdict(ln)
tslist=hdict.keys()
tslist.sort()
fout1.write('\t'.join(ln[0:7]))
fout1.write('\tedge used')
for ts in tslist:
fout1.write('\t%s:FEF\t%s:wt'%(ts,ts))
fout1.write('\n')
fout2.write('\t'.join(ln[0:7]))
fout2.write('\tedge used')
for ts in tslist:
fout2.write('\t%s:FEF'%(ts))
fout2.write('\n')
else:
edges=ast.literal_eval(ln[6])
numrows=max(1,len(edges)-1)
for i in range(numrows):
fout1.write('\t'.join(ln[0:7]))
fout1.write('\t%s'%str(edges[i]))
fout2.write('\t'.join(ln[0:7]))
fout2.write('\t%s'%str(edges[i]))
for ts in tslist:
wt1=float(ln[hdict[ts][0]])
wt2=float(ln[hdict[ts][1]])
flow=ast.literal_eval(ln[hdict[ts][2]])
FEF=flow[i]
fout1.write('\t%6.4f\t%6.4f'%(FEF,wt2))
if wt2<self.cfg.ffast_min_cov:
FEF=-1
fout2.write('\t%6.4f'%(FEF))
fout1.write('\n')
fout2.write('\n')
fout1.close()
fout2.close()
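    # extractFEF writes one row per flow edge: *_ALL_FEFs.txt keeps each sample's fractional
    # edge flow (FEF) together with its weight, while *_ALL_FEF_processed.txt replaces FEFs
    # whose weight falls below ffast_min_cov with the sentinel value -1.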
def getFEFlinecluster(self,headerdict,FEFline,numclusters):
tslist=headerdict.keys()
tslist.sort()
FEFlist=[]
goodtslist=[]
missinglist=[]
#print FEFline
#print headerdict
for ts in tslist:
if float(FEFline[headerdict[ts][1]])>self.cfg.ffast_min_cov:
FEFlist.append(float(FEFline[headerdict[ts][0]]))
goodtslist.append(ts)
else:
missinglist.append(ts)
FEFlist=numpy.array(FEFlist)
#print 'F',FEFlist
centroids,distortion=scipy.cluster.vq.kmeans(FEFlist,numclusters)
indexes,distortionlist=scipy.cluster.vq.vq(FEFlist,centroids)
clusterlist=[]; mediantslist=[]; FEFclusterlist=[]
#print indexes
for i in range(numclusters):
cluster=[]
for j in range(len(indexes)):
if indexes[j]==i:
cluster.append(goodtslist[j])
clusterlist.append(cluster)
FEFcluster=FEFlist[indexes==i]
FEFclusterlist.append(FEFcluster.tolist())
try:
min_d,idx=min((val, id) for (id, val) in enumerate(distortionlist) if indexes[id]==i)
except:
idx=goodtslist.index(goodtslist[-1])
mediantslist.append(goodtslist[idx])
DBidx,_,mclist=common.DaviesBouldinIndex(FEFclusterlist)
#print centroids,mclist
return [DBidx,FEFline[0:8],clusterlist,missinglist,FEFclusterlist,centroids,mediantslist]
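    # Returned tuple layout (consumed positionally by getallFEFclusterreport below):
    # [Davies-Bouldin index, first 8 columns of the FEF line, sample clusters, samples with
    #  insufficient coverage, FEF values per cluster, cluster centroids, one representative
    #  ("median") sample per cluster].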
def getallFEFclusterreport(self):
'''
        Cluster samples by FEF into 2 and 3 groups and write the top-ranked cluster reports.
'''
reportdir='%s/project/%s/report'%(self.cfg.root_dir,self.cfg.project_name)
clusterdir='%s/project/%s/report/cluster'%(self.cfg.root_dir,self.cfg.project_name)
cmd='mkdir -p %s'%clusterdir
os.system(cmd)
minsampvar=0.1
mingooddatapct=0.6
minclusterszpct=0.2
FEFfile1='%s/project/%s/flows/%s_ALL_FEFs.txt'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name)
worklist2=[]
worklist3=[]
lncnt=0
for lntxt in open(FEFfile1):
lncnt+=1
ln=lntxt.rstrip('\n').split('\t')
if lncnt==1:
hdict=common.findheaderdict(ln,coltype=['FEF','wt'])
tslist=hdict.keys()
tslist.sort()
csvhlist2=ln[0:8]+['% Good Data','Not Enough Data','Sample STD','Cluster2 DBI','Cluster2 image','Cluster2.1','Cluster2.2',
'Cluster2.1 ACT','Cluster2.2 ACT']
csvhline2='\t'.join(csvhlist2)
csvhlist3=ln[0:8]+['% Good Data','Not Enough Data','Sample STD','Cluster3 DBI','Cluster3 image','Cluster3.1','Cluster3.2','Cluster3.3',
'Cluster3.1 ACT','Cluster3.2 ACT','Cluster3.3 ACT']
csvhline3='\t'.join(csvhlist3)
else:
allFEFdata=[float(ln[hdict[ts][0]]) for ts in tslist if float(ln[hdict[ts][1]])>self.cfg.ffast_min_cov]
percentagegooddata=1.0*len(allFEFdata)/len(tslist)
samplevariance=scipy.stats.nanstd(allFEFdata)
if samplevariance>minsampvar and percentagegooddata>mingooddatapct:
#print ln
#print 'AF',allFEFdata
FEFdatadict=dict([(ts,[float(ln[hdict[ts][0]]),float(ln[hdict[ts][1]])]) for ts in tslist])
FEFclustertuple=self.getFEFlinecluster(hdict,ln,2)
clusterlist=FEFclustertuple[2]
minclussize=min([len(x) for x in clusterlist])
if 1.0*minclussize/len(FEFdatadict)>minclusterszpct:
worklist2.append(FEFclustertuple+[FEFdatadict,samplevariance])
FEFclustertuple=self.getFEFlinecluster(hdict,ln,3)
clusterlist=FEFclustertuple[2]
minclussize=min([len(x) for x in clusterlist])
if 1.0*minclussize/len(FEFdatadict)>minclusterszpct:
worklist3.append(FEFclustertuple+[FEFdatadict,samplevariance])
worklist2.sort()
toplist=worklist2[0:self.cfg.report_top_x]
csvfile='%s/top_cluster2_report.txt'%reportdir
csvptr=open(csvfile,'w')
csvptr.write('#\t%s\n'%csvhline2)
lncnt=0
for line in toplist:
lncnt+=1
            filenameprefix='%s_%s__%010d_%010d'%(line[1][2],line[1][0],ast.literal_eval(line[1][7])[0],ast.literal_eval(line[1][7])[1])
fclust21=open('%s/%s_2_1.txt'%(clusterdir,filenameprefix),'w')
for ts in line[2][0]:
fclust21.write('%s\n'%ts)
fclust21.close()
fclust22=open('%s/%s_2_2.txt'%(clusterdir,filenameprefix),'w')
for ts in line[2][1]:
fclust22.write('%s\n'%ts)
fclust22.close()
fmiss=open('%s/%s_miss.txt'%(clusterdir,filenameprefix),'w')
for ts in line[3]:
fmiss.write('%s\n'%ts)
fmiss.close()
fimage2='%s/%s_cluster2.pdf'%(clusterdir,filenameprefix)
FEFdatadict=line[7]
genechredge=[line[1][2],line[1][0],ast.literal_eval(line[1][7])]
if lncnt<=self.cfg.graph_top_x:
plot.plotFEFclusters(FEFdatadict,line[2],line[6],genechredge,fimage2)
l2=ast.literal_eval(line[1][6])
l2.append(ast.literal_eval(line[1][5]))
minx=min([min(x) for x in l2])
maxx=max([max(x) for x in l2])
urlimg='<a href="cluster/%s_cluster2.pdf">cluster</a>'%(filenameprefix)
urlactlist=['<a href="image/%s_%s__%s__%d-%d.pdf">actg</a>'%(self.cfg.project_name,ts,line[1][2],minx,maxx) for ts in line[6]]
urlmissing='<a href="cluster/%s_miss.txt">missing</a>'%(filenameprefix)
urlclust1='<a href="cluster/%s_2_1.txt">cluster 1</a>'%(filenameprefix)
urlclust2='<a href="cluster/%s_2_2.txt">cluster 2</a>'%(filenameprefix)
csvline=line[1][0:8]+['%6.2f'%(100*(1-1.0*len(line[3])/len(FEFdatadict))),urlmissing,'%6.4f'%line[8],'%6.4f'%line[0]]
csvline.append(urlimg)
csvline.append(urlclust1)
csvline.append(urlclust2)
csvline+=urlactlist
csvptr.write('%d\t%s\n'%(lncnt,'\t'.join(csvline)))
genepos=[line[1][2],[minx,maxx],int(line[1][1])]
if lncnt<=self.cfg.graph_top_x:
self.createactgraphs(line[6],[genepos])
csvptr.close()
htmlfile=open('%s/top_cluster2_report.html'%reportdir,'w')
common.csv2html(open(csvfile),htmlfile)
htmlfile.close()
worklist3.sort()
toplist=worklist3[0:self.cfg.report_top_x]
csvfile='%s/top_cluster3_report.txt'%reportdir
csvptr=open(csvfile,'w')
csvptr.write('#\t%s\n'%csvhline3)
lncnt=0
for line in toplist:
lncnt+=1
            filenameprefix='%s_%s__%010d_%010d'%(line[1][2],line[1][0],ast.literal_eval(line[1][7])[0],ast.literal_eval(line[1][7])[1])
fclust31=open('%s/%s_3_1.txt'%(clusterdir,filenameprefix),'w')
for ts in line[2][0]:
fclust31.write('%s\n'%ts)
fclust31.close()
fclust32=open('%s/%s_3_2.txt'%(clusterdir,filenameprefix),'w')
for ts in line[2][1]:
fclust32.write('%s\n'%ts)
fclust32.close()
fclust33=open('%s/%s_3_3.txt'%(clusterdir,filenameprefix),'w')
for ts in line[2][2]:
fclust33.write('%s\n'%ts)
fclust33.close()
fmiss=open('%s/%s_miss.txt'%(clusterdir,filenameprefix),'w')
for ts in line[3]:
fmiss.write('%s\n'%ts)
fmiss.close()
fimage3='%s/%s_cluster3.pdf'%(clusterdir,filenameprefix)
FEFdatadict=line[7]
genechredge=[line[1][2],line[1][0],ast.literal_eval(line[1][7])]
if lncnt<=self.cfg.graph_top_x:
plot.plotFEFclusters(FEFdatadict,line[2],line[6],genechredge,fimage3)
l2=ast.literal_eval(line[1][6])
l2.append(ast.literal_eval(line[1][5]))
minx=min([min(x) for x in l2])
maxx=max([max(x) for x in l2])
urlimg='<a href="cluster/%s_cluster3.pdf">cluster</a>'%(filenameprefix)
urlactlist=['<a href="image/%s_%s__%s__%d-%d.pdf">actg %s</a>'%(self.cfg.project_name,ts,line[1][2],minx,maxx,ts) for ts in line[6]]
urlmissing='<a href="cluster/%s_miss.txt">missing</a>'%(filenameprefix)
urlclust1='<a href="cluster/%s_3_1.txt">cluster 1</a>'%(filenameprefix)
urlclust2='<a href="cluster/%s_3_2.txt">cluster 2</a>'%(filenameprefix)
urlclust3='<a href="cluster/%s_3_3.txt">cluster 3</a>'%(filenameprefix)
csvline=line[1][0:8]+['%6.2f'%(100*(1-1.0*len(line[3])/len(FEFdatadict))),urlmissing,'%6.4f'%line[8],'%6.4f'%line[0]]
csvline.append(urlimg)
csvline.append(urlclust1)
csvline.append(urlclust2)
csvline.append(urlclust3)
csvline+=urlactlist
csvptr.write('%d\t%s\n'%(lncnt,'\t'.join(csvline)))
genepos=[line[1][2],[minx,maxx],int(line[1][1])]
if lncnt<=self.cfg.graph_top_x:
self.createactgraphs(line[6],[genepos])
csvptr.close()
htmlfile=open('%s/top_cluster3_report.html'%reportdir,'w')
common.csv2html(open(csvfile),htmlfile)
htmlfile.close()
def gettwolargegroupreport(self,twogroupdifflist):
"""
Make boxplot, html
"""
reportdir='%s/project/%s/report'%(self.cfg.root_dir,self.cfg.project_name)
csvhlist=['chr','pos','gene','outflag','exonstartflag','divexon','flowedges','edge used']
csvhlist+=['Size Group1','Size Group2','Group1 Median','Group2 Median','Random Median1','Random Median2','Sample STD']
csvhlist+=['Sample Range','Kruskal pvalue','Median Random pvalue','Group1 Median ACT','Group2 Median ACT','Boxplot']
csvhline='\t'.join(csvhlist)
worklist=[]
for line in twogroupdifflist:
worklist.append([[float(line[16]),1-float(line[15])],line])
worklist.sort()
toplist=worklist[0:self.cfg.report_top_x]
csvfile='%s/top_twolargegroup_report.txt'%reportdir
csvptr=open(csvfile,'w')
csvptr.write('#\t%s\n'%csvhline)
lncnt=0
for line in toplist:
lncnt+=1
l2=ast.literal_eval(line[1][6])
l2.append(ast.literal_eval(line[1][5]))
minx=min([min(x) for x in l2])
maxx=max([max(x) for x in l2])
#print line
#print line[1][18],line[1][0],line[1][2],minx,maxx
urlact1='<a href="image/%s_%s__%s__%d-%d.pdf">actg %s</a>'%(self.cfg.project_name,line[1][18],line[1][2],minx,maxx,line[1][18])
urlact2='<a href="image/%s_%s__%s__%d-%d.pdf">actg %s</a>'%(self.cfg.project_name,line[1][19],line[1][2],minx,maxx,line[1][19])
imagefilename='%s/image/%s_jitter__%s__%d-%d.pdf'%(reportdir,self.cfg.project_name,line[1][2],minx,maxx)
ylabel='Fractional Edge Weight'
xlabel='Group1 vs Group2'
title='%s__%d-%d\np-value=%s'%(line[1][2],minx,maxx,line[1][16])
if lncnt<=self.cfg.graph_top_x:
plot.makejitter(line[1][20],line[1][21],[],ylabel,xlabel,title,imagefilename)
urlimg='<a href="image/%s_jitter__%s__%d-%d.pdf">boxplot</a>'%(self.cfg.project_name,line[1][2],minx,maxx)
csvline=line[1][0:18]
csvline.append(urlact1)
csvline.append(urlact2)
csvline.append(urlimg)
csvptr.write('%d\t%s\n'%(lncnt,'\t'.join(csvline)))
genepos=[line[1][2],[minx,maxx],int(line[1][1])]
mediantslist=[line[1][18],line[1][19]]
#print mediantslist
if lncnt<=self.cfg.graph_top_x:
self.createactgraphs(mediantslist,[genepos])
csvptr.close()
htmlfile=open('%s/top_twolargegroup_report.html'%reportdir,'w')
common.csv2html(open(csvfile),htmlfile)
htmlfile.close()
def twolargegroupdifference(self):
outlinelist=[]
mingroupsz=10
numiter=101
minstd=0.3
FEFfile2='%s/project/%s/flows/%s_ALL_FEF_processed.txt'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name)
twogroupdiff='%s/project/%s/report/%s_Two_Group_diff.txt'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name)
fout=open(twogroupdiff,'w')
lncnt=0
for lntxt in open(FEFfile2):
lncnt+=1
ln=lntxt.rstrip('\n').split('\t')
if lncnt==1:
hdict=common.findheaderdict(ln,coltype=['FEF'])
tslist=hdict.keys()
tslist.sort()
fout.write('\t'.join(ln[0:8]))
                fout.write('\tSize Group1\tSize Group2\tGroup1 Median\tGroup2 Median\tRandom Median1\tRandom Median2\tSample STD')
fout.write('\tSample Range\tKruskal pvalue\tMedian Random pvalue\tGroup1 Median ACT\tGroup2 Median ACT\n')
else:
allgroup1=[float(ln[hdict[ts][0]]) for ts in tslist if ts in self.cfg.project_groups[0]]
allgroup2=[float(ln[hdict[ts][0]]) for ts in tslist if ts in self.cfg.project_groups[1]]
group1=[x for x in allgroup1 if x>=0.0]
group2=[x for x in allgroup2 if x>=0.0]
group1.sort()
group2.sort()
if len(group1)<mingroupsz or len(group2)<mingroupsz:
continue
medgroup1=common.median(group1)
medgroup2=common.median(group2)
                #pick the sample whose FEF value lies closest to the group median
                medts1=min((ts for ts in tslist if ts in self.cfg.project_groups[0] and float(ln[hdict[ts][0]])>=0.0),key=lambda ts:abs(float(ln[hdict[ts][0]])-medgroup1))
                medts2=min((ts for ts in tslist if ts in self.cfg.project_groups[1] and float(ln[hdict[ts][0]])>=0.0),key=lambda ts:abs(float(ln[hdict[ts][0]])-medgroup2))
bothgroup=group1+group2
samplestd=scipy.stats.nanstd(bothgroup)
if samplestd>minstd:
#print group1, group2
_,grouppval=scipy.stats.kruskal(numpy.array(group1),numpy.array(group2))
rndpvaluelist=[]
for j in range(numiter):
#print j
random.shuffle(bothgroup)
rndgrp1=bothgroup[0:len(group1)]
rndgrp2=bothgroup[len(group1):]
_,rndpval=scipy.stats.kruskal(numpy.array(rndgrp1),numpy.array(rndgrp2))
rndpvaluelist.append(rndpval)
outline=ln[0:8]+['%d'%len(group1),'%d'%len(group2),'%5.4f'%medgroup1,'%5.4f'%medgroup2,'%5.4f'%common.median(rndgrp1),'%5.4f'%common.median(rndgrp2),
'%5.4f'%samplestd,'%5.4f'%(max(bothgroup)-min(bothgroup)),'%5.4f'%grouppval,'%5.4f'%common.median(rndpvaluelist),medts1,medts2,group1,group2]
outlinelist.append(outline)
fout.write('%s\n'%'\t'.join(outline[0:-2]))
fout.close()
return outlinelist
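    # Illustrative sketch (not part of the pipeline): the permutation loop above
    # builds a null distribution for the Kruskal-Wallis comparison by shuffling
    # the pooled samples and re-splitting them at the original group sizes, e.g.
    #
    #   pooled=group1+group2
    #   nullpvals=[]
    #   for j in range(numiter):
    #       random.shuffle(pooled)
    #       _,p=scipy.stats.kruskal(numpy.array(pooled[:len(group1)]),numpy.array(pooled[len(group1):]))
    #       nullpvals.append(p)
    #
    # common.median(nullpvals) is what gets reported as 'Median Random pvalue'.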
def compute_full_fdm(self,tspair,genelist):
'''
_flowveclist2flowvec
genefdmdict = gene:pos: [(fdm,pvalue)] for pool,[(fdm,pvalue)1st,(fdm,pvalue)2nd,(fdm,pvalue)12],[posflgs],[wtpairlist1, 2],[[flowvec allparts] list1, 2]
'''
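        #Illustrative layout of genefdmdict (keys and values below are made up):
        #  genefdmdict['GENE1'][12345]=[(0.31,0.02),        #(fdm,pvalue) for the pooled comparison
        #                               [],                 #per-part (fdm,pvalue) list, unused here
        #                               [outflg,exonflg],   #position flags
        #                               [wtpairlist1,wtpairlist2],
        #                               [flowveclist1,flowveclist2]]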
self._setgeneprojdict()
#print self.cfg.flow_prefix
#print self.cfg.ffast_prefix
#print self.cfg.ffull_prefix
tspair[0].sort()
tspair[1].sort()
fdmset1='_'.join(tspair[0])
fdmset2='_'.join(tspair[1])
if fdmset2<fdmset1:
fdmset1,fdmset2=fdmset2,fdmset1
ffulldir='%s/project/%s/ffull'%(self.cfg.root_dir,self.cfg.project_name)
fdmrawfilename='%s/%s__%s__%s__raw.txt'%(ffulldir,self.cfg.ffull_prefix,fdmset1,fdmset2)
fdmsmalloutfilename='%s/%s__%s__%s__summary.txt'%(ffulldir,self.cfg.ffull_prefix,fdmset1,fdmset2)
if os.path.exists(fdmrawfilename):
os.system('rm %s'%fdmrawfilename)
ffastpck='%s/project/%s/ffast/%s_ALL_fdm.pck'%(self.cfg.root_dir,self.cfg.project_name,self.cfg.project_name)
num_partition=self.cfg.ffull_partition
num_permutation=self.cfg.ffull_permutation
self.ffast_dict=cPickle.load(open(ffastpck))
islanddict=self.gtfobj.getislanddict()
useislanddict={}
usegenedivdict={}
for gene in genelist:
useislanddict[gene]=islanddict[gene]
usegenedivdict[gene]=self.geneprojdivdict[gene]
#genelist=self.genedivdict.keys()
geneposflgdictdict={}
genewtpairdict1={}
genewtpairdict2={}
geneflowvecdict1={}
geneflowvecdict2={}
genefdmdict={}
for gene in genelist:
geneposflgdictdict[gene]={}
genewtpairdict1[gene]={}
geneflowvecdict1[gene]={}
genewtpairdict2[gene]={}
geneflowvecdict2[gene]={}
genefdmdict[gene]={}
#ffull: Working with p-values
for i in range(len(tspair[0])):
bamfilename=self.cfg.data_dict[tspair[0][i]]
#inconsistent tooldirs
bamfile1=bamfile.bamFile(bamfilename,[self.cfg.pathsamtools,self.cfg.pathbedtools])
#print len(usegenedivdict), len(useislanddict), num_permutation
geneflowlistdict=bamfile1.Togeneflowlistdict(usegenedivdict, useislanddict,num_partition)
#print geneflowlistdict.keys()
for gene in genelist:
flowdictlist=geneflowlistdict[gene]
                #All flowdicts have identical keys
poslist=flowdictlist[0].keys()
for pos in poslist:
wtpairlist=[]
flowveclist=[]
for flowdict in flowdictlist:
if i==0:
geneposflgdictdict[gene][pos]=flowdict[pos][0]
genewtpairdict1[gene][pos]=[]
geneflowvecdict1[gene][pos]=[]
wtpairlist.append(flowdict[pos][1][0])
flowveclist.append(flowdict[pos][1][1])
genewtpairdict1[gene][pos].append(wtpairlist)
geneflowvecdict1[gene][pos].append(flowveclist)
for i in range(len(tspair[1])):
bamfilename=self.cfg.data_dict[tspair[1][i]]
bamfile2=bamfile.bamFile(bamfilename,[self.cfg.pathsamtools,self.cfg.pathbedtools])
geneflowlistdict=bamfile2.Togeneflowlistdict(usegenedivdict, useislanddict,num_partition)
for gene in genelist:
flowdictlist=geneflowlistdict[gene]
                #All flowdicts have identical keys
poslist=flowdictlist[0].keys()
for pos in poslist:
wtpairlist=[]
flowveclist=[]
for flowdict in flowdictlist:
if i==0:
geneposflgdictdict[gene][pos]=flowdict[pos][0]
genewtpairdict2[gene][pos]=[]
geneflowvecdict2[gene][pos]=[]
wtpairlist.append(flowdict[pos][1][0])
flowveclist.append(flowdict[pos][1][1])
genewtpairdict2[gene][pos].append(wtpairlist)
geneflowvecdict2[gene][pos].append(flowveclist)
for gene in genelist:
for pos in geneflowvecdict1[gene].keys():
flowveclist1=self._flowveclistlist2flowveclist(geneflowvecdict1[gene][pos])
flowveclist2=self._flowveclistlist2flowveclist(geneflowvecdict2[gene][pos])
tskey='%s__%s'%(fdmset1,fdmset2)
infdm=self.ffast_dict[gene][pos][tskey][1]
fdmvalue,pvalue,fdmnulllist=self._flowveclists2fdmpval(infdm,flowveclist1,flowveclist2,num_partition,num_permutation)
genefdmdict[gene][pos]=[(fdmvalue,pvalue),[],geneposflgdictdict[gene][pos], \
[genewtpairdict1[gene][pos],genewtpairdict2[gene][pos]], \
[flowveclist1,flowveclist2]]
self.genefdmdict=genefdmdict
self.printgenefdm(fdmrawfilename)
fsmallout=open(fdmsmalloutfilename,'w')
fdmpair='%s__%s'%(fdmset1,fdmset2)
fsmallout.write('gene\tpos\t%s:fdm\t%s:pvalue\t%s:significance\n'%(fdmpair,fdmpair,fdmpair))
lncnt=0
for lntxt in open(fdmrawfilename):
lncnt+=1
if lncnt==1:
continue
ln=lntxt.rstrip('\n').split('\t')
if float(ln[3])<self.cfg.ffull_pvalue:
sig_flg='SIG'
else:
sig_flg='NOT'
fsmallout.write('%s\t%s\t%s\t%s\t%s\n'%(ln[0],ln[1],ln[2],ln[3],sig_flg))
def printgenefdm(self,filename):
fout=open(filename,'w')
        fout.write('Gene\tPosition\tFDM\tp-value\toutflag\texonflg\twtlist1\twtlist2\tflowlist1\tflowlist2\tfdmpairlist\n')
genelist=self.genefdmdict.keys()
genelist.sort()
for gene in genelist:
fdmdict=self.genefdmdict[gene]
poslist=fdmdict.keys()
poslist.sort()
for pos in poslist:
fdm,pvalue=fdmdict[pos][0]
fdmpairlist=fdmdict[pos][1]
outflg,exonflg=fdmdict[pos][2]
wtlist1,wtlist2=fdmdict[pos][3]
flowlist1,flowlist2=fdmdict[pos][4]
fout.write('%s\t%10d\t%6.4f\t%10.6f\t%d\t%d\t%s\t%s\t%s\t%s\t%s\n'%
(gene,pos,fdm,pvalue,outflg,exonflg,common.fl2str(wtlist1),common.fl2str(wtlist2),common.fl2str(flowlist1),
common.fl2str(flowlist2),common.fl2str(fdmpairlist)))
fout.close()
def ToFDMbedgraph(self,filename,islanddict):
# to deprecate
foutfdm=open(filename[:-4]+'__fdm'+filename[-4:],'a')
foutpval=open(filename[:-4]+'__pvl'+filename[-4:],'a')
genelist=self.genefdmdict.keys()
genelist.sort()
for gene in genelist:
chrnm='%s'%islanddict[gene][0]
fdmdict=self.genefdmdict[gene]
poslist=fdmdict.keys()
poslist.sort()
for pos in poslist:
fdm,pvalue=fdmdict[pos][0]
outflg,exonflg=fdmdict[pos][2]
                #outflg==0 and outflg!=0 positions are written identically
                foutfdm.write('%s\t%d\t%d\t%6.4f\n'%(chrnm,pos,pos+1,fdm))
                foutpval.write('%s\t%d\t%d\t%6.4f\n'%(chrnm,pos,pos+1,pvalue))
def createactgraphs(self,tslist,geneposlist):
actfilelist=['%s/dataout/%s/%s_%s.act'%(self.cfg.root_dir,ts,self.cfg.project_name,ts) for ts in tslist]
imagedir='%s/project/%s/report/image'%(self.cfg.root_dir,self.cfg.project_name)
for actfilename in actfilelist:
act0=actgraph.actFile(actfilename)
for genepos in geneposlist:
act0.Toimage(genepos[0],imagedir,genepos[1],highlightnodelist=[genepos[2]])
def mergeffullfiles(self):
#merge all fdm full common to all run types
ffulldir='%s/project/%s/ffull'%(self.cfg.root_dir,self.cfg.project_name)
ffullfiles = [f for f in os.listdir(ffulldir) if f.endswith('summary.txt')]
ffullkeylist=list(set(['%s__%s'%(f.split('__')[1],f.split('__')[2]) for f in os.listdir(ffulldir) if f.endswith('summary.txt')]))
ffullkeylist.sort()
ffullfiledict={}
for fkey in ffullkeylist:
            for
import cv2
from PIL import Image
import numpy as np
from skimage import measure
from PIL.ExifTags import TAGS, GPSTAGS
def close_contour(contour):
if not np.array_equal(contour[0], contour[-1]):
contour = np.vstack((contour, contour[0]))
return contour
def binary_mask_to_polygon(binary_mask, tolerance=2):
"""Converts a binary mask to COCO polygon representation
Args:
binary_mask: a 2D binary numpy array where '1's represent the object
tolerance: Maximum distance from original points of polygon to approximated
polygonal chain. If tolerance is 0, the original coordinate array is returned.
"""
polygons = []
# pad mask to close contours of shapes which start and end at an edge
padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)
contours = measure.find_contours(padded_binary_mask, 0.5)
    # subtract the 1-pixel padding offset per contour (avoids ragged-array issues)
    contours = [np.subtract(contour, 1) for contour in contours]
for contour in contours:
contour = close_contour(contour)
contour = measure.approximate_polygon(contour, tolerance)
if len(contour) < 3:
continue
contour = np.flip(contour, axis=1)
segmentation = contour.ravel().tolist()
# after padding and subtracting 1 we may get -0.5 points in our segmentation
segmentation = [0 if i < 0 else i for i in segmentation]
polygons.append(segmentation)
return polygons
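# Minimal usage sketch for the helpers above (the 5x5 mask is synthetic and the
# call is illustrative only; the function is defined here but never invoked):
def _example_binary_mask_to_polygon():
    mask = np.zeros((5, 5), dtype=np.uint8)
    mask[1:4, 1:4] = 1  # a 3x3 square of foreground pixels
    # each returned entry is a flat [x1, y1, x2, y2, ...] list in COCO polygon format
    return binary_mask_to_polygon(mask, tolerance=0)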
def getExif(path):
    src_image = Image.open(path)
    info = src_image._getexif()
    # only the EXIF orientation tag (274) is used downstream; the focal-length
    # extraction (tag 37386) is intentionally left out
    try:
        orientation = info[274]
    except:
        orientation = 0
    return orientation
def restoreOrientation(image, orientation):
if orientation == 8:
restored_image = rotate(image, -90)
elif orientation == 6:
restored_image = rotate(image, 90)
elif orientation == 3:
restored_image = rotate(image, 180)
else:
restored_image = image
return restored_image
# def imgRotation(img_array,img_dir):
def imgRotation(img_dir):
orientation = getExif(img_dir)
img_original = cv2.imread(img_dir)
img_array = restoreOrientation(img_original,orientation)
return img_array
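# Usage sketch (the path is hypothetical): read an image and undo its EXIF rotation.
#   img = imgRotation('/path/to/photo.jpg')   # BGR ndarray with orientation restored
#   cv2.imwrite('/path/to/photo_upright.jpg', img)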
def rotate(image, angle):
# https://www.pyimagesearch.com/2017/01/02/rotate-images-correctly-with-opencv-and-python/
height = image.shape[0]
width = image.shape[1]
center = (width/2, height/2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
rotation_mat = cv2.getRotationMatrix2D(center, angle, 1.0)
abs_cos = abs(rotation_mat[0, 0])
abs_sin = abs(rotation_mat[0, 1])
# compute the new bounding dimensions of the image
bound_w = int(height * abs_sin + width * abs_cos)
bound_h = int(height * abs_cos + width * abs_sin)
# adjust the rotation matrix to take into account translation
rotation_mat[0, 2] += bound_w / 2 - center[0]
rotation_mat[1, 2] += bound_h / 2 - center[1]
# perform the actual rotation and return the image
rotated_mat = cv2.warpAffine(image, rotation_mat, (bound_w, bound_h))
return rotated_mat
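# Worked example of the bound computation above (numbers are illustrative): for a
# 200(h) x 400(w) image rotated by 90 degrees, |cos| = 0 and |sin| = 1, so
# bound_w = 200*1 + 400*0 = 200 and bound_h = 200*0 + 400*1 = 400 -- the output
# canvas swaps width and height instead of cropping the rotated corners.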
def restoreVertices(bbox,ori): #dict = [x1_batch,y1_batch,x2_batch,y2_batch,class_batch]
X1_batch = []
Y1_batch = []
X2_batch = []
Y2_batch = []
degree ={6: 90, 3: 180}
cos = np.cos(degree[ori] * np.pi/180)
sin = np.sin(degree[ori] * np.pi/180)
for i in range(len(bbox[0])):
x1 = bbox[0][i]
y1 = bbox[1][i]
x3 = bbox[2][i]
y3 = bbox[3][i]
X1 = x1 * cos + y1 * sin
Y1 = -x1 * sin + y1 * cos
X3 = x3 * cos + y3 * sin
Y3 = -x3 * sin + y3 * cos
        ## origin shift: compute the box extents before either corner is moved
        w = abs(X3 - X1)
        X1 += w
        X3 += w
        if degree[ori] == 180:
            h = abs(Y3 - Y1)
            Y1 += h
            Y3 += h
X1_batch.append(int(X1))
X2_batch.append(int(X3))
Y1_batch.append(int(Y1))
Y2_batch.append(int(Y3))
return [X1_batch,Y1_batch,X2_batch,Y2_batch,bbox[4]]
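# Hedged sketch of the expected input/output shape for restoreVertices:
#   bbox = [[10], [20], [50], [80], ['crack']]     # x1, y1, x2, y2, class batches (class name hypothetical)
#   restoreVertices(bbox, 6)                       # -> [[X1], [Y1], [X2], [Y2], ['crack']]
# Only EXIF orientations 6 and 3 are handled; the class batch is passed through unchanged.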
import mmcv
import pycocotools.mask as maskUtils
def server_det_masks(result,
class_names,
score_thr=0.3,
wait_time=0,
show=True,
out_file=None):
assert isinstance(class_names, (tuple, list))
# img = mmcv.imread(img)
# img = imgRotation(img)
# img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
polygons,plabels = [],[]
# draw segmentation masks
if segm_result is not None and len(labels) > 0:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
np.random.seed(42)
color_masks = [
np.random.randint(0, 256, (1, 3), dtype=np.uint8)
for _ in range(max(labels) + 1)
]
for i in inds:
i = int(i)
color_mask = color_masks[labels[i]]
            mask = maskUtils.decode(segms[i]).astype(bool)
poly = [int(j) for j in binary_mask_to_polygon(mask)[0]]
obj_class = int(labels[i])+1
# if obj_class == 3 or obj_class == 4 or obj_class == 5:
# obj_class -= 2
# else:
# continue
polygons.append(poly + [obj_class])
# plabels.append(labels[i].tolist())
# img[mask] = img[mask] * 0.5 + color_mask * 0.5
test = 0
return polygons#, plabels
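# Each entry returned by server_det_masks is a flat COCO-style polygon with the
# 1-based class id appended: [x1, y1, ..., xn, yn, class_id]. A hedged consumption
# sketch, assuming an mmdetection-style `result` and hypothetical class names:
#   polys = server_det_masks(result, ('crack', 'spall'), score_thr=0.5)
#   for poly in polys:
#       coords, cls = poly[:-1], poly[-1]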
# draw bounding boxes
def server_det_masks_demo(result,
class_names,
score_thr=0.3,
wait_time=0,
show=True,
out_file=None):
assert isinstance(class_names, (tuple, list))
# img = mmcv.imread(img)
# img = imgRotation(img)
# img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
polygons,plabels = [],[]
# draw segmentation masks
if segm_result is not None and len(labels) > 0:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
np.random.seed(42)
color_masks = [
np.random.randint(0, 256, (1, 3), dtype=np.uint8)
for _ in range(max(labels) + 1)
]
for i in inds:
i = int(i)
color_mask = color_masks[labels[i]]
            mask = maskUtils.decode(segms[i]).astype(bool)
poly = [int(j) for j in binary_mask_to_polygon(mask)[0]]
obj_class = int(labels[i])+1
if obj_class == 3 or obj_class == 4 or obj_class == 5 or obj_class ==6:
obj_class -= 2
else:
continue
polygons.append(poly + [obj_class])
# plabels.append(labels[i].tolist())
# img[mask] = img[mask] * 0.5 + color_mask * 0.5
test = 0
return polygons#, plabels
# def server_det_bboxes(bboxes,
# labels,
# class_names=None,
# score_thr=0): # ,
# # bbox_color='green',
# # text_color='green',
# # thickness=1,
# # font_scale=0.5,
# # show=True,
# # win_name='',
# # wait_time=0,
# # out_file=None):
# """Draw bboxes and class labels (with scores) on an image.
#
# Args:
# img (str or ndarray): The image to be displayed.
# bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
# (n, 5).
# labels (ndarray): Labels of bboxes.
# class_names (list[str]): Names of each classes.
# score_thr (float): Minimum score of bboxes to be shown.
# bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
# text_color (str or tuple or :obj:`Color`): Color of texts.
# thickness (int): Thickness of lines.
# font_scale (float): Font scales of texts.
# show (bool): Whether to show the image.
# win_name (str): The window name.
# wait_time (int): Value of waitKey param.
# out_file (str or None): The filename to write the image.
# """
# assert bboxes.ndim == 2
# assert labels.ndim == 1
# assert bboxes.shape[0] == labels.shape[0]
# assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
# # img = imread(img)
#
# if score_thr > 0:
# assert bboxes.shape[1] == 5
# scores = bboxes[:, -1]
# inds = scores > score_thr
# bboxes = bboxes[inds, :]
# labels = labels[inds]
#
# return bboxes, labels
# bbox_color = color_val(bbox_color)
# text_color = color_val(text_color)
#
# for bbox, label in zip(bboxes, labels):
# bbox_int = bbox.astype(np.int32)
# left_top = (bbox_int[0], bbox_int[1])
# right_bottom = (bbox_int[2], bbox_int[3])
# cv2.rectangle(
# img, left_top, right_bottom, bbox_color, thickness=thickness)
# label_text = class_names[
# label] if class_names is not None else 'cls {}'.format(label)
# if len(bbox) > 4:
# label_text += '|{:.02f}'.format(bbox[-1])
# cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),
# cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)
def server_det_bboxes(result, score_thr=0):
# bbox_color='green',
# text_color='green',
# thickness=1,
# font_scale=0.5,
# show=True,
# win_name='',
# wait_time=0,
# out_file=None):
"""Draw bboxes and class labels (with scores) on an image.
Args:
img (str or ndarray): The image to be displayed.
bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5).
labels (ndarray): Labels of bboxes.
class_names (list[str]): Names of each classes.
score_thr (float): Minimum score of bboxes to be shown.
bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
text_color (str or tuple or :obj:`Color`): Color of texts.
thickness (int): Thickness of lines.
font_scale (float): Font scales of texts.
show (bool): Whether to show the image.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
out_file (str or None): The filename to write the image.
"""
if isinstance(result, tuple):
bbox_result, _ = result # bbox_result, segm_result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
assert bboxes.ndim == 2
assert labels.ndim == 1
assert bboxes.shape[0] == labels.shape[0]
    assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
perna, parte não especificada'),
('S82.0', 'Fratura da rótula [patela]'),
('S82.1', 'Fratura da extremidade proximal da tíbia'),
('S82.2', 'Fratura da diáfise da tíbia'),
('S82.3', 'Fratura da extremidade distal da tíbia'),
('S82.4', 'Fratura do perônio [fíbula]'),
('S82.5', 'Fratura do maléolo medial'),
('S82.6', 'Fratura do maléolo lateral'),
('S82.7', 'Fraturas múltiplas da perna'),
('S82.8', 'Fratura de outras partes da perna'),
('S82.9', 'Fratura da perna, parte não especificada'),
('S83.0', 'Luxação da rótula [patela]'),
('S83.1', 'Luxação do joelho'),
('S83.2', 'Ruptura do menisco, atual'),
('S83.3', 'Ruptura atual da cartilagem da articulação do joelho'),
('S83.4', 'Entorse e distensão envolvendo ligamento colateral (peronial) (tibial) do joelho'),
('S83.5', 'Entorse e distensão envolvendo ligamento cruzado (anterior) (posterior) do joelho'),
('S83.6', 'Entorse e distensão de outras partes e das não especificadas do joelho'),
('S83.7', 'Traumatismo de estruturas múltiplas do joelho'),
('S84.0', 'Traumatismo do nervo tibial ao nível da perna'),
('S84.1', 'Traumatismo do nervo peronial ao nível da perna'),
('S84.2', 'Traumatismo do nervo cutâneo sensitivo ao nível da perna'),
('S84.7', 'Traumatismo de múltiplos nervos ao nível da perna'),
('S84.8', 'Traumatismo de outros nervos ao nível da perna'),
('S84.9', 'Traumatismo de nervo não especificado ao nível da perna'),
('S85.0', 'Traumatismo da artéria poplítea'),
('S85.1', 'Traumatismo da artéria tibial (anterior) (posterior)'),
('S85.2', 'Traumatismo da artéria peronial'),
('S85.3', 'Traumatismo da veia grande safena ao nível da perna'),
('S85.4', 'Traumatismo da veia safena menor ao nível da perna'),
('S85.5', 'Traumatismo da veia poplítea'),
('S85.7', 'Traumatismo de múltiplos vasos sangüíneos ao nível da perna'),
('S85.8', 'Traumatismo de outros vasos sangüíneos ao nível da perna'),
('S85.9', 'Traumatismo de vasos sangüíneos não especificados ao nível da perna'),
('S86.0', 'Traumatismo do tendão de Aquiles'),
('S86.1', 'Traumatismo de outro(s) músculo(s) e tendão(ões) do grupo muscular posterior ao nível da perna'),
('S86.2', 'Traumatismo do(s) músculo(s) e tendão(ões) do grupo muscular anterior ao nível da perna'),
('S86.3', 'Traumatismo do(s) músculo(s) e tendão(ões) do grupo muscular peronial ao nível da perna'),
('S86.7', 'Traumatismo de múltiplos músculos e tendões ao nível da perna'),
('S86.8', 'Traumatismos de outros músculos e tendões ao nível da perna'),
('S86.9', 'Traumatismo de músculo e de tendão não especificado ao nível da perna'),
('S87.0', 'Traumatismo por esmagamento do joelho'),
('S87.8', 'Traumatismo por esmagamento de outra parte e de partes não especificadas da perna'),
('S88.0', 'Amputação traumática ao nível do joelho'),
('S88.1', 'Amputação traumática entre o joelho e o tornozelo'),
('S88.9', 'Amputação traumática da perna ao nível não especificado'),
('S89.0', 'Traumatismos múltiplos da perna'),
('S89.8', 'Outros traumatismos especificados da perna'),
('S89.9', 'Traumatismo não especificado da perna'),
('S90.0', 'Contusão do tornozelo'),
('S90.1', 'Contusão de artelho sem lesão da unha'),
('S90.2', 'Contusão de artelho(s) com lesão da unha'),
('S90.3', 'Contusão de outras partes e partes não especificadas do pé'),
('S90.7', 'Traumatismos superficiais múltiplos do tornozelo e do pé'),
('S90.8', 'Outros traumatismos superficiais do tornozelo e do pé'),
('S90.9', 'Traumatismo superficial do tornozelo e do pé, não especificado'),
('S91.0', 'Ferimento do tornozelo'),
('S91.1', 'Ferimento do(s) artelho(s) sem lesão da unha'),
('S91.2', 'Ferimento do(s) artelho(s) com lesão da unha'),
('S91.3', 'Ferimento de outras partes do pé'),
('S91.7', 'Ferimentos múltiplos do tornozelo e do pé'),
('S92.0', 'Fratura do calcâneo'),
('S92.1', 'Fratura do astrágalo'),
('S92.2', 'Fratura de outros ossos do tarso'),
('S92.3', 'Fratura de ossos do metatarso'),
('S92.4', 'Fratura do hálux'),
('S92.5', 'Fratura de outro artelho'),
('S92.7', 'Fraturas múltiplas do pé'),
('S92.9', 'Fratura do pé não especificada'),
('S93.0', 'Luxação da articulação do tornozelo'),
('S93.1', 'Luxação do(s) artelho(s)'),
('S93.2', 'Ruptura de ligamentos ao nível do tornozelo e do pé'),
('S93.3', 'Luxação de outras partes e das não especificadas do pé'),
('S93.4', 'Entorse e distensão do tornozelo'),
('S93.5', 'Entorse e distensão do(s) artelho(s)'),
('S93.6', 'Entorse e distensão de outras partes e de partes não especificadas do pé'),
('S94.0', 'Traumatismo do nervo plantar externo (lateral)'),
('S94.1', 'Traumatismo do nervo plantar interno (medial)'),
('S94.2', 'Traumatismo nervo peronial [fibular] profundo ao nível do tornozelo e do pé'),
('S94.3', 'Traumatismo do nervo sensitivo cutâneo ao nível do tornozelo e do pé'),
('S94.7', 'Traumatismo de múltiplos nervos ao nível do tornozelo e do pé'),
('S94.8', 'Traumatismo de outros nervos ao nível do tornozelo e do pé'),
('S94.9', 'Traumatismo de nervo não especificado, ao nível do tornozelo e do pé'),
('S95.0', 'Traumatismo da artéria dorsal do pé'),
('S95.1', 'Traumatismo da artéria plantar do pé'),
('S95.2', 'Traumatismo da veia dorsal do pé'),
('S95.7', 'Traumatismo de múltiplos vasos sangüíneos ao nível do tornozelo e do pé'),
('S95.8', 'Traumatismo de outros vasos sangüíneos ao nível do tornozelo e do pé'),
('S95.9', 'Traumatismo de vaso sangüíneo não especificado ao nível do tornozelo e do pé'),
('S96.0', 'Traumatismo do músculo e tendão do músculo flexor longo do(s) artelho(s) ao nível do tornozelo e do pé'),
('S96.1', 'Traumatismo do músculo e tendão do extensor longo do(s) artelho(s) ao nível do tornozelo e do pé'),
('S96.2', 'Traumatismo de músculo intrínseco e tendão ao nível do tornozelo e do pé'),
('S96.7', 'Traumatismo de múltiplos músculos e tendões ao nível do tornozelo e do pé'),
('S96.8', 'Traumatismo de outros tendões e músculos ao nível do tornozelo e do pé'),
('S96.9', 'Traumatismo de músculo e tendão não especificados do tornozelo e do pé'),
('S97.0', 'Lesão por esmagamento do tornozelo'),
('S97.1', 'Lesão por esmagamento do(s) artelho(s)'),
('S97.8', 'Lesão por esmagamento de outras partes do tornozelo e do pé'),
('S98.0', 'Amputação traumática do pé ao nível do tornozelo'),
('S98.1', 'Amputação traumática de apenas um artelho'),
('S98.2', 'Amputação traumática de dois ou mais artelhos'),
('S98.3', 'Amputação traumática de outras partes do pé'),
('S98.4', 'Amputação traumática do pé ao nível não especificado'),
('S99.7', 'Traumatismos múltiplos do tornozelo e do pé'),
('S99.8', 'Outros traumatismos especificados do tornozelo e do pé'),
('S99.9', 'Traumatismos não especificados do tornozelo e do pé'),
('T00.0', 'Traumatismos superficiais envolvendo a cabeça com o pescoço'),
('T00.1', 'Traumatismos superficiais envolvendo o tórax com o abdome, parte inferior do dorso e da pelve'),
('T00.2', 'Traumatismos superficiais envolvendo regiões múltiplas do(s) membro(s) superior(es)'),
('T00.3', 'Traumatismos superficiais envolvendo regiões múltiplas do(s) membro(s) inferior(es)'),
('T00.6', 'Traumatismos superficiais envolvendo regiões múltiplas dos membros superiores com membro(s) inferiores'),
('T00.8', 'Traumatismos superficiais envolvendo outras combinações de regiões do corpo'),
('T00.9', 'Traumatismos superficiais múltiplos não especificados'),
('T01.0', 'Ferimentos envolvendo a cabeça com o pescoço'),
('T01.1', 'Ferimentos envolvendo o tórax com o abdome, parte inferior do dorso e da pelve'),
('T01.2', 'Ferimentos envolvendo regiões múltiplas do(s) membro(s) superior(es)'),
('T01.3', 'Ferimentos envolvendo múltiplas regiões do(s) membro(s) inferior(es)'),
('T01.6', 'Ferimentos envolvendo regiões múltiplas do(s) membro(s) superior(es) com membro(s) inferior(es)'),
('T01.8', 'Ferimentos envolvendo outras combinações de regiões do corpo'),
('T01.9', 'Ferimentos múltiplos não especificados'),
('T02.0', 'Fraturas envolvendo cabeça com pescoço'),
('T02.1', 'Fraturas envolvendo tórax com parte inferior do dorso e da pelve'),
('T02.2', 'Fraturas envolvendo regiões múltiplas de um membro superior'),
('T02.3', 'Fraturas envolvendo regiões múltiplas de um membro inferior'),
('T02.4', 'Fraturas envolvendo regiões múltiplas de ambos os membros superiores');
INSERT INTO servicos_cid (cid_id, descricao) VALUES
('T02.5', 'Fraturas envolvendo regiões múltiplas de ambos os membros inferiores'),
('T02.6', 'Fraturas envolvendo regiões múltiplas do(s) membro(s) superior(es) com inferior(es)'),
('T02.7', 'Fraturas envolvendo tórax com parte inferior do dorso e pelve com membro(s)'),
('T02.8', 'Fraturas envolvendo outras combinações de regiões do corpo'),
('T02.9', 'Fraturas múltiplas não especificadas'),
('T03.0', 'Luxações, entorses e distensões envolvendo a cabeça com o pescoço'),
('T03.1', 'Luxações, entorses e distensões envolvendo tórax com parte inferior do dorso e da pelve'),
('T03.2', 'Luxações, entorses e distensões envolvendo regiões múltiplas de membro(s) superior(es)'),
('T03.3', 'Luxações, entorses e distensões envolvendo regiões múltiplas do(s) membro(s) inferior(es)'),
('T03.4', 'Luxações, entorses e distensões envolvendo regiões múltiplas dos membros superiores com inferiores'),
('T03.8', 'Luxações, entorses e distensões envolvendo outras combinações de regiões do corpo'),
('T03.9', 'Luxações, entorses e distensões múltiplas, não especificadas'),
('T04.0', 'Traumatismos por esmagamento envolvendo a cabeça com o pescoço'),
('T04.1', 'Traumatismos por esmagamento envolvendo tórax com o abdome, parte inferior do dorso e da pelve'),
('T04.2', 'Traumatismos por esmagamento envolvendo regiões múltiplas do(s) membro(s) superior(es)'),
('T04.3', 'Traumatismos por esmagamento envolvendo regiões múltiplas do(s) membro(s) inferior(es)'),
('T04.4', 'Traumatismos por esmagamento envolvendo regiões múltiplas do(s) membro(s) superior(es) com inferior(es)'),
('T04.7', 'Traumatismo por esmagamento do tórax com abdome, parte inferior do dorso, pelve e membro(s)'),
('T04.8', 'Traumatismos por esmagamento envolvendo outras combinações de regiões do corpo'),
('T04.9', 'Traumatismo múltiplos por esmagamento não especificados'),
('T05.0', 'Amputação traumática de ambas as mãos'),
('T05.1', 'Amputação traumática de uma mão e de um outro braço [qualquer nível, exceto mão]'),
('T05.2', 'Amputação traumática de ambos os braços [qualquer nível]'),
('T05.3', 'Amputação traumática de ambos os pés'),
('T05.4', 'Amputação traumática de um pé e outra perna [qualquer nível, exceto pé]'),
('T05.5', 'Amputação traumática de ambas as pernas [qualquer nível]'),
('T05.6', 'Amputação traumática de membros superiores e inferiores, qualquer combinação [qualquer nível]'),
('T05.8', 'Amputações traumáticas envolvendo outras combinações de regiões do corpo'),
('T05.9', 'Amputações traumáticas múltiplas não especificadas'),
('T06.0', 'Traumatismos de cérebro e nervos cranianos com traumatismos de nervos e da medula espinhal ao nível do pescoço'),
('T06.1', 'Traumatismos de nervos e da
Returns the value of the `qoss` property.
"""
return self._qoss
@qoss.setter
def qoss(self, value):
"""
Sets the value of the `qoss` property.
"""
self._qoss = value
@property
def mac_pool(self):
"""
Returns the value of the `mac_pool` property.
"""
return self._mac_pool
@mac_pool.setter
def mac_pool(self, value):
"""
Sets the value of the `mac_pool` property.
"""
Struct._check_type('mac_pool', value, MacPool)
self._mac_pool = value
@property
def storage_domains(self):
"""
Returns the value of the `storage_domains` property.
"""
return self._storage_domains
@storage_domains.setter
def storage_domains(self, value):
"""
Sets the value of the `storage_domains` property.
"""
self._storage_domains = value
@property
def iscsi_bonds(self):
"""
Returns the value of the `iscsi_bonds` property.
"""
return self._iscsi_bonds
@iscsi_bonds.setter
def iscsi_bonds(self, value):
"""
Sets the value of the `iscsi_bonds` property.
"""
self._iscsi_bonds = value
@property
def networks(self):
"""
Returns the value of the `networks` property.
"""
return self._networks
@networks.setter
def networks(self, value):
"""
Sets the value of the `networks` property.
"""
self._networks = value
@property
def local(self):
"""
Returns the value of the `local` property.
"""
return self._local
@local.setter
def local(self, value):
"""
Sets the value of the `local` property.
"""
self._local = value
@property
def permissions(self):
"""
Returns the value of the `permissions` property.
"""
return self._permissions
@permissions.setter
def permissions(self, value):
"""
Sets the value of the `permissions` property.
"""
self._permissions = value
@property
def supported_versions(self):
"""
Returns the value of the `supported_versions` property.
"""
return self._supported_versions
@supported_versions.setter
def supported_versions(self, value):
"""
Sets the value of the `supported_versions` property.
"""
self._supported_versions = value
@property
def status(self):
"""
Returns the value of the `status` property.
"""
return self._status
@status.setter
def status(self, value):
"""
Sets the value of the `status` property.
"""
Struct._check_type('status', value, DataCenterStatus)
self._status = value
@property
def clusters(self):
"""
Returns the value of the `clusters` property.
"""
return self._clusters
@clusters.setter
def clusters(self, value):
"""
Sets the value of the `clusters` property.
"""
self._clusters = value
class Device(Identified):
def __init__(
self,
comment=None,
description=None,
id=None,
instance_type=None,
name=None,
template=None,
vm=None,
vms=None,
):
super(Device, self).__init__(
comment=comment,
description=description,
id=id,
name=name,
)
self.instance_type = instance_type
self.template = template
self.vm = vm
self.vms = vms
@property
def template(self):
"""
Returns the value of the `template` property.
"""
return self._template
@template.setter
def template(self, value):
"""
Sets the value of the `template` property.
"""
Struct._check_type('template', value, Template)
self._template = value
@property
def instance_type(self):
"""
Returns the value of the `instance_type` property.
"""
return self._instance_type
@instance_type.setter
def instance_type(self, value):
"""
Sets the value of the `instance_type` property.
"""
Struct._check_type('instance_type', value, InstanceType)
self._instance_type = value
@property
def vm(self):
"""
Returns the value of the `vm` property.
"""
return self._vm
@vm.setter
def vm(self, value):
"""
Sets the value of the `vm` property.
"""
Struct._check_type('vm', value, Vm)
self._vm = value
@property
def vms(self):
"""
Returns the value of the `vms` property.
"""
return self._vms
@vms.setter
def vms(self, value):
"""
Sets the value of the `vms` property.
"""
self._vms = value
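# Construction sketch (values are illustrative; typed attributes such as `template`
# run Struct._check_type on assignment, as defined in the setters above):
#   dev = Device(id='123', name='example-device', description='hypothetical device')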
class Disk(Device):
def __init__(
self,
active=None,
actual_size=None,
alias=None,
backup=None,
backup_mode=None,
bootable=None,
comment=None,
content_type=None,
description=None,
disk_profile=None,
disk_snapshots=None,
external_disk=None,
format=None,
id=None,
image_id=None,
initial_size=None,
instance_type=None,
interface=None,
logical_name=None,
lun_storage=None,
name=None,
openstack_volume_type=None,
permissions=None,
propagate_errors=None,
provisioned_size=None,
qcow_version=None,
quota=None,
read_only=None,
sgio=None,
shareable=None,
snapshot=None,
sparse=None,
statistics=None,
status=None,
storage_domain=None,
storage_domains=None,
storage_type=None,
template=None,
total_size=None,
uses_scsi_reservation=None,
vm=None,
vms=None,
wipe_after_delete=None,
):
super(Disk, self).__init__(
comment=comment,
description=description,
id=id,
instance_type=instance_type,
name=name,
template=template,
vm=vm,
vms=vms,
)
self.active = active
self.actual_size = actual_size
self.alias = alias
self.backup = backup
self.backup_mode = backup_mode
self.bootable = bootable
self.content_type = content_type
self.disk_profile = disk_profile
self.disk_snapshots = disk_snapshots
self.external_disk = external_disk
self.format = format
self.image_id = image_id
self.initial_size = initial_size
self.interface = interface
self.logical_name = logical_name
self.lun_storage = lun_storage
self.openstack_volume_type = openstack_volume_type
self.permissions = permissions
self.propagate_errors = propagate_errors
self.provisioned_size = provisioned_size
self.qcow_version = qcow_version
self.quota = quota
self.read_only = read_only
self.sgio = sgio
self.shareable = shareable
self.snapshot = snapshot
self.sparse = sparse
self.statistics = statistics
self.status = status
self.storage_domain = storage_domain
self.storage_domains = storage_domains
self.storage_type = storage_type
self.total_size = total_size
self.uses_scsi_reservation = uses_scsi_reservation
self.wipe_after_delete = wipe_after_delete
@property
def initial_size(self):
"""
Returns the value of the `initial_size` property.
"""
return self._initial_size
@initial_size.setter
def initial_size(self, value):
"""
Sets the value of the `initial_size` property.
"""
self._initial_size = value
@property
def qcow_version(self):
"""
Returns the value of the `qcow_version` property.
"""
return self._qcow_version
@qcow_version.setter
def qcow_version(self, value):
"""
Sets the value of the `qcow_version` property.
"""
Struct._check_type('qcow_version', value, QcowVersion)
self._qcow_version = value
@property
def total_size(self):
"""
Returns the value of the `total_size` property.
"""
return self._total_size
@total_size.setter
def total_size(self, value):
"""
Sets the value of the `total_size` property.
"""
self._total_size = value
@property
def content_type(self):
"""
Returns the value of the `content_type` property.
"""
return self._content_type
@content_type.setter
def content_type(self, value):
"""
Sets the value of the `content_type` property.
"""
Struct._check_type('content_type', value, DiskContentType)
self._content_type = value
@property
def format(self):
"""
Returns the value of the `format` property.
"""
return self._format
@format.setter
def format(self, value):
"""
Sets the value of the `format` property.
"""
Struct._check_type('format', value, DiskFormat)
self._format = value
@property
def active(self):
"""
Returns the value of the `active` property.
"""
return self._active
@active.setter
def active(self, value):
"""
Sets the value of the `active` property.
"""
self._active = value
@property
def storage_domains(self):
"""
Returns the value of the `storage_domains` property.
"""
return self._storage_domains
@storage_domains.setter
def storage_domains(self, value):
"""
Sets the value of the `storage_domains` property.
"""
self._storage_domains = value
@property
def actual_size(self):
"""
Returns the value of the `actual_size` property.
"""
return self._actual_size
@actual_size.setter
def actual_size(self, value):
"""
Sets the value of the `actual_size` property.
"""
self._actual_size = value
@property
def propagate_errors(self):
"""
Returns the value of the `propagate_errors` property.
"""
return self._propagate_errors
@propagate_errors.setter
def propagate_errors(self, value):
"""
Sets the value of the `propagate_errors` property.
"""
self._propagate_errors = value
@property
def external_disk(self):
"""
Returns the value of the `external_disk` property.
"""
return self._external_disk
@external_disk.setter
def external_disk(self, value):
"""
Sets the value of the `external_disk` property.
"""
self._external_disk = value
@property
def uses_scsi_reservation(self):
"""
Returns the value of the `uses_scsi_reservation` property.
"""
return self._uses_scsi_reservation
@uses_scsi_reservation.setter
def uses_scsi_reservation(self, value):
"""
Sets the value of the `uses_scsi_reservation` property.
"""
self._uses_scsi_reservation = value
@property
def snapshot(self):
"""
Returns the value of the `snapshot` property.
"""
return self._snapshot
@snapshot.setter
def snapshot(self, value):
"""
Sets the value of the `snapshot` property.
"""
Struct._check_type('snapshot', value, Snapshot)
self._snapshot = value
@property
def status(self):
"""
Returns the value of the `status` property.
"""
return self._status
@status.setter
def status(self, value):
"""
Sets the value of the `status` property.
"""
Struct._check_type('status', value, DiskStatus)
self._status = value
@property
def logical_name(self):
"""
Returns the value of the `logical_name` property.
"""
return self._logical_name
@logical_name.setter
def logical_name(self, value):
"""
Sets the value of the `logical_name` property.
"""
self._logical_name = value
@property
def statistics(self):
"""
Returns the value of the `statistics` property.
"""
return self._statistics
@statistics.setter
def statistics(self, value):
"""
Sets the value of the `statistics` property.
"""
self._statistics = value
@property
def shareable(self):
"""
Returns the value of the `shareable` property.
"""
return self._shareable
@shareable.setter
def shareable(self, value):
"""
Sets the value of the `shareable` property.
"""
self._shareable = value
@property
def backup(self):
"""
Returns the value of the `backup` property.
"""
return self._backup
@backup.setter
def backup(self, value):
"""
Sets the value of the `backup` property.
"""
Struct._check_type('backup', value, DiskBackup)
self._backup = value
@property
def storage_domain(self):
"""
Returns the value of the `storage_domain` property.
"""
return self._storage_domain
@storage_domain.setter
def storage_domain(self, value):
"""
Sets the value of the `storage_domain` property.
"""
Struct._check_type('storage_domain', value, StorageDomain)
self._storage_domain = value
@property
def disk_snapshots(self):
"""
Returns the value of the `disk_snapshots` property.
"""
return self._disk_snapshots
@disk_snapshots.setter
def disk_snapshots(self, value):
"""
Sets the value of the `disk_snapshots` property.
"""
self._disk_snapshots = value
@property
def provisioned_size(self):
"""
Returns the value of the `provisioned_size` property.
"""
return self._provisioned_size
@provisioned_size.setter
def provisioned_size(self, value):
"""
Sets the value of the `provisioned_size` property.
"""
self._provisioned_size = value
@property
def backup_mode(self):
"""
Returns the value of the `backup_mode` property.
"""
return self._backup_mode
@backup_mode.setter
def backup_mode(self, value):
"""
Sets the value of the `backup_mode` property.
"""
Struct._check_type('backup_mode', value, DiskBackupMode)
        self._backup_mode = value
'https://i.imgur.com/s5AExEM.jpg'],
['繪師: カツラギ-pixiv', 'https://i.imgur.com/3NOjwK5.jpg'],
['繪師: ぴてぃ-pixiv', 'https://i.imgur.com/4TWI8Ht.jpg'],
['繪師: HIROKAZU-pixiv', 'https://i.imgur.com/jPLoOYo.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
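    # Pattern used by these handlers: `i` (a running counter maintained elsewhere
    # in this file) indexes value_i via `i % len(value_i)`, so repeated queries
    # cycle through the (artist credit, image URL) pairs; ImageMessageURL is
    # assumed to be a local wrapper around the LINE SDK image message.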
### 七冠 ###
### 桂冠 ###
### 七冠 ###
elif input_message in ['克莉絲提娜','克里斯蒂娜','クリスティーナ','克總','誓約女君','老太婆','副團長','阿姨','製作人','聖誕克莉絲提娜']:
value_i = [
['繪師: qwerty131154-巴哈', 'https://i.imgur.com/fjYRD4W.jpg'],
['繪師: 双見ゆうき-pixiv', 'https://i.imgur.com/fY5YhrJ.jpg'],
['繪師: ぽむり-pixiv', 'https://i.imgur.com/vhVVBlr.jpg'],
['繪師: 淫傘うさぎ-pixiv', 'https://i.imgur.com/yqV2k5k.jpg'],
['繪師: itaco-pixiv', 'https://i.imgur.com/99cwfub.jpg'],
['繪師: Saha_-pixiv', 'https://i.imgur.com/QRyfHzd.jpg'],
['繪師: しゅーくりいむ-pixiv', 'https://i.imgur.com/A6e9Nv6.jpg'],
['繪師: Hanse-pixiv', 'https://i.imgur.com/tf6sNt6.jpg'],
['繪師: sonchi-pixiv', 'https://i.imgur.com/aQNaclR.jpg'],
['繪師: sonchi-pixiv', 'https://i.imgur.com/4rkG4kz.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['矛依未','青蛙','ムイミ','天樓霸斷劍','諾唯姆','姆咪','正月矛依未']:
value_i = [
'https://i.imgur.com/CW1GCBv.jpg',
['繪師: AJ-pixiv', "https://i.imgur.com/Pgg0fqM.jpg"],
['繪師: 塵-pixiv', "https://i.imgur.com/QZMeUVh.jpg"],
['繪師: 延ビ-pixiv', "https://i.imgur.com/S6OSknV.jpg"],
['繪師: Jehyun-pixiv', "https://i.imgur.com/wZzXQMY.jpg"],
['繪師: カッシュ-pixiv', "https://i.imgur.com/5890KnY.jpg"],
['繪師: 延ビ-pixiv', "https://i.imgur.com/wH7RlxR.jpg"],
['繪師: 延ビ-pixiv', "https://i.imgur.com/P1AKT4r.jpg"],
['繪師: ヒーロー-pixiv', "https://i.imgur.com/2sAbiD5.jpg"],
['繪師: ヒーロー-pixiv', "https://i.imgur.com/jBrFpQr.jpg"],
['繪師: 六丸いなみ-pixiv', "https://i.imgur.com/i1FJvTk.jpg"]
]
if(len(value_i[i% len(value_i)])==2):
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
else:
line_bot_api.reply_message(event.reply_token,ImageMessageURL(value_i[i% len(value_i)]))
elif input_message in ['似似花','ネネカ','448','nnk','現士實似似花','變貌大妃','正月似似花']:
value_i = [
['繪師: 蛞蝓SLUG-pixiv', "https://i.imgur.com/5SuITSA.jpg"],
['繪師: うまるつふり-pixiv', "https://i.imgur.com/aGDYsI3.jpg"],
['繪師: ヒーロー-pixiv', "https://i.imgur.com/yGsd9CX.jpg"],
['繪師: Sw(すぅ)-pixiv', "https://i.imgur.com/ZzUuYHz.jpg"],
['繪師: 1ピコ㍍-pixiv', "https://i.imgur.com/Xsi8DLf.jpg"],
['繪師: AJ-pixiv', "https://i.imgur.com/gcbOijd.jpg"],
['繪師: Sw(すぅ)-pixiv', "https://i.imgur.com/IWWXm2i.jpg"],
['繪師: Sw(すぅ)-pixiv', "https://i.imgur.com/JIvmhTS.jpg"],
['繪師: 天雷-pixiv', "https://i.imgur.com/jjsaSVF.jpg"],
['繪師: Sw(すぅ)-pixiv', "https://i.imgur.com/y0vPH6W.jpg"],
['繪師: Sw(すぅ)-pixiv', "https://i.imgur.com/rYx1j94.jpg"],
['繪師: けんぴゃっ-pixiv', "https://i.imgur.com/Gbt0uVO.jpg"]
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['夥伴','伙伴','同伴','相棒','アイボウ','尾狗刀','尾刀狗']:
value_i = [
['繪師: 塵-pixiv', "https://i.imgur.com/SneVdIU.jpg"],
['繪師: 塵-pixiv', "https://i.imgur.com/scnsgWD.jpg"],
['繪師: 塵-pixiv', "https://i.imgur.com/xMsa8U2.jpg"],
['繪師: 塵-pixiv', "https://i.imgur.com/I5Qk2cQ.jpg"],
['繪師: 塵-pixiv', "https://i.imgur.com/AUG6ynv.jpg"],
['繪師: 塵-pixiv', "https://i.imgur.com/L7I8aOS.jpg"],
['繪師: 塵-pixiv', "https://i.imgur.com/1StDQPw.jpg"],
['繪師: 塵-pixiv', "https://i.imgur.com/DRVw6os.jpg"],
['繪師: 延ビ-pixiv', "https://i.imgur.com/CMeG1rV.jpg"],
['繪師: 延ビ-pixiv', "https://i.imgur.com/zele47S.jpg"]
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['拉比林斯達','ラビリスタ','模索路晶','晶','迷宮女王','迷路女王']:
value_i = [
['繪師: オスティ-pixiv', "https://i.imgur.com/J69aauG.jpg"],
['繪師: オスティ-pixiv', "https://i.imgur.com/kHF3TOs.jpg"],
['繪師: 谷川犬兎-pixiv', "https://i.imgur.com/JqOwWXm.jpg"],
['繪師: らる-pixiv', "https://i.imgur.com/OF7HmOJ.jpg"],
['遊戲繪圖', "https://i.imgur.com/9BfhchR.jpg"],
['繪師: ヒーロー-pixiv', "https://i.imgur.com/lkj81hl.jpg"],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message == '桂冠':
value_i = [
ImageMessageURL("https://i.imgur.com/5lRyLJy.png"),
TextSendMessage(text ="騎士君是肚子餓了嗎?"),
TextSendMessage(text ="桂冠你媽啦,就跟你說七冠了。\n-布丁")
]
line_bot_api.reply_message(event.reply_token,value_i[i% len(value_i)])
### 馬納歷亞 ###
### マナリアフレンズ ###
### Manaria Friends ###
elif input_message in ['馬納歷亞','マナリアフレンズ','Manaria Friends','百合公主']:
value_i = [
['繪師: 92M-pixiv', 'https://i.imgur.com/AtJOEqh.jpg'],
['繪師: とも-pixiv', 'https://i.imgur.com/rqVMy0r.jpg'],
['繪師: 音の绯-pixiv', 'https://i.imgur.com/OYbCg5i.jpg'],
['繪師: ぽんず-pixiv', 'https://i.imgur.com/QARR8iO.jpg'],
['繪師: れんず-pixiv', 'https://i.imgur.com/t9jLBeS.jpg'],
['繪師: にゃー-pixiv', 'https://i.imgur.com/Dl0bf68.jpg'],
['繪師: れっれれ-pixiv', 'https://i.imgur.com/pqQQ1ED.jpg'],
['繪師: みどりのちゃ-pixiv', 'https://i.imgur.com/D6B3wSk.jpg'],
['繪師: AJ-pixiv', 'https://i.imgur.com/JE5MeGW.jpg'],
['繪師: いとね-pixiv', 'https://i.imgur.com/pz8dC3b.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['古蕾雅','グレア','龍姬','古雷雅'] :
value_i = [
['繪師: KWS-pixiv', 'https://i.imgur.com/aatQVtQ.jpg'],
['繪師: かんかっぴ-pixiv', 'https://i.imgur.com/jYom8yC.jpg'],
['繪師: とも-pixiv', 'https://i.imgur.com/qxe6AtA.jpg'],
['繪師: とも-pixiv', 'https://i.imgur.com/zvolEcL.jpg'],
['繪師: とも-pixiv', 'https://i.imgur.com/7RuuWPm.jpg'],
['繪師: いとね-pixiv', 'https://i.imgur.com/lhXOFCV.jpg'],
['繪師: おもおもも-pixiv', 'https://i.imgur.com/HHuR7AU.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['安','アン','55kg','馬納歷亞公主','宏大魔法']:
value_i = [
['繪師: S.U.-pixiv', 'https://i.imgur.com/TYwLMpV.jpg'],
['繪師: いとね-pixiv', 'https://i.imgur.com/SoSwfNW.jpg'],
['繪師: HotaK-pixiv', 'https://i.imgur.com/sqexcv7.jpg'],
['繪師: セラ-pixiv', 'https://i.imgur.com/pF3oHmc.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['露','ルゥ','眼球','補考','補考女帝']:
value_i = [
['繪師: ぺろんちょ-pixiv', 'https://i.imgur.com/WXCiwFo.jpg'],
['繪師: AJ-pixiv', 'https://i.imgur.com/xcFCKju.jpg'],
['繪師: りこ-pixiv', 'https://i.imgur.com/oFuxXbB.jpg'],
['繪師: なかひま-pixiv', 'https://i.imgur.com/UMZ3jqU.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
### Re:從零開始的異世界生活 ###
### Re:ゼロから始める異世界生活 ###
### Re:0 ###
elif input_message in ['Re:從零開始的異世界生活','Re:ゼロから始める異世界生活','Re0','re0','Re:0','re:0']:
value_i = [
['繪師: ぽえ-pixiv', 'https://i.imgur.com/zEWwDWx.jpg'],
['繪師: 桃乃きのこ。-pixiv', 'https://i.imgur.com/9sNkqru.jpg'],
['繪師: ChinTora0201-pixiv', 'https://i.imgur.com/JXlERea.jpg'],
['繪師: 喜欢夜宵yayoi-pixiv', 'https://i.imgur.com/RR4bXb2.jpg'],
['繪師: ゆぞうに-pixiv', 'https://i.imgur.com/GlwcKnj.jpg'],
['繪師: えらんと-pixiv', 'https://i.imgur.com/6dltMdz.jpg'],
['繪師: だよ-pixiv', 'https://i.imgur.com/UamOLeJ.jpg'],
['繪師: あろえ-pixiv', 'https://i.imgur.com/3zH6gy7.jpg'],
['繪師: しゃけ沢-pixiv', 'https://i.imgur.com/6GAVLKt.jpg'],
['繪師: しゃけ沢-pixiv', 'https://i.imgur.com/kkweao4.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['愛蜜莉雅','艾蜜莉雅','愛蜜莉亞','艾蜜莉亞','エミリア','EMT','emt','莉雅']:
value_i = [
['繪師: @Seic_Oh-pixiv', 'https://i.imgur.com/Il334iS.jpg'],
['繪師: DABY-pixiv', 'https://i.imgur.com/slm7jSF.jpg'],
['繪師: PiO-pixiv', 'https://i.imgur.com/mBDxyvy.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['雷姆','レム','快速動眼期']:
value_i = [
['繪師: そらほし-pixiv', 'https://i.imgur.com/hrYaNNk.jpg'],
['繪師: DABY-pixiv', 'https://i.imgur.com/SqT0j7K.jpg'],
['繪師: ttosom-pixiv', 'https://i.imgur.com/BTclhHL.jpg'],
['繪師: MOMIN-pixiv', 'https://i.imgur.com/CUh9u9u.jpg'],
['繪師: Bcoca-pixiv', 'https://i.imgur.com/Lhuqtbl.jpg'],
['繪師: Melings-pixiv', 'https://i.imgur.com/MAbMNvB.jpg'],
['繪師: 赤つき-pixiv', 'https://i.imgur.com/qwwmytW.jpg'],
['繪師: ONSEM-pixiv', 'https://i.imgur.com/yxq7Q41.jpg'],
['繪師: pangbai_666-pixiv', 'https://i.imgur.com/nSIZyms.jpg'],
['繪師: 千羽茸みな-pixiv', 'https://i.imgur.com/W3i3XrP.jpg'],
['繪師: はちろく-pixiv', 'https://i.imgur.com/BFjYGja.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['拉姆','ラム','記憶體','快取']:
value_i = [
['繪師: 千羽茸みな-pixiv', 'https://i.imgur.com/170L1AL.jpg'],
['繪師: pigone-pixiv', 'https://i.imgur.com/vCtXAgN.jpg'],
['繪師: MOMIN-pixiv', 'https://i.imgur.com/NABfw4w.jpg'],
['繪師: Suo-pixiv', 'https://i.imgur.com/lco0h8A.jpg'],
['繪師: G.YA-pixiv', 'https://i.imgur.com/G1pGidw.jpg'],
['繪師: mongble-pixiv', 'https://i.imgur.com/yDjYb2W.jpg'],
['繪師: 100wang-pixiv', 'https://i.imgur.com/1V0lN5M.jpg'],
['繪師: ゆぞうに-pixiv', 'https://i.imgur.com/swAQL8v.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['碧翠絲','ベアトリス','貝蒂','碧翠子']:
value_i = [
['繪師: だよ-pixiv', 'https://i.imgur.com/nGQILuC.jpg'],
['繪師: KeG-pixiv', 'https://i.imgur.com/scWOIZi.jpg'],
['繪師: tonowa トノワ-pixiv', 'https://i.imgur.com/0C9ZjXh.jpg'],
['繪師: しゃけ沢-pixiv', 'https://i.imgur.com/rm7jCFZ.jpg'],
['繪師: きんぎん-pixiv', 'https://i.imgur.com/uLWLBqp.jpg'],
['繪師: そらほし-pixiv', 'https://i.imgur.com/0Xqn2Kj.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
### 偶像大師灰姑娘女孩 ###
### アイドルマスターシンデレラガールズ ###
### New Generation ###
elif input_message in ['偶像大師灰姑娘女孩','アイドルマスターシンデレラガールズ','偶大','偶像大師','灰姑娘','新世代','New Generation','new generation']:
value_i = [
['繪師: シワスタカシ-pixiv', 'https://i.imgur.com/WZMIbDm.jpg'],
['繪師: Blue_Gk-pixiv', 'https://i.imgur.com/oEwb94k.jpg'],
['繪師: nyanya-pixiv', 'https://i.imgur.com/2T86ioj.jpg'],
['繪師: 森倉円-pixiv', 'https://i.imgur.com/V701qlH.jpg'],
['繪師: 森倉円-pixiv', 'https://i.imgur.com/Tttl70z.jpg'],
['繪師: 森倉円-pixiv', 'https://i.imgur.com/pdpPweS.jpg'],
['繪師: 月神るな-pixiv', 'https://i.imgur.com/7ha5BHr.jpg'],
['繪師: @001_Tashia-twitter', 'https://i.imgur.com/sg9Q98D.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['凜','渋谷凛','澀谷凜','蒼之劍士']:
value_i = [
['繪師: たまかが-pixiv', 'https://i.imgur.com/IHb3Fpq.jpg'],
['繪師: たまかが-pixiv', 'https://i.imgur.com/dEOc4B9.jpg'],
['繪師: たまかが-pixiv', 'https://i.imgur.com/Pn8rJg6.jpg'],
['繪師: すとろα-pixiv', 'https://i.imgur.com/rpLWgZ0.jpg'],
['繪師: たまかが-pixiv', 'https://i.imgur.com/RYqtKt4.jpg'],
['繪師: Appplepie/AP-pixiv', 'https://i.imgur.com/T3XJO0P.jpg'],
['繪師: 遊びに来た人・v・-pixiv', 'https://i.imgur.com/eFryXdz.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['卯月','ウヅキ','島村卯月','笑容狂魔','笑容狂魔卯月']:
value_i = [
['我爸摳爸twitter: @cloba377', 'https://i.imgur.com/LJei46w.jpg'],
['繪師: うらび-pixiv', 'https://i.imgur.com/dUdTGQb.jpg'],
['繪師: 荻pote-pixiv', 'https://i.imgur.com/mm8Yo4p.jpg'],
['繪師: 結城辰也-pixiv', 'https://i.imgur.com/UyrBq7f.jpg'],
['繪師: U35(うみこ)-pixiv', 'https://i.imgur.com/sgquvvJ.jpg'],
['繪師: 芹野いつき-pixiv', 'https://i.imgur.com/tV1mcRw.jpg'],
['繪師: 芹野いつき-pixiv', 'https://i.imgur.com/Y5qeCti.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['未央','ミオ','本田未央','醬未央']:
value_i = [
['繪師: なかむら-pixiv', 'https://i.imgur.com/c7AyD0I.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/PSDg4hg.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/ytEi5Ch.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/oLYkBFq.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/3LVm262.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/OMM2Nnw.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/u5b5jeY.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/2NmYR4H.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/5kjaeRP.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/pjAWB3t.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/dcciHxb.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/JFsM3G9.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/8P5x7yP.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/wBYo85z.png'],
['繪師: なかむら-pixiv', 'https://i.imgur.com/MXQ211f.png']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
    ### Characters (other) ###
    ### Characters (other) ###
    ### Characters (other) ###
elif input_message in ['吉塔','ジータ','姬塔','騎空士','團長','古戰場逃兵','古戰場','吉他']:
value_i = [
['繪師: sirohito-pixiv', 'https://i.imgur.com/JQVl13u.jpg'],
['繪師: sirohito-pixiv', 'https://i.imgur.com/Koj0uLM.jpg'],
['繪師: iro-pixiv', 'https://i.imgur.com/H7HCIAP.jpg'],
['繪師: たく庵-pixiv', 'https://i.imgur.com/A2aEn1l.jpg'],
['繪師: iro-pixiv', 'https://i.imgur.com/JbYRjt3.jpg'],
['繪師: まぐ-pixiv', 'https://i.imgur.com/6ZfdF8m.jpg'],
['繪師: とうふぷりん-pixiv', 'https://i.imgur.com/oG0b6Hi.jpg'],
['繪師: みり㍑-pixiv', 'https://i.imgur.com/UCIL06b.jpg'],
['繪師: 葉千はちみつ-pixiv', 'https://i.imgur.com/usc7BWI.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['露娜','ルナ','露那','死靈法師','好朋友']:
value_i = [
['繪師: Enji-pixiv', 'https://i.imgur.com/ob9Uw8y.jpg'],
['繪師: により-pixiv', 'https://i.imgur.com/HgbgckH.jpg'],
['繪師: HIROKAZU-pixiv', 'https://i.imgur.com/igqmOgB.jpg'],
['繪師: ちてたん-pixiv', 'https://i.imgur.com/SCP6xRn.jpg'],
['繪師: により-pixiv', 'https://i.imgur.com/amVcMiq.jpg'],
['繪師: 小山内-pixiv', 'https://i.imgur.com/BrEu6Vm.jpg'],
['繪師: shoonia-pixiv', 'https://i.imgur.com/oAjAmEe.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['亞里莎','アリサ','亞里瞎','瞎子','羅莎莉亞']:
value_i = [
['繪師: 黒井ススム-pixiv', 'https://i.imgur.com/BCu2qYG.jpg'],
['繪師: ヨシノリョウ-pixiv', 'https://i.imgur.com/7NwXZJ2.jpg'],
['繪師: 士雷 Shirai-pixiv', 'https://i.imgur.com/QvQRCLn.jpg'],
['繪師: kieed-pixiv', 'https://i.imgur.com/anYIkmH.jpg'],
['繪師: きち-pixiv', 'https://i.imgur.com/crxmOPm.jpg'],
['繪師: きち-pixiv', 'https://i.imgur.com/FNVMMbH.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['愛梅斯','DD頭子','アメス','艾梅斯']:
value_i = [
['繪師: aono-pixiv', 'https://i.imgur.com/yk8dzMD.jpg','https://i.imgur.com/uc1XcEF.jpg','https://i.imgur.com/uKWemDs.jpg'],
['繪師: aono-pixiv', 'https://i.imgur.com/hurT0Sk.jpg'],
['繪師: aono-pixiv', 'https://i.imgur.com/9wfDIYY.jpg'],
['繪師: aono-pixiv', 'https://i.imgur.com/M6WlrdB.jpg'],
['繪師: いすとーん-pixiv', 'https://i.imgur.com/VnQ0cPI.jpg'],
['繪師: つちのトン-pixiv', 'https://i.imgur.com/lzKdQtU.jpg'],
['繪師: うまるつふり-pixiv', 'https://i.imgur.com/LKRmGhU.jpg'],
['繪師: みず-pixiv', 'https://i.imgur.com/v2grm1E.jpg'],
['繪師: 結月わらび-pixiv', 'https://i.imgur.com/1VERUPY.jpg'],
['繪師: Sira-pixiv', 'https://i.imgur.com/NMi24Ix.jpg'],
['繪師: aono-pixiv', 'https://i.imgur.com/WG2qVcL.jpg'],
['繪師: ヒーロー-pixiv', 'https://i.imgur.com/xSmm7wk.jpg'],
]
if(len(value_i[i% len(value_i)])==4):
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1]),ImageMessageURL(value_i[i% len(value_i)][2]),ImageMessageURL(value_i[i% len(value_i)][3])])
else:
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['對決','決戰','終戰','對峙']:
value_i = [
['繪師: 天雷-pixiv', 'https://i.imgur.com/2wM6Fv3.jpg'],
['繪師: KMH-pixiv', 'https://i.imgur.com/d95pPjB.jpg'],
['繪師: こしあん(たいやき)-pixiv', 'https://i.imgur.com/tuIdVA5.jpg'],
['繪師: ウラズラ-pixiv', 'https://i.imgur.com/mse3aq4.jpg'],
['繪師: @cluseller-twitter', 'https://i.imgur.com/Gzac88E.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['姊妹','姐妹','雙子']:
value_i = [
['繪師: みず-pixiv', 'https://i.imgur.com/ul5x7d4.jpg'],
['繪師: 結城辰也-pixiv', 'https://i.imgur.com/UtkMYdI.jpg'],
['繪師: ヤンタロウ-pixiv', 'https://i.imgur.com/QaAUaca.jpg'],
['繪師: Chel-pixiv', 'https://i.imgur.com/vy9LI9P.jpg'],
['繪師: ぬるぷよ-pixiv', 'https://i.imgur.com/WH0niD2.jpg'],
['繪師: ゆりりん-pixiv', 'https://i.imgur.com/vuueBKE.jpg'],
['繪師: はちろく-pixiv', 'https://i.imgur.com/BFjYGja.jpg'],
['繪師: pigone-pixiv', 'https://i.imgur.com/vCtXAgN.jpg'],
['繪師: ユキタカ-pixiv', 'https://i.imgur.com/iQVOxk2.jpg'],
['繪師: みどりのちゃ-pixiv', 'https://i.imgur.com/2wbKiAy.jpg'],
['繪師: 秋月リア-pixiv', 'https://i.imgur.com/NRgmRRj.jpg'],
['繪師: RYUKI-pixiv', 'https://i.imgur.com/cTrVg8W.jpg'],
['繪師: cha_chya-pixiv', 'https://i.imgur.com/nle89D8.jpg'],
['繪師: @PK_PKP_PPK-twitter', 'https://i.imgur.com/Dg1bV2v.jpg'],
['繪師: GaaRa-pixiv', 'https://i.imgur.com/npB3vE4.jpg'],
['繪師: しもん-pixiv', 'https://i.imgur.com/vXay9QY.jpg'],
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['魔法少女','馬猴燒酒']:
value_i = [
['繪師: けんぴゃっ-pixiv', 'https://i.imgur.com/SrlAcry.jpg'],
['繪師: ぐっち庵-pixiv', 'https://i.imgur.com/DiJZGaI.jpg'],
['繪師: AJ-pixiv', 'https://i.imgur.com/QQec9OS.jpg'],
['繪師: 夜凪朝妃-pixiv', 'https://i.imgur.com/GU3Pdtk.jpg'],
['繪師: @mk1122maki-twitter', 'https://i.imgur.com/BeCNUl3.png'],
['繪師: @uranakahima-twitter', 'https://i.imgur.com/zRr1s25.png'],
['繪師: @o_kita915-twitter', 'https://i.imgur.com/o6Mo3d1.png']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['公主連結','プリコネ','超異域公主連結','公連']:
value_i = [
['繪師: Lab2-pixiv', 'https://i.imgur.com/YBfyJ36.jpg'],
['繪師: 菖蒲-pixiv', 'https://i.imgur.com/Ljgi7Of.jpg'],
['繪師: 結城辰也-pixiv', 'https://i.imgur.com/FXAP2EI.jpg'],
['繪師: 冷蝉-pixiv', 'https://i.imgur.com/S07PioH.jpg'],
['繪師: みどりのちゃ-pixiv','https://i.imgur.com/jkxQSzY.jpg'],
['繪師: みどりのちゃ-pixiv','https://i.imgur.com/UNjZhIs.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text= value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
# Other characters (not Princess Connect cast; kept as personal picks)
### Azur Lane (碧藍航線 / アズールレーン / 碧池航線) ###
elif input_message in ['碧藍航線','アズールレーン','碧池航線']:
value_i = [
['繪師: 清里-pixiv', 'https://i.imgur.com/hONfsMX.jpg'],
['繪師: 玲汰-pixiv', 'https://i.imgur.com/Pl0P8pK.jpg'],
['繪師: 月満懐空-pixiv', 'https://i.imgur.com/3uZlrvV.jpg'],
['繪師: かぷりちお-pixiv', 'https://i.imgur.com/LU16tpQ.jpg'],
['繪師: @umaiyo_puyoman-twitter', 'https://i.imgur.com/XPKkF7W.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['標槍','Javelin','ジャベリン']:
value_i = [
['繪師: 紅薙ようと-pixiv', 'https://i.imgur.com/PzKzQCC.jpg'],
['繪師: もうぴい-pixiv', 'https://i.imgur.com/ryUR0N6.jpg'],
['繪師: もうぴい-pixiv', 'https://i.imgur.com/NTmk4IM.jpg'],
['繪師: もうぴい-pixiv', 'https://i.imgur.com/2WxKEDr.jpg'],
['繪師: もうぴい-pixiv', 'https://i.imgur.com/J7o6Htn.jpg'],
['繪師: もうぴい-pixiv', 'https://i.imgur.com/SekN4bL.jpg'],
['繪師: うなっち-pixiv', 'https://i.imgur.com/IO1nx2t.jpg'],
['繪師: まだら-pixiv', 'https://i.imgur.com/Q506e62.jpg'],
['繪師: ちょこころね-pixiv', 'https://i.imgur.com/mJEnaOq.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['拉菲','ラフィー','紅酒']:
value_i = [
['繪師: TouTou-pixiv', 'https://i.imgur.com/6xrpW1X.jpg'],
['繪師: 月うさぎ-pixiv', 'https://i.imgur.com/peTAQvV.jpg'],
['繪師: まとけち-pixiv', 'https://i.imgur.com/8tHVklt.jpg'],
['繪師: ぽしー-pixiv', 'https://i.imgur.com/tjy0KC3.jpg'],
['繪師: らむち-pixiv', 'https://i.imgur.com/VRFASyP.jpg'],
['繪師: 月うさぎ-pixiv', 'https://i.imgur.com/MB6IDtx.jpg'],
['繪師: 自律金属-pixiv', 'https://i.imgur.com/b7LXyZb.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['綾波','アヤナミ','鬼神']:
value_i = [
['繪師: rika_39-pixiv', 'https://i.imgur.com/2vNqHVP.jpg'],
['繪師: Kana-pixiv', 'https://i.imgur.com/0I2TKgh.jpg'],
['繪師: 清里-pixiv', 'https://i.imgur.com/bD8S8tB.jpg'],
['繪師: いずもねる-pixiv', 'https://i.imgur.com/cG12rKK.jpg'],
['繪師: シロノーラ-pixiv', 'https://i.imgur.com/Js1wIYn.jpg'],
['繪師: narae-pixiv', 'https://i.imgur.com/udUKVLC.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
elif input_message in ['獨角獸','ユニコーン'] or (input_message[:2] == '港獨' and len(input_message)<6):
value_i = [
['繪師: 浅ノ川-pixiv', 'https://i.imgur.com/zXNIMWm.jpg'],
['繪師: Kinty-pixiv', 'https://i.imgur.com/fnpwjNA.jpg'],
['繪師: 松うに-pixiv', 'https://i.imgur.com/ahxeS2g.jpg'],
['繪師: マトリ-pixiv', 'https://i.imgur.com/TQcJlUj.jpg'],
['繪師: 繭咲悠-pixiv', 'https://i.imgur.com/rhsNP4Y.jpg'],
['繪師: 小枝-pixiv', 'https://i.imgur.com/xmQ7dEq.jpg'],
['繪師: ちた-pixiv', 'https://i.imgur.com/KKfImwN.jpg']
]
line_bot_api.reply_message(event.reply_token,[TextSendMessage(text = value_i[i% len(value_i)][0]),ImageMessageURL(value_i[i% len(value_i)][1])])
### 心跳文學部 ###
### Doki Doki Literature Club! ###
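# The reply branches above depend on a helper named ImageMessageURL, presumably
# defined earlier in the bot and not shown in this excerpt. A minimal sketch of what
# such a helper typically looks like with the official line-bot-sdk is given below;
# the helper name and single-URL signature are inferred from the calls above, so
# treat this as an assumption rather than the actual implementation.
from linebot.models import ImageSendMessage

def ImageMessageURL(url):
    # LINE requires both an original and a preview image URL; for static imgur
    # links the same HTTPS URL can serve both purposes.
    return ImageSendMessage(original_content_url=url, preview_image_url=url)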
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class ConfigurableJoint:
def __new__(cls, arg1=None):
'''
:returns: ConfigurableJoint
:rtype: UnityEngine.ConfigurableJoint
'''
pass
@staticmethod
def op_Implicit(arg1):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Equality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Inequality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_secondaryAxis():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_secondaryAxis(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_xMotion():
'''
:returns: ConfigurableJointMotion
:rtype: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def set_xMotion(arg1):
'''
:param arg1: ConfigurableJointMotion
:type arg1: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def get_yMotion():
'''
:returns: ConfigurableJointMotion
:rtype: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def set_yMotion(arg1):
'''
:param arg1: ConfigurableJointMotion
:type arg1: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def get_zMotion():
'''
:returns: ConfigurableJointMotion
:rtype: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def set_zMotion(arg1):
'''
:param arg1: ConfigurableJointMotion
:type arg1: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def get_angularXMotion():
'''
:returns: ConfigurableJointMotion
:rtype: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def set_angularXMotion(arg1):
'''
:param arg1: ConfigurableJointMotion
:type arg1: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def get_angularYMotion():
'''
:returns: ConfigurableJointMotion
:rtype: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def set_angularYMotion(arg1):
'''
:param arg1: ConfigurableJointMotion
:type arg1: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def get_angularZMotion():
'''
:returns: ConfigurableJointMotion
:rtype: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def set_angularZMotion(arg1):
'''
:param arg1: ConfigurableJointMotion
:type arg1: UnityEngine.ConfigurableJointMotion
'''
pass
@staticmethod
def get_linearLimitSpring():
'''
:returns: SoftJointLimitSpring
:rtype: UnityEngine.SoftJointLimitSpring
'''
pass
@staticmethod
def set_linearLimitSpring(arg1):
'''
:param arg1: SoftJointLimitSpring
:type arg1: UnityEngine.SoftJointLimitSpring
'''
pass
@staticmethod
def get_angularXLimitSpring():
'''
:returns: SoftJointLimitSpring
:rtype: UnityEngine.SoftJointLimitSpring
'''
pass
@staticmethod
def set_angularXLimitSpring(arg1):
'''
:param arg1: SoftJointLimitSpring
:type arg1: UnityEngine.SoftJointLimitSpring
'''
pass
@staticmethod
def get_angularYZLimitSpring():
'''
:returns: SoftJointLimitSpring
:rtype: UnityEngine.SoftJointLimitSpring
'''
pass
@staticmethod
def set_angularYZLimitSpring(arg1):
'''
:param arg1: SoftJointLimitSpring
:type arg1: UnityEngine.SoftJointLimitSpring
'''
pass
@staticmethod
def get_linearLimit():
'''
:returns: SoftJointLimit
:rtype: UnityEngine.SoftJointLimit
'''
pass
@staticmethod
def set_linearLimit(arg1):
'''
:param arg1: SoftJointLimit
:type arg1: UnityEngine.SoftJointLimit
'''
pass
@staticmethod
def get_lowAngularXLimit():
'''
:returns: SoftJointLimit
:rtype: UnityEngine.SoftJointLimit
'''
pass
@staticmethod
def set_lowAngularXLimit(arg1):
'''
:param arg1: SoftJointLimit
:type arg1: UnityEngine.SoftJointLimit
'''
pass
@staticmethod
def get_highAngularXLimit():
'''
:returns: SoftJointLimit
:rtype: UnityEngine.SoftJointLimit
'''
pass
@staticmethod
def set_highAngularXLimit(arg1):
'''
:param arg1: SoftJointLimit
:type arg1: UnityEngine.SoftJointLimit
'''
pass
@staticmethod
def get_angularYLimit():
'''
:returns: SoftJointLimit
:rtype: UnityEngine.SoftJointLimit
'''
pass
@staticmethod
def set_angularYLimit(arg1):
'''
:param arg1: SoftJointLimit
:type arg1: UnityEngine.SoftJointLimit
'''
pass
@staticmethod
def get_angularZLimit():
'''
:returns: SoftJointLimit
:rtype: UnityEngine.SoftJointLimit
'''
pass
@staticmethod
def set_angularZLimit(arg1):
'''
:param arg1: SoftJointLimit
:type arg1: UnityEngine.SoftJointLimit
'''
pass
@staticmethod
def get_targetPosition():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_targetPosition(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_targetVelocity():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_targetVelocity(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_xDrive():
'''
:returns: JointDrive
:rtype: UnityEngine.JointDrive
'''
pass
@staticmethod
def set_xDrive(arg1):
'''
:param arg1: JointDrive
:type arg1: UnityEngine.JointDrive
'''
pass
@staticmethod
def get_yDrive():
'''
:returns: JointDrive
:rtype: UnityEngine.JointDrive
'''
pass
@staticmethod
def set_yDrive(arg1):
'''
:param arg1: JointDrive
:type arg1: UnityEngine.JointDrive
'''
pass
@staticmethod
def get_zDrive():
'''
:returns: JointDrive
:rtype: UnityEngine.JointDrive
'''
pass
@staticmethod
def set_zDrive(arg1):
'''
:param arg1: JointDrive
:type arg1: UnityEngine.JointDrive
'''
pass
@staticmethod
def get_targetRotation():
'''
:returns: Quaternion
:rtype: UnityEngine.Quaternion
'''
pass
@staticmethod
def set_targetRotation(arg1):
'''
:param arg1: Quaternion
:type arg1: UnityEngine.Quaternion
'''
pass
@staticmethod
def get_targetAngularVelocity():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_targetAngularVelocity(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_rotationDriveMode():
'''
:returns: RotationDriveMode
:rtype: UnityEngine.RotationDriveMode
'''
pass
@staticmethod
def set_rotationDriveMode(arg1):
'''
:param arg1: RotationDriveMode
:type arg1: UnityEngine.RotationDriveMode
'''
pass
@staticmethod
def get_angularXDrive():
'''
:returns: JointDrive
:rtype: UnityEngine.JointDrive
'''
pass
@staticmethod
def set_angularXDrive(arg1):
'''
:param arg1: JointDrive
:type arg1: UnityEngine.JointDrive
'''
pass
@staticmethod
def get_angularYZDrive():
'''
:returns: JointDrive
:rtype: UnityEngine.JointDrive
'''
pass
@staticmethod
def set_angularYZDrive(arg1):
'''
:param arg1: JointDrive
:type arg1: UnityEngine.JointDrive
'''
pass
@staticmethod
def get_slerpDrive():
'''
:returns: JointDrive
:rtype: UnityEngine.JointDrive
'''
pass
@staticmethod
def set_slerpDrive(arg1):
'''
:param arg1: JointDrive
:type arg1: UnityEngine.JointDrive
'''
pass
@staticmethod
def get_projectionMode():
'''
:returns: JointProjectionMode
:rtype: UnityEngine.JointProjectionMode
'''
pass
@staticmethod
def set_projectionMode(arg1):
'''
:param arg1: JointProjectionMode
:type arg1: UnityEngine.JointProjectionMode
'''
pass
@staticmethod
def get_projectionDistance():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_projectionDistance(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_projectionAngle():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_projectionAngle(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_configuredInWorldSpace():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_configuredInWorldSpace(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_swapBodies():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_swapBodies(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_connectedBody():
'''
:returns: Rigidbody
:rtype: UnityEngine.Rigidbody
'''
pass
@staticmethod
def set_connectedBody(arg1):
'''
:param arg1: Rigidbody
:type arg1: UnityEngine.Rigidbody
'''
pass
@staticmethod
def get_axis():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_axis(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_anchor():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_anchor(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_connectedAnchor():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_connectedAnchor(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_autoConfigureConnectedAnchor():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_autoConfigureConnectedAnchor(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_breakForce():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_breakForce(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_breakTorque():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_breakTorque(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_enableCollision():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_enableCollision(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_enablePreprocessing():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_enablePreprocessing(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_massScale():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_massScale(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_connectedMassScale():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_connectedMassScale(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_currentForce():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def get_currentTorque():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def get_transform():
'''
:returns: Transform
:rtype: UnityEngine.Transform
'''
pass
@staticmethod
def get_gameObject():
'''
:returns: GameObject
:rtype: UnityEngine.GameObject
'''
pass
@staticmethod
@overload
def GetComponent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
@overload
def GetComponent(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponent(arg1=None):
pass
@staticmethod
@overload
def GetComponentInChildren(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
@overload
def GetComponentInChildren(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponentInChildren(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1, arg2):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
:param arg2: Undefined variable
:type arg2: ListT.ListT
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1):
'''
:param arg1: Undefined variable
:type arg1: ListT.ListT
'''
pass
@staticmethod
def GetComponentsInChildren(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponentInParent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponentInParent(arg1=None):
pass
@staticmethod
@overload
def GetComponentsInParent(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
def GetComponentsInParent(arg1=None, arg2=None):
pass
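# The stubs above carry type information only. A rough usage sketch follows; it
# assumes (this stub file does not confirm it) that an UdonPie script fetches the
# joint from a GameObject and drives it through the get_/set_ accessors declared
# above. Enum members such as ConfigurableJointMotion.Locked mirror the Unity API
# and are assumptions about how UdonPie exposes them.
#
#   joint = self.gameObject.GetComponent(UnityEngine.ConfigurableJoint)
#   joint.set_xMotion(UnityEngine.ConfigurableJointMotion.Locked)
#   joint.set_yMotion(UnityEngine.ConfigurableJointMotion.Locked)
#   joint.set_zMotion(UnityEngine.ConfigurableJointMotion.Limited)
#   joint.set_targetPosition(UnityEngine.Vector3(0.0, 1.0, 0.0))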
# gamemaker.py
from Interface.models import HuntUser, Landmark, Penalty, Game
from django.utils import timezone
from datetime import datetime
class GameMaker:
def __init__(self):
pass
def add_landmark(self, input):
# input: List of length 4
if len(input) != 4:
return "Invalid input!"
name = input[0]
clue = input[1]
question = input[2]
answer = input[3]
try:
# Check that no landmarks of the same name are already in the database
Landmark.objects.get(name=name)
except Landmark.DoesNotExist:
# If the landmark doesn't exist, add it to the database
lm = Landmark(name=name, clue=clue, question=question, answer=answer)
lm.save()
return "Landmark " + name + " has been added!"
# Should only get here if a landmark of the same name exists
return "Landmark " + name + " already exists!"
def edit_landmark(self, input):
change = False
# Begin with empty strings for a full concatenation of which fields were altered on return
new_name = ""
clue = ""
question = ""
answer = ""
order = ""
ret_string_order = ""
points = ""
ret_string_points = ""
# We always assume we are at least trying to edit; this will return for every case
ret_string = "Edit to " + input[0] + " "
name = input[0]
lm = Landmark.objects.get(name=name)
# If name field is empty, do nothing
if input[1] != "":
lm.name = input[1]
change = True
new_name = "name "
# If clue field is empty, do nothing
if input[2] != "":
lm.clue = input[2]
change = True
clue = "clue "
# If question field is empty, do nothing
if input[3] != "":
lm.question = input[3]
change = True
question = "question "
# If answer field is empty, do nothing
if input[4] != "":
lm.answer = input[4]
change = True
answer = "answer "
# If order number field is empty, do nothing. Must be an integer!
if input[5] != "":
try:
lm.order_num = int(input[5])
change = True
order = "order "
except ValueError:
ret_string_order = " order number must be an integer!"
# If points field is empty, do nothing. Must be an integer!
if input[6] != "":
try:
lm.points = int(input[6])
change = True
points = "points "
except ValueError:
ret_string_points = " points must be an integer!"
lm.save()
# If no changes were made, the edit was unsuccessful. Return such
if not change:
return ret_string + "unsuccessful" + ret_string_order + ret_string_points
# Otherwise, return what fields were changed as well as any issues with order or points
return ret_string + new_name + clue + question + answer + order + points + "successful"\
+ ret_string_order + ret_string_points
def display_landmarks(self):
landmarks = Landmark.objects.all()
ret = ""
for landmark in landmarks:
# Skip the internal placeholder "dummy" landmark
if landmark.name != "dummy":
ret += landmark.name + "\n"
if ret == "":
ret = "There are no landmarks"
return ret
def remove_landmark(self, input):
# input: List of length 1
name = input[0]
try:
lm = Landmark.objects.get(name=name)
lm.delete()
except Landmark.DoesNotExist:
return "Couldn't find landmark with name " + name
return "Removed " + name + " from landmarks."
def display_status(self):
string = ''
teams = HuntUser.objects.all()
game = Game.objects.get(name="game")
game_over = True
for team in teams:
if team.name != "maker":
string += "Team: " + team.name + "\nScore: " + str(team.score) \
+ "\nPenalties: " + str(team.penalties) + "\n"
if game.running:
if team.current_landmark.order_num == 0:
string += "Current landmark: start\n"
elif team.game_ended:
string += "Current landmark: finish\n"
else:
cur = team.current_landmark.order_num - 1
string += "Current landmark: " + Landmark.objects.get(order_num=cur).name + "\n"
string += "\n"
if not team.game_ended:
game_over = False
if game.running:
string += "Game in progress"
elif game_over:
string += "The game has ended"
else:
string += "There is currently no game running"
if string == '':
string = 'No teams!'
return string
def display_menu(self):
return "Options\n\ndisplaystatus\nmaketeam [team name], [team password]\n" \
"editteam [team name to edit], [new team name], [new team password]\n" \
"addlandmark [name], [clue], [question], [answer]\n" \
"editlandmarks [name], [clue], [question], [answer], [order number], [points]\n" \
"displaylandmarks\nremovelandmark [name]\n" \
"setpenaltyscores [time points], [guess points]\n" \
"setpenalties [new time penalty], [new guess penalty]\n" \
"creategame [landmark name]...\nstartgame\nendgame\nlogout\n"
def make_team(self, input):
# input: List of length 2
if len(input) == 2:
name = input[0]
password = input[1]
dummy_landmark = Landmark.objects.get(name="dummy")
try:
# Check that there isn't already a team with that name in the database
HuntUser.objects.get(name=name)
except HuntUser.DoesNotExist:
# If team doesn't exist, add the team and return
team = HuntUser(name=name, password=password, current_landmark=dummy_landmark)
team.save()
return "Added " + name
# Should only reach this return statement if a team with the same name exists
return "Team " + name + " already exists!"
else:
# If input length was not 2, return
return "Invalid input!"
def edit_team(self, input):
# input: List of length 3
found = False
if len(input) == 3:
orig_name = input[0]
new_name = input[1]
new_pass = input[2]
ret_string = "Edited " + orig_name + " to have username " + new_name + " and password " + new_pass
try:
team = HuntUser.objects.get(name=orig_name)
team.name = new_name
team.password = new_pass
team.save()
found = True
except HuntUser.DoesNotExist:
ret_string = "Could not find that team!"
else:
ret_string = "Invalid input!"
found = True
if not found:
ret_string = "Could not find that team!"
return ret_string
def delete_team(self, input):
found = False
if len(input) == 1:
try:
team = HuntUser.objects.get(name=input[0])
team.delete()
found = True
except HuntUser.DoesNotExist:
pass
if found:
return "Removed " + input[0] + " from teams."
else:
return "Couldn't find team with name " + input[0]
else:
return "Invalid input!"
def set_penalty_scores(self, input):
if len(input) != 2:
return "Bad input!"
try:
time_value = int(input[0])
guess_value = int(input[1])
except ValueError:
return "Bad input! Need integers"
if time_value > 0 and guess_value > 0:
game = Game.objects.get(name="game")
game.time_penalty = time_value
game.guess_penalty = guess_value
game.save()
return "Set time penalty to " + input[0] + " and guess penalty to " + input[1]
return "Bad input! Need integers greater than 0"
def set_penalties(self, input):
# input: List of length 2
if len(input) == 2:
try:
time = int(input[0])
guess = int(input[1])
if time > 0 and guess > 0:
game = Game.objects.get(name="game")
game.guess_period = time
game.num_guesses = guess
game.save()
ret_string = "Time penalty is " + input[0] + " minutes and guess penalty is " + input[1] + " guesses"
else:
ret_string = "Invalid input! Need integers greater than 0"
except ValueError:
ret_string = "Invalid input! Need integers"
else:
ret_string = "Bad spacing! Need one space between time penalty and guess penalty"
return ret_string
def create_game(self, input):
# input: List of length > 1
i = 0
# First, check that game is not currently running
if Game.objects.get(name="game").running:
return "Game is already in progress!"
if len(input) == 0:
return "Need at least one landmark to create a game"
for landmark in Landmark.objects.all():
print(landmark)
# Reset all of the landmarks to index -1
landmark.order_num = -1
landmark.save()
# Loop through all of the names in the input and make sure they are all valid
for name in input:
try:
landmark = Landmark.objects.get(name=name)
except Landmark.DoesNotExist:
return "Landmark " + name + " is not a valid landmark!"
# Now that we know they're all valid landmarks, we can add them all to the game
for name in input:
landmark = Landmark.objects.get(name=name)
landmark.order_num = i
landmark.save()
i += 1
return "Game has been created!"
def start_game(self):
game = Game.objects.get(name="game")
if game.running:
return "Game already started!"
try:
# Check that the game was actually created before we can start it
lm = Landmark.objects.get(order_num=0)
except Landmark.DoesNotExist:
# If no landmarks have an order_num of 0, it means the game wasn't created
# Thus, return an error statement to the user
return "No landmarks are part of the game!"
game.running = True
game.time_start = datetime.now(tz=timezone.utc) # Set the official start time of the game
game.save()
teams = | |
# -*- coding: utf-8 -*-
#============================================================
#
# Deep Learning BLW Filtering
# Main
#
# author: <NAME>
# email: <EMAIL>
# github id: fperdigon
#
#===========================================================
import _pickle as pickle
from datetime import datetime
import time
import numpy as np
from utils.metrics import MAD, SSD, PRD, COS_SIM
from utils import visualization as vs
from Data_Preparation import data_preparation as dp
from digitalFilters.dfilters import FIR_test_Dataset, IIR_test_Dataset
from deepFilter.dl_pipeline import train_dl, test_dl
if __name__ == "__main__":
dl_experiments = [
'DRNN',
'FCN-DAE',
'Vanilla L',
'Vanilla NL',
'Multibranch LANL',
'Multibranch LANLD'
]
noise_versions = [1, 2]
for nv in noise_versions:
# Data_Preparation() assumes that the QT Database and the Noise Stress Test Database
# are uncompressed inside a folder called data
Dataset = dp.Data_Preparation(noise_version=nv)
# Save dataset
with open('data/dataset_nv' + str(nv) + '.pkl', 'wb') as output: # Overwrites any existing file.
pickle.dump(Dataset, output)
print('Dataset saved')
# Load dataset
with open('data/dataset_nv' + str(nv) + '.pkl', 'rb') as input:
Dataset = pickle.load(input)
train_time_list = []
test_time_list = []
for experiment in range(len(dl_experiments)):
start_train = datetime.now()
train_dl(Dataset, dl_experiments[experiment])
end_train = datetime.now()
train_time_list.append(end_train - start_train)
start_test = datetime.now()
[X_test, y_test, y_pred] = test_dl(Dataset, dl_experiments[experiment])
end_test = datetime.now()
test_time_list.append(end_test - start_test)
test_results = [X_test, y_test, y_pred]
# Save Results
with open('test_results_' + dl_experiments[experiment] + '_nv' + str(nv) + '.pkl', 'wb') as output: # Overwrites any existing file.
pickle.dump(test_results, output)
print('Results from experiment ' + dl_experiments[experiment] + '_nv' + str(nv) + ' saved')
time.sleep(60)
# Classical Filters
# FIR
start_test = datetime.now()
[X_test_f, y_test_f, y_filter] = FIR_test_Dataset(Dataset)
end_test = datetime.now()
train_time_list.append(0)
test_time_list.append(end_test - start_test)
test_results_FIR = [X_test_f, y_test_f, y_filter]
# Save FIR filter results
with open('test_results_FIR_nv' + str(nv) + '.pkl', 'wb') as output: # Overwrites any existing file.
pickle.dump(test_results_FIR, output)
print('Results from experiment FIR filter nv ' + str(nv) + ' saved')
# IIR
start_test = datetime.now()
[X_test_f, y_test_f, y_filter] = IIR_test_Dataset(Dataset)
end_test = datetime.now()
train_time_list.append(0)
test_time_list.append(end_test - start_test)
test_results_IIR = [X_test_f, y_test_f, y_filter]
# Save IIR filter results
with open('test_results_IIR_nv' + str(nv) + '.pkl', 'wb') as output: # Overwrites any existing file.
pickle.dump(test_results_IIR, output)
print('Results from experiment IIR filter nv ' + str(nv) + ' saved')
# Saving timing list
timing = [train_time_list, test_time_list]
with open('timing_nv' + str(nv) + '.pkl', 'wb') as output: # Overwrites any existing file.
pickle.dump(timing, output)
print('Timing nv ' + str(nv) + ' saved')
####### LOAD EXPERIMENTS #######
# Load timing
with open('timing_nv1.pkl', 'rb') as input:
timing_nv1 = pickle.load(input)
[train_time_list_nv1, test_time_list_nv1] = timing_nv1
with open('timing_nv2.pkl', 'rb') as input:
timing_nv2 = pickle.load(input)
[train_time_list_nv2, test_time_list_nv2] = timing_nv2
train_time_list = []
test_time_list = []
for i in range(len(train_time_list_nv1)):
train_time_list.append(train_time_list_nv1[i] + train_time_list_nv2[i])
for i in range(len(test_time_list_nv1)):
test_time_list.append(test_time_list_nv1[i] + test_time_list_nv2[i])
timing = [train_time_list, test_time_list]
# Load Results DRNN
with open('test_results_' + dl_experiments[0] + '_nv1.pkl', 'rb') as input:
test_DRNN_nv1 = pickle.load(input)
with open('test_results_' + dl_experiments[0] + '_nv2.pkl', 'rb') as input:
test_DRNN_nv2 = pickle.load(input)
test_DRNN = [np.concatenate((test_DRNN_nv1[0], test_DRNN_nv2[0])),
np.concatenate((test_DRNN_nv1[1], test_DRNN_nv2[1])),
np.concatenate((test_DRNN_nv1[2], test_DRNN_nv2[2]))]
# Load Results FCN_DAE
with open('test_results_' + dl_experiments[1] + '_nv1.pkl', 'rb') as input:
test_FCN_DAE_nv1 = pickle.load(input)
with open('test_results_' + dl_experiments[1] + '_nv2.pkl', 'rb') as input:
test_FCN_DAE_nv2 = pickle.load(input)
test_FCN_DAE = [np.concatenate((test_FCN_DAE_nv1[0], test_FCN_DAE_nv2[0])),
np.concatenate((test_FCN_DAE_nv1[1], test_FCN_DAE_nv2[1])),
np.concatenate((test_FCN_DAE_nv1[2], test_FCN_DAE_nv2[2]))]
# Load Results Vanilla L
with open('test_results_' + dl_experiments[2] + '_nv1.pkl', 'rb') as input:
test_Vanilla_L_nv1 = pickle.load(input)
with open('test_results_' + dl_experiments[2] + '_nv2.pkl', 'rb') as input:
test_Vanilla_L_nv2 = pickle.load(input)
test_Vanilla_L = [np.concatenate((test_Vanilla_L_nv1[0], test_Vanilla_L_nv2[0])),
np.concatenate((test_Vanilla_L_nv1[1], test_Vanilla_L_nv2[1])),
np.concatenate((test_Vanilla_L_nv1[2], test_Vanilla_L_nv2[2]))]
# Load Results Exp Vanilla NL
with open('test_results_' + dl_experiments[3] + '_nv1.pkl', 'rb') as input:
test_Vanilla_NL_nv1 = pickle.load(input)
with open('test_results_' + dl_experiments[3] + '_nv2.pkl', 'rb') as input:
test_Vanilla_NL_nv2 = pickle.load(input)
test_Vanilla_NL = [np.concatenate((test_Vanilla_NL_nv1[0], test_Vanilla_NL_nv2[0])),
np.concatenate((test_Vanilla_NL_nv1[1], test_Vanilla_NL_nv2[1])),
np.concatenate((test_Vanilla_NL_nv1[2], test_Vanilla_NL_nv2[2]))]
# Load Results Multibranch LANL
with open('test_results_' + dl_experiments[4] + '_nv1.pkl', 'rb') as input:
test_Multibranch_LANL_nv1 = pickle.load(input)
with open('test_results_' + dl_experiments[4] + '_nv2.pkl', 'rb') as input:
test_Multibranch_LANL_nv2 = pickle.load(input)
test_Multibranch_LANL = [np.concatenate((test_Multibranch_LANL_nv1[0], test_Multibranch_LANL_nv2[0])),
np.concatenate((test_Multibranch_LANL_nv1[1], test_Multibranch_LANL_nv2[1])),
np.concatenate((test_Multibranch_LANL_nv1[2], test_Multibranch_LANL_nv2[2]))]
# Load Results Multibranch LANLD
with open('test_results_' + dl_experiments[5] + '_nv1.pkl', 'rb') as input:
test_Multibranch_LANLD_nv1 = pickle.load(input)
with open('test_results_' + dl_experiments[5] + '_nv2.pkl', 'rb') as input:
test_Multibranch_LANLD_nv2 = pickle.load(input)
test_Multibranch_LANLD = [np.concatenate((test_Multibranch_LANLD_nv1[0], test_Multibranch_LANLD_nv2[0])),
np.concatenate((test_Multibranch_LANLD_nv1[1], test_Multibranch_LANLD_nv2[1])),
np.concatenate((test_Multibranch_LANLD_nv1[2], test_Multibranch_LANLD_nv2[2]))]
# Load Result FIR Filter
with open('test_results_FIR_nv1.pkl', 'rb') as input:
test_FIR_nv1 = pickle.load(input)
with open('test_results_FIR_nv2.pkl', 'rb') as input:
test_FIR_nv2 = pickle.load(input)
test_FIR = [np.concatenate((test_FIR_nv1[0], test_FIR_nv2[0])),
np.concatenate((test_FIR_nv1[1], test_FIR_nv2[1])),
np.concatenate((test_FIR_nv1[2], test_FIR_nv2[2]))]
# Load Result IIR Filter
with open('test_results_IIR_nv1.pkl', 'rb') as input:
test_IIR_nv1 = pickle.load(input)
with open('test_results_IIR_nv2.pkl', 'rb') as input:
test_IIR_nv2 = pickle.load(input)
test_IIR = [np.concatenate((test_IIR_nv1[0], test_IIR_nv2[0])),
np.concatenate((test_IIR_nv1[1], test_IIR_nv2[1])),
np.concatenate((test_IIR_nv1[2], test_IIR_nv2[2]))]
####### Calculate Metrics #######
print('Calculating metrics ...')
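# The metric implementations are imported from utils.metrics above. For reference
# only, the sketches below show the conventional formulas these names usually denote
# over arrays shaped (n_signals, n_samples); they are illustrative and not the
# project's own definitions, which may differ in detail.
def ssd_ref(y_true, y_pred):
    return np.sum((y_true - y_pred) ** 2, axis=1)  # sum of squared differences
def mad_ref(y_true, y_pred):
    return np.max(np.abs(y_true - y_pred), axis=1)  # maximum absolute difference
def prd_ref(y_true, y_pred):
    num = np.sum((y_true - y_pred) ** 2, axis=1)
    den = np.sum((y_true - np.mean(y_true, axis=1, keepdims=True)) ** 2, axis=1)
    return 100.0 * np.sqrt(num / den)  # percentage root-mean-square difference
def cos_sim_ref(y_true, y_pred):
    num = np.sum(y_true * y_pred, axis=1)
    den = np.linalg.norm(y_true, axis=1) * np.linalg.norm(y_pred, axis=1)
    return num / den  # cosine similarity per signal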
# DL Metrics
# Exp DRNN
[X_test, y_test, y_pred] = test_DRNN
SSD_values_DL_DRNN = SSD(y_test, y_pred)
MAD_values_DL_DRNN = MAD(y_test, y_pred)
PRD_values_DL_DRNN = PRD(y_test, y_pred)
COS_SIM_values_DL_DRNN = COS_SIM(y_test, y_pred)
# Exp FCN-DAE
[X_test, y_test, y_pred] = test_FCN_DAE
SSD_values_DL_FCN_DAE = SSD(y_test, y_pred)
MAD_values_DL_FCN_DAE = MAD(y_test, y_pred)
PRD_values_DL_FCN_DAE = PRD(y_test, y_pred)
COS_SIM_values_DL_FCN_DAE = COS_SIM(y_test, y_pred)
# Vanilla L
[X_test, y_test, y_pred] = test_Vanilla_L
SSD_values_DL_exp_1 = SSD(y_test, y_pred)
MAD_values_DL_exp_1 = MAD(y_test, y_pred)
PRD_values_DL_exp_1 = PRD(y_test, y_pred)
COS_SIM_values_DL_exp_1 = COS_SIM(y_test, y_pred)
# Vanilla_NL
[X_test, y_test, y_pred] = test_Vanilla_NL
SSD_values_DL_exp_2 = SSD(y_test, y_pred)
MAD_values_DL_exp_2 = MAD(y_test, y_pred)
PRD_values_DL_exp_2 = PRD(y_test, y_pred)
COS_SIM_values_DL_exp_2 = COS_SIM(y_test, y_pred)
# Multibranch_LANL
[X_test, y_test, y_pred] = test_Multibranch_LANL
SSD_values_DL_exp_3 = SSD(y_test, y_pred)
MAD_values_DL_exp_3 = MAD(y_test, y_pred)
PRD_values_DL_exp_3 = PRD(y_test, y_pred)
COS_SIM_values_DL_exp_3 = COS_SIM(y_test, y_pred)
# Multibranch_LANLD
[X_test, y_test, y_pred] = test_Multibranch_LANLD
SSD_values_DL_exp_4 = SSD(y_test, y_pred)
MAD_values_DL_exp_4 = MAD(y_test, y_pred)
PRD_values_DL_exp_4 = PRD(y_test, y_pred)
COS_SIM_values_DL_exp_4 = COS_SIM(y_test, y_pred)
# Digital Filtering
# FIR Filtering Metrics
[X_test, y_test, y_filter] = test_FIR
SSD_values_FIR = SSD(y_test, y_filter)
MAD_values_FIR = MAD(y_test, y_filter)
PRD_values_FIR = PRD(y_test, y_filter)
COS_SIM_values_FIR = COS_SIM(y_test, y_filter)
# IIR Filtering Metrics (Best)
[X_test, y_test, y_filter] = test_IIR
SSD_values_IIR = SSD(y_test, y_filter)
MAD_values_IIR = MAD(y_test, y_filter)
PRD_values_IIR = PRD(y_test, y_filter)
COS_SIM_values_IIR = COS_SIM(y_test, y_filter)
####### Results Visualization #######
SSD_all = [SSD_values_FIR,
SSD_values_IIR,
SSD_values_DL_FCN_DAE,
SSD_values_DL_DRNN,
SSD_values_DL_exp_1,
SSD_values_DL_exp_2,
SSD_values_DL_exp_3,
SSD_values_DL_exp_4,
]
MAD_all = [MAD_values_FIR,
MAD_values_IIR,
MAD_values_DL_FCN_DAE,
MAD_values_DL_DRNN,
MAD_values_DL_exp_1,
MAD_values_DL_exp_2,
MAD_values_DL_exp_3,
MAD_values_DL_exp_4,
]
PRD_all = [PRD_values_FIR,
PRD_values_IIR,
PRD_values_DL_FCN_DAE,
PRD_values_DL_DRNN,
PRD_values_DL_exp_1,
PRD_values_DL_exp_2,
PRD_values_DL_exp_3,
PRD_values_DL_exp_4,
]
COS_SIM_all = [COS_SIM_values_FIR,
COS_SIM_values_IIR,
COS_SIM_values_DL_FCN_DAE,
COS_SIM_values_DL_DRNN,
COS_SIM_values_DL_exp_1,
COS_SIM_values_DL_exp_2,
COS_SIM_values_DL_exp_3,
COS_SIM_values_DL_exp_4,
]
Exp_names = ['FIR Filter', 'IIR Filter'] + dl_experiments
metrics = ['SSD', 'MAD', 'PRD', 'COS_SIM']
metric_values = [SSD_all, MAD_all, PRD_all, COS_SIM_all]
# Metrics table
vs.generate_table(metrics, metric_values, Exp_names)
# Timing table
timing_var = ['training', 'test']
vs.generate_table_time(timing_var, timing, Exp_names, gpu=True)
################################################################################################################
# Segmentation by noise amplitude
rnd_test = np.load('rnd_test.npy')
rnd_test = np.concatenate([rnd_test, rnd_test])
segm = [0.2, 0.6, 1.0, 1.5, 2.0]  # real number of segments is len(segm) - 1
SSD_seg_all = []
MAD_seg_all = []
PRD_seg_all = []
COS_SIM_seg_all = []
for idx_exp in range(len(Exp_names)):
SSD_seg = [None] * (len(segm) - 1)
MAD_seg = [None] * (len(segm) - 1)
PRD_seg = [None] * (len(segm) - 1)
COS_SIM_seg = [None] * (len(segm) - 1)
for idx_seg in range(len(segm) - 1):
SSD_seg[idx_seg] = []
MAD_seg[idx_seg] = []
PRD_seg[idx_seg] = []
COS_SIM_seg[idx_seg] = []
for idx in range(len(rnd_test)):
# Object under analysis (oua)
# SSD
oua = SSD_all[idx_exp][idx]
if rnd_test[idx] > segm[idx_seg] and rnd_test[idx] < segm[idx_seg + 1]:
SSD_seg[idx_seg].append(oua)
# MAD
oua = MAD_all[idx_exp][idx]
if rnd_test[idx] > segm[idx_seg] and rnd_test[idx] < segm[idx_seg + 1]:
MAD_seg[idx_seg].append(oua)
# PRD
oua = PRD_all[idx_exp][idx]
if rnd_test[idx] > segm[idx_seg] and rnd_test[idx] < segm[idx_seg + 1]:
PRD_seg[idx_seg].append(oua)
# COS SIM
oua = COS_SIM_all[idx_exp][idx]
if rnd_test[idx] > segm[idx_seg] and rnd_test[idx] < segm[idx_seg + 1]:
COS_SIM_seg[idx_seg].append(oua)
# Processing the last index
# SSD
SSD_seg[-1] = []
for idx in range(len(rnd_test)):
# Object under analysis
oua = SSD_all[idx_exp][idx]
if rnd_test[idx] > segm[-2]:
SSD_seg[-1].append(oua)
SSD_seg_all.append(SSD_seg) # [exp][seg][item]
# MAD
MAD_seg[-1] = []
for idx in range(len(rnd_test)):
# Object under analysis
oua = MAD_all[idx_exp][idx]
if rnd_test[idx] > segm[-2]:
MAD_seg[-1].append(oua)
MAD_seg_all.append(MAD_seg) # [exp][seg][item]
# PRD
PRD_seg[-1] = []
for idx in range(len(rnd_test)):
# Object under analysis
oua = PRD_all[idx_exp][idx]
if rnd_test[idx] > segm[-2]:
PRD_seg[-1].append(oua)
PRD_seg_all.append(PRD_seg) # [exp][seg][item]
# COS SIM
COS_SIM_seg[-1] = []
for idx in range(len(rnd_test)):
# Object under analysis
oua = COS_SIM_all[idx_exp][idx]
if rnd_test[idx] > segm[-2]:
COS_SIM_seg[-1].append(oua)
COS_SIM_seg_all.append(COS_SIM_seg) # [exp][seg][item]
# Printing Tables
seg_table_column_name = []
for idx_seg in range(len(segm) - 1):
column_name = str(segm[idx_seg]) + ' < noise < ' + str(segm[idx_seg + 1])
seg_table_column_name.append(column_name)
# SSD Table
SSD_seg_all
# Standard library imports
import os
import warnings
# Third party imports
import numpy as np
import astropy.units as u
from astropy.io import fits
from astropy import constants
from galpy.orbit import Orbit
from galpy.util.bovy_conversion import time_in_Gyr
from scipy.integrate import solve_ivp
from scipy.interpolate import InterpolatedUnivariateSpline
# Local imports
from .tools import period
from .tidal_tensor import TidalTensor
from .vector_conversion import elements_to_vectors, vectors_to_elements
# Physical constants
_G = constants.G.to(u.pc**3/u.solMass/u.yr**2).value
_c = constants.c.to(u.pc/u.yr).value
# Factors for conversion from galpy internal units
_pc = 8000
_yr = time_in_Gyr(220, 8) * 1e+9
class KeplerRing:
"""
A class used to evolve a Keplerian ring using vectorial formalism.
"""
def __init__(self, ecc, inc, long_asc, arg_peri, r, v, m=1, a=1):
"""Initialize a Keplerian ring.
Parameters
----------
ecc : float
Eccentricity. Must be between 0 and 1.
inc : float
Inclination relative to the x-y plane in radians.
long_asc : float
Longitude of the ascending node in radians.
arg_peri : float
Argument of pericentre in radians.
r : array_like
Initial position of the barycentre in Galactocentric cylindrical
coordinates, of the form [R, z, phi] in [pc, pc, rad].
v : array_like
Initial velocity of the barycentre in Galactocentric cylindrical
coordinates, of the form [v_R, v_z, v_phi] in km/s.
a : float, optional
Semi-major axis of the ring in AU.
m : float, optional
Total mass of the ring in solar masses.
"""
# Initial conditions
self.set_elements(ecc, inc, long_asc, arg_peri, m=m, a=a)
self._r0 = np.array(r)
self._v0 = np.array(v)
# Result arrays
self._t = None # Time array
self._e = None # e vector array
self._j = None # j vector array
self._r = None # Position vector array
self._v = None # Velocity vector array
# List of splines to interpolate the integrated parameters
self._interpolatedInner = None
self._interpolatedOuter = None
# Check that e0 and j0 are valid
if self._e0.shape != (3,) or self._j0.shape != (3,):
raise ValueError("Orbital elements must be scalars, not arrays")
@classmethod
def from_file(cls, filename):
"""Initialize a KeplerRing from a file.
Parameters
----------
filename : str
The .fits file from which to load. The format must be the same as
the file generated by KeplerRing.save().
Returns
-------
instance : KeplerRing
A KeplerRing instance containing the data from the file.
"""
instance = cls(0.1, 0, 0, 0, [0, 0, 0], [0, 0, 0])
instance.restore(filename)
return instance
def set_elements(self, ecc, inc, long_asc, arg_peri, a=None, m=None):
"""Set the orbital elements of this KeplerRing.
Parameters
----------
ecc : float
Eccentricity. Must be between 0 and 1.
inc : float
Inclination relative to the x-y plane in radians.
long_asc : float
Longitude of the ascending node in radians.
arg_peri : float
Argument of pericentre in radians.
a : float, optional
Semi-major axis of the ring in AU.
m : float, optional
Total mass of the ring in solar masses.
Returns
-------
None
"""
if a is not None:
self._a = (a*u.au).to(u.pc).value
if m is not None:
self._m = m
self._e0, self._j0 = elements_to_vectors(ecc, inc, long_asc, arg_peri)
# Constant factor for integration
self._tau = self._a ** 1.5 / 2 / _G ** 0.5 / self._m ** 0.5
# Reset the result arrays
self._t = None
self._e = None
self._j = None
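# For reference, elements_to_vectors (imported from .vector_conversion) maps the
# Keplerian angles onto the Milankovitch vectors. Under the usual convention (the
# local helper may differ in sign conventions, so this is illustrative only):
#   e = ecc * ( cos(w)cos(O) - sin(w)sin(O)cos(i),
#               cos(w)sin(O) + sin(w)cos(O)cos(i),
#               sin(w)sin(i) )
#   j = sqrt(1 - ecc**2) * ( sin(i)sin(O), -sin(i)cos(O), cos(i) )
# with w = arg_peri, O = long_asc, i = inc; e points toward pericentre and j along
# the orbital angular momentum, so e . j = 0 and |e|**2 + |j|**2 = 1.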
def integrate(self, t, pot=None, func=None, r_pot=None, rtol=1e-9,
atol=1e-12, r_method='dop853_c', ej_method='LSODA',
relativity=False, checkpoint_file=None, checkpoint_size=None):
"""Integrate the orbit of this KeplerRing.
Parameters
----------
t : array_like
Array of times at which to output, in years. Must be 1D and sorted.
pot : galpy.potential.Potential or list of Potentials, optional
A potential used to integrate the orbit. This potential's tidal
tensor will be used to evolve the e and j vectors. If not provided,
you must provide both a func and r_pot parameter to integrate the
e/j vectors and barycentre, respectively.
func : callable, optional
An additional term to add to the derivatives of the e and j vectors.
The calling signature is func(t, e, j, r) where t is the time step,
e and j are the eccentricity and dimensionless angular momentum
vectors, and r is the position vector of the barycentre in Cartesian
coordinates. The return value must be a tuple (de, dj), where de and
dj are arrays of shape (3,) representing the derivatives of e and j.
r_pot : galpy.potential.Potential or list of Potentials, optional
An additional potential used to integrate the barycentre position,
but not to evolve the e and j vectors. This potential will be summed
with pot to integrate the r vector.
rtol, atol : float or array_like, optional
Relative and absolute error tolerances for the solver. Here, rtol
controls the number of correct digits, while atol controls the
threshold below which the precision of a component of e or j is no
longer guaranteed. For more details, see the documentation of the
scipy.integrate.solve_ivp function.
r_method : str, optional
Method used to integrate the barycentre position. See the
documentation for galpy.orbit.Orbit.integrate for available options.
ej_method : str, optional
Integration method for evolving the e and j vectors. See the
documentation for scipy.integrate.solve_ivp for available options.
relativity : boolean, optional
If True, will include the relativistic precession of the e vector.
checkpoint_file : str, optional
The path to a checkpoint file. If set, this KeplerRing will attempt
to restore data from this file before beginning the integration.
Additionally, if checkpoint_size is set, then the results of the
integration will be periodically saved to this file. The format of
this file must be the same as that generated by KeplerRing.save().
checkpoint_size : int, optional
The number of time steps to save at each checkpoint. If set, you
must also provide checkpoint_file.
Returns
-------
None
"""
# Whether the integration should start from scratch, or resume
resume = False
# Attempt to restore from a checkpoint file
if checkpoint_file is not None:
if checkpoint_file.lower()[-5:] != ".fits":
checkpoint_file = checkpoint_file + ".fits"
if os.path.exists(checkpoint_file):
self.restore(checkpoint_file)
resume = True
# Check that the provided time array matches the checkpoint file
slc = len(self._t)
if not np.allclose(self._t, t[:slc], rtol=1e-12, atol=0):
raise KeplerRingError("t array does not match checkpoint")
# Update the time array to continue from the checkpoint
t = t[slc-1:]
if len(t) <= 1:
return
# Break up the time array according to checkpoint_size
if checkpoint_size is not None:
if not isinstance(checkpoint_size, int) or checkpoint_size < 1:
raise KeplerRingError("checkpoint_size must be a positive int")
if checkpoint_file is None:
raise KeplerRingError("checkpoint_file must be provided if "
"checkpoint_size is provided")
ts = []
for i in range(0, len(t), checkpoint_size):
ts.append(t[i:i+checkpoint_size+1])
else:
ts = [t]
# Integrate piece-by-piece
for i in range(len(ts)):
self._integrate(ts[i], pot=pot, func=func, r_pot=r_pot, rtol=rtol,
atol=atol, r_method=r_method, ej_method=ej_method,
relativity=relativity, resume=resume)
if checkpoint_file is not None:
self.save(checkpoint_file)
resume = True
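# A minimal usage sketch (illustrative values; assumes galpy is installed and the
# surrounding package imports resolve):
#
#   from galpy.potential import MWPotential2014
#   kr = KeplerRing(ecc=0.5, inc=1.4, long_asc=0.0, arg_peri=0.0,
#                   r=[8000., 20., 0.], v=[0., 0., 220.], m=2., a=100.)
#   ts = np.linspace(0., 1e9, 1001)          # 1 Gyr of output times, in years
#   kr.integrate(ts, pot=MWPotential2014)
#   print(kr.ecc(ts[-1]))                    # eccentricity at the final time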
def e(self, t=None):
"""Return the e vector at a specified time.
Parameters
----------
t : array_like, optional
A time or array of times at which to retrieve e.
Returns
-------
e : ndarray
e vector at the specified time steps.
"""
return self._params(t=t)[0]
def j(self, t=None):
"""Return the j vector at a specified time.
Parameters
----------
t : array_like, optional
A time or array of times at which to retrieve j.
Returns
-------
j : ndarray
j vector at the specified time steps.
"""
return self._params(t=t)[1]
def r(self, t=None):
"""Return the position vector at a specified time.
Parameters
----------
t : array_like, optional
A time or array of times at which to retrieve r.
Returns
-------
r : ndarray
Position vector at the specified time steps. Has the form
[R, z, phi] in [pc, pc, rad].
"""
return self._params(t=t)[2]
def v(self, t=None):
"""Return the velocity vector at a specified time.
Parameters
----------
t : array_like, optional
A time or array of times at which to retrieve v.
Returns
-------
v : ndarray
Velocity vector at the specified time steps. Has the form
[v_R, v_z, v_phi] in km/s.
"""
return self._params(t=t)[3]
def ecc(self, t=None):
"""Return the eccentricity at a specified time.
Parameters
----------
t : array_like, optional
A time or array of times at which to retrieve the eccentricity.
Returns
-------
ecc : ndarray
Eccentricity at the specified time steps.
"""
# The scalar eccentricity is the norm of the e vector
return np.linalg.norm(self.e(t), axis=-1)
# Backup our internal meta data
self._backup_remote_meta_info()
# Backup `virsh dumpxml` output
self._backup_remote_xml()
# Backup logical volume snapshot to disk image file using `dd`
self._backup_remote_lv()
def _backup_remote_directory(self):
'''
Verify the remote directory exists. If not, attempt to create it.
'''
command = self._remote_ssh_command(['mkdir', '-p', '{0}'.format(self.args.source)])
self._output('Verifying the remote directory over ssh, creating it if needed: {0}'.format(' '.join(command)), 2)
self._execute(command)
def _backup_remote_meta_info(self):
'''
Send the VM meta file over `scp`
'''
# Save locally then transfer via SCP. Not ideal but popen() has trouble
# with directional pipes without resorting to shell=True. Since we have
# user input we don't want to use shell=True.
local_meta_file = '{0}-{1}.temp.meta.txt'.format(self.args.name, self.now)
self._output('Writing temporary meta info file locally at "{0}" before transfering via SCP remotely.'.format(local_meta_file), 2)
meta_dict = self._create_vm_meta(self.args.name)
meta_json = self._return_json(meta_dict)
self._write_file(local_meta_file, meta_json)
# Display action/meta information
self._output('Backup VM "{0}" to "{1}"'.format(self.args.name, self.args.source))
self._pprint_meta(meta_dict)
self._output('Now executing SCP file transfer of local meta info file.', 2)
target = '{0}meta.txt'.format(self.args.source)
command = self._remote_target_scp_command(local_meta_file, target)
self._execute(command)
self._output('Unlinking the local temporary meta info file at "{0}".'.format(local_meta_file), 2)
self._unlink_file(local_meta_file)
def _backup_remote_xml(self):
'''
Send the VM XML file over `scp`
'''
# Save locally then transfer via SCP. Not ideal but popen() has trouble
# with directional pipes without resorting to shell=True. Since we have
# user input we don't want to use shell=True.
target_xml_file = os.path.realpath('{0}-{1}.temp.xml'.format(self.args.name, self.now))
self._output('Writing temporary VM XML file locally at "{0}" before transfering via SCP remotely.'.format(target_xml_file), 2)
self._write_file(target_xml_file, self.vm_info(self.args.name, 'xml'))
self._output('Now executing SCP file transfer of local VM XML file', 2)
target = '{0}{1}.xml'.format(self.args.source, self.args.name)
command = self._remote_target_scp_command(target_xml_file, target)
self._execute(command)
self._output('Unlinking the local temporary VM XML file at "{0}".'.format(target_xml_file), 2)
self._unlink_file(target_xml_file)
@execute_safely
def _backup_remote_lv(self):
'''
Convert the LV snapshot to a disk image in a remote location using `ssh`.
If specified, use compression on local side first.
'''
# Set variables
vm = self.args.name
vm_path = self.vm_info(vm, 'disk')
if self.args.compression != 'none':
zip_extension = '.' + self.args.compression
zip_command = [str(self.args.compression), '-c']
else:
zip_extension = ''
zip_command = None
of = '{0}{1}.img{2}'.format(self.args.source, vm, zip_extension)
# Create commands
command_queue = []
# Add dd command
command_queue.append(['dd', 'bs={0}'.format(self.args.block_size), 'if={0}.snapshot'.format(vm_path)])
# Add zip command
if zip_command:
command_queue.append(zip_command)
# Add remote ssh command
ssh_command = self._remote_ssh_command(['dd', 'bs={0}'.format(self.args.block_size), 'of={0}'.format(of)])
command_queue.append(ssh_command)
# Execute commands
self._output('Backing Up VM disk image. This will take time.', show_timestamp=True)
self._output('Starting dd remote backup', 2)
self._execute_queue(command_queue)
self._output('Successfully completed dd remote backup', 2)
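# _execute_queue() is defined elsewhere in this class; the sketch below shows the
# kind of subprocess chaining it presumably performs (names and details here are
# assumptions, not the actual implementation): each command's stdout feeds the next
# command's stdin, which avoids resorting to shell=True.
#
#   import subprocess
#   def _chain(commands):
#       procs = []
#       prev_stdout = None
#       for command in commands:
#           proc = subprocess.Popen(command, stdin=prev_stdout,
#                                   stdout=subprocess.PIPE)
#           if prev_stdout is not None:
#               prev_stdout.close()   # let upstream see SIGPIPE if downstream dies
#           prev_stdout = proc.stdout
#           procs.append(proc)
#       output = procs[-1].communicate()[0]
#       for proc in procs:
#           proc.wait()
#       return output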
# --------------------------------------------------------------------------
# Action function - Backup Local
# --------------------------------------------------------------------------
def _backup_local(self):
'''
Backup VM meta info, XML and LV snapshot to local directory
'''
self._output('Executing local backup action', 2)
# Setup local vm storage directory
self._verify_local_vm_storage()
# Backup our internal meta data
self._backup_local_meta_info()
# Backup `virsh dumpxml` output
self._backup_local_xml()
# Backup logical volume snapshot to disk image file using `dd`
self._backup_local_lv()
def _verify_local_vm_storage(self):
'''
Verify the path exists. If it does not, stepwise check each directory
and attempt to create the full path. Save the parsed path in the arg
array.
'''
# Set variables
path = self.args.source
# Create directory step-wise if does not already exist
if not os.path.isdir(path):
try:
os.makedirs(path)
except IOError, e:
self._raise(e, 'Could not create storage directory in "{0}"'.format(path))
except OSError:
pass
self._output('Verified storage directory in "{0}"'.format(path), 2)
def _backup_local_meta_info(self):
'''
Backup VM metadata info file
'''
# Backup meta to file
meta_file = '{0}/meta.txt'.format(self.args.source)
meta_dict = self._create_vm_meta(self.args.name)
meta_json = self._return_json(meta_dict)
self._write_file(meta_file, meta_json)
# Display action/meta information
self._output('Backup VM "{0}" to "{1}"'.format(self.args.name, self.args.source))
self._pprint_meta(meta_dict)
def _backup_local_xml(self):
'''
Backup VM XML to file
'''
xml_file = '{0}/{1}.xml'.format(self.args.source, self.args.name)
self._write_file(xml_file, self.vm_info(self.args.name, 'xml'))
@execute_safely
def _backup_local_lv(self):
'''
Backup VM logical volume snapshot to a disk image, using compression
if specified.
'''
# Set variables
vm = self.args.name
vm_path = self.vm_info(vm, 'disk')
if self.args.compression != 'none':
zip_extension = '.' + self.args.compression
zip_command = [str(self.args.compression), '-c']
else:
zip_extension = ''
zip_command = None
of = '{0}{1}.img{2}'.format(self.args.source, vm, zip_extension)
# Create commands
command_queue = []
command_queue.append(['dd', 'bs={0}'.format(self.args.block_size), 'if={0}.snapshot'.format(vm_path)])
if zip_command:
command_queue.append(zip_command)
command_queue.append(['dd', 'bs={0}'.format(self.args.block_size), 'of={0}'.format(of)])
# Execute commands
self._output('Backing Up VM disk image. This will take time.', show_timestamp=True)
self._output('Starting dd local backup', 2)
self._execute_queue(command_queue)
self._output('Successfully completed dd local backup', 2)
# --------------------------------------------------------------------------
# Action function - Import
# --------------------------------------------------------------------------
def import_vm(self):
'''
Import a new VM from a VM backup image. Default action is to load from a
backup directory containing a meta file, XML file and VM disk image. If
the remote argument is set, will pipe the meta file, XML file and VM disk
image from a remote location using SSH.
'''
self._output('Starting Import action.', 2)
# Determine whether this is a live backup or backup from storage.
if self.args.remote:
data = self._import_remote()
success_message = 'Success: imported VM from a remote VM backup directory "{0}".'.format('{0}:{1}'.format(self.args.remote, self.args.source))
else:
data = self._import_local()
success_message = 'Success: imported VM from a local VM backup directory "{0}".'.format(self.args.source)
# virsh define target_xml file
self._vm_define(data['target_xml_file'])
# Boot VM if argument has been passed
if self.args.start:
self._vm_start(data['target_name'])
# Add new VM to autostart list if the autostart argument has been passed
if self.args.autostart:
self._vm_autostart(data['target_name'])
# Remove temporary target_xml_file
self._unlink_file(data['target_xml_file'])
# Print success message and warning to change hostname
self._output(success_message)
self._output('\n**Note: Guest OS may need additional configuration.')
self._output('Changing hostname can be done by updating ' + \
'/etc/hostname and /etc/hosts and using the `hostname` ' + \
'command.')
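# The self.args.* attributes used throughout (name, source, remote, start,
# autostart, compression, block_size) suggest an argparse front-end along these
# lines; the exact flag names below are guesses for illustration only:
#
#   parser.add_argument('name', help='VM name')
#   parser.add_argument('--source', help='backup directory')
#   parser.add_argument('--remote', help='user@host for remote backup/import over ssh')
#   parser.add_argument('--compression', default='none', help='e.g. gzip or bzip2')
#   parser.add_argument('--block-size', dest='block_size', default='1M')
#   parser.add_argument('--start', action='store_true')
#   parser.add_argument('--autostart', action='store_true')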
# --------------------------------------------------------------------------
# Action function - Import Remote
# --------------------------------------------------------------------------
def _import_remote(self):
'''
Import a VM from a remote backup image over ssh.
'''
self._output('Executing remote import action', 2)
# Set variables
return_data = {}
remote_address = self.args.remote
remote_dir = self.args.source
remote_path = '{0}:{1}'.format(remote_address, remote_dir)
# Confirm meta.txt file exists
self._output('Confirming meta.txt file exists in remote directory: "{0}"'.format(remote_path), 2)
command = self._remote_ssh_command(['test', '-f', '{0}/meta.txt'.format(remote_dir)])
if not self._execute(command, boolean=True):
self._raise('The required meta.txt file does not exist in remote directory: "{0}"'.format(remote_path))
# Return and parse remote meta.txt
self._output('Retrieving meta.txt file data from remote directory: "{0}"'.format(remote_path), 2)
command = self._remote_ssh_command(['cat', '{0}/meta.txt'.format(remote_dir)])
source_meta_data = self._execute(command)
source_meta = self._load_vm_meta(source_meta_data)
target_meta = self._load_target_meta(source_meta.copy(), action='import')
self._verify_target_meta(target_meta)
# Display action/meta information
self._output('Importing a VM from remote backup "{0}" to a new VM named "{1}"'.format(remote_path, target_meta['name']))
self._pprint_meta(source_meta, target_meta)
# Confirm XML file exists
self._output('Confirming XML file exists in remote directory: "{0}"'.format(remote_path), 2)
command = self._remote_ssh_command(['test', '-f', '{0}/{1}'.format(remote_dir, source_meta['xml'])])
if not self._execute(command, boolean=True):
self._raise('The required XML file does not exist in remote directory: "{0}/{1}"'.format(remote_path, source_meta['xml']))
# Confirm image file exists
self._output('Confirming VM image file exists in remote directory: "{0}"'.format(remote_path), 2)
command = self._remote_ssh_command(['test', '-f', '{0}/{1}'.format(remote_dir, source_meta['image'])])
if not self._execute(command, boolean=True):
self._raise('The required VM image file does not exist in remote directory: "{0}/{1}"'.format(remote_path, source_meta['image']))
# Transfer remote XML to local file
self._output('Loading remote XML and creating a temporary modified copy: "{0}/{1}"'.format(remote_path, source_meta['xml']), 2)
command = self._remote_ssh_command(['cat', '{0}/{1}'.format(remote_dir, source_meta['xml'])])
source_xml = self._execute(command)
target_xml = self._load_target_xml(source_xml, source_meta, target_meta, action='import')
target_xml_file = os.path.realpath('{0}-{1}.temp.xml'.format(target_meta['name'], self.now))
self._write_file(target_xml_file, target_xml)
# Resolve conflicts with existing VMs on the host machine
potential_conflicts = [
('name', target_meta['name']),
('disk', target_meta['disk']),
('uuid', target_meta['uuid']),
('mac', target_meta['mac'])
]
self._vm_resolve_conflicts(potential_conflicts)
# Create logical volume
self._lv_create(target_meta['logical_volume_size'], target_meta['logical_volume'], target_meta['volume_group'])
# Transfer LV image with dd over ssh
command_queue = []
ssh_command = self._remote_ssh_command(['dd', 'bs={0}'.format(self.args.block_size), 'if={0}/{1}'.format(remote_dir, source_meta['image'])])
command_queue.append(ssh_command)
if source_meta['compression'] != 'none':
zip_command = [str(source_meta['compression']), '-d']
command_queue.append(zip_command)
target_lv = '/dev/{0}/{1}'.format(target_meta['volume_group'], target_meta['logical_volume'])
command_queue.append(['dd', 'bs={0}'.format(self.args.block_size), 'of={0}'.format(target_lv)])
# Execute commands
self._output('Importing VM disk image. This will take time.', show_timestamp=True)
self._output('Starting remote VM image import.', 2)
self._execute_queue(command_queue)
self._output('Successfully completed remote VM image import.', 2)
# Set return data dictionary and return data
return_data['target_xml_file'] = target_xml_file
return_data['target_name'] = target_meta['name']
return return_data
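# The transfer above streams the image over SSH straight into the new logical
# volume, roughly equivalent to (host, paths and compression are made-up examples):
#
#   ssh backuphost dd bs=<block_size> if=/backups/myvm/myvm.img.gz | gzip -d | dd bs=<block_size> of=/dev/vg0/myvm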
# --------------------------------------------------------------------------
# Action function - Import Local
# --------------------------------------------------------------------------
def _import_local(self):
'''
Import a VM from a local backup image.
'''
self._output('Executing local import action', 2)
# Create variables
return_data = {}
# Verify source directory exists
source_directory = os.path.abspath(self.args.source)
source_directory = source_directory.rstrip('/') + '/'
if not os.path.exists(source_directory):
self._raise('Could not find source directory: "{0}".'.format(source_directory))
# Load and verify meta data
self._output('Loading and verifying VM meta data', 2)
source_meta_file = source_directory + 'meta.txt'
source_meta = self._load_vm_meta_from_file(source_meta_file)
target_meta = self._load_target_meta(source_meta.copy(), action='import')
self._verify_target_meta(target_meta)
# a standard bit of pyparsing, and an Or.
print("\nUse ignore expression (2)")
comment = ";;" + restOfLine
teststring = """
(let ((greeting "Hello, )world!")) ;;(foo bar
(display greeting))
"""
expected = [
[
"let",
[["greeting", '"Hello, )world!"']],
";;",
"(foo bar",
["display", "greeting"],
]
]
expr = nestedExpr(ignoreExpr=(comment ^ quotedString))
self.assertParseAndCheckList(
expr,
teststring,
expected,
'Lisp-ish comments (";; <...> $") and quoted strings didn\'t work',
verbose=True,
)
def testNestedExpressions2(self):
"""test nestedExpr with conditions that explore other paths
identical opener and closer
opener and/or closer of type other than string or iterable
multi-character opener and/or closer
single character opener and closer with ignoreExpr=None
multi-character opener and/or closer with ignoreExpr=None
"""
name = pp.Word(pp.alphanums + "_")
# identical opener and closer
with self.assertRaises(
ValueError, msg="matching opener and closer should raise error"
):
expr = name + pp.nestedExpr(opener="{", closer="{")
# opener and/or closer of type other than string or iterable
with self.assertRaises(
ValueError, msg="opener and closer as ints should raise error"
):
expr = name + pp.nestedExpr(opener=12, closer=18)
# multi-character opener and/or closer
tstMulti = "aName {{ outer {{ 'inner with opener {{ and closer }} in quoted string' }} }}"
expr = name + pp.nestedExpr(opener="{{", closer="}}")
result = expr.parseString(tstMulti)
expected = [
"aName",
["outer", ["'inner with opener {{ and closer }} in quoted string'"]],
]
print(result.dump())
self.assertParseResultsEquals(
result, expected, msg="issue with multi-character opener and closer"
)
# single character opener and closer with ignoreExpr=None
tst = (
"aName { outer { 'inner with opener { and closer } in quoted string' }} }}"
)
expr = name + pp.nestedExpr(opener="{", closer="}", ignoreExpr=None)
singleCharResult = expr.parseString(tst)
print(singleCharResult.dump())
# multi-character opener and/or closer with ignoreExpr=None
expr = name + pp.nestedExpr(opener="{{", closer="}}", ignoreExpr=None)
multiCharResult = expr.parseString(tstMulti)
print(multiCharResult.dump())
self.assertParseResultsEquals(
singleCharResult,
multiCharResult.asList(),
msg="using different openers and closers shouldn't affect resulting ParseResults",
)
def testWordExclude(self):
allButPunc = pp.Word(pp.printables, excludeChars=".,:;-_!?")
test = "Hello, Mr. Ed, it's Wilbur!"
result = allButPunc.searchString(test).asList()
print(result)
self.assertEqual(
[["Hello"], ["Mr"], ["Ed"], ["it's"], ["Wilbur"]],
result,
"failed WordExcludeTest",
)
def testWordMinOfZero(self):
"""test a Word with min=0"""
with self.assertRaises(ValueError, msg="expected min 0 to error"):
expr = pp.Word(pp.nums, min=0, max=10)
def testCharAsKeyword(self):
"""test a Char with asKeyword=True"""
grade = pp.OneOrMore(pp.Char("ABCDF", asKeyword=True))
# all single char words
result = grade.parseString("B B C A D")
print(result)
expected = ["B", "B", "C", "A", "D"]
self.assertParseResultsEquals(
result, expected, msg="issue with Char asKeyword=True"
)
# NOT all single char words
test2 = "B BB C A D"
result2 = grade.parseString(test2)
print(result2)
expected2 = ["B"]
self.assertParseResultsEquals(
result2, expected2, msg="issue with Char asKeyword=True parsing 2 chars"
)
def testCharsNotIn(self):
"""test CharsNotIn initialized with various arguments"""
vowels = "AEIOU"
tst = "bcdfghjklmnpqrstvwxyz"
# default args
consonants = pp.CharsNotIn(vowels)
result = consonants.parseString(tst)
print(result)
self.assertParseResultsEquals(
result, [tst], msg="issue with CharsNotIn w/ default args"
)
# min = 0
with self.assertRaises(ValueError, msg="issue with CharsNotIn w/ min=0"):
consonants = pp.CharsNotIn(vowels, min=0)
# max > 0
consonants = pp.CharsNotIn(vowels, max=5)
result = consonants.parseString(tst)
print(result)
self.assertParseResultsEquals(
result, [tst[:5]], msg="issue with CharsNotIn w max > 0"
)
# exact > 0
consonants = pp.CharsNotIn(vowels, exact=10)
result = consonants.parseString(tst[:10])
print(result)
self.assertParseResultsEquals(
result, [tst[:10]], msg="issue with CharsNotIn w/ exact > 0"
)
# min > length
consonants = pp.CharsNotIn(vowels, min=25)
with self.assertRaisesParseException(msg="issue with CharsNotIn min > tokens"):
result = consonants.parseString(tst)
def testParseAll(self):
from pyparsing import Word, cppStyleComment
testExpr = Word("A")
tests = [
("AAAAA", False, True),
("AAAAA", True, True),
("AAABB", False, True),
("AAABB", True, False),
]
for s, parseAllFlag, shouldSucceed in tests:
try:
print(
"'{}' parseAll={} (shouldSucceed={})".format(
s, parseAllFlag, shouldSucceed
)
)
testExpr.parseString(s, parseAll=parseAllFlag)
self.assertTrue(
shouldSucceed, "successfully parsed when should have failed"
)
except ParseException as pe:
print(pe.explain())
self.assertFalse(
shouldSucceed, "failed to parse when should have succeeded"
)
# add test for trailing comments
testExpr.ignore(cppStyleComment)
tests = [
("AAAAA //blah", False, True),
("AAAAA //blah", True, True),
("AAABB //blah", False, True),
("AAABB //blah", True, False),
]
for s, parseAllFlag, shouldSucceed in tests:
try:
print(
"'{}' parseAll={} (shouldSucceed={})".format(
s, parseAllFlag, shouldSucceed
)
)
testExpr.parseString(s, parseAll=parseAllFlag)
self.assertTrue(
shouldSucceed, "successfully parsed when should have failed"
)
except ParseException as pe:
print(pe.explain())
self.assertFalse(
shouldSucceed, "failed to parse when should have succeeded"
)
# add test with very long expression string
# testExpr = pp.MatchFirst([pp.Literal(c) for c in pp.printables if c != 'B'])[1, ...]
anything_but_an_f = pp.OneOrMore(
pp.MatchFirst([pp.Literal(c) for c in pp.printables if c != "f"])
)
testExpr = pp.Word("012") + anything_but_an_f
tests = [
("00aab", False, True),
("00aab", True, True),
("00aaf", False, True),
("00aaf", True, False),
]
for s, parseAllFlag, shouldSucceed in tests:
try:
print(
"'{}' parseAll={} (shouldSucceed={})".format(
s, parseAllFlag, shouldSucceed
)
)
testExpr.parseString(s, parseAll=parseAllFlag)
self.assertTrue(
shouldSucceed, "successfully parsed when should have failed"
)
except ParseException as pe:
print(pe.explain())
self.assertFalse(
shouldSucceed, "failed to parse when should have succeeded"
)
def testGreedyQuotedStrings(self):
from pyparsing import (
QuotedString,
sglQuotedString,
dblQuotedString,
quotedString,
delimitedList,
)
src = """\
"string1", "strin""g2"
'string1', 'string2'
^string1^, ^string2^
<string1>, <string2>"""
testExprs = (
sglQuotedString,
dblQuotedString,
quotedString,
QuotedString('"', escQuote='""'),
QuotedString("'", escQuote="''"),
QuotedString("^"),
QuotedString("<", endQuoteChar=">"),
)
for expr in testExprs:
strs = delimitedList(expr).searchString(src)
print(strs)
self.assertTrue(
bool(strs), "no matches found for test expression '%s'" % expr
)
for lst in strs:
self.assertEqual(
2, len(lst), "invalid match found for test expression '%s'" % expr
)
from pyparsing import alphas, nums, Word
src = """'ms1',1,0,'2009-12-22','2009-12-22 10:41:22') ON DUPLICATE KEY UPDATE sent_count = sent_count + 1, mtime = '2009-12-22 10:41:22';"""
tok_sql_quoted_value = QuotedString(
"'", "\\", "''", True, False
) ^ QuotedString('"', "\\", '""', True, False)
tok_sql_computed_value = Word(nums)
tok_sql_identifier = Word(alphas)
val = tok_sql_quoted_value | tok_sql_computed_value | tok_sql_identifier
vals = delimitedList(val)
print(vals.parseString(src))
self.assertEqual(
5, len(vals.parseString(src)), "error in greedy quote escaping"
)
def testWordBoundaryExpressions(self):
from pyparsing import WordEnd, WordStart, oneOf
ws = WordStart()
we = WordEnd()
vowel = oneOf(list("AEIOUY"))
consonant = oneOf(list("BCDFGHJKLMNPQRSTVWXZ"))
leadingVowel = ws + vowel
trailingVowel = vowel + we
leadingConsonant = ws + consonant
trailingConsonant = consonant + we
internalVowel = ~ws + vowel + ~we
bnf = leadingVowel | trailingVowel
tests = """\
ABC DEF GHI
JKL MNO PQR
STU VWX YZ """.splitlines()
tests.append("\n".join(tests))
expectedResult = [
[["D", "G"], ["A"], ["C", "F"], ["I"], ["E"], ["A", "I"]],
[["J", "M", "P"], [], ["L", "R"], ["O"], [], ["O"]],
[["S", "V"], ["Y"], ["X", "Z"], ["U"], [], ["U", "Y"]],
[
["D", "G", "J", "M", "P", "S", "V"],
["A", "Y"],
["C", "F", "L", "R", "X", "Z"],
["I", "O", "U"],
["E"],
["A", "I", "O", "U", "Y"],
],
]
for t, expected in zip(tests, expectedResult):
print(t)
results = [
flatten(e.searchString(t).asList())
for e in [
leadingConsonant,
leadingVowel,
trailingConsonant,
trailingVowel,
internalVowel,
bnf,
]
]
print(results)
print()
self.assertEqual(
expected,
results,
"Failed WordBoundaryTest, expected {}, got {}".format(
expected, results
),
)
def testRequiredEach(self):
from pyparsing import Keyword
parser = Keyword("bam") & Keyword("boo")
try:
res1 = parser.parseString("bam boo")
print(res1.asList())
res2 = parser.parseString("boo bam")
print(res2.asList())
except ParseException:
failed = True
else:
failed = False
self.assertFalse(failed, "invalid logic in Each")
self.assertEqual(
set(res1),
set(res2),
"Failed RequiredEachTest, expected "
+ str(res1.asList())
+ " and "
+ str(res2.asList())
+ "to contain same words in any order",
)
def testOptionalEachTest1(self):
from pyparsing import Optional, Keyword
for the_input in [
"Tal Weiss Major",
"Tal Major",
"Weiss Major",
"Major",
"Major Tal",
"Major Weiss",
"Major Tal Weiss",
]:
print(the_input)
parser1 = (Optional("Tal") + Optional("Weiss")) & Keyword("Major")
parser2 = Optional(Optional("Tal") + Optional("Weiss")) & Keyword("Major")
p1res = parser1.parseString(the_input)
p2res = parser2.parseString(the_input)
self.assertEqual(
p1res.asList(),
p2res.asList(),
"Each failed to match with nested Optionals, "
+ str(p1res.asList())
+ " should match "
+ str(p2res.asList()),
)
def testOptionalEachTest2(self):
from pyparsing import Word, alphanums, OneOrMore, Group, Regex, Optional
word = Word(alphanums + "_").setName("word")
with_stmt = "with" + OneOrMore(Group(word("key") + "=" + word("value")))(
"overrides"
)
using_stmt = "using" + Regex("id-[0-9a-f]{8}")("id")
modifiers = Optional(with_stmt("with_stmt")) & Optional(
using_stmt("using_stmt")
)
self.assertEqual("with foo=bar bing=baz using id-deadbeef", modifiers)
self.assertNotEqual(
"with foo=bar bing=baz using id-deadbeef using id-feedfeed", modifiers
)
def testOptionalEachTest3(self):
from pyparsing import Literal, Suppress
foo = Literal("foo")
bar = Literal("bar")
openBrace = Suppress(Literal("{"))
closeBrace = Suppress(Literal("}"))
exp = openBrace + (foo[1, ...]("foo") & bar[...]("bar")) + closeBrace
tests = """\
{foo}
{bar foo
"""Various VR utilities."""
import queue
import threading
import time
from asyncio.streams import StreamReader
from typing import Sequence, Dict
import struct
import numpy as np
import serial.threaded
from pykalman import KalmanFilter
from copy import copy
try:
import cv2
from displayarray import read_updates
from displayarray import display
HAVE_VOD = True
except Exception as e:
print(f"failed to import displayarray and/or opencv, reason: {e}")
print("camera based tracking methods will not be available")
HAVE_VOD = False
from itertools import islice, takewhile
import re
from typing import Optional
def format_str_for_write(input_str: str) -> bytes:
"""Format a string for writing to SteamVR's stream."""
if len(input_str) < 1:
return "".encode("utf-8")
if input_str[-1] != "\n":
return (input_str + "\n").encode("utf-8")
return input_str.encode("utf-8")
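# Minimal usage sketch (not part of the original module; the helper name is
# made up purely for illustration):
def _format_str_example():
    assert format_str_for_write("hello") == b"hello\n"
    assert format_str_for_write("hello\n") == b"hello\n"
    assert format_str_for_write("") == b""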
async def read(reader: StreamReader, read_len: int = 20) -> str:
"""Read one line from reader asynchronously."""
data = []
temp = " "
while "\n" not in temp and temp != "":
temp = await reader.read(read_len)
temp = temp.decode("utf-8")
data.append(temp)
time.sleep(0) # allows thread switching
return "".join(data)
def read2(reader, read_len=20):
"""Read one line from reader asynchronously."""
data = []
temp = " "
while "\n" not in temp and temp != "":
temp = reader.recv(read_len)
temp = temp.decode("utf-8")
data.append(temp)
time.sleep(0) # allows thread switching
return "".join(data)
async def read3(reader: StreamReader, read_len: int = 20) -> str:
"""Read one line from reader asynchronously."""
data = bytearray()
temp = b" "
while b"\n" not in temp and temp != b"":
temp = await reader.read(read_len)
data.extend(temp)
# time.sleep(0) # allows thread switching
return data
def make_rotmat(angls, dtype=np.float64):
"""
Build the combined rotation matrix for rotations around the x, y, then z axes.
:param angls: the angles, in radians, to rotate around the x, y, and z axes
:param dtype: numpy dtype of the returned 3x3 matrix
"""
rotx = np.array(
[
[1, 0, 0],
[0, np.cos(angls[0]), -np.sin(angls[0])],
[0, np.sin(angls[0]), np.cos(angls[0])],
],
dtype=dtype,
)
roty = np.array(
[
[np.cos(angls[1]), 0, np.sin(angls[1])],
[0, 1, 0],
[-np.sin(angls[1]), 0, np.cos(angls[1])],
],
dtype=dtype,
)
rotz = np.array(
[
[np.cos(angls[2]), -np.sin(angls[2]), 0],
[np.sin(angls[2]), np.cos(angls[2]), 0],
[0, 0, 1],
],
dtype=dtype,
)
return np.matmul(np.matmul(rotx, roty), rotz)
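# Illustrative sketch only (not in the original source): applying the 3x3
# matrix returned by make_rotmat to row-vector points. The helper name and
# sample angles are hypothetical.
def _rotmat_usage_example():
    points = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    rot = make_rotmat([0.0, 0.0, np.pi / 2])  # angles are in radians
    return np.matmul(rot, points.T).T  # each row is a rotated point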
def translate(points, offsets: Sequence[float]) -> None:
"""
Translate a set of points along the x, y, then z axes.
:param points: a point dictionary, such as: {"marker 1" : np.array((0, 0, 0))}
:param offsets: the distance to translate on the x, y, and z axes
"""
points += offsets
def strings_share_characters(str1: str, str2: str) -> bool:
"""Determine if two strings share any characters."""
for i in str2:
if i in str1:
return True
return False
def get_numbers_from_text(text, separator="\t"):
"""Get a list of number from a string of numbers separated by :separator:[default: "\t"]."""
try:
if isinstance(text, bytearray) or isinstance(text, bytes):
text = text.decode("utf-8")
if (
strings_share_characters(
text.lower(), "qwrtyuiopsasdfghjklzxcvbnm><*[]{}()"
)
or len(text) == 0
):
return []
return [float(i) for i in text.split(separator)]
except Exception as e:
print(f"get_numbers_from_text: {repr(e)} {repr(text)}")
return []
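# Small sketch (not in the original source) of the expected round trip; the
# sample string and helper name are made up.
def _numbers_from_text_example():
    return get_numbers_from_text("1.0\t2.5\t-3")  # -> [1.0, 2.5, -3.0]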
def get_pose_struct_from_text(text):
"""returns struct from :text:, :text: has to be styled so that it completely matches this regex: ([htc][0-9]+[ ])*([htc][0-9]+)$"""
res = re.search("([htc][0-9]+[ ])*([htc][0-9]+)$", text)
if res is not None:
if res.group(0) == text:
return tuple(i[0] for i in text.split(" ")), tuple(
int(i[1:]) for i in text.split(" ")
)
return (), ()
def parse_poses_from_packet(packet, struct):
"""parses all poses from a :packet:, provided a packet :struct:"""
it = iter(packet)
return [tuple(islice(it, 0, i)) for i in struct]
def get_poses_shape(poses):
"""returns a shape of :poses: parsed by parse_poses_from_packet"""
return tuple(len(i) for i in poses)
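# Sketch (not in the original source) showing how the three helpers above fit
# together; the pose descriptor string and packet contents are invented.
def _pose_packet_example():
    kinds, struct = get_pose_struct_from_text("h13 c3 c3")  # ('h', 'c', 'c'), (13, 3, 3)
    packet = list(range(13 + 3 + 3))  # 13 values followed by two groups of 3
    poses = parse_poses_from_packet(packet, struct)
    return kinds, get_poses_shape(poses)  # ('h', 'c', 'c'), (13, 3, 3)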
def has_nan_in_pose(pose):
"""Determine if any numbers in pose are invalid."""
return np.isnan(pose).any() or np.isinf(pose).any()
# experimental contour solver function, uses cone fitting instead of ellipse fitting (and some other things), not tested yet
def fit_focal_cone_to_sphere(points2D, points2D_count, sphere_radius, camera_focal_length):
zz = camera_focal_length**2
A = np.zeros((points2D_count, 3), dtype=np.float64)
for i in range(points2D_count):
p = points2D[i]
norm_A = np.linalg.norm([p[0], p[1], camera_focal_length])
A[i][0] = p[0]
A[i][1] = p[1]
A[i][2] = -norm_A
b = np.zeros((points2D_count,), dtype=np.float64)
b.fill(-zz)
# print (A.shape)
Bx_By_c = np.linalg.lstsq(A, b, rcond=None)[0]
# print (Bx_By_c)
# lu, piv = lu_factor(A)
# Bx_By_c = lu_solve((lu, piv), b)
norm_norm_B = np.sqrt(Bx_By_c[0] * Bx_By_c[0] +
Bx_By_c[1] * Bx_By_c[1] +
zz)
cos_theta = Bx_By_c[2] / norm_norm_B
k = cos_theta * cos_theta
norm_B = sphere_radius / np.sqrt(1 - k)
out = np.array((Bx_By_c[0], Bx_By_c[1], camera_focal_length), dtype=np.float64)
out *= (norm_B / norm_norm_B)
return out
class LazyKalman:
"""
no docs... too bad.
But then, SimLeek added some.
example usage:
t = LazyKalman([1, 2, 3], np.eye(3), np.eye(3)) # init filter
for i in range(10):
print (t.apply([5+i, 6+i, 7+i])) # apply and update filter
"""
def __init__(
self, init_state, transition_matrix, observation_matrix, n_iter=5, train_size=15
):
"""
Create the Kalman filter.
:param init_state: Initial state of the Kalman filter. Should be equal to first element in first_train_batch.
:param transition_matrix: adjacency matrix representing state transition from t to t+1 for any time t
Example: http://campar.in.tum.de/Chair/KalmanFilter
Most likely, this will be an NxN identity matrix, where N is the number of variables
being estimated
:param observation_matrix: translation matrix from measurement coordinate system to desired coordinate system
See: https://dsp.stackexchange.com/a/27488
Most likely, this will be an NxN identity matrix, where N is the number of variables
being estimated
:param n_iter: Number of times to repeat the parameter estimation function (estimates noise)
:param train_size: Number of initial observations to collect before the first calibration runs
"""
init_state = np.array(init_state)
transition_matrix = np.array(transition_matrix)
observation_matrix = np.array(observation_matrix)
self._expected_shape = init_state.shape
self._filter = KalmanFilter(
transition_matrices=transition_matrix,
observation_matrices=observation_matrix,
initial_state_mean=init_state,
)
self.do_learning = True
self._calibration_countdown = 0
self._calibration_observations = []
self._em_iter = n_iter
self.calibrate(train_size, n_iter)
self._x_now = np.zeros(transition_matrix.shape[0])
self._p_now = np.zeros(transition_matrix.shape[0])
def apply(self, obz):
"""Apply the Kalman filter against the observation obz."""
# Todo: automatically start a calibration routine if everything is at rest for long enough,
# or if the user leaves, or if there is enough error. Also, add some UI calibrate button, save, and load.
if self.do_learning:
self._x_now, self._p_now = self._filter.filter_update(
filtered_state_mean=self._x_now,
filtered_state_covariance=self._p_now,
observation=obz,
)
else:
# print(obz.shape)
# self._x_now, self._p_now = self._filter.filter([obz,])
self._x_now, _ = self._filter.filter_update(
filtered_state_mean=self._x_now,
filtered_state_covariance=self._p_now,
observation=obz,
)
if self._calibration_countdown:
self._calibration_observations.append(obz)
self._calibration_countdown -= 1
if self._calibration_countdown == 0:
self._run_calibration()
return self._x_now
def _run_calibration(self):
"""Update the Kalman filter so that noise matrices are more accurate."""
t_start = time.time()
self._filter = self._filter.em(
self._calibration_observations, n_iter=self._em_iter
)
print(f" kalman filter calibrated, took {time.time() - t_start}s")
f_means, f_covars = self._filter.filter(self._calibration_observations)
self._x_now = f_means[-1, :]
self._p_now = f_covars[-1, :]
self._calibration_observations = []
def calibrate(self, observations=100000, em_iter=10):
"""
Start the calibration routine.
:param observations: Number of observations before calibration runs.
:param em_iter: number of times calibration runs over all observations.
"""
self._calibration_countdown = observations
self._em_iter = em_iter
class SerialReaderFactory(serial.threaded.LineReader):
"""
A protocol factory for serial.threaded.ReaderThread.
self.lastRead should be read only, if you need to modify it make a copy
usage:
with serial.threaded.ReaderThread(serial_instance, SerialReaderFactory) as protocol:
protocol.lastRead # contains the last incoming message from serial
protocol.write_line(single_line_text) # used to write a single line to serial
"""
TERMINATOR = b'\n'
def __init__(self):
"""Create the SerialReaderFactory."""
super().__init__()
self._last_read = ""
@property
def last_read(self):
"""Get the readonly last read."""
return self._last_read
def handle_line(self, data):
"""Store the latest line in last_read."""
self._last_read = data
# print (f'cSerialReader: data received: {repr(data)}')
def connection_lost(self, exc):
"""Notify the user that the connection was lost."""
print(
f"SerialReaderFactory: port {repr(self.transport.serial.port)} closed {repr(exc)}"
)
class SerialReaderBinary(serial.threaded.Packetizer):
"""
Read binary packets from serial port. Packets are expected to be terminated
with the TERMINATOR byte sequence (b'\t\r\n' for this class).
The class also keeps track of the transport.
"""
TERMINATOR = b'\t\r\n'
ENCODING = 'utf-8'
UNICODE_HANDLING = 'replace'
def __init__(self, struct_type='f', struct_len=13, type_len=4):
super().__init__()
self._last_packet = None
self._struct_form = struct_type * struct_len
self._struct_len = struct_len * type_len
def __call__(self):
return self
@property
def last_read(self):
"""Get the readonly last read."""
return self._last_packet
def connection_lost(self, exc):
"""Notify the user that the connection was lost."""
print(
f"SerialReaderBinary: port {repr(self.transport.serial.port)} closed {repr(exc)}"
)
def handle_packet(self, packet):
"""Process packets - to be overridden by subclassing"""
# print (repr(packet))
if len(packet) == self._struct_len:
self._last_packet = struct.unpack_from(self._struct_form, packet)
def write_line(self, text):
"""
Write text to the transport. ``text`` is a Unicode string; the encoding
is applied before sending and the newline is appended.
"""
# + is not the best choice but bytes does not support % or .format in
import argparse
from collections import defaultdict
from itertools import izip
import numpy as np
import os
import pandas as pd
from scipy.interpolate import interp1d
from scipy.stats import linregress, spearmanr
from base.io_util import write_json
# from builds.flu.scores import select_nodes_in_season
from frequencies import logit_transform, tree_frequencies
from fitness_predictors import fitness_predictors
min_tips = 10
pc=1e-2
regularization = 1e-3
default_predictors = ['lb', 'ep', 'ne_star']
def process_predictor_args(predictors, params=None, sds=None):
"""Returns a predictor data structure for the given lists of predictors, params,
and standard deviations.
When no parameters or deviations are provided, the predictors are a simple
list. When parameters and deviations are provided, the predictor are a
dictionary indexed by predictor name with values corresponding to each
predictor's param and global standard deviation.
>>> process_predictor_args(None, None, None)
>>> process_predictor_args(['ep'])
['ep']
>>> process_predictor_args(['ep'], None, None)
['ep']
>>> process_predictor_args(['ep'], [1], [5])
{'ep': [1, 5]}
"""
if predictors is None:
processed_predictors = None
elif params is None or sds is None:
processed_predictors = predictors
else:
merged_params = map(list, zip(params, sds))
processed_predictors = dict(zip(predictors, merged_params))
return processed_predictors
def make_pivots(start, stop, pivots_per_year=12, precision=2):
"""Makes an array of pivots (i.e., timepoints) between the given start and stop
by the given pivots per year. The generated pivots are floating point values
that are then rounded to the requested decimal precision.
>>> list(make_pivots(2000.0, 2001.0, 5))
[2000.0, 2000.25, 2000.5, 2000.75, 2001.0]
"""
# Calculate number of pivots (i.e., months) in the requested interval.
number_of_pivots = int(np.ceil((stop - start) * pivots_per_year))
# Build an evenly-spaced closed interval (including the start and stop
# points) based on the calculated number of pivots.
return np.around(
np.linspace(start, stop, number_of_pivots),
precision
)
def matthews_correlation_coefficient(tp, tn, fp, fn):
"""Return Matthews correlation coefficient for values from a confusion matrix.
Implementation is based on the definition from wikipedia:
https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
"""
numerator = (tp * tn) - (fp * fn)
denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
if denominator == 0:
denominator = 1
return float(numerator) / denominator
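# Quick illustrative check (not part of the original file); the helper name and
# the confusion-matrix counts are made up.
def _mcc_example():
    assert matthews_correlation_coefficient(5, 5, 0, 0) == 1.0   # perfect predictions
    assert matthews_correlation_coefficient(0, 0, 5, 5) == -1.0  # perfectly wrong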
class fitness_model(object):
def __init__(self, tree, frequencies, time_interval, predictor_input = ['ep', 'lb', 'dfreq'], pivots = None, pivot_spacing = 1.0 / 12, verbose = 0, enforce_positive_predictors = True, predictor_kwargs=None, **kwargs):
'''
parameters:
tree -- tree of sequences for which a fitness model is to be determined
frequencies -- dictionary of precalculated clade frequencies indexed by region (e.g., "global")
predictor_input -- list of predictors to fit or dict of predictors to coefficients / std deviations
'''
self.tree = tree
self.frequencies = frequencies
self.pivot_spacing = pivot_spacing
self.verbose = verbose
self.enforce_positive_predictors = enforce_positive_predictors
self.estimate_coefficients = True
self.min_freq = kwargs.get("min_freq", 0.1)
self.max_freq = kwargs.get("max_freq", 0.99)
if predictor_kwargs is None:
self.predictor_kwargs = {}
else:
self.predictor_kwargs = predictor_kwargs
self.time_window = kwargs.get("time_window", 6.0 / 12.0)
# Convert datetime date interval to floating point interval from
# earliest to latest.
self.time_interval = (
time_interval[1].year + (time_interval[1].month) / 12.0,
time_interval[0].year + (time_interval[0].month - 1) / 12.0
)
if isinstance(predictor_input, dict):
predictor_names = predictor_input.keys()
self.estimate_coefficients = False
else:
predictor_names = predictor_input
if "estimate_fitness_model" in kwargs:
if kwargs["estimate_fitness_model"]:
self.estimate_coefficients = True
# If pivots have not been calculated yet, calculate them here.
if pivots is not None:
self.pivots = pivots
else:
self.pivots = make_pivots(
self.time_interval[0],
self.time_interval[1],
1 / self.pivot_spacing
)
# final timepoint is end of interval and is only projected forward, not tested
self.timepoint_step_size = 0.5 # amount of time between timepoints chosen for fitting
self.delta_time = 1.0 # amount of time projected forward to do fitting
self.timepoints = np.around(
np.append(
make_pivots(self.time_interval[0], self.time_interval[1]-self.delta_time+0.0001, 1 / self.timepoint_step_size),
self.time_interval[1]
),
2
)
self.predictors = predictor_names
self.model_params = np.zeros(len(self.predictors))
if isinstance(predictor_input, dict):
self.model_params = np.array([predictor_input[k][0] for k in predictor_names])
self.to_standardize = np.array([p!='dfreq' for p in self.predictors])
if isinstance(predictor_input, dict):
self.global_sds = np.array([predictor_input[k][1] for k in predictor_names])
else:
self.global_sds = np.zeros(len(self.predictors))
self.fp = fitness_predictors(predictor_names = predictor_names, **kwargs)
# Map node names to parents.
self.node_parents = {}
for clade in self.tree.find_clades(order='level'):
for child in clade:
self.node_parents[child] = clade
def prep_nodes(self):
"""Assigns data from the tree to top-level fitness model attributes.
TODO: consider moving this code directly into the `predict`
method since it is only ever called there.
"""
self.nodes = [node for node in self.tree.find_clades(order="postorder")]
self.tips = [node for node in self.nodes if node.is_terminal()]
self.rootnode = self.tree.root
self.rootnode.pivots = self.pivots
# Create a list of tip indices under node.tips that map to self.tips
# list.
tip_index_region_specific = 0
for node in self.nodes:
tmp_tips = []
if node.is_terminal():
tmp_tips.append((tip_index_region_specific, node.numdate))
tip_index_region_specific += 1
for child in node.clades:
tmp_tips.extend(child.tips)
# Sort tips by corresponding date.
node.tips = np.array([x for x in sorted(tmp_tips, key = lambda x: x[1])])
# Erase the dates from the tip lists and cast to int such that they can
# be used for indexing. These operations must happen after all nodes
# have been processed and sorted.
for node in self.nodes:
if len(node.tips.shape) == 2:
node.tips = np.array(node.tips[:, 0], dtype=int)
else:
node.tips = np.array([], dtype=int)
def calc_node_frequencies(self):
'''
goes over all nodes and calculates frequencies at timepoints based on previously calculated frequency trajectories
'''
region = "global"
# Calculate global tree/clade frequencies if they have not been calculated already.
if region not in self.frequencies or self.rootnode.clade not in self.frequencies["global"]:
print("calculating global node frequencies")
tree_freqs = tree_frequencies(self.tree, self.pivots, method="SLSQP", verbose=1)
tree_freqs.estimate_clade_frequencies()
self.frequencies[region] = tree_freqs.frequencies
else:
print("found existing global node frequencies")
# Annotate frequencies on nodes.
# TODO: replace node-based annotation with dicts indexed by node name.
for node in self.nodes:
node.freq = {
region: self.frequencies[region][node.clade]
}
node.logit_freq = {
region: logit_transform(self.frequencies[region][node.clade], 1e-4)
}
for node in self.nodes:
interpolation = interp1d(self.rootnode.pivots, node.freq[region], kind='linear', bounds_error=True)
node.timepoint_freqs = defaultdict(float)
node.delta_freqs = defaultdict(float)
for time in self.timepoints:
node.timepoint_freqs[time] = np.asscalar(interpolation(time))
for time in self.timepoints[:-1]:
node.delta_freqs[time] = np.asscalar(interpolation(time + self.delta_time))
# freq_arrays list *all* tips for each initial timepoint
self.freq_arrays={}
for time in self.timepoints:
tmp_freqs = []
for tip in self.tips:
tmp_freqs.append(tip.timepoint_freqs[time])
self.freq_arrays[time] = np.array(tmp_freqs)
def calc_predictors(self, timepoint):
for pred in self.predictors:
# calculate the predictors for all nodes of the tree and save as node.attr
if pred != 'dfreq':
self.fp.setup_predictor(self.tree, pred, timepoint, **self.predictor_kwargs)
def calc_time_censored_tree_frequencies(self):
print("fitting time censored tree frequencies")
# this doesn't interfere with the previous freq estimates via difference in region: global_censored vs global
region = "global_censored"
if not region in self.frequencies:
self.frequencies[region] = {}
freq_cutoff = 25.0
pivots_fit = 6
freq_window = 1.0
for node in self.nodes:
node.fit_frequencies = {}
node.freq_slope = {}
for time in self.timepoints:
time_interval = [time - freq_window, time]
pivots = make_pivots(
time_interval[0],
time_interval[1],
1 / self.pivot_spacing
)
node_filter_func = lambda node: node.attr['num_date'] >= time_interval[0] and node.attr['num_date'] < time_interval[1]
# Recalculate tree frequencies for the given time interval and its
# corresponding pivots.
tree_freqs = tree_frequencies(self.tree, pivots, node_filter=node_filter_func, method="SLSQP")
tree_freqs.estimate_clade_frequencies()
self.frequencies[region][time] = tree_freqs.frequencies
# Annotate censored frequencies on nodes.
# TODO: replace node-based annotation with dicts indexed by node name.
for node in self.nodes:
node.freq = {
region: self.frequencies[region][time][node.clade]
}
node.logit_freq = {
region: logit_transform(self.frequencies[region][time][node.clade], 1e-4)
}
for node in self.nodes:
if node.logit_freq[region] is not None:
node.fit_frequencies[time] = np.minimum(freq_cutoff, np.maximum(-freq_cutoff,node.logit_freq[region]))
else:
node.fit_frequencies[time] = self.node_parents[node].fit_frequencies[time]
try:
slope, intercept, rval, pval, stderr = linregress(pivots[pivots_fit:-1], node.fit_frequencies[time][pivots_fit:-1])
node.freq_slope[time] = slope
except:
import ipdb; ipdb.set_trace()
# Clean up frequencies.
del self.frequencies[region]
# reset pivots in tree to global pivots
self.rootnode.pivots = self.pivots
def calc_all_predictors(self, estimate_frequencies = True):
if estimate_frequencies and 'dfreq' in [x for x in self.predictors]:
self.calc_time_censored_tree_frequencies()
# predictor_arrays list *all* tips for each timepoint
self.predictor_arrays={}
for node in self.nodes:
node.predictors = {}
for time in self.timepoints:
if self.verbose: print "calculating predictors for time", time
select_nodes_in_season(self.tree, time, self.time_window)
self.calc_predictors(time)
for node in self.nodes:
if 'dfreq' in [x for x in self.predictors]: node.dfreq = node.freq_slope[time]
node.predictors[time] = np.array([hasattr(node, pred) and getattr(node, pred) or node.attr[pred]
for pred in self.predictors])
tmp_preds = []
for tip in self.tips:
tmp_preds.append(tip.predictors[time])
self.predictor_arrays[time]=np.array(tmp_preds, dtype=float)
def standardize_predictors(self):
self.predictor_means = {}
self.predictor_sds = {}
if self.verbose: print "standardizing predictors"
for time in self.timepoints:
values = self.predictor_arrays[time]
weights = self.freq_arrays[time]
means = np.average(values, weights=weights, axis=0)
variances = np.average((values-means)**2, weights=weights, axis=0)
sds = np.sqrt(variances)
self.predictor_means[time] = means
self.predictor_sds[time] = sds
if self.estimate_coefficients:
self.global_sds
db_dx
J[('boundary_constraint','turbineY')] = db_dy
return J
class SpacingComp(Component):
"""
Calculates inter-turbine spacing for all turbine pairs
"""
def __init__(self, nTurbines):
super(SpacingComp, self).__init__()
# set finite difference options (fd used for testing only)
self.deriv_options['check_form'] = 'central'
self.deriv_options['check_step_size'] = 1.0e-5
self.deriv_options['check_step_calc'] = 'relative'
# Explicitly size input arrays
self.add_param('turbineX', val=np.zeros(nTurbines), units='m',
desc='x coordinates of turbines in wind dir. ref. frame')
self.add_param('turbineY', val=np.zeros(nTurbines), units='m',
desc='y coordinates of turbines in wind dir. ref. frame')
# Explicitly size output array
self.add_output('wtSeparationSquared', val=np.zeros(int(nTurbines*(nTurbines-1)/2)),
desc='spacing of all turbines in the wind farm')
def solve_nonlinear(self, params, unknowns, resids):
# print('in dist const')
turbineX = params['turbineX']
turbineY = params['turbineY']
nTurbines = turbineX.size
# separation_squared = np.zeros(int((nTurbines-1)*nTurbines/2))
#
# k = 0
# for i in range(0, nTurbines):
# for j in range(i+1, nTurbines):
# separation_squared[k] = (turbineX[j]-turbineX[i])**2+(turbineY[j]-turbineY[i])**2
# k += 1
separation_squared = position_constraints.turbine_spacing_squared(turbineX, turbineY)
unknowns['wtSeparationSquared'] = separation_squared
def linearize(self, params, unknowns, resids):
# obtain necessary inputs
turbineX = params['turbineX']
turbineY = params['turbineY']
# get number of turbines
nTurbines = turbineX.size
#
# # initialize gradient calculation array
# dS = np.zeros((int((nTurbines-1.)*nTurbines/2.), 2*nTurbines))
#
# # set turbine pair counter to zero
# k = 0
#
# # calculate the gradient of the distance between each pair of turbines w.r.t. turbineX and turbineY
# for i in range(0, nTurbines):
# for j in range(i+1, nTurbines):
# # separation wrt Xj
# dS[k, j] = 2*(turbineX[j]-turbineX[i])
# # separation wrt Xi
# dS[k, i] = -2*(turbineX[j]-turbineX[i])
# # separation wrt Yj
# dS[k, j+nTurbines] = 2*(turbineY[j]-turbineY[i])
# # separation wrt Yi
# dS[k, i+nTurbines] = -2*(turbineY[j]-turbineY[i])
# # increment turbine pair counter
# k += 1
turbineXd = np.eye(nTurbines)
turbineYd = np.zeros((nTurbines, nTurbines))
_, separation_squareddx = \
position_constraints.turbine_spacing_squared_dv(turbineX, turbineXd, turbineY, turbineYd)
turbineXd = np.zeros((nTurbines, nTurbines))
turbineYd = np.eye(nTurbines)
_, separation_squareddy = \
position_constraints.turbine_spacing_squared_dv(turbineX, turbineXd, turbineY, turbineYd)
# initialize Jacobian dict
J = {}
# populate Jacobian dict
# J['wtSeparationSquared', 'turbineX'] = dS[:, :nTurbines]
# J['wtSeparationSquared', 'turbineY'] = dS[:, nTurbines:]
J['wtSeparationSquared', 'turbineX'] = np.transpose(separation_squareddx)
J['wtSeparationSquared', 'turbineY'] = np.transpose(separation_squareddy)
return J
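# Pure-numpy reference sketch of what the compiled position_constraints routine
# computes above (the helper name is hypothetical; it mirrors the commented-out
# loop inside solve_nonlinear):
def _turbine_spacing_squared_reference(turbineX, turbineY):
    nTurbines = turbineX.size
    separation_squared = []
    for i in range(0, nTurbines):
        for j in range(i + 1, nTurbines):
            separation_squared.append((turbineX[j] - turbineX[i]) ** 2
                                      + (turbineY[j] - turbineY[i]) ** 2)
    return np.array(separation_squared)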
class BoundaryComp(Component):
def __init__(self, nTurbines, nVertices):
super(BoundaryComp, self).__init__()
self.nTurbines = nTurbines
self.nVertices = nVertices
if nVertices > 1:
self.type = type = 'polygon'
elif nVertices == 1:
self.type = type = 'circle'
else:
raise ValueError('nVertices in BoundaryComp must be greater than 0')
if type == 'polygon':
# Explicitly size input arrays
self.add_param('boundaryVertices', np.zeros([nVertices, 2]), units='m', pass_by_obj=True,
desc="vertices of the convex hull CCW in order s.t. boundaryVertices[i] -> first point of face"
"for unit_normals[i]")
self.add_param('boundaryNormals', np.zeros([nVertices, 2]), pass_by_obj=True,
desc="unit normal vector for each boundary face CCW where boundaryVertices[i] is "
"the first point of the corresponding face")
elif type == 'circle':
self.add_param('boundary_radius', val=1000., units='m', pass_by_obj=True, desc='radius of wind farm boundary')
self.add_param('boundary_center', val=np.array([0., 0.]), units='m', pass_by_obj=True,
desc='x and y positions of circular wind farm boundary center')
else:
raise ValueError('Invalid value (%s) encountered in BoundaryComp input -type-. Must be one of [polygon, circle]'
% (type))
self.add_param('turbineX', np.zeros(nTurbines), units='m',
desc='x coordinates of turbines in global ref. frame')
self.add_param('turbineY', np.zeros(nTurbines), units='m',
desc='y coordinates of turbines in global ref. frame')
# Explicitly size output array
# (vector with positive elements if turbines outside of hull)
self.add_output('boundaryDistances', np.zeros([nTurbines, nVertices]),
desc="signed perpendicular distance from each turbine to each face CCW; + is inside")
def solve_nonlinear(self, params, unknowns, resids):
turbineX = params['turbineX']
turbineY = params['turbineY']
if self.type == 'polygon':
# put locations in correct arrangement for calculations
locations = np.zeros([self.nTurbines, 2])
for i in range(0, self.nTurbines):
locations[i] = np.array([turbineX[i], turbineY[i]])
# print("in comp, locs are: ".format(locations))
# calculate distance from each point to each face
# unknowns['boundaryDistances'] = calculate_distance(locations,
# params['boundaryVertices'], params['boundaryNormals'])
unknowns['boundaryDistances'] = position_constraints.boundary_distances(turbineX, turbineY,
params['boundaryVertices'], params['boundaryNormals'])
elif self.type == 'circle':
xc = params['boundary_center'][0]
yc = params['boundary_center'][1]
r = params['boundary_radius']
unknowns['boundaryDistances'] = r**2 - (np.power((turbineX - xc), 2) + np.power((turbineY - yc), 2))
# bv = np.array([[xc, yc]])
# br = np.array([[r, r]])
# print bv.shape, br.shape
# unknowns['boundaryDistances'] = (position_constraints.boundary_distances(turbineX, turbineY, bv, br))**2
else:
raise ValueError('Invalid value (%s) encountered in BoundaryComp input -type-. Must be one of [polygon, circle]'
% (self.type))
def linearize(self, params, unknowns, resids):
if self.type == 'polygon':
unit_normals = params['boundaryNormals']
# initialize array to hold distances from each point to each face
dfaceDistance_dx = np.zeros([int(self.nTurbines*self.nVertices), self.nTurbines])
dfaceDistance_dy = np.zeros([int(self.nTurbines*self.nVertices), self.nTurbines])
for i in range(0, self.nTurbines):
# determine if point is inside or outside of each face, and distance from each face
for j in range(0, self.nVertices):
# define the derivative vectors from the point of interest to the first point of the face
dpa_dx = np.array([-1.0, 0.0])
dpa_dy = np.array([0.0, -1.0])
# find perpendicular distance derivatives from point to current surface (vector projection)
ddistanceVec_dx = np.vdot(dpa_dx, unit_normals[j])*unit_normals[j]
ddistanceVec_dy = np.vdot(dpa_dy, unit_normals[j])*unit_normals[j]
# calculate derivatives for the sign of perpendicular distance from point to current face
dfaceDistance_dx[i*self.nVertices+j, i] = np.vdot(ddistanceVec_dx, unit_normals[j])
dfaceDistance_dy[i*self.nVertices+j, i] = np.vdot(ddistanceVec_dy, unit_normals[j])
elif self.type == 'circle':
turbineX = params['turbineX']
turbineY = params['turbineY']
xc = params['boundary_center'][0]
yc = params['boundary_center'][1]
A = np.eye(self.nTurbines, self.nTurbines)
B = - 2. * (turbineX - xc)
C = - 2. * (turbineY - yc)
dfaceDistance_dx = A*B
dfaceDistance_dy = A*C
else:
raise ValueError('Invalid value (%s) encountered in BoundaryComp input -type-. Must be one of [polygon, circle]'
% (self.type))
# initialize Jacobian dict
J = {}
# return Jacobian dict
J['boundaryDistances', 'turbineX'] = dfaceDistance_dx
J['boundaryDistances', 'turbineY'] = dfaceDistance_dy
return J
def calculate_boundary(vertices):
# find the points that actually comprise a convex hull
hull = ConvexHull(list(vertices))
# keep only vertices that actually comprise a convex hull and arrange in CCW order
vertices = vertices[hull.vertices]
# get the real number of vertices
nVertices = vertices.shape[0]
# initialize normals array
unit_normals = np.zeros([nVertices, 2])
# determine if point is inside or outside of each face, and distance from each face
for j in range(0, nVertices):
# calculate the unit normal vector of the current face (taking points CCW)
if j < nVertices - 1: # all but the set of point that close the shape
normal = np.array([vertices[j+1, 1]-vertices[j, 1],
-(vertices[j+1, 0]-vertices[j, 0])])
unit_normals[j] = normal/np.linalg.norm(normal)
else: # the set of points that close the shape
normal = np.array([vertices[0, 1]-vertices[j, 1],
-(vertices[0, 0]-vertices[j, 0])])
unit_normals[j] = normal/np.linalg.norm(normal)
return vertices, unit_normals
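# Illustrative sketch (not part of the original module): boundary faces of a
# unit square; relies on the numpy/scipy imports already required by
# calculate_boundary, and the helper name is invented.
def _calculate_boundary_example():
    square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    vertices, unit_normals = calculate_boundary(square)
    # one unit normal per face, ordered to match the returned CCW vertices
    return vertices, unit_normals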
def calculate_distance(points, vertices, unit_normals, return_bool=False):
"""
:param points: points that you want to calculate the distance from to the faces of the convex hull
:param vertices: vertices of the convex hull CCW in order s.t. vertices[i] -> first point of face for
unit_normals[i]
:param unit_normals: unit normal vector for each face CCW where vertices[i] is first point of face
:param return_bool: set to True to return an array of bools where True means the corresponding point
is inside the hull
:return face_distance: signed perpendicular distance from each point to each face; + is inside
:return [inside]: (optional) an array of zeros and ones where 1.0 means the corresponding point is inside the hull
"""
# print points.shape, vertices.shape, unit_normals.shape
nPoints = points.shape[0]
nVertices = vertices.shape[0]
# initialize array to hold distances from each point to each face
face_distance = np.zeros([nPoints, nVertices])
if not return_bool:
# loop through points and find distance to each face
for i in range(0, nPoints):
# determine if point is inside or outside of each face, and distance from each face
for j in range(0, nVertices):
# define the vector from the point of interest to the first point of the face
pa = np.array([vertices[j, 0]-points[i, 0], vertices[j, 1]-points[i, 1]])
# find perpendicular distance from point to current surface (vector projection)
d_vec = np.vdot(pa, unit_normals[j])*unit_normals[j]
# calculate the sign of perpendicular distance from point to current face (+ is inside, - is outside)
face_distance[i, j] = np.vdot(d_vec, unit_normals[j])
return face_distance
else:
# initialize array to hold boolean indicating whether a point is inside the hull or not
inside = np.zeros(nPoints)
# loop through points and find distance to each face
for i in range(0, nPoints):
# determine if point is inside or outside of each face, and
def test_sync_child_tickets(self):
"""
Test to ensure that a ticket will sync related objects,
in its case schedule, note, and time entries
"""
fixture_utils.init_schedule_entries()
fixture_utils.init_service_notes()
synchronizer = self.sync_class()
ticket = models.Ticket.objects.get(id=self.ticket_fixture['id'])
# Change some fields on all child objects
updated_fixture = deepcopy(fixtures.API_SERVICE_NOTE_LIST[0])
updated_fixture['ticketId'] = ticket.id
updated_fixture['text'] = 'Some new text'
fixture_list = [updated_fixture]
method_name = 'djconnectwise.api.ServiceAPIClient.get_notes'
mock_call, _patch = mocks.create_mock_call(method_name, fixture_list)
_, _task_patch = mocks.create_mock_call(
"djconnectwise.sync.TicketTaskSynchronizer.sync_tasks",
None
)
updated_fixture = deepcopy(fixtures.API_TIME_ENTRY)
updated_fixture['chargeToId'] = ticket.id
updated_fixture['text'] = 'Some new text'
updated_fixture['timeEnd'] = '2005-05-16T15:00:00Z'
fixture_list = [updated_fixture]
method_name = 'djconnectwise.api.TimeAPIClient.get_time_entries'
mock_call, _patch = mocks.create_mock_call(method_name, fixture_list)
updated_fixture = deepcopy(fixtures.API_SALES_ACTIVITY)
fixture_list = [updated_fixture]
method_name = 'djconnectwise.api.SalesAPIClient.get_activities'
mock_call, _patch = mocks.create_mock_call(method_name, fixture_list)
method_name = 'djconnectwise.api.TicketAPIMixin.get_ticket'
mock_call, _patch = \
mocks.create_mock_call(method_name, self.ticket_fixture)
# Trigger method called on callback
synchronizer.fetch_sync_by_id(ticket.id)
# Get the new Values from the db
updated_note = models.ServiceNote.objects.filter(ticket=ticket)[0]
updated_time = models.TimeEntry.objects.filter(charge_to_id=ticket)[0]
_task_patch.stop()
# Confirm that they have all been updated
self.assertEqual('Some new text', updated_note.text)
self.assertEqual(
datetime.datetime(
2005, 5, 16, 15, 0, tzinfo=datetime.timezone.utc),
updated_time.time_end
)
def test_sync_updated(self):
updated_ticket_fixture = deepcopy(self.ticket_fixture)
updated_ticket_fixture['summary'] = 'A new kind of summary'
fixture_list = [updated_ticket_fixture]
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = mocks.create_mock_call(method_name, fixture_list)
synchronizer = self.sync_class()
created_count, updated_count, _, _ = synchronizer.sync()
self.assertEqual(created_count, 0)
self.assertEqual(updated_count, len(fixture_list))
instance = models.Ticket.objects.get(id=updated_ticket_fixture['id'])
self._assert_sync(instance, updated_ticket_fixture)
def test_sync_skips(self):
# Update the ticket to know it skips it the second time
updated_ticket_fixture = deepcopy(self.ticket_fixture)
updated_ticket_fixture['summary'] = 'A new kind of summary'
fixture_list = [updated_ticket_fixture]
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
_, _patch = mocks.create_mock_call(method_name, fixture_list)
synchronizer = self.sync_class()
# Synchronizer is called twice, as we are testing that when
# synchronizers are called twice it skips the record if no change is
# detected.
synchronizer.sync()
created_count, _, skipped_count, _ = synchronizer.sync()
self.assertEqual(created_count, 0)
self.assertEqual(skipped_count, len(fixture_list))
instance = models.Ticket.objects.get(id=updated_ticket_fixture['id'])
self._assert_sync(instance, updated_ticket_fixture)
def test_sync_multiple_status_batches(self):
sync.MAX_URL_LENGTH = 330
sync.MIN_URL_LENGTH = 320
self._init_data()
fixture_utils.init_tickets()
updated_ticket_fixture = deepcopy(self.ticket_fixture)
updated_ticket_fixture['summary'] = 'A new kind of summary'
fixture_list = [updated_ticket_fixture]
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = mocks.create_mock_call(method_name, fixture_list)
synchronizer = sync.ServiceTicketSynchronizer()
synchronizer.batch_condition_list.extend(
[234234, 345345, 234213, 2344523, 345645]
)
created_count, updated_count, _, _ = synchronizer.sync()
self.assertEqual(mock_call.call_count, 2)
def test_delete_stale_tickets(self):
"""Local ticket should be deleted if omitted from sync"""
ticket_id = self.ticket_fixture['id']
ticket_qset = models.Ticket.objects.filter(id=ticket_id)
self.assertEqual(ticket_qset.count(), 1)
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = mocks.create_mock_call(method_name, [])
synchronizer = self.sync_class(full=True)
synchronizer.sync()
self.assertEqual(ticket_qset.count(), 0)
_patch.stop()
def test_callback_sync_time_entry(self):
# Sync initial time entry
mocks.time_api_get_time_entries_call(fixtures.API_TIME_ENTRY_LIST)
time_entry_sync = sync.TimeEntrySynchronizer()
time_entry_sync.sync()
self.assertGreater(
models.SyncJob.objects.filter(entity_name='TimeEntry').count(), 0
)
# Mock the child class syncs
mocks.service_api_get_notes_call(fixtures.API_SERVICE_NOTE_LIST)
mocks.sales_api_get_activities_call(fixtures.API_SALES_ACTIVITIES)
_, _task_patch = mocks.create_mock_call(
"djconnectwise.sync.TicketTaskSynchronizer.sync_tasks",
None
)
method_name = 'djconnectwise.sync.TicketSynchronizerMixin.get_single'
mock_call, _patch = \
mocks.create_mock_call(method_name, self.ticket_fixture)
# Create new time entry to sync
new_time_entry = deepcopy(fixtures.API_TIME_ENTRY)
new_time_entry['id'] = 3
mocks.time_api_get_time_entries_call([new_time_entry,
fixtures.API_TIME_ENTRY])
ticket_id = self.ticket_fixture['id']
synchronizer = self.sync_class()
# Simulate ticket getting updated by a callback
synchronizer.fetch_sync_by_id(ticket_id)
# Verify that no time entries are removed,
# and that only one entry is added
last_sync_job = \
models.SyncJob.objects.filter(entity_name='TimeEntry').last()
_task_patch.stop()
self.assertEqual(last_sync_job.deleted, 0)
self.assertEqual(last_sync_job.updated, 0)
self.assertEqual(last_sync_job.added, 1)
self.assertEqual(last_sync_job.sync_type, 'partial')
class TestServiceTicketSynchronizer(TestTicketSynchronizerMixin, TestCase):
sync_class = sync.ServiceTicketSynchronizer
ticket_fixture = fixtures.API_SERVICE_TICKET
def setUp(self):
super().setUp()
self._init_data()
fixture_utils.init_tickets()
def _assert_sync(self, instance, json_data):
self.assertEqual(instance.summary, json_data['summary'])
self.assertEqual(instance.closed_flag, json_data.get('closedFlag'))
self.assertEqual(instance.entered_date_utc,
parse(json_data.get('_info').get('dateEntered')))
self.assertEqual(instance.last_updated_utc,
parse(json_data.get('_info').get('lastUpdated')))
self.assertEqual(instance.required_date_utc,
parse(json_data.get('requiredDate')))
self.assertEqual(instance.resources, json_data.get('resources'))
self.assertEqual(instance.budget_hours, json_data.get('budgetHours'))
self.assertEqual(instance.actual_hours, json_data.get('actualHours'))
self.assertEqual(instance.record_type, json_data.get('recordType'))
self.assertEqual(instance.parent_ticket_id,
json_data.get('parentTicketId'))
self.assertEqual(instance.has_child_ticket,
json_data.get('hasChildTicket'))
# verify assigned team
self.assertEqual(instance.team_id, json_data['team']['id'])
# verify assigned board
self.assertEqual(instance.board_id, json_data['board']['id'])
# verify assigned company
self.assertEqual(instance.company_id, json_data['company']['id'])
# verify assigned priority
self.assertEqual(instance.priority_id, json_data['priority']['id'])
# verify assigned location
self.assertEqual(instance.location_id,
json_data['serviceLocation']['id'])
# verify assigned status
self.assertEqual(instance.status_id,
json_data['status']['id'])
# verify assigned type
self.assertEqual(instance.type_id, json_data['type']['id'])
# verify assigned subtype
self.assertEqual(instance.sub_type_id, json_data['subType']['id'])
# verify assigned subtype item
self.assertEqual(instance.sub_type_item_id, json_data['item']['id'])
self.assertEqual(instance.bill_time, json_data['billTime'])
self.assertEqual(instance.automatic_email_cc_flag,
json_data['automaticEmailCcFlag'])
self.assertEqual(instance.automatic_email_contact_flag,
json_data['automaticEmailContactFlag'])
self.assertEqual(instance.automatic_email_resource_flag,
json_data['automaticEmailResourceFlag'])
self.assertEqual(instance.automatic_email_cc,
json_data['automaticEmailCc'])
self.assertEqual(instance.agreement, json_data['agreement'])
def test_project_tickets_not_deleted_during_sync(self):
"""
Verify that during a sync of service tickets, no project tickets are
removed.
"""
synchronizer = self.sync_class(full=True)
synchronizer.sync()
self.assertTrue(
models.Ticket.objects.get(id=self.ticket_fixture['id']))
project_ticket = models.Ticket.objects.create(
summary='Project ticket',
record_type='ProjectTicket'
)
project_ticket.save()
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = mocks.create_mock_call(method_name, [])
synchronizer = self.sync_class(full=True)
synchronizer.sync()
# Verify that the service ticket has been removed and project ticket
# still exists.
self.assertEqual(
models.Ticket.objects.get(id=project_ticket.id), project_ticket)
self.assertFalse(
models.Ticket.objects.filter(
id=self.ticket_fixture['id']).exists())
def test_callback_sync_service_note(self):
# Sync initial service note
mocks.service_api_get_notes_call(fixtures.API_SERVICE_NOTE_LIST)
service_note_sync = sync.ServiceNoteSynchronizer()
service_note_sync.sync()
self.assertGreater(
models.SyncJob.objects.filter(entity_name='ServiceNote').count(), 0
)
# Mock the child class syncs
mocks.time_api_get_time_entries_call(fixtures.API_TIME_ENTRY_LIST)
mocks.sales_api_get_activities_call(fixtures.API_SALES_ACTIVITIES)
_, _task_patch = mocks.create_mock_call(
"djconnectwise.sync.TicketTaskSynchronizer.sync_tasks",
None
)
method_name = 'djconnectwise.sync.TicketSynchronizerMixin.get_single'
mock_call, _patch = \
mocks.create_mock_call(method_name, self.ticket_fixture)
# Create new service note to sync
new_service_note = deepcopy(fixtures.API_SERVICE_NOTE_LIST[0])
new_service_note['id'] = self.ticket_fixture['id']
mocks.service_api_get_notes_call(
[new_service_note, fixtures.API_SERVICE_NOTE_LIST[0]]
)
ticket_id = self.ticket_fixture['id']
synchronizer = self.sync_class()
# Simulate ticket getting updated by a callback
synchronizer.fetch_sync_by_id(ticket_id)
# Verify that no notes are removed, and that only one note is added
last_sync_job = \
models.SyncJob.objects.filter(entity_name='ServiceNote').last()
_task_patch.stop()
self.assertEqual(last_sync_job.deleted, 0)
self.assertEqual(last_sync_job.updated, 0)
self.assertEqual(last_sync_job.added, 1)
self.assertEqual(last_sync_job.sync_type, 'partial')
class TestProjectTicketSynchronizer(TestTicketSynchronizerMixin, TestCase):
sync_class = sync.ProjectTicketSynchronizer
ticket_fixture = fixtures.API_PROJECT_TICKET
def setUp(self):
super().setUp()
mocks.project_api_tickets_call()
self._init_data()
fixture_utils.init_project_tickets()
def _assert_sync(self, instance, json_data):
self.assertEqual(instance.summary, json_data['summary'])
self.assertEqual(instance.closed_flag, json_data.get('closedFlag'))
self.assertEqual(instance.last_updated_utc,
parse(json_data.get('_info').get('lastUpdated')))
self.assertEqual(instance.required_date_utc,
parse(json_data.get('requiredDate')))
self.assertEqual(instance.resources, json_data.get('resources'))
self.assertEqual(instance.budget_hours, json_data.get('budgetHours'))
self.assertEqual(instance.actual_hours, json_data.get('actualHours'))
# verify assigned board
self.assertEqual(instance.board_id, json_data['board']['id'])
# verify assigned company
self.assertEqual(instance.company_id, json_data['company']['id'])
# verify assigned priority
self.assertEqual(instance.priority_id, json_data['priority']['id'])
# verify assigned location
self.assertEqual(instance.location_id,
json_data['serviceLocation']['id'])
# verify assigned project
self.assertEqual(instance.project_id,
json_data['project']['id'])
# verify assigned status
self.assertEqual(instance.status_id,
json_data['status']['id'])
# verify assigned type
self.assertEqual(instance.type_id, json_data['type']['id'])
# verify assigned subtype
self.assertEqual(instance.sub_type_id, json_data['subType']['id'])
# verify assigned subtype item
self.assertEqual(instance.sub_type_item_id, json_data['item']['id'])
self.assertEqual(instance.bill_time, json_data['billTime'])
self.assertEqual(instance.automatic_email_cc_flag,
json_data['automaticEmailCcFlag'])
self.assertEqual(instance.automatic_email_contact_flag,
json_data['automaticEmailContactFlag'])
self.assertEqual(instance.automatic_email_resource_flag,
json_data['automaticEmailResourceFlag'])
self.assertEqual(instance.automatic_email_cc,
json_data['automaticEmailCc'])
self.assertEqual(instance.agreement, json_data['agreement'])
def test_service_tickets_not_deleted_during_sync(self):
"""
Verify that during a sync of project tickets, no service tickets are
removed.
"""
synchronizer = self.sync_class(full=True)
synchronizer.sync()
self.assertTrue(
models.Ticket.objects.get(id=self.ticket_fixture['id']))
service_ticket = models.Ticket.objects.create(
summary='Service ticket',
record_type='ServiceTicket'
)
service_ticket.save()
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = mocks.create_mock_call(method_name, [])
synchronizer = self.sync_class(full=True)
synchronizer.sync()
# Verify that the project ticket has been removed and service ticket
# still exists.
self.assertEqual(
models.Ticket.objects.get(id=service_ticket.id), service_ticket)
self.assertFalse(
models.Ticket.objects.filter(
id=self.ticket_fixture['id']).exists())
class TestActivityStatusSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ActivityStatusSynchronizer
model_class = models.ActivityStatusTracker
fixture = fixtures.API_SALES_ACTIVITY_STATUSES
def call_api(self, return_data):
return mocks.sales_api_get_activities_statuses_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.default_flag, json_data['defaultFlag'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
self.assertEqual(
instance.spawn_followup_flag,
json_data.get('spawnFollowupFlag', False)
)
self.assertEqual(instance.closed_flag, json_data['closedFlag'])
class TestActivityTypeSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ActivityTypeSynchronizer
model_class = models.ActivityTypeTracker
fixture = fixtures.API_SALES_ACTIVITY_TYPES
def call_api(self, return_data):
return mocks.sales_api_get_activities_types_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.default_flag, json_data['defaultFlag'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
self.assertEqual(instance.email_flag, json_data['emailFlag'])
self.assertEqual(instance.memo_flag, json_data['memoFlag'])
self.assertEqual(instance.history_flag, json_data['historyFlag'])
class TestActivitySynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ActivitySynchronizer
model_class = models.ActivityTracker
fixture = fixtures.API_SALES_ACTIVITIES
def setUp(self):
mocks.system_api_get_member_image_by_photo_id_call(
(mocks.CW_MEMBER_IMAGE_FILENAME, mocks.get_member_avatar()))
fixture_utils.init_work_roles()
fixture_utils.init_members()
fixture_utils.init_tickets()
fixture_utils.init_territories()
fixture_utils.init_companies()
fixture_utils.init_opportunity_types()
fixture_utils.init_opportunity_stages()
fixture_utils.init_opportunities()
fixture_utils.init_activity_statuses()
fixture_utils.init_activity_types()
fixture_utils.init_agreements()
fixture_utils.init_activities()
def call_api(self, return_data):
return mocks.sales_api_get_activities_call(return_data)
def _get_datetime(self, instance, date_field):
date_field = instance.get(date_field)
if date_field:
date_field = parse(date_field, default=parse('00:00Z'))
return date_field
def _assert_fields(self, activity, api_activity):
self.assertEqual(activity.name, api_activity['name'])
self.assertEqual(activity.notes, api_activity['notes'])
self.assertEqual(activity.date_start,
self._get_datetime(api_activity, 'dateStart')
)
self.assertEqual(activity.date_end,
self._get_datetime(api_activity, 'dateEnd')
)
self.assertEqual(activity.assign_to_id, api_activity['assignTo']['id'])
self.assertEqual(activity.opportunity_id,
api_activity['opportunity']['id'])
if api_activity['ticket'] is not None:
self.assertEqual(activity.ticket_id, api_activity['ticket']['id'])
self.assertEqual(
activity.status_id, api_activity['status']['id']
)
self.assertEqual(
activity.type_id, api_activity['type']['id']
)
self.assertEqual(
activity.company_id, api_activity['company']['id']
)
self.assertEqual(
activity.agreement_id, api_activity['agreement']['id']
)
def test_sync_null_member_activity(self):
null_member_activity = deepcopy(fixtures.API_SALES_ACTIVITY)
null_member_activity['id'] = 999
null_member_activity['assignTo'] = {'id': 99999}  # Member that does not exist
activity_list = [null_member_activity]
method_name = 'djconnectwise.api.SalesAPIClient.get_activities'
mock_call, _patch = \
mocks.create_mock_call(method_name, activity_list)
synchronizer = sync.ActivitySynchronizer(full=True)
created_count, updated_count, skipped_count, deleted_count = \
synchronizer.sync()
# The existing Activity (#47) should be deleted since it is not
# returned when the sync is run.
self.assertEqual(created_count, 0)
self.assertEqual(updated_count, 0)
self.assertEqual(deleted_count, 1)
def test_sync_activity_null_assign_to(self):
"""
Verify that an activity with a null 'assignTo' field is skipped.
"""
null_assign_to_activity = deepcopy(fixtures.API_SALES_ACTIVITY)
null_assign_to_activity['id'] = 888
null_assign_to_activity['assignTo'] = None
activity_list = [null_assign_to_activity]
method_name = 'djconnectwise.api.SalesAPIClient.get_activities'
mock_call, _patch = \
mocks.create_mock_call(method_name, activity_list)
synchronizer = sync.ActivitySynchronizer(full=True)
created_count, updated_count, skipped_count, deleted_count = \
synchronizer.sync()
# The activity with a null 'assignTo' should be skipped rather than
# created or updated.
self.assertEqual(skipped_count, 1)
self.assertEqual(created_count, 0)
self.assertEqual(updated_count, 0)
class TestSyncTicketTasks(TestCase):
def setUp(self):
self.ticket = models.Ticket()
self.ticket.save()
def tearDown(self):
self.ticket.delete()
def test_sync_tasks(self):
mocks.create_mock_call(
"djconnectwise.sync.ServiceTicketTaskSynchronizer.get",
[
{'closed_flag': False},
{'closed_flag': True},
{'closed_flag': False}
]
)
self.assertIsNone(self.ticket.tasks_total)
self.assertIsNone(self.ticket.tasks_completed)
synchronizer = sync.ServiceTicketTaskSynchronizer()
synchronizer.sync_tasks(self.ticket)
self.assertEqual(3, self.ticket.tasks_total)
self.assertEqual(1, self.ticket.tasks_completed)
class TestSyncSettings(TestCase):
def test_default_batch_size(self):
synchronizer = sync.BoardSynchronizer()
self.assertEqual(synchronizer.batch_size, 50)
def test_dynamic_batch_size(self):
method_name = 'djconnectwise.utils.DjconnectwiseSettings.get_settings'
request_settings = {
'batch_size': 10,
'timeout': 10.0,
}
_, _patch = mocks.create_mock_call(method_name, request_settings)
synchronizer = sync.BoardSynchronizer()
self.assertEqual(synchronizer.batch_size,
request_settings['batch_size'])
_patch.stop()
class MockSynchronizer:
error_message = 'One heck of an error'
model_class = models.TicketTracker
full = False
@log_sync_job
def sync(self):
return 1, 2, 3, 4
@log_sync_job
def sync_with_error(self):
raise ValueError(self.error_message)
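# (Descriptive note, inferred from the mock class above rather than from
# the log_sync_job implementation: sync() returns the
# (created, updated, skipped, deleted) counts the decorator is expected
# to record on a SyncJob row, while sync_with_error() exercises how a
# failing sync gets logged.)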
class TestSyncJob(TestCase):
def setUp(self):
self.synchronizer = MockSynchronizer()
def assert_sync_job(self, created, updated, skipped, deleted, message,
from os import path
import glob
from updater import get_updater
from filter import get_filter
from cache import get_cache
from merge import (FileHunk, MemoryHunk, apply_filters, merge,
make_url, merge_filters)
__all__ = ('Bundle', 'BundleError',)
class BundleError(Exception):
pass
class BuildError(BundleError):
pass
class Bundle(object):
"""A bundle is the unit django-assets uses to organize groups of media
files, which filters to apply and where to store them.
Bundles can be nested.
"""
def __init__(self, *contents, **options):
self.env = None
self.contents = contents
self.output = options.get('output')
self.filters = options.get('filters')
self.debug = options.get('debug')
self.extra_data = {}
def __repr__(self):
return "<Bundle output=%s, filters=%s, contents=%s>" % (
self.output,
self.filters,
self.contents,
)
def _get_filters(self):
return self._filters
def _set_filters(self, value):
"""Filters may be specified in a variety of different ways,
including by giving their name; we need to make sure we resolve
everything to an actual filter instance.
"""
if value is None:
self._filters = ()
return
if isinstance(value, basestring):
filters = map(unicode.strip, unicode(value).split(','))
elif isinstance(value, (list, tuple)):
filters = value
else:
filters = [value]
self._filters = [get_filter(f) for f in filters]
filters = property(_get_filters, _set_filters)
def _get_contents(self):
return self._contents
def _set_contents(self, value):
self._contents = value
self._resolved_contents = None
contents = property(_get_contents, _set_contents)
def resolve_contents(self, env):
"""Returns contents, with globbed patterns resolved to actual
filenames.
"""
# TODO: We cache the values, which in theory is problematic, since
# due to changes in the env object, the result of the globbing may
# change. Not to mention that a different env object may be passed
# in. We should find a fix for this.
if not getattr(self, '_resolved_contents', None):
l = []
for item in self.contents:
if isinstance(item, basestring):
# We only go through glob() if this actually is a
# pattern; this means that invalid filenames will
# remain in the content set, and only raise an error
# at a later point in time.
# TODO: This is possibly a good place to check for
# a file's existence, though; currently, when in debug
# mode, no error would be raised at all, and simply a
# broken url would be sent to the browser.
if glob.has_magic(item):
path = env.abspath(item)
for f in glob.glob(path):
l.append(f[len(path)-len(item):])
else:
l.append(item)
else:
l.append(item)
self._resolved_contents = l
return self._resolved_contents
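# For example (hypothetical layout, assuming ``env.abspath('js/*.js')``
# is '/srv/media/js/*.js' and two files match):
#   Bundle('js/*.js').resolve_contents(env)
#   # => ['js/app.js', 'js/main.js']
# i.e. glob patterns are expanded relative to the media directory,
# while plain filenames are passed through unchanged.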
def determine_action(self, env):
"""Decide what needs to be done when this bundle needs to be
resolved.
Specifically, whether to apply filters and whether to merge. This
depends on both the global settings, as well as the ``debug``
attribute of this bundle.
Returns a 2-tuple of (should_merge, should_filter). The latter
always implies the former.
"""
if not env.debug:
return True, True
debug = self.debug if self.debug is not None else env.debug
if debug == 'merge':
return True, False
elif debug is True:
return False, False
elif debug is False:
return True, True
else:
raise BundleError('Invalid debug value: %s' % debug)
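# For example (sketch of the method above): with ``env.debug == True``
# and ``self.debug == 'merge'`` this returns (True, False), i.e. merge
# the contents but skip the filters; with ``env.debug == False`` it
# always returns (True, True).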
def get_files(self, env=None):
"""Return a flattened list of all source files of this bundle,
and all the nested bundles.
"""
env = self._get_env(env)
files = []
for c in self.resolve_contents(env):
if isinstance(c, Bundle):
files.extend(c.get_files(env))
else:
files.append(env.abspath(c))
return files
@property
def is_container(self):
"""Return true if this is a container bundle, that is, a bundle
that acts only as a container for a number of sub-bundles.
It must not contain any files of its own, and must have an
empty ``output`` attribute.
"""
has_files = any([c for c in self.contents if not isinstance(c, Bundle)])
return not has_files and not self.output
def _get_env(self, env):
# Note how bool(env) can be False, due to __len__.
env = env if env is not None else self.env
if env is None:
raise BundleError('Bundle is not connected to an environment')
return env
def _build(self, env, output_path, force, no_filters, parent_filters=[]):
"""Internal recursive build method.
"""
# TODO: We could support a nested bundle downgrading its debug
# setting from "filters" to "merge only", i.e. enabling
# ``no_filters``. We cannot support downgrading to
# "full debug/no merge" (debug=True), of course.
#
# Right now we simply use the debug setting of the root bundle
# we build, and it overrides all the nested bundles. If we
# allow nested bundles to overwrite the debug value of parent
# bundles, as described above, then we should also deal with
# a child bundle enabling debug=True during a merge, i.e.
# raising an error rather than ignoring it as we do now.
resolved_contents = self.resolve_contents(env)
if not resolved_contents:
raise BuildError('empty bundle cannot be built')
# Ensure that the filters are ready
for filter in self.filters:
filter.set_environment(env)
# Apply input filters to all the contents. Note that we use
# both this bundle's filters as well as those given to us by
# the parent. We ONLY do this for the input filters,
# because we need them to be applied before we apply our own
# output filters.
# TODO: Note that merge_filters() removes duplicates. Is this
# really the right thing to do, or does it just confuse things
# due to there now being different kinds of behavior...
combined_filters = merge_filters(self.filters, parent_filters)
cache = get_cache(env)
hunks = []
for c in resolved_contents:
if isinstance(c, Bundle):
hunk = c._build(env, output_path, force, no_filters,
combined_filters)
hunks.append(hunk)
else:
hunk = FileHunk(env.abspath(c))
if no_filters:
hunks.append(hunk)
else:
hunks.append(apply_filters(
hunk, combined_filters, 'input', cache,
output_path=output_path))
# Return all source hunks as one, with output filters applied
final = merge(hunks)
if no_filters:
return final
else:
return apply_filters(final, self.filters, 'output', cache)
def build(self, env=None, force=False, no_filters=False):
"""Build this bundle, meaning create the file given by the
``output`` attribute, applying the configured filters etc.
A ``FileHunk`` will be returned.
TODO: Support locking. When called from inside a template tag,
this should lock, so that multiple requests don't all start
to build. When called from the command line, there is no need
to lock.
"""
if not self.output:
raise BuildError('No output target found for %s' % self)
env = self._get_env(env)
# Determine if we really need to build, or if the output file
# already exists and nothing has changed.
if force:
update_needed = True
elif not path.exists(env.abspath(self.output)):
if not env.updater:
raise BuildError(('\'%s\' needs to be created, but '
'automatic building is disabled ('
'configure an updater)') % self)
else:
update_needed = True
else:
source_paths = [p for p in self.get_files(env)]
update_needed = get_updater(env.updater)(
env.abspath(self.output), source_paths)
if not update_needed:
# We can simply return the existing output file
return FileHunk(env.abspath(self.output))
hunk = self._build(env, self.output, force, no_filters)
hunk.save(env.abspath(self.output))
return hunk
def iterbuild(self, env=None):
"""Iterate over the bundles which actually need to be built.
This will often only entail ``self``, though for container
bundles (and container bundle hierarchies), a list of all the
non-container leafs will be yielded.
Essentially, what this does is "skip" bundles which do not need
to be built on their own (container bundles), and gives the
caller the child bundles instead.
"""
env = self._get_env(env)
if self.is_container:
for bundle in self.resolve_contents(env):
if bundle.is_container:
for t in bundle.iterbuild(env):
yield t
else:
yield bundle
else:
yield self
def _urls(self, env, *args, **kwargs):
env = self._get_env(env)
supposed_to_merge, do_filter = self.determine_action(env)
if supposed_to_merge and (self.filters or self.output):
# We need to build this bundle, unless a) the configuration
# tells us not to ("determine_action"), or b) this bundle
# isn't actually configured to be built, that is, has no
# filters and no output target.
hunk = self.build(env, no_filters=not do_filter, *args, **kwargs)
return [make_url(env, self.output)]
else:
# We either have no files (nothing to build), or we are
# in debug mode: Instead of building the bundle, we
# source all contents instead.
urls = []
for c in self.resolve_contents(env):
if isinstance(c, Bundle):
urls.extend(c.urls(env, *args, **kwargs))
else:
urls.append(make_url(env, c, expire=False))
return urls
def urls(self, env=None, *args, **kwargs):
"""Return a list of urls for this bundle.
Depending on the environment and given options, this may be a
single url (likely the case in production mode), or many urls
(when we source
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
# MDAnalysis -- density analysis
# Copyright (c) 2007-2011 <NAME> <<EMAIL>>
# (based on code from Hop --- a framework to analyze solvation dynamics from MD simulations)
r"""Generating densities from trajectories --- :mod:`MDAnalysis.analysis.density`
=============================================================================
:Author: O<NAME>
:Year: 2011
:Copyright: GNU Public License v3
The module provides classes and functions to generate and represent
volumetric data, in particular densities.
Generating a density from a MD trajectory
-----------------------------------------
A common use case is to analyze the solvent density around a protein of
interest. The density is calculated with :func:`density_from_Universe` in the
fixed coordinate system of the simulation unit cell. It is therefore necessary
to orient and fix the protein with respect to the box coordinate system. In
practice this means centering and superimposing the protein, frame by frame, on
a reference structure and translating and rotating all other components of the
simulation with the protein. In this way, the solvent will appear in the
reference frame of the protein.
An input trajectory must
1. have been centered on the protein of interest;
2. have all molecules made whole that have been broken across periodic
boundaries [#pbc]_;
3. have the solvent molecules remapped so that they are closest to the
solute (this is important when using triclinic unit cells such as
a dodecahedron or a truncated octahedron) [#pbc]_.
4. have a fixed frame of reference; for instance, by superimposing a protein
on a reference structure so that one can study the solvent density around
it [#fit]_.
To generate the density of water molecules around a protein (assuming that the
trajectory is already appropriately treated for periodic boundary artifacts and
is suitably superimposed to provide a fixed reference frame) [#testraj]_ ::
from MDAnalysis.analysis.density import density_from_Universe
u = Universe(TPR, XTC)
D = density_from_Universe(u, delta=1.0, atomselection="name OW")
D.convert_density('TIP4P')
D.export("water.dx", type="double")
The positions of all water oxygens are histogrammed on a grid with spacing
*delta* = 1 Å. Initially the density is measured in :math:`\text{Å}^{-3}`. With
the :meth:`Density.convert_density` method, the units of measurement are
changed. In the example we are now measuring the density relative to the
literature value of the TIP4P water model at ambient conditions (see the values
in :data:`MDAnalysis.units.water` for details). Finally, the density is written
as an OpenDX_ compatible file that can be read in VMD_, Chimera_, or PyMOL_.
See :class:`Density` for details. In particular, the density is stored
as a NumPy array in :attr:`Density.grid`, which can be processed in
any manner.
Creating densities
------------------
The following functions take trajectory or coordinate data and generate a
:class:`Density` object.
.. autofunction:: density_from_Universe
.. autofunction:: density_from_PDB
.. autofunction:: Bfactor2RMSF
Supporting classes and functions
--------------------------------
The main output of the density creation functions is a
:class:`Density` instance, which is derived from a
:class:`gridData.core.Grid`. A :class:`Density` is essentially a 3D
array with origin and lengths together with associated metadata (which
can be used in downstream processing).
.. autoclass:: Density
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: BfactorDensityCreator
:members:
.. autofunction:: notwithin_coordinates_factory
.. rubric:: Footnotes
.. [#pbc] Making molecules whole can be accomplished with the
:meth:`MDAnalysis.core.groups.AtomGroup.wrap` of
:attr:`Universe.atoms` (use ``compound="fragments"``).
When using, for instance, the Gromacs_ command `gmx trjconv`_
.. code-block:: bash
gmx trjconv -pbc mol -center -ur compact
one can make the molecules whole ``-pbc whole``, center it on a group
(``-center``), and also pack all molecules in a compact unitcell
representation, which can be useful for density generation.
.. [#fit] Superposition can be performed with
:class:`MDAnalysis.analysis.align.AlignTraj`.
The Gromacs_ command `gmx trjconv`_
.. code-block:: bash
gmx trjconv -fit rot+trans
will also accomplish such a superposition. Note that the fitting has
to be done in a *separate* step from the treatment of the periodic
boundaries [#pbc]_.
.. [#testraj] Note that the trajectory in the example (`XTC`) is *not* properly
made whole and fitted to a reference structure; these steps were
omitted to clearly show the steps necessary for the actual density
calculation.
.. Links
.. -----
.. _OpenDX: http://www.opendx.org/
.. _VMD: http://www.ks.uiuc.edu/Research/vmd/
.. _Chimera: https://www.cgl.ucsf.edu/chimera/
.. _PyMOL: http://www.pymol.org/
.. _Gromacs: http://www.gromacs.org
.. _`gmx trjconv`: http://manual.gromacs.org/programs/gmx-trjconv.html
"""
from __future__ import print_function, division, absolute_import
from six.moves import range, zip
from six import string_types
import numpy as np
import sys
import os
import os.path
import errno
import warnings
from gridData import Grid
import MDAnalysis
from MDAnalysis.core import groups
from MDAnalysis.lib.util import fixedwidth_bins, iterable, asiterable
from MDAnalysis.lib import NeighborSearch as NS
from MDAnalysis import NoDataError, MissingDataWarning
from .. import units
from ..lib import distances
from MDAnalysis.lib.log import ProgressMeter
import logging
logger = logging.getLogger("MDAnalysis.analysis.density")
class Density(Grid):
r"""Class representing a density on a regular cartesian grid.
Parameters
----------
grid : array_like
histogram or density, typically a :class:`numpy.ndarray`
edges : list
list of arrays, the lower and upper bin edges along the axes
parameters : dict
dictionary of class parameters; saved with
:meth:`Density.save`. The following keys are meaningful to
the class. Meaning of the values are listed:
*isDensity*
- ``False``: grid is a histogram with counts [default]
- ``True``: a density
Applying :meth:`Density.make_density` sets it to ``True``.
units : dict
A dict with the keys
- *length*: physical unit of grid edges (Angstrom or nm) [Angstrom]
- *density*: unit of the density if ``isDensity=True`` or ``None``
otherwise; the default is "Angstrom^{-3}" for densities
(meaning :math:`\text{Å}^{-3}`).
(Actually, the default unit is the value of
``MDAnalysis.core.flags['length_unit']``; in most
cases this is "Angstrom".)
metadata : dict
a user defined dictionary of arbitrary values associated with the
density; the class does not touch :attr:`Density.metadata` but
stores it with :meth:`Density.save`
Attributes
----------
grid : array
counts or density
edges : list of 1d-arrays
The boundaries of each cell in `grid` along all axes (equivalent
to what :func:`numpy.histogramdd` returns).
delta : array
Cell size in each dimension.
origin : array
Coordinates of the *center* of the cell at index `grid[0, 0, 0, ...,
0]`, which is considered to be the front lower left corner.
units : dict
The units for lengths and density; change units with the method
:meth:`~Density.convert_length` or :meth:`~Density.convert_density`.
Notes
-----
The data (:attr:`Density.grid`) can be manipulated as a standard numpy
array. Changes can be saved to a file using the :meth:`Density.save` method. The
grid can be restored using the :meth:`Density.load` method or by supplying the
filename to the constructor.
The attribute :attr:`Density.metadata` holds a user-defined dictionary that
can be used to annotate the data. It is also saved with :meth:`Density.save`.
The :meth:`Density.export` method always exports a 3D object (written in
such a way to be readable in VMD_, Chimera_, and PyMOL_), the rest should
work for an array of any dimension. Note that PyMOL_ only understands DX
files with the DX data type "double" in the "array" object (see `known
issues when writing OpenDX files`_ and issue
`MDAnalysis/GridDataFormats#35`_ for details). Using the keyword
``type="double"`` for the method :meth:`Density.export`, the user can
ensure that the DX file is written in a format suitable for PyMOL_.
If the input histogram consists of counts per cell then the
:meth:`Density.make_density` method converts the grid to a physical density. For
a probability density, divide it by :meth:`Density.grid.sum` or use ``normed=True``
right away in :func:`~numpy.histogramdd`.
The user *should* set the *parameters* keyword (see docs for the
constructor); in particular, if the data are already a density, one must
set ``isDensity=True`` because there is no reliable way to detect if
data represent counts or a density. As a special convenience, if data are
read from a file and the user has not set ``isDensity`` then it is assumed
that the data are in fact a density.
.. _`MDAnalysis/GridDataFormats#35`:
https://github.com/MDAnalysis/GridDataFormats/issues/35
.. _`known issues when writing OpenDX files`:
https://www.mdanalysis.org/GridDataFormats/gridData/formats/OpenDX.html#known-issues-for-writing-opendx-files
See Also
--------
gridData.core.Grid : the base class of :class:`Density`.
Examples
--------
Typical use:
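A minimal sketch (assumptions for illustration: ``positions`` is an
(N, 3) array of coordinates and the histogram is built with
:func:`numpy.histogramdd`; only methods documented above are used)::
import numpy as np
grid, edges = np.histogramdd(positions, bins=(100, 100, 100))
D = Density(grid, edges, parameters={'isDensity': False}, units={'length': 'Angstrom'})
D.make_density()
D.convert_density('TIP4P')
D.export('water.dx', type='double')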
"""
All the data sources are scattered around the D drive, this script
organizes it and consolidates it into the "Data" subfolder in the
"Chapter 2 Dune Aspect Ratio" folder.
<NAME>, 5/6/2020
"""
import shutil as sh
import pandas as pd
import numpy as np
import os
# Set the data directory to save files into
DATA_DIR = os.path.join('..', 'Data')
# Set the directory with most of the XBeach data
XB_DIR = os.path.join('..', '..', 'XBeach Modelling', 'Dune Complexity Experiments')
def bogue_lidar_data():
"""
Load all Bogue Banks morphometrics from 1997-2016
and save a dataframe of aspect ratios and natural
dune volumes to the Data folder
"""
# Set a list of years
years = [1997, 1998, 1999, 2000, 2004, 2005, 2010, 2011, 2014, 2016]
# Set an empty dataframe
morpho = pd.DataFrame()
# Loop through the years and load the data
for year in years:
# Set a path to the data and load
path = os.path.join('..', '..', 'Chapter 1 Sand Fences', 'Data', f'Morphometrics for Bogue {year}.csv')
temp = pd.read_csv(path, delimiter=',', header=0)
# Add a column for the year
temp['Year'] = year
# Append the data to the main dataframe
morpho = pd.concat([morpho, temp])
# Make a new dataframe with just aspect ratios and volumes
data = pd.DataFrame()
data['Year'] = morpho['Year']
data['Ratio'] = (morpho['y_crest'] - morpho['y_toe']) / (morpho['x_heel'] - morpho['x_toe'])
data['Volume'] = morpho['Natural Dune Volume']
# Save the Dataframe to the data folder
save_name = os.path.join(DATA_DIR, 'Bogue Banks Volumes and Aspect Ratios.csv')
data.to_csv(save_name, index=False)
print(f'File Saved: {save_name}')
def initial_profiles():
"""
Take all the initial profiles and place them
into a Dataframe to save as a .csv
Make a column for the experiment names, a column for
the X-grids, and columns for the profiles
"""
# Set the experiment names. The initial profiles are the same regardless of
# the surge level so just take from the half surge simulations
experiments = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
# Set an empty dataframe
profiles = pd.DataFrame()
# Loop through the experiments
for experiment in experiments:
# Set a path to the profiles
PROFILE_DIR = os.path.join(XB_DIR, f'{experiment} Half Surge')
# Load the x-grid
x_grid_fname = os.path.join(PROFILE_DIR, 'Dune Complexity 1 1', 'x.grd')
x_grid = np.loadtxt(x_grid_fname)
# Load the dunes
dune_1 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity 1 1', 'bed.dep'))
dune_2 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity 20 1', 'bed.dep'))
dune_3 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity 40 1', 'bed.dep'))
dune_4 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity 60 1', 'bed.dep'))
dune_5 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity -20 1', 'bed.dep'))
dune_6 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity -40 1', 'bed.dep'))
dune_7 = np.loadtxt(fname=os.path.join(PROFILE_DIR, 'Dune Complexity -60 1', 'bed.dep'))
# Put all of the stretched dunes into a dataframe
dune_dict = {
'Experiment': experiment.replace('Joined', 'Aligned'),
'X': x_grid,
'1 pct': dune_1,
'20 pct': dune_2,
'40 pct': dune_3,
'60 pct': dune_4,
'-20 pct': dune_5,
'-40 pct': dune_6,
'-60 pct': dune_7,
}
dune_data = pd.DataFrame(data=dune_dict)
# Concatenate the Dataframes
profiles = pd.concat([profiles, dune_data])
# Save the Dataframe to the data folder
save_name = os.path.join(DATA_DIR, 'Initial Profiles.csv')
profiles.to_csv(save_name, index=False)
print(f'File Saved: {save_name}')
def initial_ratios():
"""
Make a .csv file with the initial dune aspect ratios and
dune volumes for the profiles used in the simulations
"""
# Set the experiment names. The initial profiles are the same regardless of
# the surge level so just take from the half surge simulations
experiments = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
# Set an empty dataframe
ratios = pd.DataFrame()
# Loop through the experiments
for experiment in experiments:
# Load the initial dune ratios
init_ratio_fname = os.path.join(XB_DIR, f'{experiment} Half Surge', 'Setup Data', 'Initial Dune Ratios.csv')
init_ratios = pd.read_csv(init_ratio_fname, delimiter=',', header=None, names=['Stretch', 'Ratio', 'Volume'])
# Add a column for the experiment name
init_ratios['Experiment'] = experiment.replace('Joined', 'Aligned')
# Concatenate the data
ratios = pd.concat([ratios, init_ratios])
# Save the Dataframe to the data folder
save_name = os.path.join(DATA_DIR, 'Initial Dune Ratios.csv')
ratios.to_csv(save_name, index=False)
print(f'File Saved: {save_name}')
def joaquin_and_florence():
"""
Load the storm surge time series' from
Tropical Storm Joaquin and Hurricane
Florence, put them in a .csv file
"""
# Loop through the storms
for storm in ['Joaquin', 'Florence']:
# Load the tide predictions and observations as a Pandas dataframe
filename = os.path.join(XB_DIR, 'Setup Data', f'{storm}.csv')
if storm == 'Joaquin':
parse_dates_cols = ['Date', 'Time']
data_columns = ['Time', 'Predicted', 'Observed']
else:
parse_dates_cols = ['Date', 'Time (GMT)']
data_columns = ['Time', 'Predicted', 'Preliminary', 'Observed']
data = pd.read_csv(filename, delimiter=',', parse_dates=[parse_dates_cols], header=0)
data.columns = data_columns
# Calculate the non-tidal residual
data['NTR'] = data['Observed'] - data['Predicted']
# Load the time data
times = data['Time'].tolist()
data['String Times'] = [t.strftime('%Y-%m-%d %H') for t in times]
# Save the DataFrame as a .csv
save_name = os.path.join(DATA_DIR, f'{storm}.csv')
data.to_csv(save_name, index=False)
def move_csv_output():
"""
Take the .csv files and move them into the "Data" folder,
then rename them from "xboutput.nc" to the name of the simulation
"""
# Set lists with the dune configurations, storm surge
# modifications, storm duration increases, and dune aspect
# ratio stretches
dunes = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
surges = ['Half', 'Normal', 'One Half']
durations = [1, 12, 18, 24, 36, 48]
stretches = [-60, -40, -20, 1, 20, 40, 60]
# Loop through the dunes and surges
for dune in dunes:
for surge in surges:
# Set the experiment folder name
experiment_name = f'{dune} {surge} Surge'
experiment_folder = os.path.join(XB_DIR, experiment_name)
# Make a target folder to move the runs into
save_folder = os.path.join(DATA_DIR, 'XBeach Morphometrics', experiment_name)
if not os.path.exists(save_folder):
os.mkdir(save_folder)
# Loop through the dunes and durations within the experiment
for stretch in stretches:
for duration in durations:
# Set the simulation folder
run_name = f'Dune Complexity {stretch} {duration}'
simulation_folder = os.path.join(experiment_folder, run_name)
# Set the XBeach output file as the source. Set the destination
# name. Then copy the file over
source = os.path.join(simulation_folder, f'{run_name} Morphometrics.csv')
if os.path.exists(source):
destination = os.path.join(save_folder, f'{run_name} Morphometrics.csv')
if not os.path.exists(destination):
sh.copy(source, destination)
print(f'File Successfully Copied: {destination}')
else:
print(f'File already exists: {destination}')
else:
print(f'FILE DOES NOT EXIST: {source}')
def move_field_data():
"""
Move the field data morphometrics from 2017
and 2018 into the data folder
"""
# Set the years
years = [2017, 2018]
# Set a path to the field data
field_dir = os.path.join('..', '..', 'Bogue Banks Field Data')
# Loop through the years
for year in years:
# Identify the source file
source = os.path.join(field_dir, str(year), f'Morphometrics for Bogue Banks {year}.csv')
# Set the target
destination = os.path.join(DATA_DIR, f'Morphometrics for Bogue Banks {year}.csv')
# Copy the file
sh.copy(source, destination)
def move_netcdf_output():
"""
Take the netCDF files and move them into the "Data" folder,
then rename them from "xboutput.nc" to the name of the simulation
"""
# Set lists with the dune configurations, storm surge
# modifications, storm duration increases, and dune aspect
# ratio stretches
dunes = ['Toes Joined', 'Crests Joined', 'Heels Joined', 'Fenced']
surges = ['Half', 'Normal', 'One Half']
durations = [1, 12, 18, 24, 36, 48]
stretches = [-60, -40, -20, 1, 20, 40, 60]
# Loop through the dunes and surges
for dune in dunes:
for surge in surges:
# Set the experiment folder name
experiment_name = f'{dune} {surge} Surge'
experiment_folder = os.path.join(XB_DIR, experiment_name)
# Make a target folder to move the runs into
save_folder = os.path.join(DATA_DIR, 'XBeach Output', experiment_name)
if not os.path.exists(save_folder):
os.mkdir(save_folder)
# Loop through the dunes and durations within the experiment
for stretch in stretches:
for duration in durations:
# Set the simulation folder
run_name = f'Dune Complexity {stretch} {duration}'
simulation_folder = os.path.join(experiment_folder, run_name)
# Set the XBeach output file as the source. Set the destination
# name. Then copy the file over
source = os.path.join(simulation_folder, 'xboutput.nc')
if os.path.exists(source):
destination = os.path.join(save_folder, f'{run_name}.nc')
if not os.path.exists(destination):
sh.copy(source, destination)
print(f'File Successfully Copied: {destination}')
else:
print(f'File already exists: {destination}')
else:
print(f'FILE DOES NOT EXIST: {source}')
def surge_time_series():
"""
Put all the storm time series' into
a .csv file that can be loaded as a
DataFrame
"""
# Set a list of storm surge modifiers
# and storm duration increases
surges, surge_labels = [0.5, 1.0, 1.5], ['Half', 'Normal', 'One Half']
durations = [1, 12, 18, 24, 36, 48]
# =================================================================
#
# Terms and Conditions of Use
#
# Unless otherwise noted, computer program source code of this
# distribution is covered under Crown Copyright, Government of
# Canada, and is distributed under the MIT License.
#
# The Canada wordmark and related graphics associated with this
# distribution are protected under trademark law and copyright law.
# No permission is granted to use them outside the parameters of
# the Government of Canada's corporate identity program. For
# more information, see
# http://www.tbs-sct.gc.ca/fip-pcim/index-eng.asp
#
# Copyright title to all 3rd party software distributed with this
# software is held by the respective copyright holders as noted in
# those files. Users are asked to read the 3rd Party Licenses
# referenced with those assets.
#
# Copyright (c) 2021 Government of Canada
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
# Compute and persist UV Index from WOUDC archive
import csv
from datetime import datetime
import logging
import os
from woudc_data_registry.models import UVIndex, Instrument
from woudc_data_registry import config, registry
from woudc_data_registry.parser import ExtendedCSV
from woudc_data_registry.util import read_file
from woudc_data_registry.epicentre.metadata import add_metadata
LOGGER = logging.getLogger(__name__)
def execute(path, formula_lookup, update, start_year, end_year, bypass):
"""
Orchestrate uv-index generation process
"""
datasets = [
'/'.join([path, 'Spectral_1.0_1']),
'/'.join([path, 'Broad-band_1.0_1']),
'/'.join([path, 'Spectral_2.0_1']),
'/'.join([path, 'Broad-band_2.0_1']),
]
registry_ = registry.Registry()
if not update:
LOGGER.info('erasing current uv index')
registry_.session.query(UVIndex).delete()
registry_.save()
# traverse directory of files
for dataset in datasets:
for dirname, dirnames, filenames in os.walk(dataset):
# only ingest years within range for update command
if update:
split_dir = dirname.split('/')
# determine if file year is valid for ingest
valid_year = True
for base in split_dir:
if base.isnumeric():
int_dir = int(base)
if end_year and int(end_year) < int_dir:
valid_year = False
break
elif start_year and int(start_year) > int_dir:
valid_year = False
break
if not valid_year:
continue
for filename in filenames:
ipath = os.path.join(dirname, filename)
contents = read_file(ipath)
LOGGER.debug('Parsing extcsv {}'.format(ipath))
try:
extcsv = ExtendedCSV(contents)
except Exception as err:
msg = 'Unable to parse extcsv {}: {}'.format(ipath, err)
LOGGER.error(msg)
continue
# get common fields
try:
dataset = extcsv.extcsv['CONTENT']['Category'][0]
level = extcsv.extcsv['CONTENT']['Level'][0]
form = extcsv.extcsv['CONTENT']['Form'][0]
project_id = extcsv.extcsv['CONTENT']['Class'][0]
station_type = extcsv.extcsv['PLATFORM']['Type'][0]
station_id = extcsv.extcsv['PLATFORM']['ID'][0]
gaw_id = extcsv.extcsv['PLATFORM']['GAW_ID'][0]
country = extcsv.extcsv['PLATFORM']['Country'][0]
agency = extcsv.extcsv['DATA_GENERATION']['Agency'][0]
instrument_name = extcsv.extcsv['INSTRUMENT']['Name'][0]
instrument_model = extcsv.extcsv['INSTRUMENT']['Model'][0]
instrument_number = \
extcsv.extcsv['INSTRUMENT']['Number'][0]
instrument_latitude = \
extcsv.extcsv['LOCATION']['Latitude'][0]
instrument_longitude = \
extcsv.extcsv['LOCATION']['Longitude'][0]
instrument_height = extcsv.extcsv['LOCATION']['Height'][0]
timestamp_date = extcsv.extcsv['TIMESTAMP']['Date'][0]
except Exception as err:
msg = 'Unable to get data from extcsv {}: {}'.format(
ipath, err)
LOGGER.error(msg)
continue
if len(station_id) == 2:
station_id = station_id.zfill(3)
station = '{}{}'.format(station_type.upper(), station_id)
if station in formula_lookup.keys():
if dataset.lower() == 'spectral':
# find max set of table groupings
summary_table = 'GLOBAL_SUMMARY_NSF' \
if 'GLOBAL_SUMMARY_NSF' in extcsv.extcsv \
else 'GLOBAL_SUMMARY'
timestamp_count = extcsv.table_count('TIMESTAMP')
global_count = extcsv.table_count('GLOBAL')
summary_count = extcsv.table_count(summary_table)
try:
max_index = max(timestamp_count, global_count,
summary_count)
except ValueError:
max_index = 1
try:
uv_packages = compute_uv_index(ipath, extcsv,
dataset, station,
instrument_name,
country,
formula_lookup,
max_index)
except Exception as err:
msg = 'Unable to compute UV for file {}: {}'.format( # noqa
ipath, err)
LOGGER.error(msg)
continue
elif dataset.lower() == 'broad-band':
try:
uv_packages = compute_uv_index(ipath, extcsv,
dataset, station,
instrument_name,
country,
formula_lookup)
except Exception as err:
msg = 'Unable to compute UV for file {}: {}'.format( # noqa
ipath, err)
LOGGER.error(msg)
continue
else:
msg = 'Unsupported dataset {}. Skipping.'.format(
dataset)
LOGGER.error(msg)
# form ids for data insert
contributor_id = ':'.join([agency, project_id])
deployment_id = ':'.join([station_id, contributor_id])
instrument_id = ':'.join([instrument_name,
instrument_model,
instrument_number, dataset,
deployment_id])
# check if instrument is in registry
exists = registry_.query_by_field(Instrument,
'instrument_id',
instrument_id)
if not exists:
# instrument not found. add it to registry
if bypass:
LOGGER.info('Skipping instrument addition check')
allow_add_instrument = True
else:
response = \
input('Instrument {} not found. '
'Add? (y/n) [n]: '
.format(instrument_id))
allow_add_instrument = \
response.lower() in ['y', 'yes']
if allow_add_instrument:
instrument_ = {
'station_id': station_id,
'dataset_id': dataset,
'contributor': agency,
'project': project_id,
'name': instrument_name,
'model': instrument_model,
'serial': instrument_number,
'start_date': datetime.now(),
'x': instrument_longitude,
'y': instrument_latitude,
'z': instrument_height,
}
add_metadata(Instrument, instrument_,
True, False)
# compute max daily uv index value
uv_max = None
for package in uv_packages:
if uv_max:
uv_max = max(package['uv'], uv_max)
else:
uv_max = package['uv']
# insert and save uv index model objects
for package in uv_packages:
ins_data = {
'file_path': ipath,
'filename': filename,
'dataset_id': dataset,
'dataset_level': level,
'dataset_form': form,
'station_id': station_id,
'station_type': station_type,
'country_id': country,
'instrument_id': instrument_id,
'instrument_name': instrument_name,
'gaw_id': gaw_id,
'solar_zenith_angle': package['zen_angle'],
'timestamp_date': timestamp_date,
'observation_date': package['date'],
'observation_time': package['time'],
'observation_utcoffset': package['utcoffset'],
'uv_index': package['uv'],
'uv_daily_max': uv_max,
'uv_index_qa': package['qa'],
'x': instrument_longitude,
'y': instrument_latitude,
'z': instrument_height,
}
uv_object = UVIndex(ins_data)
registry_.save(uv_object)
LOGGER.debug('Done execute().')
def compute_uv_index(ipath, extcsv, dataset, station,
instrument_name, country,
formula_lookup, max_index=1):
"""
Compute UV index
"""
LOGGER.debug('Executing compute_uv_index()...')
uv_packages = []
if all([
dataset.lower() == 'spectral',
any([
'biospherical' in instrument_name.lower(),
'brewer' in instrument_name.lower()
])
]):
# get formula
try:
formula = (formula_lookup[station]
[instrument_name.lower()]
['GLOBAL_SUMMARY'])
except KeyError:
formula = (formula_lookup[station]
[instrument_name.lower()]
['GLOBAL_SUMMARY_NSF'])
# Some spectral files are missing a TIMESTAMP for each payload;
# get and store the first Date
common_date = None
for index in range(1, max_index + 1):
package = {
'uv': None,
'date': None,
'time': None,
'utcoffset': None,
'zen_angle': None,
'qa': None
}
if index == 1:
timestamp_t = 'TIMESTAMP'
global_summary_t = 'GLOBAL_SUMMARY'
global_summary_nsf_t = 'GLOBAL_SUMMARY_NSF'
else:
timestamp_t = '_'.join(['TIMESTAMP', str(index)])
global_summary_t = '_'.join(['GLOBAL_SUMMARY', str(index)])
global_summary_nsf_t = '_'.join(['GLOBAL_SUMMARY_NSF',
str(index)])
# common spectral fields
try:
date = extcsv.extcsv[timestamp_t]['Date'][0]
if index == 1:
common_date = date
if any([
date is None,
date == ''
]):
# for stations without TIMESTAMP per GLOBAL_SUMMARY
date = common_date
time = extcsv.extcsv[timestamp_t]['Time'][0]
if any([
time is None,
time == ''
]):
# for stations without TIMESTAMP per GLOBAL_SUMMARY
time = extcsv.extcsv[global_summary_t]['Time'][0]
utcoffset = extcsv.extcsv[timestamp_t]['UTCOffset'][0]
except Exception as err:
msg = 'Unable to get value from file {}: {}'.format(
ipath, err)
LOGGER.error(msg)
pass
if instrument_name.lower() == 'biospherical': # available in file
try:
uv = extcsv.extcsv[global_summary_nsf_t]['UVIndex'][0]
try:
uv = float(uv)
except ValueError as err:
msg = ('Unable to make UVIndex: {} value into a float.'
' Time: {}, file: {}: {}'.format(uv, time,
ipath, err))
LOGGER.error(msg)
pass
except Exception as err:
msg = ('Unable to get {}.UVIndex'
' from file: {}. Time: {}: {}'.format(
global_summary_nsf_t, ipath, time, err))
LOGGER.error(msg)
pass
try:
zen_angle = extcsv.extcsv[global_summary_nsf_t]['SZA'][0]
except Exception as err:
msg = ('Unable to get {}.SZA from file {}: {}'.format(
global_summary_nsf_t, ipath, err))
LOGGER.error(msg)
pass
if instrument_name.lower() == 'brewer':
try:
intcie = extcsv.extcsv[global_summary_t]['IntCIE'][0]
# convert sci not to float
try:
intcie_f = float(intcie)
except Exception as err:
msg = ('Unable to convert to float intcie:'
' {}. File: {}. Time: {}: {}'.format(
intcie, ipath, time, err))
LOGGER.error(msg)
continue
# compute
if '*' in formula:
uv = intcie_f * 25
elif '/' in formula:
uv = intcie_f / 40
else:
msg = 'Unknown formula: {}'.format(formula)
LOGGER.error(msg)
continue
try:
zen_angle = \
extcsv.extcsv[global_summary_t]['ZenAngle'][0]
except Exception as err:
msg = ('Unable to get {}.ZenAngle from file: {}'
'Time: {}: {}'.format(
global_summary_t, ipath, time, err))
LOGGER.error(msg)
pass
except Exception as err:
msg = ('Unable to get {}.IntCIE from file: {}. Time: {}.'
': {}'.format(global_summary_t, ipath, time, err))
LOGGER.error(msg)
continue
qa_result = qa(country, uv)
package['uv'] = uv
package['date'] = date
package['time'] = time
package['utcoffset'] = utcoffset
package['zen_angle'] = zen_angle
package['qa'] = qa_result
uv_packages.append(package)
if
example. So, even though `_Subterm` (the analogue of one of my
# tuples of CategoricalCodings) is set based, it's possible that this is
# used to recover the original order.
# TODO: Would it be clearer or more efficient to use OrderedSet rather
# than tuple here? I'm not sure.
def absorb(t1, t2):
assert type(t1) == tuple
assert all(type(p) == CategoricalCoding for p in t1)
assert type(t2) == tuple
assert all(type(p) == CategoricalCoding for p in t2)
s1 = set(t1)
s2 = set(t2)
if s2.issubset(s1) and len(s1) - len(s2) == 1:
diff = s1.difference(s2)
assert len(diff) == 1
extra_factor = list(diff)[0]
if extra_factor.reduced:
factor = CategoricalCoding(extra_factor.factor, False)
return tuple((factor if f == extra_factor else f) for f in t1)
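# e.g. absorb((a+, b-), (a+,)) == (a+, b+): since the lower-order term
# (a+,) is already present, the reduced factor b- is promoted to the
# full coding b+. (Sketch in the repr notation defined below.)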
def simplify_one(termcoding):
assert type(termcoding) == list
assert all(type(t) == tuple and all(type(p) == CategoricalCoding for p in t) for t in termcoding)
for i, j in itertools.permutations(range(len(termcoding)), 2):
newterm = absorb(termcoding[i], termcoding[j])
if newterm:
out = termcoding[:]
out[i] = newterm # Replace with absorbing interaction.
del out[j] # Remove absorbed interaction.
return out
def simplify(termcoding):
assert type(termcoding) == list
assert all(type(t) == tuple and all(type(p) == CategoricalCoding for p in t) for t in termcoding)
while True:
maybe_termcoding = simplify_one(termcoding)
if maybe_termcoding is None:
return termcoding # We're done.
termcoding = maybe_termcoding
# all_previous([['a'], ['b','c'], ['d']])
# == [{}, {'a'}, {'a','b','c'}]
def all_previous(xss):
if len(xss) == 0:
return []
else:
return [set()] + [set(xss[0]).union(xs) for xs in all_previous(xss[1:])]
# This is an attempt to implement the algorithm described here:
# https://patsy.readthedocs.io/en/latest/formulas.html#technical-details
def code_categorical_terms(terms):
# It is assumed that each element of `terms` describes an
# interaction between zero or more categorical factors.
decomposed = [decompose(t) for t in terms]
non_redundant = [[t for t in term if t not in previous]
for term, previous in zip(decomposed, all_previous(decomposed))]
return [simplify(t) for t in non_redundant]
def partition(pred, iterable):
t1, t2 = itertools.tee(iterable)
return list(itertools.filterfalse(pred, t1)), list(filter(pred, t2))
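# e.g. partition(lambda x: x % 2 == 0, [1, 2, 3, 4])
# == ([1, 3], [2, 4])
# i.e. (non-matching items, matching items).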
CategoricalCoding = namedtuple('CategoricalCoding', 'factor reduced')
CategoricalCoding.__repr__ = lambda self: '{}{}'.format(self.factor, '-' if self.reduced else '+')
NumericCoding = namedtuple('NumericCoding', ['factor'])
NumericCoding.__repr__ = lambda self: self.factor
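# e.g. repr(CategoricalCoding('a', True)) == 'a-' (reduced-rank coding),
# repr(CategoricalCoding('a', False)) == 'a+' (full-rank coding), and
# repr(NumericCoding('x')) == 'x'.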
# Codes a group of terms that all share a common set of numeric factors.
# Returns a list of coded interactions.
def code_group_of_terms(terms, shared_numeric_factors):
assert type(terms) == list
assert all(type(term) == Term for term in terms)
assert type(shared_numeric_factors) == OrderedSet
# It's also the case that each term should contain no numeric
# factors not mentions in `shared_numeric_factors`, but that is
# not checked here.
assert all(all((factor in term.factors) for factor in shared_numeric_factors) for term in terms)
def drop_numeric_factors(term):
factors = [f for f in term.factors if f not in shared_numeric_factors]
return Term(OrderedSet(*factors))
categorical_terms = [drop_numeric_factors(term) for term in terms]
codings_for_terms = code_categorical_terms(categorical_terms)
num_codings_dict = {f: NumericCoding(f) for f in shared_numeric_factors}
# This adds codings for the shared numeric factors to the coding
# of a categorical interaction, respecting the factor order in the
# source term.
#
# e.g. term = Term(<a,x,b>)
# coding = (b+,)
# Returns:
# (x,b+)
# (Assuming shared numeric factors is ['x'].)
#
def extend_with_numeric_factors(term, coding):
cat_codings_dict = {c.factor: c for c in coding}
# This gives us a dictionary that maps from factor names
# (factors in coding U shared numeric factors) to codings
# (e.g. CategoricalCoding, NumericCoding).
codings_dict = dict(cat_codings_dict, **num_codings_dict)
# We then grab all of these codings following the factor order
# in the term. (Note that some factors in the term may not
# appear in the coding.)
out = [codings_dict[f] for f in term.factors if f in codings_dict]
assert len(out) == len(codings_dict)
return out
assert len(terms) == len(codings_for_terms) # complain if zip will drop things
return join([[extend_with_numeric_factors(term, coding) for coding in codings]
for (term, codings) in zip(terms, codings_for_terms)])
# [('a', 100), ('b', 200), ('a', 300)] =>
# {'a': [100, 300], 'b': [200]}
def group(pairs):
assert type(pairs) == list
assert all(type(pair) == tuple and len(pair) == 2 for pair in pairs)
# Remember insertion order. i.e. The returned dictionary captures
# the order in which the groups were first encountered in the
# input list.
out = OrderedDict()
for (k, v) in pairs:
if k not in out:
out[k] = []
out[k].append(v)
return out
# Partition terms by the numeric factors they contain, and sort the
# resulting groups.
def partition_terms(terms, metadata):
assert type(terms) == OrderedSet
assert type(metadata) == Metadata
def numeric_factors(term):
factors = [f for f in term.factors if is_numeric_col(metadata.column(f))]
return OrderedSet(*factors)
# The idea here is to store the full term (including the numeric
# factors) as a way of remembering the order in which the numeric
# and categorical factors originally appeared. I think Patsy does
# something like this.
groups = group([(numeric_factors(term), term) for term in terms])
# Sort the groups. First comes the group containing no numeric
# factors. The remaining groups appear in the order in which a
# term containing exactly those numeric factors associated with
# the group first appears in `terms`. (The latter is guaranteed by
# the fact that `group` is order aware.)
empty_set = OrderedSet()
first, rest = partition(lambda kv: kv[0] != empty_set, groups.items())
return first + rest
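# Illustrative sketch (assumed formula): for the terms of '1 + a + x + a:x'
# with 'x' numeric, `group` yields roughly
# {OrderedSet(): [1, a], OrderedSet('x'): [x, a:x]}, and partition_terms
# returns the numeric-free group first:
# [(OrderedSet(), [1, a]), (OrderedSet('x'), [x, a:x])].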
def code_lengths(contrasts):
assert type(contrasts) == dict
return {k: mat.shape[1] for k, mat in contrasts.items()}
# Build a simple design matrix (as a numpy array) from columns of a
# pandas data frame.
# TODO: This could be generalised to work with a wider range of data
# representation. Rather than expecting a pandas dataframe, it could
# take a dictionary that gives access to the columns (iterables full
# of floats, ints, or level values?) and the existing dataframe
# metadata structure to describe the types of the columns, etc.
def designmatrix(terms, df, metadata, contrasts):
assert type(terms) == OrderedSet
coded_interactions = code_terms(terms, metadata)
product_cols = join(coded_interaction_to_product_cols(code, metadata, code_lengths(contrasts))
for code in coded_interactions)
N = len(df)
arrs = [execute_product_col(pcol, df, metadata, contrasts) for pcol in product_cols]
X = np.stack(arrs, axis=1) if arrs else np.empty((N, 0))
assert X.shape[0] == N
if X.shape[0] > 0 and X.shape[1] > 0 and np.linalg.matrix_rank(X) != X.shape[1]:
print('WARNING: Design matrix may not be full rank.')
return X
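# Minimal usage sketch (hypothetical helper names; the formula parser and
# Metadata construction are defined elsewhere in this code base):
#   terms = parse('y ~ 1 + a + x').terms   # assumed parser API
#   X = designmatrix(terms, df, metadata, contrasts={})
#   # X has one row per data frame row and one column per generated
#   # product column.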
# Take an ordered set of terms (e.g. from a formula) to a list of
# coded interactions.
# e.g. code_terms(parse('y ~ 1 + a:b').terms, metadata) => .?.
# Terms within a group are ordered by their order, i.e. the number of
# factors they contain.
def sort_by_order(terms):
return sorted(terms, key=lambda term: len(term.factors))
def code_terms(terms, metadata):
assert type(metadata) == Metadata
groups = partition_terms(terms, metadata)
return join(code_group_of_terms(sort_by_order(terms), shared_num_factors)
for shared_num_factors, terms in groups)
# TODO: Arg. checks for these named tuples?
IndicatorCol = namedtuple('IndicatorCol', ['factor', 'level'])
IndicatorCol.__repr__ = lambda self: 'I[{}={}]'.format(self.factor, self.level)
CustomCol = namedtuple('CustomCol', ['factor', 'index'])
CustomCol.__repr__ = lambda self: 'Custom({})[{}]'.format(self.factor, self.index)
NumericCol = namedtuple('NumericCol', ['factor'])
NumericCol.__repr__ = lambda self: 'Num({})'.format(self.factor)
# Represents the product of zero or more columns.
ProductCol = namedtuple('ProductCol', ['cols']) # `cols` is expected to be a list
def coded_interaction_to_product_cols(coded_interaction, metadata, code_lengths):
assert type(coded_interaction) == list
assert type(metadata) == Metadata
assert type(code_lengths) == dict
assert all(type(c) in [CategoricalCoding, NumericCoding] for c in coded_interaction)
cs, ns = partition(lambda cf: type(cf) == NumericCoding, coded_interaction)
def go(c):
# Patsy and R seem to differ in what they do here. This
# implementation is similar to Patsy. In contrast, in R the
# custom coding is only used when `c.reduced == True`.
if c.factor in code_lengths:
# Custom coding.
code_length = code_lengths[c.factor]
assert type(code_length) == int
return [CustomCol(c.factor, i) for i in range(code_length)]
else:
# Default coding.
all_levels = metadata.column(c.factor).levels
levels = all_levels[1:] if c.reduced else all_levels
return [IndicatorCol(c.factor, level) for level in levels]
interactions = product([go(c) for c in cs])
ncols_dict = {n.factor: NumericCol(n.factor) for n in ns}
def extend_with_numeric_cols(ccols):
ccols_dict = {ccol.factor: ccol for ccol in ccols}
cols_dict = dict(ccols_dict, **ncols_dict)
# Make a list of both the indicator and numeric columns,
# ordered by the factor order in the coded interaction given
# as input.
out = [cols_dict[ci.factor] for ci in coded_interaction]
assert len(out) == len(coded_interaction)
return out
return [ProductCol(extend_with_numeric_cols(ccols)) for ccols in interactions]
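# Illustrative sketch (assumed levels): for a coded interaction [a-, x] where
# column 'a' has levels ['a0', 'a1', 'a2'] and no custom contrast is supplied,
# the reduced coding drops the first level, giving
#   [ProductCol([I[a=a1], Num(x)]), ProductCol([I[a=a2], Num(x)])]
# (written using the repr forms defined above).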
def product_col_to_coef_name(product_col):
assert type(product_col) == ProductCol
# TODO: I do similar dispatching elsewhere. It would be more
[ basestring, 'None' ], False ],
'max_transfer_rate': [ max_transfer_rate, 'max-transfer-rate', [ int, 'None' ], False ],
'clean_up_failure': [ clean_up_failure, 'clean-up-failure', [ bool, 'None' ], False ],
'destination_cluster': [ destination_cluster, 'destination-cluster', [ basestring, 'None' ], False ],
}, {
'result-error-message': [ basestring, False ],
'result-jobid': [ int, False ],
'result-error-code': [ int, False ],
'result-status': [ basestring, False ],
} )
def snapmirror_initialize_ls_set(self, source_cluster=None, source_vserver=None, source_location=None, source_volume=None):
"""
The snapmirror-initialize-ls-set API performs the initial manual
update of a set of load-sharing mirrors. This API is usually used
after the snapmirror-create API is used to create a SnapMirror
relationship for each of the destination volumes in the set of
load-sharing mirrors.
You must specify the source endpoint when using
snapmirror-initialize-ls-set.
Data and Snapshot copies are transferred from the source volume
to all up-to-date destination volumes in the set of load-sharing
mirrors.
Use the snapmirror-initialize API to add and initialize a new
destination volume to an existing set of load-sharing mirrors.
A job will be spawned to operate on the snapmirror and the job id
will be returned.
The progress of the job can be tracked using the job APIs.
:param source_cluster: Specifies the source cluster of the SnapMirror relationship. The
source Vserver and source volume must also be specified if using
this parameter. This parameter is supported only in cluster
context.
:param source_vserver: Specifies the source Vserver of the SnapMirror relationship. The
source cluster and source volume must also be specified if using
this parameter.
:param source_location: Specifies the source endpoint of the SnapMirror relationship.
When specifying a source endpoint, you must use either the source
location, or the source cluster, source Vserver, and source
volume.
:param source_volume: Specifies the source volume of the SnapMirror relationship. The
source cluster and source Vserver must also be specified if using
this parameter. This parameter may be optional if executed
outside cluster context.
"""
return self.request( "snapmirror-initialize-ls-set", {
'source_cluster': [ source_cluster, 'source-cluster', [ basestring, 'None' ], False ],
'source_vserver': [ source_vserver, 'source-vserver', [ basestring, 'None' ], False ],
'source_location': [ source_location, 'source-location', [ basestring, 'None' ], False ],
'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],
}, {
'result-error-message': [ basestring, False ],
'result-jobid': [ int, False ],
'result-error-code': [ int, False ],
'result-status': [ basestring, False ],
} )
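# Illustrative call (hypothetical server handle and endpoint name):
#   result = server.snapmirror_initialize_ls_set(source_location='vs1:src_vol')
# The fields listed in the output specification above ('result-jobid',
# 'result-status', ...) can then be read from the returned object.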
def snapmirror_get_total_records(self):
"""
Obtain the total number of SnapMirror relationships. This is a
point in time estimate and may be different on subsequent calls.
"""
return self.request( "snapmirror-get-total-records", {
}, {
'count': [ int, False ],
} )
def snapmirror_quiesce_iter(self, query, max_failure_count=None, max_records=None, return_success_list=None, tag=None, continue_on_failure=None, return_failure_list=None):
"""
Disables future transfers for one or more SnapMirror
relationships.
:param query: If operating on a specific snapmirror, this input element must
specify all keys.
If operating on snapmirror objects based on query, this input
element must specify a query.
:param max_failure_count: When allowing failures ('continue-on-failure' is set to true),
then this input element may be provided to limit the number of
failed operations before the server gives up and returns.
If set, the API will continue with the next matching snapmirror
even when the operation on a previous matching snapmirror fails,
and do so until the total number of objects failed to be operated
on reaches the maximum specified.
If set to the maximum or not provided, then there will be no
limit on the number of failed operations.
Only applicable if 'continue-on-failure' is set to true.
Default: 2^32-1
:param max_records: The maximum number of snapmirror objects to be operated on
in this call.
Default: 20
:param return_success_list: If set to true, the API will return the list of snapmirror
objects (just keys) that were successfully operated on.
If set to false, the list of snapmirror objects operated on will
not be returned.
Default: true
:param tag: Specify the tag from the last call.
It is usually not specified for the first call. For subsequent
calls, copy values from the next-tag obtained from the previous
call.
:param continue_on_failure: This input element is useful when multiple snapmirror objects
match a given query.
If set to true, the API will continue with the next matching
snapmirror even when the operation fails for the snapmirror.
If set to false, the API will return on the first failure.
Default: false
:param return_failure_list: If set to true, the API will return the list of snapmirror
objects (just keys) that were not operated on due to some error.
If set to false, the list of snapmirror objects not operated on
will not be returned.
Default: true
"""
return self.request( "snapmirror-quiesce-iter", {
'max_failure_count': [ max_failure_count, 'max-failure-count', [ int, 'None' ], False ],
'max_records': max_records,
'return_success_list': [ return_success_list, 'return-success-list', [ bool, 'None' ], False ],
'tag': tag,
'continue_on_failure': [ continue_on_failure, 'continue-on-failure', [ bool, 'None' ], False ],
'return_failure_list': [ return_failure_list, 'return-failure-list', [ bool, 'None' ], False ],
'query': [ query, 'query', [ SnapmirrorInfo, 'None' ], False ],
}, {
'num-succeeded': [ int, False ],
'num-failed': [ int, False ],
'success-list': [ SnapmirrorQuiesceIterInfo, True ],
'failure-list': [ SnapmirrorQuiesceIterInfo, True ],
} )
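# Illustrative call (hypothetical query construction; the exact SnapmirrorInfo
# attribute names are assumed):
#   query = SnapmirrorInfo()
#   query.destination_location = 'vs1:dst_vol'
#   out = server.snapmirror_quiesce_iter(query, continue_on_failure=True)
# 'num-succeeded', 'num-failed' and the success/failure lists described above
# are then available on the returned object.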
def snapmirror_snapshot_owner_get_snapshots(self, vserver, volume):
"""
List all Snapshot copies that are preserved for a SnapMirror
mirror-to-vault cascade configuration.
:param vserver: Vserver Name
:param volume: Volume Name
"""
return self.request( "snapmirror-snapshot-owner-get-snapshots", {
'vserver': [ vserver, 'vserver', [ basestring, 'vserver-name' ], False ],
'volume': [ volume, 'volume', [ basestring, 'volume-name' ], False ],
}, {
'snapshots': [ basestring, True ],
} )
def snapmirror_break_iter(self, query, max_failure_count=None, max_records=None, return_success_list=None, tag=None, continue_on_failure=None, return_failure_list=None):
"""
The snapmirror-break-iter API breaks one or more SnapMirror
relationships.
:param query: If operating on a specific snapmirror, this input element must
specify all keys.
If operating on snapmirror objects based on query, this input
element must specify a query.
:param max_failure_count: When allowing failures ('continue-on-failure' is set to true),
then this input element may be provided to limit the number of
failed operations before the server gives up and returns.
If set, the API will continue with the next matching snapmirror
even when the operation on a previous matching snapmirror fails,
and do so until the total number of objects failed to be operated
on reaches the maximum specified.
If set to the maximum or not provided, then there will be no
limit on the number of failed operations.
Only applicable if 'continue-on-failure' is set to true.
Default: 2^32-1
:param max_records: The maximum number of snapmirror objects to be operated on
in this call.
Default: 20
:param return_success_list: If set to true, the API will return the list of snapmirror
objects (just keys) that were successfully operated on.
If set to false, the list of snapmirror objects operated on will
not be returned.
Default: true
:param tag: Specify the tag from the last call.
It is usually not specified for the first call. For subsequent
calls, copy values from the next-tag obtained from the previous
call.
:param continue_on_failure: This input element is useful when multiple snapmirror objects
match a given query.
If set to true, the API will continue with the next matching
snapmirror even when the operation fails for the snapmirror.
If set to false, the API will return on the first failure.
Default: false
:param return_failure_list: If set to true, the API will return the list of snapmirror
objects (just keys) that were not operated on due to some error.
If set to false, the list of snapmirror objects not operated on
will not be returned.
Default: true
"""
return self.request( "snapmirror-break-iter", {
'max_failure_count': [ max_failure_count, 'max-failure-count', [ int, 'None' ], False ],
'max_records': max_records,
'return_success_list': [ return_success_list, 'return-success-list', [ bool, 'None' ], False ],
'tag': tag,
'continue_on_failure': [ continue_on_failure, 'continue-on-failure', [ bool, 'None' ], False ],
and os is None):
return 0, 1
elif (ss is None and os is not None):
return 1, 0
s0, s1, o0, o1 = s[0], s[1], o[0], o[1]
s1d, o1d = s1.isdigit(), o1.isdigit()
if 'H' == s0 == o0:
if (s1 == o1) or (s1d and o1d):
return s, o
elif s1d:
return 0, 1
elif o1d:
return 1, 0
else:
return (self._greek_sort_keys[s1],
self._greek_sort_keys[o1])
return s, o # raise exception?
return 1, 1
def __eq__(self, other):
"""Test for equality."""
if isinstance(other, type(self)):
return self.akl == other.akl
else:
return NotImplemented
def __ne__(self, other):
"""Test for inequality."""
if isinstance(other, type(self)):
return self.akl != other.akl
else:
return NotImplemented
def __gt__(self, other):
"""Test greater than."""
if isinstance(other, type(self)):
rslt = self._cmp(other)
return rslt[0] > rslt[1]
else:
return NotImplemented
def __ge__(self, other):
"""Test greater or equal."""
if isinstance(other, type(self)):
rslt = self._cmp(other)
return rslt[0] >= rslt[1]
else:
return NotImplemented
def __lt__(self, other):
"""Test less than."""
if isinstance(other, type(self)):
rslt = self._cmp(other)
return rslt[0] < rslt[1]
else:
return NotImplemented
def __le__(self, other):
"""Test less or equal."""
if isinstance(other, type(self)):
rslt = self._cmp(other)
return rslt[0] <= rslt[1]
else:
return NotImplemented
class Edron(object):
"""Base class for Hedron and Dihedron classes.
Supports rich comparison based on lists of AtomKeys.
Attributes
----------
aks : tuple
3 (hedron) or 4 (dihedron) AtomKeys defining this di/hedron
id : str
':'-joined string of AtomKeys for this di/hedron
atoms_updated : bool
indicates hedron local atom_coords reflect current di/hedron angle and
length values in hedron local coordinate space
dh_class : str
sequence of atoms (no position or residue) comprising di/hedron
for statistics
rdh_class : str
sequence of residue, atoms comprising di/hedron for statistics
edron_re : compiled regex (Class Attribute)
A compiled regular expression matching string IDs for Hedron
and Dihedron objects
Methods
-------
gen_key([AtomKey, ...] or AtomKey, ...) (Static Method)
generate a ':'-joined string of AtomKey Ids
gen_acs(atom_coords)
generate tuple of atom coords for keys in self.aks
is_backbone()
Return True if all aks atoms are N, CA, C, O or H
"""
# regular expression to capture hedron and dihedron specifications, as in
# .pic files
edron_re = re.compile(
# pdbid and chain id
r'^(?P<pdbid>\w+)?\s(?P<chn>[\w|\s])?\s'
# 3 atom specifiers for hedron
r'(?P<a1>[\w\-\.]+):(?P<a2>[\w\-\.]+):(?P<a3>[\w\-\.]+)'
# 4th atom specifier for dihedron
r'(:(?P<a4>[\w\-\.]+))?'
r'\s+'
# len-angle-len for hedron
r'(((?P<len1>\S+)\s+(?P<angle2>\S+)\s+(?P<len3>\S+)\s*$)|'
# dihedral angle for dihedron
r'((?P<dihedral1>\S+)\s*$))')
@staticmethod
def gen_key(lst):
"""Generate string of ':'-joined AtomKey strings from input.
:param lst: list of AtomKey objects or id strings
"""
if isinstance(lst[0], AtomKey):
return ':'.join(ak.id for ak in lst)
else:
return ':'.join(lst)
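# Illustrative example (arbitrary id strings used only for illustration):
#   Edron.gen_key(['1_A_N', '1_A_CA', '1_A_C']) == '1_A_N:1_A_CA:1_A_C'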
def __init__(self, *args, **kwargs):
"""Initialize Edron with sequence of AtomKeys.
Acceptable input:
[ atom key, ... ] : list of AtomKeys
atom key, ... : sequence of AtomKeys as args
{'a1': str, 'a2': str, ... } : dict of AtomKeys as 'a1', 'a2' ...
"""
aks = []
for arg in args:
if isinstance(arg, list):
aks = arg
elif isinstance(arg, tuple):
aks = list(arg)
else:
if arg is not None:
aks.append(arg)
if [] == aks:
aks = [kwargs['a1'], kwargs['a2'], kwargs['a3']]
try:
if kwargs['a4'] is not None:
aks.append(kwargs['a4'])
except KeyError:
pass
# if args are atom key strings instead of AtomKeys
for i in range(len(aks)):
if not isinstance(aks[i], AtomKey):
aks[i] = AtomKey(aks[i])
self.aks = tuple(aks)
self.id = Edron.gen_key(aks)
self._hash = hash(self.aks)
# flag indicating that atom coordinates are up to date
# (do not need to be recalculated from dihedral1)
self.atoms_updated = False
# no residue or position, just atoms
self.dh_class = ''
# same but residue specific
self.rdh_class = ''
atmNdx = AtomKey.fields.atm
resNdx = AtomKey.fields.resname
for ak in aks:
akl = ak.akl
self.dh_class += akl[atmNdx]
self.rdh_class += akl[resNdx] + akl[atmNdx]
def gen_acs(self, atom_coords):
"""Generate tuple of atom coord arrays for keys in self.aks.
:param atom_coords: AtomKey dict of atom coords for residue
:raises: MissingAtomError any atoms in self.aks missing coordinates
"""
aks = self.aks
acs = []
estr = ''
for ak in aks:
ac = atom_coords[ak]
if ac is None:
estr += ak.id + ' '
else:
acs.append(ac)
if estr != '':
raise MissingAtomError(
'%s missing coordinates for %s' % (self, estr))
return tuple(acs)
def is_backbone(self):
"""Report True for contains only N, C, CA, O, H atoms."""
atmNdx = AtomKey.fields.atm
if all(atm in ('N', 'C', 'CA', 'O', 'H')
for atm in (ak.akl[atmNdx] for ak in self.aks)):
return True
return False
def __repr__(self):
"""Tuple of AtomKeys is default repr string."""
return str(self.aks)
def __hash__(self):
"""Hash calculated at init from aks tuple."""
return self._hash
def _cmp(self, other):
"""Comparison function ranking self vs. other."""
for ak_s, ak_o in zip(self.aks, other.aks):
if ak_s != ak_o:
return ak_s, ak_o
return 1, 1
def __eq__(self, other):
"""Test for equality."""
if isinstance(other, type(self)):
return self.id == other.id
else:
return NotImplemented
def __ne__(self, other):
"""Test for inequality."""
if isinstance(other, type(self)):
return self.id != other.id
else:
return NotImplemented
def __gt__(self, other):
"""Test greater than."""
if isinstance(other, type(self)):
rslt = self._cmp(other)
return rslt[0] > rslt[1]
else:
return NotImplemented
def __ge__(self, other):
"""Test greater or equal."""
if isinstance(other, type(self)):
rslt = self._cmp(other)
return rslt[0] >= rslt[1]
else:
return NotImplemented
def __lt__(self, other):
"""Test less than."""
if isinstance(other, type(self)):
rslt = self._cmp(other)
return rslt[0] < rslt[1]
else:
return NotImplemented
def __le__(self, other):
"""Test less or equal."""
if isinstance(other, type(self)):
rslt = self._cmp(other)
return rslt[0] <= rslt[1]
else:
return NotImplemented
class Hedron(Edron):
"""Class to represent three joined atoms forming a plane.
Contains atom coordinates in local coordinate space, central atom
at origin. Stored in two orientations, with the 3rd (forward) or
first (reversed) atom on the +Z axis.
Attributes
----------
len1 : float
distance between 1st and 2nd atom
angle2 : float
angle (degrees) formed by 3 atoms
len3 : float
distance between 2nd and 3rd atoms
atoms : tuple[3] of numpy arrays [4][1]
3 atoms comprising hedron, 1st on XZ, 2nd at origin, 3rd on +Z
atomsR : tuple[3] of numpy arrays [4][1]
atoms reversed, 1st on +Z, 2nd at origin, 3rd on XZ plane
Methods
-------
init_pos()
Create hedron space atom coordinate numpy arrays.
hedron_from_atoms()
Compute length, angle, length for hedron from IC_Residue atom coords
set_angle()
update angle2 with supplied value
"""
def __init__(self, *args, **kwargs):
"""Initialize Hedron with sequence of AtomKeys, kwargs.
Acceptable input:
As for Edron, plus optional 'len1', 'angle2', 'len3'
keyworded values.
"""
super().__init__(*args, **kwargs)
# print('initialising', self.id)
# 3 matrices specifying hedron space coordinates of constituent atoms,
# initially atom3 on +Z axis
self.atoms = None
# 3 matrices, hedron space coordinates, reversed order
# initially atom1 on +Z axis
self.atomsR = None
if 'len1' in kwargs:
# distance between 1st and 2nd atom
self.len1 = float(kwargs['len1'])
# angle formed between 3 atoms
self.angle2 = float(kwargs['angle2'])
# distance between 2nd and 3rd atoms
self.len3 = float(kwargs['len3'])
self.init_pos()
else:
self.len1 = None
self.angle2 = None
self.len3 = None
# print(self)
def __str__(self):
"""Print string for Hedron object."""
return ('3-' + self.id + ' ' + self.rdh_class + ' ' + str(self.len1)
+ ' ' + str(self.angle2) + ' ' + str(self.len3))
def init_pos(self):
"""Initialize Hedron by creating atom coordinate numpy arrays."""
# build hedron with a2 on +Z axis, a1 at origin,
# a0 in -Z at angle in the XZ plane
atoms = []
for _ in range(3):
# note this initializes a1 to 0,0,0 origin
atoms.append(numpy.array([[0], [0], [0], [1]],
dtype=numpy.float64)) # 4x1 array
# supplementary angle in radians: angles which add to 180 degrees are supplementary
sar = numpy.deg2rad(180.0 - self.angle2)
# a2 is len3 up from a1 on the Z axis, X=Y=0
atoms[2][2][0] = self.len3
# a0 X is sin( sar ) * len1
atoms[0][0][0] = numpy.sin(sar) * self.len1
# a0 Z is -(cos( sar ) * len1)
# (assume angle2 always obtuse, so a0 is in -Z)
atoms[0][2][0] = - (numpy.cos(sar) * self.len1)
self.atoms = tuple(atoms)
atomsR = []
# same again but 'reversed' : a0 on Z axis, a1 at origin, a2 in -Z
for _ in range(3):
#!/usr/bin/env python
# egs/wsj/s5/steps/nnet3/dot/nnet3_to_dot.py (from the kaldi repository)
# Copyright 2015 Johns Hopkins University (Author: <NAME>)
# Apache 2.0
# script to convert nnet3-am-info output to a dot graph
# we're using python 3.x style print but want it to work in python 2.x too
from __future__ import print_function
import re
import os
import argparse
import sys
import math
import warnings
import descriptor_parser
import pprint
node_attributes = {
'input-node':{
'shape':'oval'
},
'output-node':{
'shape':'oval'
},
'NaturalGradientAffineComponent':{
'color':'lightgrey',
'shape':'box',
'style':'filled'
},
'NaturalGradientPerElementScaleComponent':{
'color':'lightpink',
'shape':'box',
'style':'filled'
},
'ConvolutionComponent':{
'color':'lightpink',
'shape':'box',
'style':'filled'
},
'FixedScaleComponent':{
'color':'blueviolet',
'shape':'box',
'style':'filled'
},
'FixedAffineComponent':{
'color':'darkolivegreen1',
'shape':'box',
'style':'filled'
},
'SigmoidComponent':{
'color':'bisque',
'shape':'rectangle',
'style':'filled'
},
'TanhComponent':{
'color':'bisque',
'shape':'rectangle',
'style':'filled'
},
'NormalizeComponent':{
'color':'aquamarine',
'shape':'rectangle',
'style':'filled'
},
'RectifiedLinearComponent':{
'color':'bisque',
'shape':'rectangle',
'style':'filled'
},
'ClipGradientComponent':{
'color':'bisque',
'shape':'rectangle',
'style':'filled'
},
'ElementwiseProductComponent':{
'color':'green',
'shape':'rectangle',
'style':'filled'
},
'LogSoftmaxComponent':{
'color':'cyan',
'shape':'rectangle',
'style':'filled'
}
}
def GetDotNodeName(name_string, is_component = False):
# this function is required as dot does not allow all the component names
# allowed by nnet3.
# Identified incompatibilities:
# 1. dot does not allow hyphen(-) and dot(.) in names
# 2. nnet3 names can be shared among components and component nodes,
#    but dot does not allow duplicate names
#
node_name_string = re.sub("-", "hyphen", name_string)
node_name_string = re.sub(r"\.", "_dot_", node_name_string)
if is_component:
node_name_string = node_name_string.strip() + "_component"
return {"label":name_string, "node":node_name_string}
def ProcessAppendDescriptor(segment, parent_node_name, affix, edge_attributes = None):
dot_graph = []
names = []
desc_name = 'Append_{0}'.format(affix)
for i in range(len(segment['sub_segments'])):
sub_segment = segment['sub_segments'][i]
part_name = "{0}{1}{2}".format(desc_name, sub_segment['name'], i)
names.append("<{0}> part {1}".format(GetDotNodeName(part_name)['node'], i))
dot_graph += DescriptorSegmentToDot(sub_segment, "{0}:{1}".format(desc_name, part_name), desc_name)
part_index = len(segment['sub_segments'])
for i in range(len(segment['arguments'])):
part_name = "{0}{1}{2}".format(desc_name, segment['arguments'][i], part_index + i)
names.append("<{0}> part {1}".format(GetDotNodeName(part_name)['node'], part_index + i))
dot_graph.append("{0} -> {1}:{2}".format(GetDotNodeName(segment['arguments'][i])['node'], GetDotNodeName(desc_name)['node'], GetDotNodeName(part_name)['node']))
label = "|".join(names)
label = "{{"+label+"}|Append}"
dot_graph.append('{0} [shape=Mrecord, label="{1}"];'.format(GetDotNodeName(desc_name)['node'], label))
attr_string = ''
if edge_attributes is not None:
if 'label' in edge_attributes:
attr_string += " label={0} ".format(edge_attributes['label'])
if 'style' in edge_attributes:
attr_string += ' style={0} '.format(edge_attributes['style'])
dot_string = '{0} -> {1} [tailport=s]'.format(GetDotNodeName(desc_name)['node'], GetDotNodeName(parent_node_name)['node'])
if attr_string != '':
dot_string += ' [{0}] '.format(attr_string)
dot_graph.append(dot_string)
return dot_graph
def ProcessRoundDescriptor(segment, parent_node_name, affix, edge_attributes = None):
dot_graph = []
label = 'Round ({0})'.format(segment['arguments'][1])
style = None
if edge_attributes is not None:
if 'label' in edge_attributes:
label = "{0} {1}".format(edge_attributes['label'], label)
if 'style' in edge_attributes:
style = 'style={0}'.format(edge_attributes['style'])
attr_string = 'label="{0}"'.format(label)
if style is not None:
attr_string += ' {0}'.format(style)
dot_graph.append('{0}->{1} [ {2} ]'.format(GetDotNodeName(segment['arguments'][0])['node'],
GetDotNodeName(parent_node_name)['node'],
attr_string))
if segment['sub_segments']:
raise Exception("Round can just deal with forwarding descriptor, no sub-segments allowed")
return dot_graph
def ProcessOffsetDescriptor(segment, parent_node_name, affix, edge_attributes = None):
dot_graph = []
label = 'Offset ({0})'.format(segment['arguments'][1])
style = None
if edge_attributes is not None:
if 'label' in edge_attributes:
label = "{0} {1}".format(edge_attributes['label'], label)
if 'style' in edge_attributes:
style = 'style={0}'.format(edge_attributes['style'])
attr_string = 'label="{0}"'.format(label)
if style is not None:
attr_string += ' {0}'.format(style)
dot_graph.append('{0}->{1} [ {2} ]'.format(GetDotNodeName(segment['arguments'][0])['node'],
GetDotNodeName(parent_node_name)['node'],
attr_string))
if segment['sub_segments']:
raise Exception("Offset can just deal with forwarding descriptor, no sub-segments allowed")
return dot_graph
def ProcessSumDescriptor(segment, parent_node_name, affix, edge_attributes = None):
dot_graph = []
names = []
desc_name = 'Sum_{0}'.format(affix)
# create the sum node
for i in range(len(segment['sub_segments'])):
sub_segment = segment['sub_segments'][i]
part_name = "{0}{1}{2}".format(desc_name, sub_segment['name'], i)
names.append("<{0}> part {1}".format(GetDotNodeName(part_name)['node'], i))
dot_graph += DescriptorSegmentToDot(sub_segment, "{0}:{1}".format(desc_name, part_name), "{0}_{1}".format(desc_name, i))
# link the sum node parts to corresponding segments
part_index = len(segment['sub_segments'])
for i in range(len(segment['arguments'])):
part_name = "{0}{1}{2}".format(desc_name, segment['arguments'][i], part_index + i)
names.append("<{0}> part {1}".format(GetDotNodeName(part_name)['node'], part_index + i))
dot_graph.append("{0} -> {1}:{2}".format(GetDotNodeName(segment['arguments'][i])['node'], GetDotNodeName(desc_name)['node'], GetDotNodeName(part_name)['node']))
label = "|".join(names)
label = '{{'+label+'}|Sum}'
dot_graph.append('{0} [shape=Mrecord, label="{1}", color=red];'.format(GetDotNodeName(desc_name)['node'], label))
attr_string = ''
if edge_attributes is not None:
if 'label' in edge_attributes:
attr_string += " label={0} ".format(edge_attributes['label'])
if 'style' in edge_attributes:
attr_string += ' style={0} '.format(edge_attributes['style'])
dot_string = '{0} -> {1}'.format(GetDotNodeName(desc_name)['node'], GetDotNodeName(parent_node_name)['node'])
dot_string += ' [{0} tailport=s ] '.format(attr_string)
dot_graph.append(dot_string)
return dot_graph
def ProcessReplaceIndexDescriptor(segment, parent_node_name, affix, edge_attributes = None):
dot_graph = []
label = 'ReplaceIndex({0}, {1})'.format(segment['arguments'][1], segment['arguments'][2])
style = None
if edge_attributes is not None:
if 'label' in edge_attributes:
label = "{0} {1}".format(edge_attributes['label'], label)
if 'style' in edge_attributes:
style = 'style={0}'.format(edge_attributes['style'])
attr_string = 'label="{0}"'.format(label)
if style is not None:
attr_string += ' {0}'.format(style)
dot_graph.append('{0}->{1} [{2}]'.format(GetDotNodeName(segment['arguments'][0])['node'],
GetDotNodeName(parent_node_name)['node'],
attr_string))
if segment['sub_segments']:
raise Exception("ReplaceIndex can just deal with forwarding descriptor, no sub-segments allowed")
return dot_graph
def ProcessIfDefinedDescriptor(segment, parent_node_name, affix, edge_attributes = None):
# IfDefined adds attributes to the edges
if edge_attributes is not None:
raise Exception("edge_attributes was not None, this means an IfDefined descriptor was calling the current IfDefined descriptor. This is not allowed")
dot_graph = []
dot_graph.append('#ProcessIfDefinedDescriptor')
names = []
if segment['sub_segments']:
sub_segment = segment['sub_segments'][0]
dot_graph += DescriptorSegmentToDot(sub_segment, parent_node_name, parent_node_name, edge_attributes={'style':'dotted', 'label':'IfDefined'})
if segment['arguments']:
dot_graph.append('{0} -> {1} [style=dotted, label="IfDefined"]'.format(GetDotNodeName(segment['arguments'][0])['node'], GetDotNodeName(parent_node_name)['node']))
return dot_graph
def DescriptorSegmentToDot(segment, parent_node_name, affix, edge_attributes = None):
# segment is a dictionary which corresponds to a descriptor
dot_graph = []
if segment['name'] == "Append":
dot_graph += ProcessAppendDescriptor(segment, parent_node_name, affix, edge_attributes)
elif segment['name'] == "Offset":
dot_graph += ProcessOffsetDescriptor(segment, parent_node_name, affix, edge_attributes)
elif segment['name'] == "Sum":
dot_graph += ProcessSumDescriptor(segment, parent_node_name, affix, edge_attributes)
elif segment['name'] == "IfDefined":
dot_graph += ProcessIfDefinedDescriptor(segment, parent_node_name, affix, edge_attributes)
elif segment['name'] == "ReplaceIndex":
dot_graph += ProcessReplaceIndexDescriptor(segment, parent_node_name, affix, edge_attributes)
elif segment['name'] == "Round":
dot_graph += ProcessRoundDescriptor(segment, parent_node_name, affix, edge_attributes)
elif segment['name'] == "Scale":
pass
else:
raise Exception('Descriptor {0} is not recognized by this script. Please add a Process{0}Descriptor method'.format(segment['name']))
return dot_graph
def Nnet3DescriptorToDot(descriptor, parent_node_name):
dot_lines = []
[segments, arguments] = descriptor_parser.IdentifyNestedSegments(descriptor)
if segments:
for segment in segments:
dot_lines += DescriptorSegmentToDot(segment, parent_node_name, parent_node_name)
elif arguments:
assert(len(arguments) == 1)
dot_lines.append("{0} -> {1}".format(GetDotNodeName(arguments[0])['node'], GetDotNodeName(parent_node_name)['node']))
return dot_lines
def ParseNnet3String(string):
if re.search('^input-node|^component|^output-node|^component-node|^dim-range-node', string.strip()) is None:
return [None, None]
parts = string.split()
config_type = parts[0]
fields = []
prev_field = ''
for i in range(1, len(parts)):
if re.search('=', parts[i]) is None:
prev_field += ' '+parts[i]
else:
if not (prev_field.strip() == ''):
fields.append(prev_field)
sub_parts = parts[i].split('=')
if (len(sub_parts) != 2):
raise Exception('Malformed config line {0}'.format(string))
fields.append(sub_parts[0])
prev_field = sub_parts[1]
fields.append(prev_field)
parsed_string = {}
try:
while len(fields) > 0:
value = re.sub(',$', '', fields.pop().strip())
key = fields.pop()
parsed_string[key.strip()] = value.strip()
except IndexError:
raise Exception('Malformed config line {0}'.format(string))
return [config_type, parsed_string]
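# Illustrative example: ParseNnet3String(
#   'component-node name=L0_affine component=L0_affine input=input')
# returns ['component-node',
#          {'name': 'L0_affine', 'component': 'L0_affine', 'input': 'input'}]
# (all values are kept as strings).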
# sample component config line
# component name=L0_lda type=FixedAffineComponent, input-dim=300, output-dim=300, linear-params-stddev=0.00992724, bias-params-stddev=0.573973
def Nnet3ComponentToDot(component_config, component_attributes = None):
label = ''
if component_attributes is None:
component_attributes = component_config.keys()
attributes_to_print = set(component_attributes).intersection(list(component_config.keys()))
# process the known fields
for key in attributes_to_print:
if key in component_config:
label += '{0} = {1}\\n'.format(key, component_config[key])
attr_string = ''
try:
attributes = node_attributes[component_config['type']]
for key in attributes.keys():
attr_string += ' {0}={1} '.format(key, attributes[key])
except KeyError:
pass
return ['{0} [label="{1}" {2}]'.format(GetDotNodeName(component_config['name'], is_component = True)['node'], label, attr_string)]
# input-node name=input dim=40
def Nnet3InputToDot(parsed_config):
return ['{0} [ label="{1}\\ndim={2}"]'.format(GetDotNodeName(parsed_config['name'])['node'], parsed_config['name'], parsed_config['dim'] )]
# output-node name=output input=Final_log_softmax dim=3940 objective=linear
#output-node name=output input=Offset(Final_log_softmax, 5) dim=3940 objective=linear
def Nnet3OutputToDot(parsed_config):
dot_graph = []
dot_graph += Nnet3DescriptorToDot(parsed_config['input'], parsed_config['name'])
dot_graph.append('{0} [ label="{1}\\nobjective={2}"]'.format(GetDotNodeName(parsed_config['name'])['node'], parsed_config['name'], parsed_config['objective']))
return dot_graph
# dim-range-node name=Lstm1_r_t input-node=Lstm1_rp_t dim-offset=0 dim=256
def Nnet3DimrangeToDot(parsed_config):
dot_graph = []
dot_node = GetDotNodeName(parsed_config['name'])
dot_graph.append('{0} [shape=rectangle, label="{1}"]'.format(dot_node['node'], dot_node['label']))
dot_graph.append('{0} -> {1} [taillabel="dimrange({2}, {3})"]'.format(GetDotNodeName(parsed_config['input-node'])['node'],
GetDotNodeName(parsed_config['name'])['node'],
parsed_config['dim-offset'],
parsed_config['dim']))
return dot_graph
def Nnet3ComponentNodeToDot(parsed_config):
dot_graph = []
dot_graph += Nnet3DescriptorToDot(parsed_config['input'], parsed_config['name'])
dot_node = GetDotNodeName(parsed_config['name'])
dot_graph.append('{0} [ label="{1}", shape=box ]'.format(dot_node['node'], dot_node['label']))
dot_graph.append('{0} -> {1} [ weight=10 ]'.format(GetDotNodeName(parsed_config['component'], is_component = True)['node'],
GetDotNodeName(parsed_config['name'])['node']))
return dot_graph
def GroupConfigs(configs, node_prefixes = None):
if node_prefixes is None:
node_prefixes = []
# we make the assumption that nodes belonging to the same sub-graph have a
# common prefix.
grouped_configs = {}
for node_prefix in node_prefixes:
group = []
rest = []
for config in configs:
if re.search('^{0}'.format(node_prefix), config[1]['name']) is not None:
group.append(config)
else:
rest.append(config)
configs = rest
grouped_configs[node_prefix] = group
grouped_configs[None] = configs
return grouped_configs
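# Illustrative example (assumed prefix): GroupConfigs(configs, ['Lstm1'])
# puts every config whose 'name' starts with 'Lstm1' under the 'Lstm1' key
# (later rendered as a dot subgraph) and all remaining configs under the
# None key.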
def ParseConfigLines(lines, node_prefixes = None, component_attributes = None ):
if node_prefixes is None:
node_prefixes = []
config_lines = []
dot_graph=[]
configs = []
for line in lines:
config_type, parsed_config = ParseNnet3String(line)
if config_type is not None:
configs.append([config_type, parsed_config])
# process the config lines
grouped_configs = GroupConfigs(configs, node_prefixes)
for group in grouped_configs.keys():
configs = grouped_configs[group]
if not configs:
continue
if group is not None:
# subgraphs prefixed with cluster will be treated differently by
# dot
dot_graph.append('subgraph cluster_{0} '.format(group) + "{")
dot_graph.append('color=blue')
for config in configs:
config_type = config[0]
parsed_config = config[1]
if config_type is None:
continue
if config_type == 'input-node':
dot_graph += Nnet3InputToDot(parsed_config)
elif config_type == 'output-node':
dot_graph += Nnet3OutputToDot(parsed_config)
elif config_type == 'component-node':
dot_graph += Nnet3ComponentNodeToDot(parsed_config)
/ 5, (1 - 0) / 1]
},
{
'x_scaled': [-1., .2,
float('nan'),
float('nan')] # [(-1 - 0) / 1, (4 - 3) / 5]
}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'x_scaled': tf.io.FixedLenFeature([4], tf.float32)})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
def testMeanAndVar(self):
def analyzer_fn(inputs):
mean, var = analyzers._mean_and_var(inputs['x'])
return {
'mean': mean,
'var': var
}
# NOTE: We force 10 batches: data has 100 elements and we request a batch
# size of 10.
input_data = [{'x': [x]}
for x in range(1, 101)]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([1], tf.int64)
})
# The expected outputs are the mean and (population) variance of 1..100.
expected_outputs = {
'mean': np.float32(50.5),
'var': np.float32(833.25)
}
self.assertAnalyzerOutputs(
input_data,
input_metadata,
analyzer_fn,
expected_outputs,
desired_batch_size=10)
def testMeanAndVarPerKey(self):
def analyzer_fn(inputs):
key_vocab, mean, var = analyzers._mean_and_var_per_key(
inputs['x'], inputs['key'])
return {
'key_vocab': key_vocab,
'mean': mean,
'var': tf.round(100 * var) / 100.0
}
# NOTE: We force 10 batches: data has 100 elements and we request a batch
# size of 10.
input_data = [{'x': [x], 'key': 'a' if x < 50 else 'b'}
for x in range(1, 101)]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([1], tf.int64),
'key': tf.io.FixedLenFeature([], tf.string)
})
# Expected per-key means and (population) variances for key 'a' (x in 1..49)
# and key 'b' (x in 50..100).
expected_outputs = {
'key_vocab': np.array([b'a', b'b'], np.object),
'mean': np.array([25, 75], np.float32),
'var': np.array([200, 216.67], np.float32)
}
self.assertAnalyzerOutputs(
input_data,
input_metadata,
analyzer_fn,
expected_outputs,
desired_batch_size=10)
@tft_unit.named_parameters(('Int64In', tf.int64, {
'min': tf.int64,
'max': tf.int64,
'sum': tf.int64,
'size': tf.int64,
'mean': tf.float32,
'var': tf.float32
}), ('Int32In', tf.int32, {
'min': tf.int32,
'max': tf.int32,
'sum': tf.int64,
'size': tf.int64,
'mean': tf.float32,
'var': tf.float32
}), ('Int16In', tf.int16, {
'min': tf.int16,
'max': tf.int16,
'sum': tf.int64,
'size': tf.int64,
'mean': tf.float32,
'var': tf.float32
}), ('Float64In', tf.float64, {
'min': tf.float64,
'max': tf.float64,
'sum': tf.float64,
'size': tf.int64,
'mean': tf.float64,
'var': tf.float64
}), ('Float32In', tf.float32, {
'min': tf.float32,
'max': tf.float32,
'sum': tf.float32,
'size': tf.int64,
'mean': tf.float32,
'var': tf.float32
}), ('Float16In', tf.float16, {
'min': tf.float16,
'max': tf.float16,
'sum': tf.float32,
'size': tf.int64,
'mean': tf.float16,
'var': tf.float16
}))
def testNumericAnalyzersWithScalarInputs(self, input_dtype, output_dtypes):
def analyzer_fn(inputs):
a = tf.cast(inputs['a'], input_dtype)
def assert_and_cast_dtype(tensor, out_dtype):
self.assertEqual(tensor.dtype, out_dtype)
return tf.cast(tensor, _canonical_dtype(out_dtype))
return {
'min': assert_and_cast_dtype(tft.min(a),
output_dtypes['min']),
'max': assert_and_cast_dtype(tft.max(a),
output_dtypes['max']),
'sum': assert_and_cast_dtype(tft.sum(a),
output_dtypes['sum']),
'size': assert_and_cast_dtype(tft.size(a),
output_dtypes['size']),
'mean': assert_and_cast_dtype(tft.mean(a),
output_dtypes['mean']),
'var': assert_and_cast_dtype(tft.var(a),
output_dtypes['var']),
}
input_data = [{'a': 4}, {'a': 1}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([], _canonical_dtype(input_dtype))})
expected_outputs = {
'min': np.array(
1, _canonical_dtype(output_dtypes['min']).as_numpy_dtype),
'max': np.array(
4, _canonical_dtype(output_dtypes['max']).as_numpy_dtype),
'sum': np.array(
5, _canonical_dtype(output_dtypes['sum']).as_numpy_dtype),
'size': np.array(
2, _canonical_dtype(output_dtypes['size']).as_numpy_dtype),
'mean': np.array(
2.5, _canonical_dtype(output_dtypes['mean']).as_numpy_dtype),
'var': np.array(
2.25, _canonical_dtype(output_dtypes['var']).as_numpy_dtype),
}
self.assertAnalyzerOutputs(
input_data, input_metadata, analyzer_fn, expected_outputs)
@tft_unit.parameters(*itertools.product([
tf.int16,
tf.int32,
tf.int64,
tf.float32,
tf.float64,
tf.uint8,
tf.uint16,
], (True, False)))
def testNumericAnalyzersWithSparseInputs(self, input_dtype,
reduce_instance_dims):
def analyzer_fn(inputs):
return {
'min':
tft.min(inputs['a'], reduce_instance_dims=reduce_instance_dims),
'max':
tft.max(inputs['a'], reduce_instance_dims=reduce_instance_dims),
'sum':
tft.sum(inputs['a'], reduce_instance_dims=reduce_instance_dims),
'size':
tft.size(inputs['a'], reduce_instance_dims=reduce_instance_dims),
'mean':
tft.mean(inputs['a'], reduce_instance_dims=reduce_instance_dims),
'var':
tft.var(inputs['a'], reduce_instance_dims=reduce_instance_dims),
}
output_dtype = _canonical_dtype(input_dtype).as_numpy_dtype
input_data = [
{'idx': [0, 1], 'val': [0., 1.]},
{'idx': [1, 3], 'val': [2., 3.]},
]
input_metadata = tft_unit.metadata_from_feature_spec({
'a': tf.io.SparseFeature('idx', 'val', _canonical_dtype(input_dtype), 4)
})
if reduce_instance_dims:
expected_outputs = {
'min': np.array(0., output_dtype),
'max': np.array(3., output_dtype),
'sum': np.array(6., output_dtype),
'size': np.array(4, np.int64),
'mean': np.array(1.5, np.float32),
'var': np.array(1.25, np.float32),
}
else:
if input_dtype.is_floating:
missing_value_max = float('nan')
missing_value_min = float('nan')
else:
missing_value_max = np.iinfo(output_dtype).min
missing_value_min = np.iinfo(output_dtype).max
expected_outputs = {
'min': np.array([0., 1., missing_value_min, 3.], output_dtype),
'max': np.array([0., 2., missing_value_max, 3.], output_dtype),
'sum': np.array([0., 3., 0., 3.], output_dtype),
'size': np.array([1, 2, 0, 1], np.int64),
'mean': np.array([0., 1.5, float('nan'), 3.], np.float32),
'var': np.array([0., 0.25, float('nan'), 0.], np.float32),
}
self.assertAnalyzerOutputs(input_data, input_metadata, analyzer_fn,
expected_outputs)
def testNumericAnalyzersWithInputsAndAxis(self):
def analyzer_fn(inputs):
return {
'min': tft.min(inputs['a'], reduce_instance_dims=False),
'max': tft.max(inputs['a'], reduce_instance_dims=False),
'sum': tft.sum(inputs['a'], reduce_instance_dims=False),
'size': tft.size(inputs['a'], reduce_instance_dims=False),
'mean': tft.mean(inputs['a'], reduce_instance_dims=False),
'var': tft.var(inputs['a'], reduce_instance_dims=False),
}
input_data = [
{'a': [8, 9, 3, 4]},
{'a': [1, 2, 10, 11]}
]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([4], tf.int64)})
expected_outputs = {
'min': np.array([1, 2, 3, 4], np.int64),
'max': np.array([8, 9, 10, 11], np.int64),
'sum': np.array([9, 11, 13, 15], np.int64),
'size': np.array([2, 2, 2, 2], np.int64),
'mean': np.array([4.5, 5.5, 6.5, 7.5], np.float32),
'var': np.array([12.25, 12.25, 12.25, 12.25], np.float32),
}
self.assertAnalyzerOutputs(
input_data, input_metadata, analyzer_fn, expected_outputs)
def testNumericAnalyzersWithNDInputsAndAxis(self):
def analyzer_fn(inputs):
return {
'min': tft.min(inputs['a'], reduce_instance_dims=False),
'max': tft.max(inputs['a'], reduce_instance_dims=False),
'sum': tft.sum(inputs['a'], reduce_instance_dims=False),
'size': tft.size(inputs['a'], reduce_instance_dims=False),
'mean': tft.mean(inputs['a'], reduce_instance_dims=False),
'var': tft.var(inputs['a'], reduce_instance_dims=False),
}
input_data = [
{'a': [[8, 9], [3, 4]]},
{'a': [[1, 2], [10, 11]]}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([2, 2], tf.int64)})
expected_outputs = {
'min': np.array([[1, 2], [3, 4]], np.int64),
'max': np.array([[8, 9], [10, 11]], np.int64),
'sum': np.array([[9, 11], [13, 15]], np.int64),
'size': np.array([[2, 2], [2, 2]], np.int64),
'mean': np.array([[4.5, 5.5], [6.5, 7.5]], np.float32),
'var': np.array([[12.25, 12.25], [12.25, 12.25]], np.float32),
}
self.assertAnalyzerOutputs(
input_data, input_metadata, analyzer_fn, expected_outputs)
def testNumericAnalyzersWithShape1NDInputsAndAxis(self):
def analyzer_fn(inputs):
return {
'min': tft.min(inputs['a'], reduce_instance_dims=False),
'max': tft.max(inputs['a'], reduce_instance_dims=False),
'sum': tft.sum(inputs['a'], reduce_instance_dims=False),
'size': tft.size(inputs['a'], reduce_instance_dims=False),
'mean': tft.mean(inputs['a'], reduce_instance_dims=False),
'var': tft.var(inputs['a'], reduce_instance_dims=False),
}
input_data = [{'a': [[8, 9]]}, {'a': [[1, 2]]}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([1, 2], tf.int64)})
expected_outputs = {
'min': np.array([[1, 2]], np.int64),
'max': np.array([[8, 9]], np.int64),
'sum': np.array([[9, 11]], np.int64),
'size': np.array([[2, 2]], np.int64),
'mean': np.array([[4.5, 5.5]], np.float32),
'var': np.array([[12.25, 12.25]], np.float32),
}
self.assertAnalyzerOutputs(input_data, input_metadata, analyzer_fn,
expected_outputs)
def testNumericAnalyzersWithNDInputs(self):
def analyzer_fn(inputs):
return {
'min': tft.min(inputs['a']),
'max': tft.max(inputs['a']),
'sum': tft.sum(inputs['a']),
'size': tft.size(inputs['a']),
'mean': tft.mean(inputs['a']),
'var': tft.var(inputs['a']),
}
input_data = [
{'a': [[4, 5], [6, 7]]},
{'a': [[1, 2], [3, 4]]}
]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([2, 2], tf.int64)})
expected_outputs = {
'min': np.array(1, np.int64),
'max': np.array(7, np.int64),
'sum': np.array(32, np.int64),
'size': np.array(8, np.int64),
'mean': np.array(4.0, np.float32),
'var': np.array(3.5, np.float32),
}
self.assertAnalyzerOutputs(
input_data, input_metadata, analyzer_fn, expected_outputs)
def testNumericMeanWithSparseTensorReduceFalseOverflow(self):
def analyzer_fn(inputs):
return {'mean': tft.mean(tf.cast(inputs['sparse'], tf.int32), False)}
input_data = [
{'idx': [0, 1], 'val': [1, 1]},
{'idx': [1, 3], 'val': [2147483647, 3]},
]
input_metadata = tft_unit.metadata_from_feature_spec(
{'sparse': tf.io.SparseFeature('idx', 'val', tf.int64, 4)})
expected_outputs = {
'mean': np.array([1., 1073741824., float('nan'), 3.], np.float32)
}
self.assertAnalyzerOutputs(input_data, input_metadata, analyzer_fn,
expected_outputs)
def testStringToTFIDF(self):
def preprocessing_fn(inputs):
inputs_as_ints = tft.compute_and_apply_vocabulary(
tf.strings.split(inputs['a']))
out_index, out_values = tft.tfidf(inputs_as_ints, 6)
return {
'tf_idf': out_values,
'index': out_index
}
input_data = [{'a': 'hello hello world'},
{'a': 'hello goodbye hello world'},
{'a': 'I like pie pie pie'}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([], tf.string)})
# IDFs
# hello = log(4/3) = 0.28768
# world = log(4/3)
# goodbye = log(4/2) = 0.69314
# I = log(4/2)
# like = log(4/2)
# pie = log(4/2)
log_4_over_2 = 1.69314718056
log_4_over_3 = 1.28768207245
expected_transformed_data = [{
'tf_idf': [(2/3)*log_4_over_3, (1/3)*log_4_over_3],
'index': [0, 2]
}, {
'tf_idf': [(2/4)*log_4_over_3, (1/4)*log_4_over_3, (1/4)*log_4_over_2],
'index': [0, 2, 4]
}, {
'tf_idf': [(3/5)*log_4_over_2, (1/5)*log_4_over_2, (1/5)*log_4_over_2],
'index': [1, 3, 5]
}]
expected_metadata = tft_unit.metadata_from_feature_spec({
'tf_idf': tf.io.VarLenFeature(tf.float32),
'index': tf.io.VarLenFeature(tf.int64)
})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_transformed_data,
expected_metadata)
def testTFIDFNoData(self):
def preprocessing_fn(inputs):
inputs_as_ints = tft.compute_and_apply_vocabulary(
tf.strings.split(inputs['a']))
out_index, out_values = tft.tfidf(inputs_as_ints, 6)
return {
'tf_idf': out_values,
'index': out_index
}
input_data = [{'a': ''}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([], tf.string)})
expected_transformed_data = [{'tf_idf': [], 'index': []}]
expected_metadata = tft_unit.metadata_from_feature_spec({
'tf_idf': tf.io.VarLenFeature(tf.float32),
'index': tf.io.VarLenFeature(tf.int64)
})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_transformed_data,
expected_metadata)
def testStringToTFIDFEmptyDoc(self):
def preprocessing_fn(inputs):
inputs_as_ints = tft.compute_and_apply_vocabulary(
tf.strings.split(inputs['a']))
out_index, out_values = tft.tfidf(inputs_as_ints, 6)
return {
'tf_idf': out_values,
'index': out_index
}
input_data = [{'a': 'hello hello world'},
{'a': ''},
{'a': 'hello goodbye hello world'},
{'a': 'I like pie pie pie'}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([], tf.string)})
log_5_over_2 = 1.91629073187
log_5_over_3 = 1.51082562376
expected_transformed_data = [{
'tf_idf': [(2/3)*log_5_over_3, (1/3)*log_5_over_3],
'index': [0, 2]
}, {
'tf_idf': [],
'index': []
}, {
'tf_idf': [(2/4)*log_5_over_3, (1/4)*log_5_over_3, (1/4)*log_5_over_2],
'index': [0, 2, 4]
}, {
'tf_idf': [(3/5)*log_5_over_2, (1/5)*log_5_over_2, (1/5)*log_5_over_2],
'index': [1, 3, 5]
}]
expected_metadata = tft_unit.metadata_from_feature_spec({
'tf_idf': tf.io.VarLenFeature(tf.float32),
'index': tf.io.VarLenFeature(tf.int64)
})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_transformed_data,
expected_metadata)
def testIntToTFIDF(self):
def preprocessing_fn(inputs):
out_index, out_values = tft.tfidf(inputs['a'], 13)
return {'tf_idf': out_values, 'index': out_index}
input_data = [{'a': [2, 2, 0]},
{'a': [2, 6, 2, 0]},
{'a': [8, 10, 12, 12, 12]},
]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.VarLenFeature(tf.int64)})
log_4_over_2 = 1.69314718056
log_4_over_3 = 1.28768207245
expected_data = [{
'tf_idf': [(1/3)*log_4_over_3, (2/3)*log_4_over_3],
'index': [0, 2]
}, {
'tf_idf': [(1/4)*log_4_over_3, (2/4)*log_4_over_3, (1/4)*log_4_over_2],
'index': [0, 2, 6]
}, {
'tf_idf': [(1/5)*log_4_over_2, (1/5)*log_4_over_2, (3/5)*log_4_over_2],
'index': [8, 10, 12]
}]
expected_schema = tft_unit.metadata_from_feature_spec({
'tf_idf': tf.io.VarLenFeature(tf.float32),
'index': tf.io.VarLenFeature(tf.int64)
})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_data,
expected_schema)
# funcs2run_bat_tools.py
import os
import subprocess
import sys
sys.path.append('/storage/work/jjd330/local/bat_data/BatML/HeasoftTools')
from gen_tools import run_ftool
from bat_tool_funcs import ev2dpi, mk_pc_img, mk_sky_img, run_batcelldetect
import time
import numpy as np
import multiprocessing as mp
import pandas as pd
from astropy.table import Table
from astropy.io import fits
from astropy.wcs import WCS
from sqlite_funcs import get_conn, write_cats2db, write_sigimg_line
import logging, traceback
def do_bkg(bkg_tstart, bkg_tstop, ev_fname, dmask, savedir, e0=14.0, e1=194.9):
# bkg_tstart = args.bkgt0
# bkg_tstop = args.bkgt0 + args.bkgdt
dpif = 'dpi_%.1f_%.1f_%.3f_%.3f_.dpi' %(e0, e1, bkg_tstart, bkg_tstop)
dpi_bkg_fname = os.path.join(savedir, dpif)
ev2dpi(ev_fname, dpi_bkg_fname, bkg_tstart, bkg_tstop, e0, e1, dmask)
return dpi_bkg_fname
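# Illustrative call (hypothetical file names; times are MET seconds):
#   bkg_dpi = do_bkg(trig_time - 40.0, trig_time - 10.0, 'filter_evdata.fits',
#                    'detmask.fits', work_dir)
# This bins the 30 s background interval into a DPI via ev2dpi and returns
# the path of the resulting .dpi file.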
def do_pc(dmask, att_fname, work_dir, ovrsmp=4, detapp=False):
pc_fname = os.path.join(work_dir, 'pc_%d.img' %(ovrsmp))
if not os.path.exists(pc_fname):
mk_pc_img(dmask, pc_fname, dmask, att_fname,\
ovrsmp=ovrsmp, detapp=detapp)
return pc_fname
def mk_sig_imgs_pix(tstarts, dts, evf, dpi_bkg_fname, pc_fname,\
attfile, dmask, savedir, work_dir, trig_time, conn, db_fname,\
e0=14.0, e1=194.9,\
oversamp=4, snr_cuts=None):
arg_dict_keys = ['tstart', 'dt', 'ev_fname',
'bkg_dpi',
'pc_fname', 'att_fname', 'dmask',
'savedir', 'pc_fname', 'e0', 'e1',
'oversamp']
args_dict_list = []
for i in range(len(tstarts)):
arg_dict = {'tstart':tstarts[i], 'dt':dts[i], 'ev_fname':evf,
'bkg_dpi':dpi_bkg_fname,
'att_fname':attfile, 'pc_fname':pc_fname,
'dmask':dmask, 'savedir':savedir, 'e0':e0,
'e1':e1, 'oversamp':oversamp}
args_dict_list.append(arg_dict)
t0 = time.time()
logging.info("%d images to make" %(len(args_dict_list)))
PC = fits.open(pc_fname)[0]
w_t = WCS(PC.header, key='T')
pc = PC.data
pc_bl = (pc>=0.1)
dtp = [('snr', np.float), ('imx', np.float), ('imy', np.float)]
Nimgs = len(args_dict_list)
for i in range(Nimgs):
img_fname, sig_img_fname = mk_sky_sig_img4mp(args_dict_list[i])
logging.debug("Made img %s" %(sig_img_fname))
sig_img = fits.open(sig_img_fname)[0].data
logging.debug("Opened fits file")
if snr_cuts is None:
snr_cut = 3.0
else:
snr_cut = snr_cuts[i]
snr_bl = (sig_img>=snr_cut)
img_bl = (pc_bl&snr_bl)
sig_arr = np.empty(np.sum(img_bl), dtype=dtp)
sig_arr['snr'] = sig_img[img_bl]
logging.info("%d pix pass cut" %(np.sum(img_bl)))
inds = np.where(img_bl)
imxys = w_t.all_pix2world(inds[1], inds[0], 0)
sig_arr['imx'] = imxys[0]
sig_arr['imy'] = imxys[1]
sig_pix_fname = os.path.join(work_dir, os.path.basename(sig_img_fname)[:-4])
np.save(sig_pix_fname, sig_arr)
logging.info("saved to %s" %(sig_pix_fname+'.npy'))
try:
write_sigimg_line(conn, args_dict_list[i]['tstart'],\
args_dict_list[i]['dt'], trig_time,\
sig_pix_fname+'.npy', np.sum(img_bl))
logging.info("written to DB")
except:
conn.close()
conn = get_conn(db_fname)
try:
write_sigimg_line(conn, args_dict_list[i]['tstart'],\
args_dict_list[i]['dt'], trig_time,\
sig_pix_fname+'.npy', np.sum(img_bl))
logging.info("written to DB")
except Exception as E:
logging.error(str(E))
logging.warn("Failed to write to DB")
try:
os.remove(sig_img_fname)
os.remove(img_fname)
logging.info("Deleted img files")
except:
logging.info("Failed to delete a file")
pass
logging.info("Done with all images")
logging.info("Took %.2f seconds, %.2f minutes"\
%(time.time()-t0,(time.time()-t0)/60.))
return
def mk_sig_imgs_mp(nproc, tstarts, dts, evf, dpi_bkg_fname, pc_fname,\
attfile, dmask, savedir, e0=14.0, e1=194.9,\
oversamp=4, detapp=False, rebal=True):
arg_dict_keys = ['tstart', 'dt', 'ev_fname',
'bkg_dpi',
'pc_fname', 'att_fname', 'dmask',
'savedir', 'pc_fname', 'e0', 'e1',
'oversamp']
args_dict_list = []
for i in range(len(tstarts)):
arg_dict = {'tstart':tstarts[i], 'dt':dts[i], 'ev_fname':evf,
'bkg_dpi':dpi_bkg_fname,
'att_fname':attfile, 'pc_fname':pc_fname,
'dmask':dmask, 'savedir':savedir, 'e0':e0,
'e1':e1, 'oversamp':oversamp, 'detapp':detapp,
'rebal':rebal}
args_dict_list.append(arg_dict)
t0 = time.time()
logging.info("%d images to make" %(len(args_dict_list)))
if nproc > 1:
p = mp.Pool(nproc)
logging.info("Starting %d procs" %(nproc))
sig_img_fnames = p.map(mk_sky_sig_img4mp, args_dict_list)
p.close()
p.join()
else:
sig_img_fnames = list(map(mk_sky_sig_img4mp, args_dict_list))
logging.info("Done with all images")
logging.info("Took %.2f seconds, %.2f minutes"\
%(time.time()-t0,(time.time()-t0)/60.))
return
def mk_sky_sig_img4mp(arg_dict):
img_fname, sig_img_fname = mk_sky_sig_img(arg_dict['tstart'],\
arg_dict['dt'], arg_dict['ev_fname'], arg_dict['bkg_dpi'],\
arg_dict['att_fname'], arg_dict['dmask'],\
arg_dict['savedir'], e0=arg_dict['e0'],\
e1=arg_dict['e1'], oversamp=arg_dict['oversamp'],\
detapp=arg_dict['detapp'], rebal=arg_dict['rebal'])
return img_fname, sig_img_fname
def get_sig_pix_mp(nproc, tstarts, dts, evf, dpi_bkg_fname, pc_fname,\
attfile, dmask, savedir, e0=14.0, e1=194.9,\
oversamp=4, db_fname=None, timeIDs=None, RateTSs=None):
arg_dict_keys = ['tstart', 'dt', 'ev_fname',
'bkg_dpi',
'pc_fname', 'att_fname', 'dmask',
'savedir', 'pc_fname', 'e0', 'e1',
'oversamp', 'db_fname']
args_dict_list = []
exp_bins = [.2, .3, .6, 2]
TS_cuts = [2.25, 2.0, 1.8, 1.7, 1.65]
exp_bins = np.digitize(dts, bins=exp_bins)
for i in range(len(tstarts)):
if RateTSs is not None:
if RateTSs[i] < TS_cuts[exp_bins[i]]:
continue
arg_dict = {'tstart':tstarts[i], 'dt':dts[i], 'ev_fname':evf,
'bkg_dpi':dpi_bkg_fname,
'att_fname':attfile, 'pc_fname':pc_fname,
'dmask':dmask, 'savedir':savedir, 'e0':e0,
'e1':e1, 'oversamp':oversamp, 'db_fname':db_fname}
if timeIDs is not None:
arg_dict['timeID'] = timeIDs[i]
arg_dict['RateTS'] = RateTSs[i]
args_dict_list.append(arg_dict)
t0 = time.time()
p = mp.Pool(nproc)
logging.info("%d images to make" %(len(args_dict_list)))
logging.info("Starting %d procs" %(nproc))
sig_img_fnames = p.map(get_sig_pix, args_dict_list)
p.close()
p.join()
logging.info("Done with all images")
logging.info("Took %.2f seconds, %.2f minutes"\
%(time.time()-t0,(time.time()-t0)/60.))
def get_sig_pix(arg_dict):
img_fname, sig_img_fname = mk_sky_sig_img(arg_dict['tstart'],\
arg_dict['dt'], arg_dict['ev_fname'], arg_dict['bkg_dpi'],\
arg_dict['att_fname'], arg_dict['dmask'],\
arg_dict['savedir'], e0=arg_dict['e0'],\
e1=arg_dict['e1'], oversamp=arg_dict['oversamp'])
PC = fits.open(arg_dict['pc_fname'])[0]
sig_img = fits.open(sig_img_fname)[0]
w_t = WCS(sig_img.header, key='T')
exp_bins = [.2, .3, .6, 2]
TSaims = [5.25, 5.0, 4.8, 4.6, 4.5]
exp_bin = np.digitize(arg_dict['dt'], bins=exp_bins)
snr_cut = max(2.*(TSaims[exp_bin] - arg_dict['RateTS']), 2.0)
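    # Illustrative check of this cut (values are examples only): for a 1.024 s
    # exposure (exp_bin = 3, TSaim = 4.6) with RateTS = 3.5,
    # snr_cut = max(2*(4.6 - 3.5), 2.0) = 2.2, so the higher the rate-trigger TS
    # already is, the lower the image SNR required to record a pixel.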
bl = (PC.data>=.1)
bl_snr = (sig_img.data>snr_cut)&bl
if np.sum(bl_snr) < 1:
return
bl_snr_inds = np.where(bl_snr)
imxys = w_t.all_pix2world(bl_snr_inds[1], bl_snr_inds[0], 0)
SNRs = sig_img.data[bl_snr]
bins = [np.linspace(-2,2,10*4+1),
np.linspace(-1,1,10*2+1)]
imx_inds = np.digitize(imxys[0], bins=bins[0]) - 1
imy_inds = np.digitize(imxys[1], bins=bins[1]) - 1
job_inds = np.arange(19)
    job_ids = -1*np.ones(len(imx_inds), dtype=int)
imx_bins0 = [-1.2, -1.2, -0.8, -0.8, -0.4,
-0.4, 0.0, 0.0, 0.4, 0.4, 0.8,
0.8, -1.5, 0.0, -1.5, 0.0, -2.0,
1.2, -2.0]
imx_bins1 = [-0.8, -0.8, -0.4, -0.4, 0.0,
0.0, 0.4, 0.4, 0.8, 0.8, 1.2,
1.2, 0.0, 1.5, 0.0, 1.5, 2.0,
2.0, -1.2]
imy_bins0 = [-0.3, 0.2, -0.3, 0.2, -0.3,
0.2, -0.3, 0.2, -0.3, 0.2,
-0.3, 0.2, -0.5, -0.5, -1.0,
-1.0, 0.7, -0.3, -0.3]
imy_bins1 = [0.2, 0.7, 0.2, 0.7, 0.2, 0.7,
0.2, 0.7, 0.2, 0.7, 0.2, 0.7,
-0.3, -0.3, -0.5, -0.5, 1,
0.7, 0.7]
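    # Each of the 19 processing groups defined above is a rectangle
    # [imx_bins0[i], imx_bins1[i]) x [imy_bins0[i], imy_bins1[i]) in (imx, imy)
    # image coordinates; the loop below tags every significant pixel with the
    # group whose rectangle contains it (pixels outside all rectangles keep -1).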
for i in job_inds:
bl_bin = (imxys[0]>=imx_bins0[i])&(imxys[0]<imx_bins1[i])&\
(imxys[1]>=imy_bins0[i])&(imxys[1]<imy_bins1[i])
job_ids[bl_bin] = i
if arg_dict['db_fname'] is not None:
df_dict = {}
df_dict['timeID'] = arg_dict['timeID']
df_dict['imx_ind'] = imx_inds
df_dict['imy_ind'] = imy_inds
df_dict['imx'] = imxys[0]
df_dict['imy'] = imxys[1]
df_dict['snr'] = SNRs
df_dict['proc_group'] = job_ids
df_dict['done'] = 0
df = pd.DataFrame(df_dict)
conn = get_conn(arg_dict['db_fname'])
df.to_sql('ImageSigs', conn, if_exists='append', index=False)
return sig_img_fname
def mk_sky_sig_img(tstart, dt, evf, dpi_bkg_fname, attfile,\
dmask, savedir, e0=14.0, e1=194.9, oversamp=4,\
detapp=False, rebal=True):
tstop = tstart + dt
dpif = 'dpi_%.1f_%.1f_%.3f_%.3f_.dpi' %(e0, e1, tstart, tstop)
dpi_fname = os.path.join(savedir, dpif)
if not os.path.exists(dpi_fname):
ev2dpi(evf, dpi_fname, tstart, tstop, e0, e1, dmask)
img_fname = os.path.join(savedir, 'sky_%.1f_%.1f_%.3f_%.3f_os%d_.img'\
%(e0, e1, tstart, tstop, oversamp))
sig_img_fname = os.path.join(savedir, 'sig_%.1f_%.1f_%.3f_%.3f_os%d_.img'\
%(e0, e1, tstart, tstop, oversamp))
mk_sky_img(dpi_fname, img_fname, dmask, attfile,\
bkg_file=dpi_bkg_fname, ovrsmp=oversamp,\
sig_map=sig_img_fname, detapp=detapp,\
rebal=rebal)
return img_fname, sig_img_fname
def do_bkg_ebins(args, ebins):
bkg_tstart = args.bkgt0
bkg_tstop = args.bkgt0 + args.bkgdt
dpif = 'dpi_bkg_ebins_%.3f_%.3f_.dpi' %(bkg_tstart, bkg_tstop)
dpi_bkg_fname = os.path.join(args.savedir, args.obsid, dpif)
ev2dpi_ebins(args.evf, dpi_bkg_fname, bkg_tstart, bkg_tstop, ebins, args.dmask)
return dpi_bkg_fname
def std_grb(tstart, dt, evf, dpi_bkg_fnames, attfile,\
dmask, savedir,\
pc="NONE", e0=14.0, e1=194.9, oversamp=4,\
sigmap=False, bkgvar=False, detapp=False):
# 1 make dpi e0-e1 for sig and bkg times
# 2 make bkg subtracted sky image
# 3 run batcelldetect
tstop = tstart + dt
# 1
# obsid_dir = args.obsid
# aux_dir = os.path.join(obsid_dir, 'auxil')
# attfile = os.path.join(aux_dir, [fname for fname in os.listdir(aux_dir) if 'pat' in fname][0])
if np.isscalar(e0):
e0 = [e0]
e1 = [e1]
cat_fnames = []
for i in range(len(e0)):
dpif = 'dpi_%.1f_%.1f_%.3f_%.3f_.dpi' %(e0[i], e1[i], tstart, tstop)
dpi_fname = os.path.join(savedir, dpif)
ev2dpi(evf, dpi_fname, tstart, tstop, e0[i], e1[i], dmask)
# 2
img_fname = os.path.join(savedir, 'sky_%.1f_%.1f_%.3f_%.3f_os%d_.img' %(e0[i], e1[i], tstart,\
tstop, oversamp))
mk_sky_img(dpi_fname, img_fname, dmask, attfile,\
bkg_file=dpi_bkg_fnames[i], ovrsmp=oversamp,\
detapp=detapp)
# 3
cat_fname = os.path.join(savedir, 'cat_%.1f_%.1f_%.3f_%.3f_os%d_.fits' %(e0[i], e1[i], tstart,\
tstop, oversamp))
cat_fnames.append(cat_fname)
if sigmap:
sig_fname = os.path.join(savedir, 'sig_%.1f_%.1f_%.3f_%.3f_os%d_.img' %(e0[i], e1[i], tstart,\
tstop, oversamp))
else:
sig_fname = None
if bkgvar:
bkgvar_fname = os.path.join(savedir, 'bkgvar_%.1f_%.1f_%.3f_%.3f_os%d_.img' %(e0[i], e1[i], tstart,\
tstop, oversamp))
else:
bkgvar_fname = None
run_batcelldetect(img_fname, cat_fname,\
ovrsmp=oversamp, pcode=pc,\
sigmap=sig_fname, bkgvar=bkgvar_fname)
return cat_fnames
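# Usage sketch for std_grb (file names, times and energy bands below are
# illustrative placeholders, not values from this module):
#
#   cat_fnames = std_grb(4.096, 1.024, 'bat_events.fits', ['bkg_14_195.dpi'],
#                        'attitude.fits', 'detmask.fits', './imgs',
#                        pc='pcode.img', e0=[14.0], e1=[194.9], sigmap=True)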
def mk_sky_imgs4time_list(tstarts, dts, evf, attfile,\
dmask, savedir, e0=14.0, e1=194.9, oversamp=4,\
sigmap=False, bkgvar=False, detapp=False,\
bkg_dpi="None"):
if np.isscalar(tstarts):
tstarts = [tstarts]
dts = [dts]
Ntimes = len(tstarts)
for i in range(Ntimes):
tstart = tstarts[i]
tstop = tstart + dts[i]
dpif = 'dpi_%.1f_%.1f_%.3f_%.3f_.dpi' %(e0, e1, tstart, tstop)
dpi_fname = os.path.join(savedir, dpif)
ev2dpi(evf, dpi_fname, tstart, tstop, e0, e1, dmask)
# 2
img_fname = os.path.join(savedir, 'sky_%.1f_%.1f_%.3f_%.3f_os%d_.img' %(e0, e1, tstart,\
tstop, oversamp))
if bkgvar:
bkgvar_fname = os.path.join(savedir, 'bkgvar_%.1f_%.1f_%.3f_%.3f_os%d_.img' %(e0, e1, tstart,\
tstop, oversamp))
else:
bkgvar_fname = "NONE"
mk_sky_img(dpi_fname, img_fname, dmask, attfile,\
bkg_file=bkg_dpi, ovrsmp=oversamp,\
detapp=detapp, bkgvar_map=bkgvar_fname)
def do_search(arg_dict):
cat_fnames = std_grb(arg_dict['tstart'], arg_dict['dt'], arg_dict['ev_fname'],\
arg_dict['bkg_dpis'],\
arg_dict['att_fname'], arg_dict['dmask'], arg_dict['savedir'],\
pc=arg_dict['pc_fname'], e0=arg_dict['e0'], e1=arg_dict['e1'],\
oversamp=arg_dict['oversamp'])
if arg_dict['db_fname'] is not None:
conn = get_conn(arg_dict['db_fname'], timeout=30.0)
try:
write_cats2db(conn, cat_fnames, arg_dict['timeID'])
logging.info("Wrote results from timeID " +\
str(arg_dict['timeID']) + " into DB")
except Exception as E:
logging.error(str(E))
logging.error(traceback.format_exc())
logging.warning("Failed to write results from timeID " +\
str(arg_dict['timeID']) + " into DB")
logging.info("Trying again")
conn.close()
time.sleep(1.0)
conn = get_conn(arg_dict['db_fname'], timeout=60.0)
try:
write_cats2db(conn, cat_fnames, arg_dict['timeID'])
logging.info("Wrote results from timeID " +\
str(arg_dict['timeID']) + " into DB")
except Exception as E:
logging.error(E)
logging.error(traceback.format_exc())
logging.error("Failed to write results from timeID "\
+ str(arg_dict['timeID']) + " into DB")
logging.error("And not trying again")
conn.close()
return cat_fnames
def do_search_mp(nproc, tstarts, dts, ev_fname, bkg_dpis, pc_fname, att_fname,\
dmask, savedir, e0=14.0, e1=194.9, oversamp=4,\
db_fname=None, timeIDs=None):
arg_dict_keys = ['tstart', 'dt', 'ev_fname',
'bkg_dpis',
'pc_fname', 'att_fname', 'dmask',
'savedir', 'pc_fname', 'e0', 'e1',
'oversamp', 'db_fname']
args_dict_list = []
for i in range(len(tstarts)):
arg_dict = {'tstart':tstarts[i], 'dt':dts[i], 'ev_fname':ev_fname,
'bkg_dpis':bkg_dpis,
'pc_fname':pc_fname, 'att_fname':att_fname,
'dmask':dmask, 'savedir':savedir, 'e0':e0,
'e1':e1, 'oversamp':oversamp, 'db_fname':db_fname}
        if timeIDs is not None:
            arg_dict['timeID'] = timeIDs[i]
        args_dict_list.append(arg_dict)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.networkconnectivity_v1.services.hub_service import pagers
from google.cloud.networkconnectivity_v1.types import common
from google.cloud.networkconnectivity_v1.types import hub
from google.cloud.networkconnectivity_v1.types import hub as gcn_hub
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import HubServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import HubServiceGrpcAsyncIOTransport
from .client import HubServiceClient
class HubServiceAsyncClient:
"""Network Connectivity Center is a hub-and-spoke abstraction
for network connectivity management in Google Cloud. It reduces
operational complexity through a simple, centralized
connectivity management model.
"""
_client: HubServiceClient
DEFAULT_ENDPOINT = HubServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = HubServiceClient.DEFAULT_MTLS_ENDPOINT
hub_path = staticmethod(HubServiceClient.hub_path)
parse_hub_path = staticmethod(HubServiceClient.parse_hub_path)
instance_path = staticmethod(HubServiceClient.instance_path)
parse_instance_path = staticmethod(HubServiceClient.parse_instance_path)
interconnect_attachment_path = staticmethod(
HubServiceClient.interconnect_attachment_path
)
parse_interconnect_attachment_path = staticmethod(
HubServiceClient.parse_interconnect_attachment_path
)
network_path = staticmethod(HubServiceClient.network_path)
parse_network_path = staticmethod(HubServiceClient.parse_network_path)
spoke_path = staticmethod(HubServiceClient.spoke_path)
parse_spoke_path = staticmethod(HubServiceClient.parse_spoke_path)
vpn_tunnel_path = staticmethod(HubServiceClient.vpn_tunnel_path)
parse_vpn_tunnel_path = staticmethod(HubServiceClient.parse_vpn_tunnel_path)
common_billing_account_path = staticmethod(
HubServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
HubServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(HubServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(HubServiceClient.parse_common_folder_path)
common_organization_path = staticmethod(HubServiceClient.common_organization_path)
parse_common_organization_path = staticmethod(
HubServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(HubServiceClient.common_project_path)
parse_common_project_path = staticmethod(HubServiceClient.parse_common_project_path)
common_location_path = staticmethod(HubServiceClient.common_location_path)
parse_common_location_path = staticmethod(
HubServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
HubServiceAsyncClient: The constructed client.
"""
return HubServiceClient.from_service_account_info.__func__(HubServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
HubServiceAsyncClient: The constructed client.
"""
return HubServiceClient.from_service_account_file.__func__(HubServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> HubServiceTransport:
"""Returns the transport used by the client instance.
Returns:
HubServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(HubServiceClient).get_transport_class, type(HubServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, HubServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the hub service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.HubServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = HubServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
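    # Minimal usage sketch (assumes Application Default Credentials are
    # configured; the project and location values are placeholders, not part
    # of this module):
    #
    #   import asyncio
    #
    #   async def main():
    #       client = HubServiceAsyncClient()
    #       pager = await client.list_hubs(
    #           parent="projects/my-project/locations/global")
    #       async for h in pager:
    #           print(h.name)
    #
    #   asyncio.run(main())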
async def list_hubs(
self,
request: hub.ListHubsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListHubsAsyncPager:
r"""Lists hubs in a given project.
Args:
request (:class:`google.cloud.networkconnectivity_v1.types.ListHubsRequest`):
The request object. Request for
[HubService.ListHubs][google.cloud.networkconnectivity.v1.HubService.ListHubs]
method.
parent (:class:`str`):
Required. The parent resource's name.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.networkconnectivity_v1.services.hub_service.pagers.ListHubsAsyncPager:
Response for
[HubService.ListHubs][google.cloud.networkconnectivity.v1.HubService.ListHubs]
method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = hub.ListHubsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_hubs,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListHubsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_hub(
self,
request: hub.GetHubRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> hub.Hub:
r"""Gets details about the specified hub.
Args:
request (:class:`google.cloud.networkconnectivity_v1.types.GetHubRequest`):
The request object. Request for
[HubService.GetHub][google.cloud.networkconnectivity.v1.HubService.GetHub]
method.
name (:class:`str`):
Required. The name of the hub
resource to get.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.networkconnectivity_v1.types.Hub:
A hub is essentially a collection of
spokes. A single hub can contain spokes
from multiple regions. However, all of a
hub's spokes must be associated with
resources that reside in the same VPC
network.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = hub.GetHubRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_hub,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0513108,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.10198,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0878569,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.14171,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0715304,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.301097,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.100483,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01937,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00368511,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0266481,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0272537,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0266481,
'Execution Unit/Register Files/Runtime Dynamic': 0.0309388,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0561401,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.163658,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.10376,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00046177,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00046177,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000405769,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000159031,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000391501,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00172081,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00429994,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0261996,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.66652,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0512959,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0889857,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.96592,
'Instruction Fetch Unit/Runtime Dynamic': 0.172502,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0317369,
'L2/Runtime Dynamic': 0.00751808,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.25971,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.502355,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0330833,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0330834,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.41594,
'Load Store Unit/Runtime Dynamic': 0.698595,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0815777,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.163156,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0289522,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0294284,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.103618,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00841071,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.309462,
'Memory Management Unit/Runtime Dynamic': 0.0378392,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.3319,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00396386,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0462793,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
a request is made to return ``maxResults`` number of items, ``NextToken`` allows you to return more items in your list starting at the location pointed to by the next token.
:type NextToken: string
:param NextToken:
The next item following a partial list of returned items. For example, if a request is made to return ``maxResults`` number of items, ``NextToken`` allows you to return more items in your list starting at the location pointed to by the next token.
:type MaxResults: integer
:param MaxResults:
The maximum number of items to be returned.
:rtype: dict
:returns:
"""
pass
def list_recovery_points_by_backup_vault(self, BackupVaultName: str, NextToken: str = None, MaxResults: int = None, ByResourceArn: str = None, ByResourceType: str = None, ByBackupPlanId: str = None, ByCreatedBefore: datetime = None, ByCreatedAfter: datetime = None) -> Dict:
"""
Returns detailed information about the recovery points stored in a backup vault.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListRecoveryPointsByBackupVault>`_
**Request Syntax**
::
response = client.list_recovery_points_by_backup_vault(
BackupVaultName='string',
NextToken='string',
MaxResults=123,
ByResourceArn='string',
ByResourceType='string',
ByBackupPlanId='string',
ByCreatedBefore=datetime(2015, 1, 1),
ByCreatedAfter=datetime(2015, 1, 1)
)
**Response Syntax**
::
{
'NextToken': 'string',
'RecoveryPoints': [
{
'RecoveryPointArn': 'string',
'BackupVaultName': 'string',
'BackupVaultArn': 'string',
'ResourceArn': 'string',
'ResourceType': 'string',
'CreatedBy': {
'BackupPlanId': 'string',
'BackupPlanArn': 'string',
'BackupPlanVersion': 'string',
'BackupRuleId': 'string'
},
'IamRoleArn': 'string',
'Status': 'COMPLETED'|'PARTIAL'|'DELETING'|'EXPIRED',
'CreationDate': datetime(2015, 1, 1),
'CompletionDate': datetime(2015, 1, 1),
'BackupSizeInBytes': 123,
'CalculatedLifecycle': {
'MoveToColdStorageAt': datetime(2015, 1, 1),
'DeleteAt': datetime(2015, 1, 1)
},
'Lifecycle': {
'MoveToColdStorageAfterDays': 123,
'DeleteAfterDays': 123
},
'EncryptionKeyArn': 'string',
'IsEncrypted': True|False,
'LastRestoreTime': datetime(2015, 1, 1)
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --*
The next item following a partial list of returned items. For example, if a request is made to return ``maxResults`` number of items, ``NextToken`` allows you to return more items in your list starting at the location pointed to by the next token.
- **RecoveryPoints** *(list) --*
An array of objects that contain detailed information about recovery points saved in a backup vault.
- *(dict) --*
Contains detailed information about the recovery points stored in a backup vault.
- **RecoveryPointArn** *(string) --*
An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, ``arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45`` .
- **BackupVaultName** *(string) --*
The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.
- **BackupVaultArn** *(string) --*
An ARN that uniquely identifies a backup vault; for example, ``arn:aws:backup:us-east-1:123456789012:vault:aBackupVault`` .
- **ResourceArn** *(string) --*
An ARN that uniquely identifies a resource. The format of the ARN depends on the resource type.
- **ResourceType** *(string) --*
The type of AWS resource saved as a recovery point; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.
- **CreatedBy** *(dict) --*
Contains identifying information about the creation of a recovery point, including the ``BackupPlanArn`` , ``BackupPlanId`` , ``BackupPlanVersion`` , and ``BackupRuleId`` of the backup plan that is used to create it.
- **BackupPlanId** *(string) --*
Uniquely identifies a backup plan.
- **BackupPlanArn** *(string) --*
An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for example, ``arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50`` .
- **BackupPlanVersion** *(string) --*
Version IDs are unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1,024 bytes long. They cannot be edited.
- **BackupRuleId** *(string) --*
Uniquely identifies a rule used to schedule the backup of a selection of resources.
- **IamRoleArn** *(string) --*
Specifies the IAM role ARN used to create the target recovery point; for example, ``arn:aws:iam::123456789012:role/S3Access`` .
- **Status** *(string) --*
A status code specifying the state of the recovery point.
- **CreationDate** *(datetime) --*
The date and time a recovery point is created, in Unix format and Coordinated Universal Time (UTC). The value of ``CreationDate`` is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
- **CompletionDate** *(datetime) --*
The date and time a job to restore a recovery point is completed, in Unix format and Coordinated Universal Time (UTC). The value of ``CompletionDate`` is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
- **BackupSizeInBytes** *(integer) --*
The size, in bytes, of a backup.
- **CalculatedLifecycle** *(dict) --*
A ``CalculatedLifecycle`` object containing ``DeleteAt`` and ``MoveToColdStorageAt`` timestamps.
- **MoveToColdStorageAt** *(datetime) --*
A timestamp that specifies when to transition a recovery point to cold storage.
- **DeleteAt** *(datetime) --*
A timestamp that specifies when to delete a recovery point.
- **Lifecycle** *(dict) --*
The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.
Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.
- **MoveToColdStorageAfterDays** *(integer) --*
Specifies the number of days after creation that a recovery point is moved to cold storage.
- **DeleteAfterDays** *(integer) --*
Specifies the number of days after creation that a recovery point is deleted. Must be greater than ``MoveToColdStorageAfterDays`` .
- **EncryptionKeyArn** *(string) --*
The server-side encryption key that is used to protect your backups; for example, ``arn:aws:kms:us-west-2:111122223333:key/<KEY>`` .
- **IsEncrypted** *(boolean) --*
A Boolean value that is returned as ``TRUE`` if the specified recovery point is encrypted, or ``FALSE`` if the recovery point is not encrypted.
- **LastRestoreTime** *(datetime) --*
The date and time a recovery point was last restored, in Unix format and Coordinated Universal Time (UTC). The value of ``LastRestoreTime`` is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
:type BackupVaultName: string
:param BackupVaultName: **[REQUIRED]**
The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.
:type NextToken: string
:param NextToken:
The next item following a partial list of returned items. For example, if a request is made to return ``maxResults`` number of items, ``NextToken`` allows you to return more items in your list starting at the location pointed to by the next token.
:type MaxResults: integer
:param MaxResults:
The maximum number of items to be returned.
:type ByResourceArn: string
:param ByResourceArn:
Returns only recovery points that match the specified resource Amazon Resource Name (ARN).
:type ByResourceType: string
:param ByResourceType:
Returns only recovery points that match the specified resource type.
:type ByBackupPlanId: string
:param ByBackupPlanId:
Returns only recovery points that match the specified backup plan ID.
:type ByCreatedBefore: datetime
:param ByCreatedBefore:
Returns only recovery points that were created before the specified timestamp.
:type ByCreatedAfter: datetime
:param ByCreatedAfter:
Returns only recovery points that were created after the specified timestamp.
:rtype: dict
:returns:
"""
pass
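    # Pagination sketch (illustrative; 'client' and the vault name are
    # placeholders): keep passing the returned NextToken back until it is
    # no longer present in the response.
    #
    #   points, token = [], None
    #   while True:
    #       kwargs = {'BackupVaultName': 'aBackupVault', 'MaxResults': 100}
    #       if token:
    #           kwargs['NextToken'] = token
    #       resp = client.list_recovery_points_by_backup_vault(**kwargs)
    #       points.extend(resp.get('RecoveryPoints', []))
    #       token = resp.get('NextToken')
    #       if not token:
    #           break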
def list_recovery_points_by_resource(self, ResourceArn: str, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Returns detailed information about recovery points of the type specified by a resource Amazon Resource Name (ARN).
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListRecoveryPointsByResource>`_
**Request Syntax**
::
response = client.list_recovery_points_by_resource(
ResourceArn='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'NextToken': 'string',
'RecoveryPoints': [
{
'RecoveryPointArn': 'string',
'CreationDate': datetime(2015, 1, 1),
'Status': 'COMPLETED'|'PARTIAL'|'DELETING'|'EXPIRED',
'EncryptionKeyArn': 'string',
'BackupSizeBytes': 123,
'BackupVaultName': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **NextToken** *(string) --*
              The next item following a partial list of returned items. For example, if a request is made to return ``maxResults`` number of items, ``NextToken`` allows you to return more items in your list starting at the location pointed to by the next token.
return [True, None]
def get_views_list(self):
res = requests.get(self.url + '/api/defaultDashboards', headers=self.hdrs,
verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()]
def get_view(self, name):
gvres = self.get_views_list()
if gvres[0] is False:
return gvres
vlist = gvres[1]['defaultDashboards']
id = None
for v in vlist:
if v['name'] == name:
id = v['id']
break
if not id:
return [False, 'view ' + name + ' not found']
res = requests.get(self.url + '/api/defaultDashboards/' + id, headers=self.hdrs,
verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()]
def get_dashboards(self):
'''**Description**
Return the list of dashboards available under the given user account. This includes the dashboards created by the user and the ones shared with her by other users.
**Success Return Value**
            A dictionary containing the list of available dashboards.
**Example**
`examples/list_dashboards.py <https://github.com/draios/python-sdc-client/blob/master/examples/list_dashboards.py>`_
'''
res = requests.get(self.url + '/ui/dashboards', headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()]
def find_dashboard_by(self, name=None):
'''**Description**
Finds dashboards with the specified name. You can then delete the dashboard (with :func:`~SdcClient.delete_dashboard`) or edit panels (with :func:`~SdcClient.add_dashboard_panel` and :func:`~SdcClient.remove_dashboard_panel`)
**Arguments**
- **name**: the name of the dashboards to find.
**Success Return Value**
A list of dictionaries of dashboards matching the specified name.
**Example**
`examples/dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard.py>`_
'''
res = self.get_dashboards()
if res[0] is False:
return res
else:
def filter_fn(configuration):
return configuration['name'] == name
def create_item(configuration):
return {'dashboard': configuration}
dashboards = list(map(create_item, list(filter(filter_fn, res[1]['dashboards']))))
return [True, dashboards]
def create_dashboard_with_configuration(self, configuration):
res = requests.post(self.url + '/ui/dashboards', headers=self.hdrs, data=json.dumps({'dashboard': configuration}),
verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
else:
return [True, res.json()]
def create_dashboard(self, name):
'''
**Description**
Creates an empty dashboard. You can then add panels by using ``add_dashboard_panel``.
**Arguments**
- **name**: the name of the dashboard that will be created.
**Success Return Value**
A dictionary showing the details of the new dashboard.
**Example**
`examples/dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard.py>`_
'''
dashboard_configuration = {
'name': name,
'schema': 1,
'items': []
}
#
# Create the new dashboard
#
res = requests.post(self.url + '/ui/dashboards', headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}),
verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
else:
return [True, res.json()]
def add_dashboard_panel(self, dashboard, name, panel_type, metrics, scope=None, sort_by=None, limit=None, layout=None):
"""**Description**
Adds a panel to the dashboard. A panel can be a time series, or a top chart (i.e. bar chart), or a number panel.
**Arguments**
- **dashboard**: dashboard to edit
- **name**: name of the new panel
- **panel_type**: type of the new panel. Valid values are: ``timeSeries``, ``top``, ``number``
- **metrics**: a list of dictionaries, specifying the metrics to show in the panel, and optionally, if there is only one metric, a grouping key to segment that metric by. A metric is any of the entries that can be found in the *Metrics* section of the Explore page in Sysdig Monitor. Metric entries require an *aggregations* section specifying how to aggregate the metric across time and groups of containers/hosts. A grouping key is any of the entries that can be found in the *Show* or *Segment By* sections of the Explore page in Sysdig Monitor. Refer to the examples section below for ready to use code snippets. Note, certain panels allow certain combinations of metrics and grouping keys:
- ``timeSeries``: 1 or more metrics OR 1 metric + 1 grouping key
- ``top``: 1 or more metrics OR 1 metric + 1 grouping key
- ``number``: 1 metric only
- **scope**: filter to apply to the panel; must be based on metadata available in Sysdig Monitor; Example: *kubernetes.namespace.name='production' and container.image='nginx'*.
- **sort_by**: Data sorting; The parameter is optional and it's a dictionary of ``metric`` and ``mode`` (it can be ``desc`` or ``asc``)
- **limit**: This parameter sets the limit on the number of lines/bars shown in a ``timeSeries`` or ``top`` panel. In the case of more entities being available than the limit, the top entities according to the sort will be shown. The default value is 10 for ``top`` panels (for ``timeSeries`` the default is defined by Sysdig Monitor itself). Note that increasing the limit above 10 is not officially supported and may cause performance and rendering issues
- **layout**: Size and position of the panel. The dashboard layout is defined by a grid of 12 columns, each row height is equal to the column height. For example, say you want to show 2 panels at the top: one panel might be 6 x 3 (half the width, 3 rows height) located in row 1 and column 1 (top-left corner of the viewport), the second panel might be 6 x 3 located in row 1 and position 7. The location is specified by a dictionary of ``row`` (row position), ``col`` (column position), ``size_x`` (width), ``size_y`` (height).
**Success Return Value**
A dictionary showing the details of the edited dashboard.
**Example**
`examples/dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard.py>`_
"""
panel_configuration = {
'name': name,
'showAs': None,
'showAsType': None,
'metrics': [],
'gridConfiguration': {
'col': 1,
'row': 1,
'size_x': 12,
'size_y': 6
}
}
if panel_type == 'timeSeries':
#
# In case of a time series, the current dashboard implementation
# requires the timestamp to be explicitly specified as "key".
# However, this function uses the same abstraction of the data API
# that doesn't require to specify a timestamp key (you only need to
# specify time window and sampling)
#
metrics = copy.copy(metrics)
metrics.insert(0, {'id': 'timestamp'})
#
# Convert list of metrics to format used by Sysdig Monitor
#
property_names = {}
k_count = 0
v_count = 0
for i, metric in enumerate(metrics):
property_name = 'v' if 'aggregations' in metric else 'k'
if property_name == 'k':
i = k_count
k_count += 1
else:
i = v_count
v_count += 1
property_names[metric['id']] = property_name + str(i)
panel_configuration['metrics'].append({
'metricId': metric['id'],
'aggregation': metric['aggregations']['time'] if 'aggregations' in metric else None,
'groupAggregation': metric['aggregations']['group'] if 'aggregations' in metric else None,
'propertyName': property_name + str(i)
})
#
# Convert scope to format used by Sysdig Monitor
#
        if scope is not None:
filter_expressions = scope.strip(' \t\n\r?!.').split(" and ")
filters = []
for filter_expression in filter_expressions:
values = filter_expression.strip(' \t\n\r?!.').split("=")
if len(values) != 2:
return [False, "invalid scope format"]
filters.append({
'metric': values[0].strip(' \t\n\r?!.'),
'op': '=',
'value': values[1].strip(' \t\n\r"?!.'),
'filters': None
})
if len(filters) > 0:
panel_configuration['filter'] = {
'filters': {
'logic': 'and',
'filters': filters
}
}
#
# Configure panel type
#
if panel_type == 'timeSeries':
panel_configuration['showAs'] = 'timeSeries'
panel_configuration['showAsType'] = 'line'
            if limit is not None:
panel_configuration['paging'] = {
'from': 0,
'to': limit - 1
}
elif panel_type == 'number':
panel_configuration['showAs'] = 'summary'
panel_configuration['showAsType'] = 'summary'
elif panel_type == 'top':
panel_configuration['showAs'] = 'top'
panel_configuration['showAsType'] = 'bars'
if sort_by is None:
panel_configuration['sorting'] = [{
'id': 'v0',
'mode': 'desc'
}]
else:
panel_configuration['sorting'] = [{
'id': property_names[sort_by['metric']],
'mode': sort_by['mode']
}]
if limit is None:
panel_configuration['paging'] = {
'from': 0,
'to': 10
}
else:
panel_configuration['paging'] = {
'from': 0,
'to': limit - 1
}
#
# Configure layout
#
        if layout is not None:
panel_configuration['gridConfiguration'] = layout
#
# Clone existing dashboard...
#
dashboard_configuration = copy.deepcopy(dashboard)
dashboard_configuration['id'] = None
#
# ... and add the new panel
#
dashboard_configuration['items'].append(panel_configuration)
#
# Update dashboard
#
res = requests.put(self.url + '/ui/dashboards/' + str(dashboard['id']), headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}),
verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
else:
return [True, res.json()]
def remove_dashboard_panel(self, dashboard, panel_name):
'''**Description**
Removes a panel from the dashboard. The panel to remove is identified by the specified ``name``.
**Arguments**
- **name**: name of the panel to find and remove
**Success Return Value**
A dictionary showing the details of the edited dashboard.
**Example**
`examples/dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard.py>`_
'''
#
# Clone existing dashboard...
#
dashboard_configuration = copy.deepcopy(dashboard)
dashboard_configuration['id'] = None
#
# ... find the panel
#
def filter_fn(panel):
return panel['name'] == panel_name
panels = list(filter(filter_fn, dashboard_configuration['items']))
if len(panels) > 0:
#
# ... and remove it
#
for panel in panels:
dashboard_configuration['items'].remove(panel)
#
# Update dashboard
#
res = requests.put(self.url + '/ui/dashboards/' + str(dashboard['id']), headers=self.hdrs, data=json.dumps({'dashboard': dashboard_configuration}),
verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
else:
            return [True, res.json()]
[1] Nucci et al., The problem of synthetically generating IP traffic
matrices: initial recommendations, ACM SIGCOMM Computer Communication
Review, 35(3), 2005
"""
try:
mean = float(mean)
stddev = float(stddev)
except ValueError:
raise ValueError('mean and stddev must be of type float')
if mean < 0 or stddev < 0:
raise ValueError('mean and stddev must be not negative')
topology = topology.copy() if topology.is_directed() \
else topology.to_directed()
volume_unit = topology.graph['capacity_unit']
mu = log(mean ** 2 / sqrt(stddev ** 2 + mean ** 2))
sigma = sqrt(log((stddev ** 2 / mean ** 2) + 1))
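    # mu and sigma follow the standard lognormal moment-matching relations:
    # if X ~ LogNormal(mu, sigma) then E[X] = exp(mu + sigma**2/2) = mean and
    # Var[X] = (exp(sigma**2) - 1) * exp(2*mu + sigma**2) = stddev**2.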
if origin_nodes is None and destination_nodes is None:
od_pairs = od_pairs_from_topology(topology)
else:
all_nodes = topology.nodes()
origins = origin_nodes or all_nodes
destinations = destination_nodes or all_nodes
od_pairs = [(o, d) for o in origins for d in destinations if o != d]
nr_pairs = len(od_pairs)
volumes = sorted(lognormal(mu, sigma, size=nr_pairs))
# volumes = sorted([lognormvariate(mu, sigma) for _ in range(nr_pairs)])
if any(isinf(vol) for vol in volumes):
raise ValueError('Some volumes are too large to be handled by a '\
'float type. Set a lower value of mu and try again.')
sorted_od_pairs = __ranking_metrics_heuristic(topology, od_pairs)
# check if the matrix matches and scale if needed
assignments = dict(zip(sorted_od_pairs, volumes))
if max_u is not None:
if origin_nodes is not None:
shortest_path = dict(
(node, nx.single_source_dijkstra_path(topology,
node,
weight='weight'))
for node in origin_nodes)
# remove OD pairs not connected
for o, d in itertools.product(shortest_path, destinations):
if o != d and d not in shortest_path[o]:
od_pairs.remove((o, d))
else:
shortest_path = dict(nx.all_pairs_dijkstra_path(topology,
weight='weight'))
for u, v in topology.edges():
topology.adj[u][v]['load'] = 0.0
# Find max u
for o, d in od_pairs:
path = shortest_path[o][d]
if len(path) > 1:
for u, v in zip(path[:-1], path[1:]):
topology.adj[u][v]['load'] += assignments[(o, d)]
# Calculate scaling
current_max_u = max((float(topology.adj[u][v]['load']) \
/ float(topology.adj[u][v]['capacity'])
for u, v in topology.edges()))
norm_factor = max_u / current_max_u
for od_pair in assignments:
assignments[od_pair] *= norm_factor
# write to traffic matrix
traffic_matrix = TrafficMatrix(volume_unit=volume_unit)
for (o, d), flow in assignments.items():
traffic_matrix.add_flow(o, d, flow)
return traffic_matrix
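# Minimal usage sketch for static_traffic_matrix (the topology construction
# below is an illustrative assumption; any graph exposing per-link 'capacity'
# and 'weight' attributes plus graph['capacity_unit'] should do):
#
#   import networkx as nx
#   topo = nx.DiGraph(nx.erdos_renyi_graph(20, 0.3, seed=1))
#   topo.graph['capacity_unit'] = 'Mbps'
#   for u, v in topo.edges():
#       topo.adj[u][v]['capacity'] = 10
#       topo.adj[u][v]['weight'] = 1
#   tm = static_traffic_matrix(topo, mean=0.5, stddev=0.05, max_u=0.9)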
def stationary_traffic_matrix(topology, mean, stddev, gamma, log_psi, n,
max_u=0.9,
origin_nodes=None, destination_nodes=None):
"""
Return a stationary sequence of traffic matrices.
The sequence is generated by first generating a single matrix assigning
traffic volumes drawn from a lognormal distribution and assigned to
specific origin-destination pairs using the Ranking Metrics Heuristic
method proposed by Nucci et al. [2]_. Then, all matrices of the sequence
are generated by adding zero-mean normal fluctuation in the traffic
volumes. This process was originally proposed by [2]_
Stationary sequences of traffic matrices are generally suitable for
modeling network traffic over short periods (up to 1.5 hours). Over longer
periods, real traffic exhibits diurnal patterns and they are better
modelled by cyclostationary sequences
Parameters
----------
topology : topology
The topology for which the traffic matrix is calculated. This topology
can either be directed or undirected. If it is undirected, this
function assumes that all links are full-duplex.
mean : float
The mean volume of traffic among all origin-destination pairs
stddev : float
The standard deviation of volumes among all origin-destination pairs.
gamma : float
Parameter expressing relation between mean and standard deviation of
traffic volumes of a specific flow over the time
log_psi : float
Parameter expressing relation between mean and standard deviation of
traffic volumes of a specific flow over the time
n : int
Number of matrices in the sequence
max_u : float, optional
Represents the max link utilization. If specified, traffic volumes are
scaled so that the most utilized link of the network has a utilization
equal to max_u. If None, volumes are not scaled, but in this case links
may end up with a utilization factor greater than 1.0
origin_nodes : list, optional
A list of all nodes which can be traffic sources. If not specified
all nodes of the topology are traffic sources
destination_nodes : list, optional
A list of all nodes which can be traffic destinations. If not specified
all nodes of the topology are traffic destinations
Returns
-------
tms : TrafficMatrixSequence
References
----------
.. [2] Nucci et al., The problem of synthetically generating IP traffic
matrices: initial recommendations, ACM SIGCOMM Computer Communication
Review, 35(3), 2005
"""
tm_sequence = TrafficMatrixSequence()
static_tm = static_traffic_matrix(topology, mean, stddev, max_u=None,
origin_nodes=origin_nodes,
destination_nodes=destination_nodes)
volume_unit = static_tm.attrib['volume_unit']
mean_dict = static_tm.flows()
psi = exp(log_psi)
if psi == 0.0:
raise ValueError("The value of log_psi provided is too small and "
"causes psi=0.0, which makes the standard deviation "
"of random fluctuation to become infinite. Try with "
"a greater value of log_psi")
std_dict = {(o, d): (m / psi) ** (1.0 / gamma)
for (o, d), m in mean_dict.items()}
if any(isinf(std) for std in std_dict.values()):
raise ValueError("The value of log_psi or gamma provided are too "
"small and causes the standard deviation of random "
"fluctuations to become infinite. Try with a greater "
"value of log_psi and/or gamma")
flows = {}
for o, d in mean_dict:
# Implementation without Numpy:
# flows[(o, d)] = [max([0, normalvariate(mean_dict[(o, d)],
# std_dict[(o, d)])]) for _ in range(n)]
flows[(o, d)] = [max((0, normal(mean_dict[(o, d)], std_dict[(o, d)])))\
for _ in range(n)]
for i in range(n):
traffic_matrix = TrafficMatrix(volume_unit=volume_unit)
for o, d in mean_dict:
traffic_matrix.add_flow(o, d, flows[(o, d)][i])
tm_sequence.append(traffic_matrix)
if max_u is not None:
if origin_nodes is not None:
shortest_path = dict(
(node, nx.single_source_dijkstra_path(topology,
node,
weight='weight'))
for node in origin_nodes)
else:
shortest_path = dict(nx.all_pairs_dijkstra_path(topology,
weight='weight'))
current_max_u = max((max(link_loads(topology,
tm_sequence.get(i),
shortest_path
).values())
for i in range(n)))
norm_factor = max_u / current_max_u
for i in range(n):
for o, d in mean_dict:
tm_sequence.matrix[i].flow[o][d] *= norm_factor
return tm_sequence
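# Editor's note -- a hedged usage sketch, not part of the original module. It
# assumes the surrounding fnss-style API (a topology whose links already carry
# 'capacity' and 'weight' attributes); the topology/configuration helpers named
# below live outside this file and are shown for illustration only:
#
#     topo = fnss.erdos_renyi_topology(50, 0.2)
#     fnss.set_capacities_constant(topo, 10, 'Mbps')
#     fnss.set_weights_constant(topo, 1)
#     tms = stationary_traffic_matrix(topo, mean=0.5, stddev=0.05,
#                                     gamma=0.8, log_psi=-0.3, n=10, max_u=0.9)
#     first_tm = tms.get(0)   # one TrafficMatrix out of the 10 in the sequence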
def sin_cyclostationary_traffic_matrix(topology, mean, stddev, gamma, log_psi,
delta=0.2, n=24, periods=1, max_u=0.9,
origin_nodes=None,
destination_nodes=None):
"""
Return a cyclostationary sequence of traffic matrices, where traffic
volumes evolve over time as sin waves.
The sequence is generated by first generating a single matrix assigning
traffic volumes drawn from a lognormal distribution and assigned to
specific origin-destination pairs using the Ranking Metrics Heuristic
method proposed by Nucci et al. [3]_. Then, all matrices of the sequence
are generated by adding zero-mean normal fluctuation in the traffic
volumes. Finally, traffic volumes are multiplied by a sin function with
unitary mean to model periodic fluctuations.
This process was originally proposed by [3]_.
Cyclostationary sequences of traffic matrices are generally suitable for
modeling real network traffic over long periods, up to several days. In
fact, real traffic exhibits diurnal patterns well modelled by
cyclostationary sequences.
Parameters
----------
topology : topology
The topology for which the traffic matrix is calculated. This topology
can either be directed or undirected. If it is undirected, this
function assumes that all links are full-duplex.
mean : float
The mean volume of traffic among all origin-destination pairs
stddev : float
The standard deviation of volumes among all origin-destination pairs.
gamma : float
Parameter expressing relation between mean and standard deviation of
traffic volumes of a specific flow over the time
log_psi : float
Parameter expressing relation between mean and standard deviation of
traffic volumes of a specific flow over the time
delta : float [0, 1]
A parameter indicating the intensity of variation of traffic volumes
over a period. Specifically, let x be the mean volume over a specific
OD pair, the minimum and maximum traffic volumes for that OD pair
(excluding random fluctuations) are respectively :math:`x*(1 - delta)`
and :math:`x*(1 + delta)`
n : int
Number of traffic matrices per period. For example, if it is desired to
model traffic varying cyclically over a 24 hour period, and n is set to
24, then the time interval between subsequent traffic matrices is
1 hour.
periods : int
Number of periods. In total the sequence is composed of
:math:`n * periods` traffic matrices.
max_u : float, optional
Represents the max link utilization. If specified, traffic volumes are
scaled so that the most utilized link of the network has a utilization
equal to max_u. If None, volumes are not scaled, but in this case links
may end up with a utilization factor greater than 1.0
#!/usr/bin/env python
# encoding: utf-8
"""
turbine.py
Created by <NAME> and <NAME> on 2014-01-13.
Copyright (c) NREL. All rights reserved.
"""
from openmdao.main.api import Assembly, Component
from openmdao.main.datatypes.api import Float, Array, Enum, Bool, Int
from openmdao.lib.drivers.api import FixedPointIterator
import numpy as np
#from rotorse.rotor import RotorSE
#from towerse.tower import TowerSE
#from commonse.rna import RNAMass, RotorLoads
from drivewpact.drive import DriveWPACT
from drivewpact.hub import HubWPACT
from commonse.csystem import DirectionVector
from commonse.utilities import interp_with_deriv, hstack, vstack
from drivese.drive import Drive4pt, Drive3pt
from drivese.drivese_utils import blade_moment_transform, blade_force_transform
from drivese.hub import HubSE, Hub_System_Adder_drive
from SEAMLoads.SEAMLoads import SEAMLoads
from SEAMTower.SEAMTower import SEAMTower
from SEAMAero.SEAM_AEP import SEAM_PowerCurve
from SEAMRotor.SEAMRotor import SEAMBladeStructure
# from SEAMGeometry.SEAMGeometry import SEAMGeometry
def connect_io(top, cls):
cls_name = cls.name
for name in cls.list_inputs():
try:
top.connect(name, cls_name + '.%s' % name)
except:
# print 'failed connecting', cls_name, name
pass
for name in cls.list_outputs():
try:
top.connect(cls_name + '.%s' % name, name)
except:
pass
def configure_turbine(assembly, with_new_nacelle=True, flexible_blade=False, with_3pt_drive=False):
"""a stand-alone configure method to allow for flatter assemblies
Parameters
----------
assembly : Assembly
an openmdao assembly to be configured
with_new_nacelle : bool
False uses the default implementation, True uses an experimental implementation designed
to smooth out discontinuities, making it amenable to gradient-based optimization
flexible_blade : bool
if True, internally solves the coupled aero/structural deflection using fixed point iteration.
Note that the coupling is currently only in the flapwise deflection, and is primarily
only important for highly flexible blades. If False, the aero loads are passed
to the structure but there is no further iteration.
"""
#SEAM variables ----------------------------------
#d2e = Float(0.73, iotype='in', desc='Dollars to Euro ratio'
assembly.add('rated_power',Float(3000., iotype='in', units='kW', desc='Turbine rated power', group='Global'))
assembly.add('hub_height', Float(100., iotype='in', units='m', desc='Hub height', group='Global'))
assembly.add('rotor_diameter', Float(110., iotype='in', units='m', desc='Rotor diameter', group='Global'))
# assembly.add('site_type',Enum('onshore', values=('onshore', 'offshore'), iotype='in', desc='Site type', group='Global'))
assembly.add('tower_bottom_diameter', Float(4., iotype='in', desc='Tower bottom diameter', group='Global'))
assembly.add('tower_top_diameter', Float(2., iotype='in', desc='Tower top diameter', group='Global'))
assembly.add('project_lifetime', Float(iotype = 'in', desc='Operating years', group='Global'))
assembly.add('rho_steel', Float(7.8e3, iotype='in', desc='density of steel', group='Tower'))
assembly.add('lifetime_cycles', Float(1.e7, iotype='in', desc='Equivalent lifetime cycles', group='Rotor'))
assembly.add('stress_limit_extreme_tower', Float(iotype='in', units='MPa', desc='Tower ultimate strength', group='Tower'))
assembly.add('stress_limit_fatigue_tower', Float(iotype='in', units='MPa', desc='Tower fatigue strength', group='Tower'))
assembly.add('safety_factor_tower', Float(iotype='in', desc='Tower loads safety factor', group='Tower'))
assembly.add('PMtarget_tower', Float(1., iotype='in', desc='', group='Tower'))
assembly.add('wohler_exponent_tower', Float(4., iotype='in', desc='Tower fatigue Wohler exponent', group='Tower'))
assembly.add('tower_z', Array(iotype='out', desc='Tower discretization'))
assembly.add('tower_wall_thickness', Array(iotype='out', units='m', desc='Tower wall thickness'))
assembly.add('tower_mass', Float(iotype='out', units='kg', desc='Tower mass'))
assembly.add('tsr', Float(iotype='in', units='m', desc='Design tip speed ratio', group='Aero'))
assembly.add('F', Float(iotype='in', desc='Rotor power loss factor', group='Aero'))
assembly.add('wohler_exponent_blade_flap', Float(iotype='in', desc='Wohler Exponent blade flap', group='Rotor'))
assembly.add('nSigma4fatFlap', Float(iotype='in', desc='', group='Loads'))
assembly.add('nSigma4fatTower', Float(iotype='in', desc='', group='Loads'))
assembly.add('dLoad_dU_factor_flap', Float(iotype='in', desc='', group='Loads'))
assembly.add('dLoad_dU_factor_tower', Float(iotype='in', desc='', group='Loads'))
assembly.add('blade_edge_dynload_factor_ext', Float(iotype='in', desc='Extreme dynamic edgewise loads factor', group='Loads'))
assembly.add('blade_edge_dynload_factor_fat', Float(iotype='in', desc='Fatigue dynamic edgewise loads factor', group='Loads'))
assembly.add('PMtarget_blades', Float(1., iotype='in', desc='', group='Rotor'))
assembly.add('max_tipspeed', Float(iotype='in', desc='Maximum tip speed', group='Aero'))
assembly.add('n_wsp', Int(iotype='in', desc='Number of wind speed bins', group='Aero'))
assembly.add('min_wsp', Float(0.0, iotype = 'in', units = 'm/s', desc = 'min wind speed', group='Aero'))
assembly.add('max_wsp', Float(iotype = 'in', units = 'm/s', desc = 'max wind speed', group='Aero'))
assembly.add('turbulence_int', Float(iotype='in', desc='Reference turbulence intensity', group='Plant_AEP'))
# assembly.add('WeibullInput', Bool(True, iotype='in', desc='Flag for Weibull input', group='AEP'))
assembly.add('weibull_C', Float(iotype = 'in', units='m/s', desc = 'Weibull scale factor', group='AEP'))
assembly.add('weibull_k', Float(iotype = 'in', desc='Weibull shape or form factor', group='AEP'))
assembly.add('blade_sections', Int(iotype='in', desc='number of sections along blade', group='Rotor'))
assembly.add('wohler_exponent_blade_flap', Float(iotype='in', desc='Blade flap fatigue Wohler exponent', group='Rotor'))
assembly.add('MaxChordrR', Float(iotype='in', units='m', desc='Spanwise position of maximum chord', group='Rotor'))
assembly.add('tif_blade_root_flap_ext', Float(1., iotype='in', desc='Technology improvement factor flap extreme', group='Rotor'))
assembly.add('tif_blade_root_edge_ext', Float(1., iotype='in', desc='Technology improvement factor edge extreme', group='Rotor'))
assembly.add('tif_blade_root_flap_fat', Float(1., iotype='in', desc='Technology improvement factor flap LEQ', group='Rotor'))
assembly.add('sc_frac_flap', Float(iotype='in', desc='spar cap fraction of chord', group='Rotor'))
assembly.add('sc_frac_edge', Float(iotype='in', desc='spar cap fraction of thickness', group='Rotor'))
assembly.add('safety_factor_blade', Float(iotype='in', desc='Blade loads safety factor', group='Rotor'))
assembly.add('stress_limit_extreme_blade', Float(iotype='in', units='MPa', desc='Blade ultimate strength', group='Rotor'))
assembly.add('stress_limit_fatigue_blade', Float(iotype='in', units='MPa', desc='Blade fatigue strength', group='Rotor'))
assembly.add('AddWeightFactorBlade', Float(iotype='in', desc='Additional weight factor for blade shell', group='Rotor'))
assembly.add('blade_material_density', Float(iotype='in', units='kg/m**3', desc='Average density of blade materials', group='Rotor'))
assembly.add('blade_mass', Float(iotype = 'out', units = 'kg', desc = 'Blade mass'))
# assembly.add('mean_wsp', Float(iotype = 'in', units = 'm/s', desc = 'mean wind speed', group='Aero')) # [m/s]
assembly.add('air_density', Float(iotype = 'in', units = 'kg/m**3', desc = 'density of air', group='Plant_AEP')) # [kg / m^3]
assembly.add('max_Cp', Float(iotype = 'in', desc = 'max CP', group='Aero'))
assembly.add('gearloss_const', Float(iotype = 'in', desc = 'Gear loss constant', group='Drivetrain'))
assembly.add('gearloss_var', Float(iotype = 'in', desc = 'Gear loss variable', group='Drivetrain'))
assembly.add('genloss', Float(iotype = 'in', desc = 'Generator loss', group='Drivetrain'))
assembly.add('convloss', Float(iotype = 'in', desc = 'Converter loss', group='Drivetrain'))
# Outputs
assembly.add('rated_wind_speed', Float(units = 'm / s', iotype='out', desc='wind speed for rated power'))
assembly.add('ideal_power_curve', Array(iotype='out', units='kW', desc='total power before losses and turbulence'))
assembly.add('power_curve', Array(iotype='out', units='kW', desc='total power including losses and turbulence'))
assembly.add('wind_curve', Array(iotype='out', units='m/s', desc='wind curve associated with power curve'))
assembly.add('aep', Float(iotype = 'out', units='mW*h', desc='Annual energy production in mWh'))
assembly.add('total_aep', Float(iotype = 'out', units='mW*h', desc='AEP for total years of production'))
# END SEAM Variables ----------------------
# Add SEAM components and connections
assembly.add('loads', SEAMLoads())
assembly.add('tower_design', SEAMTower(21))
assembly.add('blade_design', SEAMBladeStructure())
assembly.add('aep_calc', SEAM_PowerCurve())
assembly.driver.workflow.add(['loads', 'tower_design', 'blade_design', 'aep_calc'])
assembly.connect('loads.tower_bottom_moment_max', 'tower_design.tower_bottom_moment_max')
assembly.connect('loads.tower_bottom_moment_leq', 'tower_design.tower_bottom_moment_leq')
assembly.connect('loads.blade_root_flap_max', 'blade_design.blade_root_flap_max')
assembly.connect('loads.blade_root_edge_max', 'blade_design.blade_root_edge_max')
assembly.connect('loads.blade_root_flap_leq', 'blade_design.blade_root_flap_leq')
assembly.connect('loads.blade_root_edge_leq', 'blade_design.blade_root_edge_leq')
connect_io(assembly, assembly.aep_calc)
connect_io(assembly, assembly.loads)
connect_io(assembly, assembly.tower_design)
connect_io(assembly, assembly.blade_design)
# End SEAM add components and connections -------------
if with_new_nacelle:
assembly.add('hub',HubSE())
assembly.add('hubSystem',Hub_System_Adder_drive())
if with_3pt_drive:
assembly.add('nacelle', Drive3pt())
else:
assembly.add('nacelle', Drive4pt())
else:
assembly.add('nacelle', DriveWPACT())
assembly.add('hub', HubWPACT())
assembly.driver.workflow.add(['hub', 'nacelle'])
if with_new_nacelle:
assembly.driver.workflow.add(['hubSystem'])
# connections to hub and hub system
assembly.connect('blade_design.blade_mass', 'hub.blade_mass')
assembly.connect('loads.blade_root_flap_max', 'hub.rotor_bending_moment')
assembly.connect('rotor_diameter', ['hub.rotor_diameter'])
assembly.connect('blade_design.blade_root_diameter', 'hub.blade_root_diameter')
assembly.add('blade_number',Int(3,iotype='in',desc='number of blades', group='Aero'))
assembly.connect('blade_number', 'hub.blade_number')
if with_new_nacelle:
assembly.connect('rated_power','hub.machine_rating')
assembly.connect('rotor_diameter', ['hubSystem.rotor_diameter'])
assembly.connect('nacelle.MB1_location','hubSystem.MB1_location') # TODO: bearing locations
assembly.connect('nacelle.L_rb','hubSystem.L_rb')
assembly.add('rotor_tilt', Float(5.0, iotype='in', desc='rotor tilt', group='Rotor'))
assembly.connect('rotor_tilt','hubSystem.shaft_angle')
assembly.connect('hub.hub_diameter','hubSystem.hub_diameter')
assembly.connect('hub.hub_thickness','hubSystem.hub_thickness')
assembly.connect('hub.hub_mass','hubSystem.hub_mass')
assembly.connect('hub.spinner_mass','hubSystem.spinner_mass')
assembly.connect('hub.pitch_system_mass','hubSystem.pitch_system_mass')
# connections to nacelle #TODO: fatigue option variables
assembly.connect('rotor_diameter', 'nacelle.rotor_diameter')
assembly.connect('1.5 * aep_calc.rated_torque', 'nacelle.rotor_torque')
assembly.connect('loads.max_thrust', 'nacelle.rotor_thrust')
assembly.connect('aep_calc.rated_speed', 'nacelle.rotor_speed')
assembly.connect('rated_power', 'nacelle.machine_rating')
assembly.add('generator_speed',Float(1173.7,iotype='in',units='rpm',desc='speed of generator', group='Drivetrain')) # - should be in nacelle
assembly.connect('generator_speed/aep_calc.rated_speed', 'nacelle.gear_ratio')
assembly.connect('tower_top_diameter', 'nacelle.tower_top_diameter')
assembly.connect('blade_number * blade_design.blade_mass + hub.hub_system_mass', 'nacelle.rotor_mass') # assuming not already in rotor force / moments
# variable connections for new nacelle
if with_new_nacelle:
assembly.connect('blade_number','nacelle.blade_number')
assembly.connect('rotor_tilt','nacelle.shaft_angle')
assembly.connect('333.3 * rated_power / 1000.0','nacelle.shrink_disc_mass')
assembly.connect('blade_design.blade_root_diameter','nacelle.blade_root_diameter')
#moments - ignoring for now (nacelle will use internal defaults)
#assembly.connect('rotor.Mxyz_0','moments.b1')
#assembly.connect('rotor.Mxyz_120','moments.b2')
#assembly.connect('rotor.Mxyz_240','moments.b3')
#assembly.connect('rotor.Pitch','moments.pitch_angle')
#assembly.connect('rotor.TotalCone','moments.cone_angle')
assembly.connect('1.5 * aep_calc.rated_torque','nacelle.rotor_bending_moment_x') #accounted for in ratedConditions.Q
#assembly.connect('moments.My','nacelle.rotor_bending_moment_y')
#assembly.connect('moments.Mz','nacelle.rotor_bending_moment_z')
#forces - ignoring for now (nacelle will use internal defaults)
#assembly.connect('rotor.Fxyz_0','forces.b1')
#assembly.connect('rotor.Fxyz_120','forces.b2')
#assembly.connect('rotor.Fxyz_240','forces.b3')
#assembly.connect('rotor.Pitch','forces.pitch_angle')
#assembly.connect('rotor.TotalCone','forces.cone_angle')
assembly.connect('loads.max_thrust','nacelle.rotor_force_x')
#assembly.connect('forces.Fy','nacelle.rotor_force_y')
#assembly.connect('forces.Fz','nacelle.rotor_force_z')
class Turbine_SE_SEAM(Assembly):
def configure(self):
configure_turbine(self)
if __name__ == '__main__':
turbine = Turbine_SE_SEAM()
#=========== SEAM inputs
turbine.AddWeightFactorBlade = 1.2
turbine.blade_material_density = 2100.0
turbine.tower_bottom_diameter = 6.
turbine.tower_top_diameter = 3.78
turbine.blade_edge_dynload_factor_ext = 2.5
turbine.blade_edge_dynload_factor_fat = 0.75
turbine.F = 0.777
turbine.MaxChordrR = 0.2
turbine.project_lifetime = 20.0
turbine.lifetime_cycles = 10000000.0
turbine.blade_sections = 21
turbine.PMtarget_tower = 1.0
turbine.PMtarget_blades = 1.0
turbine.safety_factor_blade = 1.1
turbine.safety_factor_tower = 1.5
turbine.stress_limit_extreme_tower = 235.0
turbine.stress_limit_fatigue_tower = 14.885
turbine.stress_limit_extreme_blade = 200.0
turbine.stress_limit_fatigue_blade = 27.0
turbine.tif_blade_root_flap_ext = 1.0
turbine.tif_blade_root_flap_fat = 1.0
turbine.tif_blade_root_edge_ext = 1.0
turbine.weibull_C = 11.0
turbine.weibull_k = 2.0
turbine.wohler_exponent_blade_flap = 10.0
turbine.wohler_exponent_tower = 4.0
turbine.dLoad_dU_factor_flap = 0.9
turbine.dLoad_dU_factor_tower = 0.8
turbine.hub_height = 90.0
turbine.max_tipspeed = 80.0
turbine.n_wsp = 26
turbine.min_wsp = 0.0
turbine.max_wsp = 25.0
turbine.nSigma4fatFlap = 1.2
turbine.nSigma4fatTower = 0.8
turbine.rated_power = 5000.0
turbine.rho_steel = 7800.0
turbine.rotor_diameter = 126.0
turbine.sc_frac_edge = 0.8
turbine.sc_frac_flap = 0.3
turbine.tsr = 8.0
turbine.air_density = 1.225
turbine.turbulence_int = 0.16
turbine.max_Cp = 0.49
turbine.gearloss_const = 0.01 # Fraction
turbine.gearloss_var = 0.014 # Fraction
turbine.genloss = 0.03 # Fraction
turbine.convloss = 0.03 # Fraction
#==============
# === nacelle ======
turbine.blade_number = 3 # turbine level that must be added for SEAM
turbine.rotor_tilt = 5.0 # turbine level that must be added for SEAM
turbine.generator_speed = 1173.7
turbine.nacelle.L_ms = 1.0 # (Float, m): main shaft length downwind of main bearing in low-speed shaft
turbine.nacelle.L_mb = 2.5 # (Float, m): main shaft length in low-speed shaft
turbine.nacelle.h0_front = 1.7 # (Float, m): height of Ibeam in bedplate front
turbine.nacelle.h0_rear = 1.35 # (Float, m): height of Ibeam in bedplate rear
turbine.nacelle.drivetrain_design = 'geared'
turbine.nacelle.crane = True # (Bool): flag for presence of crane
turbine.nacelle.bevel = 0 # (Int): Flag for the presence of a bevel stage - 1 if present, 0 if not
turbine.nacelle.gear_configuration = 'eep' # (Str): string that represents the configuration of the gearbox (stage number and types)
turbine.nacelle.Np = [3, 3, 1] # (Array): number of planets in each stage
turbine.nacelle.ratio_type = 'optimal' # | |
<filename>archive/pose_graph.py
#!/usr/bin/env python
"""
Data Collection Module for recording data streamed from a phone to construct a pose graph of phone and landmarks positions
at multiple time steps.
by <NAME>, 2018
Last Modified August, 2018
This script will:
- Communicate with Frames class defined in frames.py for broadcasting necessary frames in the tf tree.
- Record data streamed from a phone to construct a pose graph as defined in pose_graph.py
- Record data to a new data file
- Record positions of phone at different time steps as vertices in pose graph.
- Record positions of each april tag seen as vertices in pose graph.
- Record positions of each waypoints as vertices in pose graph.
- Record transformations between consecutive phone positions at two consecutive time stamp as edges.
- Record transformations between phone position and a tag position each time a tag is seen as edges.
- Record transformations between phone position and a waypoint position each time a waypoint is seen as edges.
- Store the data as a pickle file in data/raw_data folder. The default file is named as data_collection.pkl and a copy
of the file is user named.
"""
from collections import OrderedDict
from scipy.spatial.transform import Rotation as R
from os import path, system
import numpy as np
from rospkg import RosPack
from collections import deque
class Vertex(object):
def __init__(self, id, trans, rot, type, fix_status=False):
self.id = id
self.type = type
self.translation = trans
self.rotation = rot
self.fix_status = fix_status
def write_to_g2o(self, datatype="VERTEX_SE3:QUAT "):
"""
Write to g2o for recorded vertices
"""
content = datatype + \
"%i %f %f %f %f %f %f %f\n" % tuple(
[self.id] + self.translation + self.rotation)
if self.fix_status:
return content + "FIX %i\n" % self.id
else:
return content
class Edge(object):
def __init__(self, v_start, v_end, trans, rot, damping_status=False):
self.start = v_start
self.end = v_end
self.translation = trans
self.rotation = rot
self.damping_status = damping_status
self.translation_computed = None
self.rotation_computed = None
self.translation_diff = None
self.rotation_diff = None # in euler angles
self.optimization_cost = None
#### importance ####
self.importance_matrix = None
self.eigenvalue_offset = 10 ** -3
self.odometry_importance = 1
self.tag_importance = 100
self.waypoint_importance = 100
self.yaw_importance = 0.001 # dummy node
self.pitch_importance = 1000
self.roll_importance = 1000
self.eigenvalue_PSD = False
@staticmethod
def null(matrix, rtol=1e-5):
u, s, v = np.linalg.svd(matrix)
rank = (s > rtol * s[0]).sum()
return rank, v[rank:].T.copy()
@staticmethod
def compute_basis_vector(rot, yaw, pitch, roll):
# Generate a rotation matrix to rotate a small amount around the z axis
q2 = R.from_euler('z', 0.05)
# Rotate the current pose by 0.05 radians in yaw (scipy's from_euler defaults to radians)
qsecondrotation = R.from_quat(rot) * q2
# Get difference in rotated pose with current pose.
change = (qsecondrotation.as_quat()[0:3] - rot[0:3])
# Determine which direction is the yaw direction and then make sure that direction is diminished in the information matrix
change = change / np.linalg.norm(change)
v = change / np.linalg.norm(change)
_, u = Edge.null(v[np.newaxis])
basis = np.hstack((v[np.newaxis].T, u))
# place high information content on pitch and roll and low on changes in yaw
I = basis.dot(np.diag([yaw, pitch, roll])).dot(basis.T)
return I
@staticmethod
def convert_uppertri_to_matrix(uppertri, size):
"""
Convert a matrix in uppertriangular form to full matrix form.
"""
tri = np.zeros((size, size))
tri[np.triu_indices(size, 0)] = uppertri
tri_updated = tri + np.tril(tri.T, -1)
return tri_updated
@staticmethod
def convert_matrix_uppertri_list(matrix, size):
return list(matrix[np.triu_indices(size)])
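# Editor's note -- an illustrative sketch, not part of the original code. The
# two helpers above are inverses of each other for symmetric matrices: a 6x6
# symmetric matrix is stored as its 21 upper-triangular entries (row-major)
# and can be reconstructed exactly, e.g.
#
#     m = np.arange(36, dtype=float).reshape(6, 6)
#     m = (m + m.T) / 2.0                                    # make it symmetric
#     upper = Edge.convert_matrix_uppertri_list(m, 6)        # 21 values
#     assert np.allclose(Edge.convert_uppertri_to_matrix(upper, 6), m)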
def compute_importance_matrix(self):
if self.damping_status: # if the edge is for damping correction
I = Edge.compute_basis_vector(self.end.rotation, self.yaw_importance, self.pitch_importance,
self.roll_importance)
# get indices of upper triangular entry of a 3x3 matrix
indices = np.triu_indices(3)
importance = [0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0] + list(I[indices])
# increase the eigenvalues of the rotation importance entries
# (list(...) around range() is required under Python 3)
for ind in np.cumsum([0] + list(range(6, 1, -1)))[3:6]:
importance[ind] += self.eigenvalue_offset
self.importance_matrix = Edge.convert_uppertri_to_matrix(
importance, 6)
elif self.end.type == "tag": # if the edge is between current position and a tag detected
w_t = self.tag_importance
importance = [w_t, 0, 0, 0, 0, 0, w_t, 0, 0,
0, 0, w_t, 0, 0, 0, w_t, 0, 0, w_t, 0, w_t]
self.importance_matrix = Edge.convert_uppertri_to_matrix(
importance, 6)
elif self.end.type == "waypoint": # if the edge is between current position and a waypoint
w_w = self.waypoint_importance
importance = [w_w, 0, 0, 0, 0, 0, w_w, 0, 0,
0, 0, w_w, 0, 0, 0, w_w, 0, 0, w_w, 0, w_w]
self.importance_matrix = Edge.convert_uppertri_to_matrix(
importance, 6)
else: # if the edge is between past pose to current pose
w_o = self.odometry_importance
importance = [w_o, 0, 0, 0, 0, 0, w_o, 0, 0,
0, 0, w_o, 0, 0, 0, w_o, 0, 0, w_o, 0, w_o]
self.importance_matrix = Edge.convert_uppertri_to_matrix(
importance, 6)
def check_importance_matrix_PSD(self):
value = np.linalg.eigvals(self.importance_matrix)
if min(value) < self.eigenvalue_offset and min(value) != 0:
print("Found an unexpectedly low Eigenvalue", min(value))
return False
else:
return True
def write_to_g2o(self, datatype="EDGE_SE3:QUAT "):
"""
Write to g2o for recorded edges
"""
return datatype + "%i %i %f %f %f %f %f %f %f" % tuple(
[self.start.id, self.end.id] + self.translation + self.rotation)
def write_to_g2o_importance(self):
importance_uppertri = Edge.convert_matrix_uppertri_list(
self.importance_matrix, 6)
return "%f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f\n" % tuple(importance_uppertri)
def compute_optimization_cost(self):
transformation = np.array(self.translation_diff + self.rotation_diff)
self.optimization_cost = np.matmul(
np.matmul(transformation, self.importance_matrix), transformation.T)
class PoseGraph(object):
def __init__(self, num_tags=587):
#### tag and waypoints recording specific parameters ####
self.num_tags = num_tags
self.origin_tag = None # First tag seen
self.origin_tag_pose = None
self.supplement_tags = []
self.waypoint_start_id = None
self.waypoint_id_to_name = {}
self.waypoint_x_offset = 0.01
self.distance_traveled = [0, 0, 0]
#### vertices, edges ####
self.odometry_vertices = OrderedDict()
self.odometry_edges = OrderedDict()
self.tag_vertices = OrderedDict()
self.odometry_tag_edges = OrderedDict()
self.waypoints_vertices = OrderedDict()
self.odometry_waypoints_edges = OrderedDict()
#### graph search algorithm parameters ####
self.graph = {}
self.visited_nodes = deque([])
#### g2o parameters ####
self.optimization_cost = None
def add_odometry_vertices(self, id, trans, rot, fix_status):
self.odometry_vertices[id] = Vertex(
id, trans, rot, "odometry", fix_status)
return self.odometry_vertices[id]
def add_odometry_edges(self, v_start, v_end, trans, rot, damping_status):
if v_start.id not in self.odometry_edges.keys():
self.odometry_edges[v_start.id] = {}
self.odometry_edges[v_start.id][v_end.id] = Edge(
v_start, v_end, trans, rot, damping_status)
return self.odometry_edges[v_start.id][v_end.id]
def add_tag_vertices(self, id, trans, rot, transformed_pose):
if self.origin_tag is None:
self.origin_tag = id
self.origin_tag_pose = transformed_pose # make this tag the origin tag
self.tag_vertices[id] = Vertex(id, trans, rot, "tag", True)
print("AR_CALIBRATION: Origin Tag Found: " + str(id))
elif not (id == self.origin_tag or id in self.supplement_tags):
self.supplement_tags.append(id) # set new supplemental AR Tag
self.tag_vertices[id] = Vertex(id, trans, rot, "tag", False)
print("AR_CALIBRATION: Supplementary Tag Found: " + str(id))
print(self.supplement_tags)
elif id == self.origin_tag:
self.origin_tag_pose = transformed_pose # Reset the origin tag
print("AR_CALIBRATION: Origin Tag Refound: " + str(id))
else:
print("AR_CALIBRATION: Found Old Tag: " + str(id))
def add_odometry_tag_edges(self, v_odom, v_tag, trans, rot):
if v_tag.id not in self.odometry_tag_edges.keys():
self.odometry_tag_edges[v_tag.id] = {}
self.odometry_tag_edges[v_tag.id][v_odom.id] = Edge(
v_odom, v_tag, trans, rot)
return self.odometry_tag_edges[v_tag.id][v_odom.id]
def add_waypoint_vertices(self, id, curr_pose):
if id not in self.waypoints_vertices.keys():
self.waypoints_vertices[id] = Vertex(
id, list(curr_pose.translation), list(curr_pose.rotation), "waypoint")
print("AR_CALIBRATION: Waypoint Found: " + str(id))
print(self.waypoints_vertices.keys())
return self.waypoints_vertices[id]
else:
print("AR_CALIBRATION: Found Old Waypoint: " + str(id))
def map_waypoint_name_to_number(self):
self.waypoint_id_to_name = {}
self.waypoint_start_id = sorted(self.odometry_vertices.keys())[-1] + 1
waypoint_id = self.waypoint_start_id
for waypoint in self.waypoints_vertices.keys():
self.waypoint_id_to_name[waypoint_id] = self.waypoints_vertices[waypoint].id
self.waypoints_vertices[waypoint].id = waypoint_id
waypoint_id += 1
def translation_offset_to_waypoint_vertices(self):
self.waypoint_x_offset = 0.01
for waypoint in self.waypoints_vertices.keys():
new_trans = list(self.waypoints_vertices[waypoint].translation)
new_trans[0] += self.waypoint_x_offset
self.waypoints_vertices[waypoint].translation = list(
new_trans) # add a 1 cm offset to the waypoint position
def add_odometry_waypoint_edges(self, v_odom, v_waypoints):
if v_waypoints.id not in self.odometry_waypoints_edges.keys(): # keyed by vertex id (cf. add_odometry_tag_edges)
self.odometry_waypoints_edges[v_waypoints.id] = {}
self.odometry_waypoints_edges[v_waypoints.id][v_odom.id] = Edge(v_odom, v_waypoints,
[self.waypoint_x_offset, 0, 0], [0, 0, 0, 1])
return self.odometry_waypoints_edges[v_waypoints.id][v_odom.id]
def add_damping(self, curr_pose):
"""
Add a vertex and edge for correcting damping
:param curr_pose: Vertex object for current pose
"""
damping_vertex = self.add_odometry_vertices(
curr_pose.id + 1, [0, 0, 0], curr_pose.rotation, True)
damping_edge = self.add_odometry_edges(curr_pose, damping_vertex, [
0, 0, 0], [0, 0, 0, 1], True)
# compute importance matrix
damping_edge.compute_importance_matrix()
if damping_edge.check_importance_matrix_PSD():
damping_edge.eigenvalue_PSD = True
def add_pose_to_pose(self, curr_pose, trans, rot, importance):
"""
Add an edge between vertices of current pose and last pose
:param curr_pose: Vertex object of current pose
:param trans: translation
:param rot: rotation
:param importance: Importance of this new edge
"""
pose_edge = self.add_odometry_edges(
self.odometry_vertices[curr_pose.id - 2], curr_pose, trans, rot, False)
pose_edge.odometry_importance = importance
# compute importance matrix
| |
"""Utilities to support packages."""
z functools zaimportuj singledispatch jako simplegeneric
zaimportuj importlib
zaimportuj importlib.util
zaimportuj importlib.machinery
zaimportuj os
zaimportuj os.path
zaimportuj sys
z types zaimportuj ModuleType
zaimportuj warnings
__all__ = [
'get_importer', 'iter_importers', 'get_loader', 'find_loader',
'walk_packages', 'iter_modules', 'get_data',
'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
]
def _get_spec(finder, name):
    """Return the finder-specific module spec."""
    # Works with legacy finders.
    try:
        find_spec = finder.find_spec
    except AttributeError:
        loader = finder.find_module(name)
        if loader is None:
            return None
        return importlib.util.spec_from_loader(name, loader)
    else:
        return find_spec(name)
def read_code(stream):
    # This helper is needed in order for the PEP 302 emulation to
    # correctly handle compiled files
    import marshal
    magic = stream.read(4)
    if magic != importlib.util.MAGIC_NUMBER:
        return None
    stream.read(8)  # Skip timestamp and size
    return marshal.load(stream)
def walk_packages(path=None, prefix='', onerror=None):
    """Yields (module_loader, name, ispkg) for all modules recursively
    on path, or, if path is None, all accessible modules.
    'path' should be either None or a list of paths to look for
    modules in.
    'prefix' is a string to output on the front of every module name
    on output.
    Note that this function must import all *packages* (NOT all
    modules!) on the given path, in order to access the __path__
    attribute to find submodules.
    'onerror' is a function which gets called with one argument (the
    name of the package which was being imported) if any exception
    occurs while trying to import a package. If no onerror function is
    supplied, ImportErrors are caught and ignored, while all other
    exceptions are propagated, terminating the search.
    Examples:
    # list all modules python can access
    walk_packages()
    # list all submodules of ctypes
    walk_packages(ctypes.__path__, ctypes.__name__+'.')
    """
    def seen(p, m={}):
        if p in m:
            return True
        m[p] = True
    for importer, name, ispkg in iter_modules(path, prefix):
        yield importer, name, ispkg
        if ispkg:
            try:
                __import__(name)
            except ImportError:
                if onerror is not None:
                    onerror(name)
            except Exception:
                if onerror is not None:
                    onerror(name)
                else:
                    raise
            else:
                path = getattr(sys.modules[name], '__path__', None) or []
                # don't traverse path items we've seen before
                path = [p for p in path if not seen(p)]
                yield from walk_packages(path, name+'.', onerror)
def iter_modules(path=None, prefix=''):
    """Yields (module_loader, name, ispkg) for all submodules on path,
    or, if path is None, all top-level modules on sys.path.
    'path' should be either None or a list of paths to look for
    modules in.
    'prefix' is a string to output on the front of every module name
    on output.
    """
    if path is None:
        importers = iter_importers()
    else:
        importers = map(get_importer, path)
    yielded = {}
    for i in importers:
        for name, ispkg in iter_importer_modules(i, prefix):
            if name not in yielded:
                yielded[name] = 1
                yield i, name, ispkg
@simplegeneric
def iter_importer_modules(importer, prefix=''):
    if not hasattr(importer, 'iter_modules'):
        return []
    return importer.iter_modules(prefix)
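# Editor's note -- an illustrative usage sketch, not part of the original
# module. iter_modules() lists the top-level modules visible on a given path
# (the current directory is used here purely as an example):
#
#     for finder, name, ispkg in iter_modules(['.']):
#         print(name, '(package)' if ispkg else '(module)')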
# Implement a file walker for the normal importlib path hook
def _iter_file_finder_modules(importer, prefix=''):
    if importer.path is None or not os.path.isdir(importer.path):
        return
    yielded = {}
    import inspect
    try:
        filenames = os.listdir(importer.path)
    except OSError:
        # ignore unreadable directories like import does
        filenames = []
    filenames.sort()  # handle packages before same-named modules
    for fn in filenames:
        modname = inspect.getmodulename(fn)
        if modname == '__init__' or modname in yielded:
            continue
        path = os.path.join(importer.path, fn)
        ispkg = False
        if not modname and os.path.isdir(path) and '.' not in fn:
            modname = fn
            try:
                dircontents = os.listdir(path)
            except OSError:
                # ignore unreadable directories like import does
                dircontents = []
            for fn in dircontents:
                subname = inspect.getmodulename(fn)
                if subname == '__init__':
                    ispkg = True
                    break
            else:
                continue  # not a package
        if modname and '.' not in modname:
            yielded[modname] = 1
            yield prefix + modname, ispkg
iter_importer_modules.register(
    importlib.machinery.FileFinder, _iter_file_finder_modules)
def _import_imp():
    global imp
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', PendingDeprecationWarning)
        imp = importlib.import_module('imp')
class ImpImporter:
    """PEP 302 Importer that wraps Python's "classic" import algorithm
    ImpImporter(dirname) produces a PEP 302 importer that searches that
    directory. ImpImporter(None) produces a PEP 302 importer that searches
    the current sys.path, plus any modules that are frozen or built-in.
    Note that ImpImporter does not currently support being used by placement
    on sys.meta_path.
    """
    def __init__(self, path=None):
        global imp
        warnings.warn("This emulation is deprecated, use 'importlib' instead",
                      DeprecationWarning)
        _import_imp()
        self.path = path
    def find_module(self, fullname, path=None):
        # Note: we ignore 'path' argument since it is only used via meta_path
        subname = fullname.split(".")[-1]
        if subname != fullname and self.path is None:
            return None
        if self.path is None:
            path = None
        else:
            path = [os.path.realpath(self.path)]
        try:
            file, filename, etc = imp.find_module(subname, path)
        except ImportError:
            return None
        return ImpLoader(fullname, file, filename, etc)
    def iter_modules(self, prefix=''):
        if self.path is None or not os.path.isdir(self.path):
            return
        yielded = {}
        import inspect
        try:
            filenames = os.listdir(self.path)
        except OSError:
            # ignore unreadable directories like import does
            filenames = []
        filenames.sort()  # handle packages before same-named modules
        for fn in filenames:
            modname = inspect.getmodulename(fn)
            if modname == '__init__' or modname in yielded:
                continue
            path = os.path.join(self.path, fn)
            ispkg = False
            if not modname and os.path.isdir(path) and '.' not in fn:
                modname = fn
                try:
                    dircontents = os.listdir(path)
                except OSError:
                    # ignore unreadable directories like import does
                    dircontents = []
                for fn in dircontents:
                    subname = inspect.getmodulename(fn)
                    if subname == '__init__':
                        ispkg = True
                        break
                else:
                    continue  # not a package
            if modname and '.' not in modname:
                yielded[modname] = 1
                yield prefix + modname, ispkg
class ImpLoader:
    """PEP 302 Loader that wraps Python's "classic" import algorithm
    """
    code = source = None
    def __init__(self, fullname, file, filename, etc):
        warnings.warn("This emulation is deprecated, use 'importlib' instead",
                      DeprecationWarning)
        _import_imp()
        self.file = file
        self.filename = filename
        self.fullname = fullname
        self.etc = etc
    def load_module(self, fullname):
        self._reopen()
        try:
            mod = imp.load_module(fullname, self.file, self.filename, self.etc)
        finally:
            if self.file:
                self.file.close()
        # Note: we don't set __loader__ because we want the module to look
        # normal; i.e. this is just a wrapper for standard import machinery
        return mod
    def get_data(self, pathname):
        with open(pathname, "rb") as file:
            return file.read()
    def _reopen(self):
        if self.file and self.file.closed:
            mod_type = self.etc[2]
            if mod_type == imp.PY_SOURCE:
                self.file = open(self.filename, 'r')
            elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
                self.file = open(self.filename, 'rb')
    def _fix_name(self, fullname):
        if fullname is None:
            fullname = self.fullname
        elif fullname != self.fullname:
            raise ImportError("Loader for module %s cannot handle "
                              "module %s" % (self.fullname, fullname))
        return fullname
    def is_package(self, fullname):
        fullname = self._fix_name(fullname)
        return self.etc[2] == imp.PKG_DIRECTORY
    def get_code(self, fullname=None):
        fullname = self._fix_name(fullname)
        if self.code is None:
            mod_type = self.etc[2]
            if mod_type == imp.PY_SOURCE:
                source = self.get_source(fullname)
                self.code = compile(source, self.filename, 'exec')
            elif mod_type == imp.PY_COMPILED:
                self._reopen()
                try:
                    self.code = read_code(self.file)
                finally:
                    self.file.close()
            elif mod_type == imp.PKG_DIRECTORY:
                self.code = self._get_delegate().get_code()
        return self.code
    def get_source(self, fullname=None):
        fullname = self._fix_name(fullname)
        if self.source is None:
            mod_type = self.etc[2]
            if mod_type == imp.PY_SOURCE:
                self._reopen()
                try:
                    self.source = self.file.read()
                finally:
                    self.file.close()
            elif mod_type == imp.PY_COMPILED:
                if os.path.exists(self.filename[:-1]):
                    with open(self.filename[:-1], 'r') as f:
                        self.source = f.read()
            elif mod_type == imp.PKG_DIRECTORY:
                self.source = self._get_delegate().get_source()
        return self.source
    def _get_delegate(self):
        finder = ImpImporter(self.filename)
        spec = _get_spec(finder, '__init__')
        return spec.loader
    def get_filename(self, fullname=None):
        fullname = self._fix_name(fullname)
        mod_type = self.etc[2]
        if mod_type == imp.PKG_DIRECTORY:
            return self._get_delegate().get_filename()
        elif mod_type in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
            return self.filename
        return None
try:
    import zipimport
    from zipimport import zipimporter
    def iter_zipimport_modules(importer, prefix=''):
        dirlist = sorted(zipimport._zip_directory_cache[importer.archive])
        _prefix = importer.prefix
        plen = len(_prefix)
        yielded = {}
        import inspect
        for fn in dirlist:
            if not fn.startswith(_prefix):
                continue
            fn = fn[plen:].split(os.sep)
            if len(fn) == 2 and fn[1].startswith('__init__.py'):
                if fn[0] not in yielded:
                    yielded[fn[0]] = 1
                    yield fn[0], True
            if len(fn) != 1:
                continue
            modname = inspect.getmodulename(fn[0])
            if modname == '__init__':
                continue
            if modname and '.' not in modname and modname not in yielded:
                yielded[modname] = 1
                yield prefix + modname, False
    iter_importer_modules.register(zipimporter, iter_zipimport_modules)
except ImportError:
    pass
def get_importer(path_item):
    """Retrieve a PEP 302 importer for the given path item
    The returned importer is cached in sys.path_importer_cache
    if it was newly created by a path hook.
    The cache (or part of it) can be cleared manually if a
    rescan of sys.path_hooks is necessary.
    """
    try:
        importer = sys.path_importer_cache[path_item]
    except KeyError:
        for path_hook in sys.path_hooks:
            try:
                importer = path_hook(path_item)
                sys.path_importer_cache.setdefault(path_item, importer)
                break
            except ImportError:
                pass
        else:
            importer = None
    return importer
def iter_importers(fullname=""):
    """Yield PEP 302 importers for the given module name
    If fullname contains a '.', the importers will be for the package
    containing fullname, otherwise they will be all registered top level
    importers (i.e. those on both sys.meta_path and sys.path_hooks).
    If the
object_bounding_boxes, placements
):
break
placement_trial_count += 1
if placement_trial_count > max_placement_trial_count_per_object:
return np.zeros((n_objects, len(placement))), False
placements.append(placement)
return np.array(placements), True
def place_objects_in_grid(
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
placement_area: PlacementArea,
random_state: np.random.RandomState,
max_num_trials: int = 5,
) -> Tuple[np.ndarray, bool]:
"""
Place objects within rectangular boundaries by dividing the placement area into a grid of cells
of equal size, and then randomly sampling cells for each object to be placed in.
:param object_bounding_boxes: matrix of bounding boxes (num_objects, 2, 3) where [:, 0, :]
contains the center position of the bounding box in Cartesian space relative to the body's
frame of reference and where [:, 1, :] contains the half-width, half-height, and half-depth
of the object.
:param table_dimensions: Tuple (table_pos, table_size, table_height) defining dimension of
the table where
table_pos: position of the table.
table_size: half-size of the table along (x, y, z).
table_height: height of the table.
:param placement_area: the placement area in which to place objects.
:param random_state: numpy random state to use to shuffle placement positions
:param max_num_trials: maximum number of trials to run (a trial will fail if there is overlap
detected between any two placements; generally this shouldn't happen with this algorithm)
:return: Tuple[np.ndarray, bool], where the array is of size (num_objects, 3) with columns set
to the x, y, z coordinates of objects relative to the world frame, and the boolean
indicates whether the placement is valid.
"""
offset_x, offset_y, _ = placement_area.offset
width, height, _ = placement_area.size
table_pos, table_size, table_height = table_dimensions
def _get_global_placement(placement: np.ndarray):
return placement + [offset_x, offset_y, 0.0] - table_size + table_pos
# 1. Determine the number of rows and columns of the grid, based on the largest object width
# and height.
total_object_area = 0.0
n_objects = object_bounding_boxes.shape[0]
max_obj_height = 0.0
max_obj_width = 0.0
for i in range(n_objects):
# Bounding boxes are in half-sizes.
obj_width = object_bounding_boxes[i, 1, 0] * 2
obj_height = object_bounding_boxes[i, 1, 1] * 2
max_obj_height = max(max_obj_height, obj_height)
max_obj_width = max(max_obj_width, obj_width)
object_area = obj_width * obj_height
total_object_area += object_area
n_columns = int(width // max_obj_width)
n_rows = int(height // max_obj_height)
n_cells = n_columns * n_rows
cell_width = width / n_columns
cell_height = height / n_rows
if n_cells < n_objects:
# Cannot find a valid placement via this method; give up.
logging.warning(
f"Unable to fit {n_objects} objects into placement area with {n_cells} cells"
)
return np.zeros(shape=(n_objects, 3)), False
for trial_i in range(max_num_trials):
placement_valid = True
placements: List[Tuple[NumType, NumType, NumType]] = []
# 2. Initialize an array with all valid cell coordinates.
# Create an array of shape (n_rows, n_columns, 2) where each element contains the row,col
# coord
coords = np.dstack(np.mgrid[0:n_rows, 0:n_columns])
# Create a shuffled list where ever entry is a valid (row, column) coordinate.
coords = np.reshape(coords, (n_rows * n_columns, 2))
random_state.shuffle(coords)
coords = list(coords)
# 3. Place each object into a randomly selected cell.
for object_idx in range(n_objects):
row, col = coords.pop()
pos, size = object_bounding_boxes[object_idx]
prop_x = cell_width * col + size[0] - pos[0]
prop_y = cell_height * row + size[1] - pos[1]
# Reference is to (xmin, ymin, zmin) of table.
prop_z = object_bounding_boxes[object_idx, 1, -1] + 2 * table_size[-1]
prop_z -= object_bounding_boxes[object_idx, 0, -1]
placement = _get_global_placement(np.array([prop_x, prop_y, prop_z]))
b1_x, b1_y = placement[:2]
if not _is_valid_proposal(
b1_x, b1_y, object_idx, object_bounding_boxes, placements
):
placement_valid = False
logging.warning(f"Trial {trial_i} failed on object {object_idx}")
break
placements.append(placement)
if placement_valid:
assert (
len(placements) == n_objects
), "There should be a placement for every object"
break
return np.array(placements), placement_valid
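# Editor's note -- a hedged usage sketch, not part of the original module. The
# concrete numbers are made up for illustration, and it assumes PlacementArea
# can be constructed directly from its offset and size fields; in the real
# environment these values come from the simulation:
#
#     rng = np.random.RandomState(0)
#     boxes = np.array([[[0.0, 0.0, 0.0], [0.02, 0.02, 0.02]]] * 4)  # 4 cubes, 2 cm half-size
#     table_dims = (np.array([1.3, 0.75, 0.4]),   # table_pos
#                   np.array([0.625, 0.4, 0.4]),  # table half-size
#                   0.8)                          # table_height
#     area = PlacementArea(offset=(0.1, 0.1, 0.0), size=(0.6, 0.4, 0.2))
#     placements, ok = place_objects_in_grid(boxes, table_dims, area, random_state=rng)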
def place_objects_with_no_constraint(
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
placement_area: PlacementArea,
max_placement_trial_count: int,
max_placement_trial_count_per_object: int,
random_state: np.random.RandomState,
) -> Tuple[np.ndarray, bool]:
"""
Place objects within rectangular boundaries without any extra constraint.
:param object_bounding_boxes: matrix of bounding boxes (num_objects, 2, 3) where [:, 0, :]
contains the center position of the bounding box in Cartesian space relative to the body's
frame of reference and where [:, 1, :] contains the half-width, half-height, and half-depth
of the object.
:param table_dimensions: Tuple (table_pos, table_size, table_height) defining dimension of
the table where
table_pos: position of the table.
table_size: half-size of the table along (x, y, z).
table_height: height of the table.
:param placement_area: the placement area in which to place objects
:param max_placement_trial_count: To prevent an infinite loop caused by target placements,
max_placement_trial_count should be set to a finite positive number.
:param max_placement_trial_count_per_object: To prevent an infinite loop caused by target
placements, max_placement_trial_count_per_object should be set to a finite positive number.
:param random_state: numpy RandomState to use for sampling
:return: np.ndarray of size (num_objects, 3) where columns are x, y, z coordinates of objects
relative to the world frame and boolean indicating whether if the proposal is valid.
"""
def _get_placement_proposal(object_idx):
# randomly place the object within the bounds
pos, size = object_bounding_boxes[object_idx]
offset_x, offset_y, _ = placement_area.offset
width, height, _ = placement_area.size
x, y = random_state.uniform(
low=(size[0], size[1]), high=(width - size[0], height - size[1])
)
x -= pos[0]
y -= pos[1]
return x, y
return _place_objects(
object_bounding_boxes,
table_dimensions,
placement_area,
_get_placement_proposal,
max_placement_trial_count,
max_placement_trial_count_per_object,
)
def place_targets_with_fixed_position(
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
placement_area: PlacementArea,
target_placements: np.ndarray,
):
"""
Place target object according to specified placement positions.
:param object_bounding_boxes: matrix of bounding boxes (num_objects, 2, 3) where [:, 0, :]
contains the center position of the bounding box in Cartesian space relative to the body's
frame of reference and where [:, 1, :] contains the half-width, half-height, and half-depth
of the object.
:param table_dimensions: Tuple (table_pos, table_size, table_height) defining dimension of
the table where
table_pos: position of the table.
table_size: half-size of the table along (x, y, z).
table_height: height of the table.
:param placement_area: the placement area in which to place objects
:param target_placements: Placement positions (x, y) relative to the placement area. Normalized
to [0, 1]
:return: Global placement positions (x, y, z) for all objects.
"""
def _get_placement_proposal(object_idx):
width, height, _ = placement_area.size
return target_placements[object_idx] * [width, height]
return _place_objects(
object_bounding_boxes,
table_dimensions,
placement_area,
_get_placement_proposal,
max_placement_trial_count=1,
max_placement_trial_count_per_object=1,
run_collision_check=False,
)
def place_targets_with_goal_distance_ratio(
object_bounding_boxes: np.ndarray,
table_dimensions: Tuple[np.ndarray, np.ndarray, float],
placement_area: PlacementArea,
object_placements: np.ndarray,
goal_distance_ratio: float,
goal_distance_min: float,
max_placement_trial_count: int,
max_placement_trial_count_per_object: int,
random_state: np.random.RandomState,
) -> Tuple[np.ndarray, bool]:
"""
Place targets around objects with goal distance.
:param object_bounding_boxes: matrix of bounding boxes (num_objects, 2, 3) where [:, 0, :]
contains the center position of the bounding box in Cartesian space relative to the body's
frame of reference and where [:, 1, :] contains the half-width, half-height, and half-depth
of the object.
:param table_dimensions: Tuple (table_pos, table_size, table_height) defining dimension of
the table where
table_pos: position of the table.
table_size: half-size of the table along (x, y, z).
table_height: height of the table.
:param placement_area: the placement area in which to place objects
:param object_placements: placements of boxes - this is the result of place_objects
:param goal_distance_ratio: the goal is uniformly sampled first and then the distance between the
object and the goal is shrunk. The shrunk distance is the original distance times
goal_distance_ratio.
:param goal_distance_min: minimum goal distance to ensure that the goal is not too close to the
object position.
:param max_placement_trial_count: To prevent an infinite loop caused by target placements,
max_placement_trial_count should be set to a finite positive number.
:param max_placement_trial_count_per_object: To prevent an infinite loop caused by target
placements, max_placement_trial_count_per_object should be set to a finite positive number.
:param random_state: numpy RandomState to use for sampling
:return: np.ndarray of size (num_objects, 3) where columns are x, y coordinates of objects
and boolean indicating whether if the proposal is valid.
"""
def _get_placement_proposal(object_idx):
# Sample goal position relative to table area
pos, size = object_bounding_boxes[object_idx]
offset_x, offset_y, _ = placement_area.offset
width, height, _ = placement_area.size
gx, gy = random_state.uniform(
low=(size[0], size[1]), high=(width - size[0], height - size[1])
)
# Retrieve object position relative to table area
table_pos, table_size, table_height = table_dimensions
object_place = (
object_placements[object_idx]
- [offset_x, offset_y, 0.0]
+ table_size
- table_pos
)
x = object_place[0] + pos[0]
y = object_place[1] + pos[1]
| |
#
# Blue Haze
# 19 Oct 2020
#
# <NAME> - <EMAIL>
# <NAME> - <EMAIL>
#
# todo: stop / terminate the hardware when the app closes
from PySide2.QtMultimedia import *
from PySide2.QtMultimediaWidgets import QCameraViewfinder
from PySide2.QtGui import QFont
from PySide2.QtCore import Qt, QDir
from glob import glob
from recordSession import RecordSession
from pathlib import Path
from feedback import *
import os
import sys
import asyncio
import nest_asyncio
import threading
import modules.utils as utls
import modules.config as cfg
import modules.ui as ui
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
# window basic properties
self.setWindowTitle('Blue Haze')
self.setFixedSize(cfg.UI_WIDTH, cfg.UI_HEIGHT)
# tab widget
self.tab_widget = QTabWidget()
# setup group
self.complete_session_name = None
self.session_name = QLineEdit()
self.video_file_path = QLineEdit()
self.list_cameras = QComboBox()
self.list_audio_devices = QComboBox()
self.list_audio_devices.setDuplicatesEnabled(False)
self.list_backing_tracks = QComboBox()
self.PLAY_BACKING_TRACK = 'Play backing track'
self.play_stop_backing_track_button = QPushButton(self.PLAY_BACKING_TRACK)
# check if debug is on & auto set
# session name & video file path fields
if cfg.DEBUG:
self.session_name.setText('test')
self.video_file_path.setText(str(Path.home() / 'Documents' / 'hdi'))  # pathlib join avoids invalid '\D' escape sequences
# hardware
self.bullet_bitalino_label = QLabel()
self.bitalino_label = QLabel('Bitalino')
self.bullet_brainbit_label = QLabel()
self.brainbit_label = QLabel('Brainbit')
self.bullet_realsense_label = QLabel()
self.realsense_label = QLabel('RealSense camera')
self.bullet_picoboard_label = QLabel()
self.picoboard_label = QLabel('Picoboard')
self.hardware_status = {'Bitalino': not cfg.HARDWARE,
'Brainbit': not cfg.HARDWARE,
'RealSense': not cfg.HARDWARE}
self.picoboard_status = False
# record bottom area
self.record_stop_button = QPushButton('Record session')
self.record_stop_button.setEnabled(not cfg.HARDWARE)
self.recording_label = QLabel()
# mic volume
self.volume_slider = QSlider()
self.volume_slider.setOrientation(Qt.Horizontal)
self.volume_slider.setTickInterval(10)
self.volume_slider.setMinimum(1)
self.volume_slider.setMaximum(100)
self.volume_slider.setValue(cfg.UI_INITIAL_MIC_VOLUME)
self.volume_slider.valueChanged.connect(self.change_value_mic_volume_label)
self.volume_slider_label = QLabel('30')
# states
self.recording = False
# objects
self.backing_track_player = PlayAudioTrack(parent=self)
self.view_finder = QCameraViewfinder()
# hardware setup
self.get_list_cameras()
self.get_list_audio_devices()
self.get_list_backing_tracks()
if cfg.HARDWARE:
self.setup_hw()
# feedback
self.feedback = Feedback(parent=self)
# ui setup
self.setup_ui()
# start the camera
self.camera = QCamera(self.list_cameras.currentData())
self.start_camera()
# see
# https://stackoverflow.com/questions/46827007/runtimeerror-this-event-loop-is-already-running-in-python
nest_asyncio.apply()
# record session object
self.record_session = RecordSession(parent=self)
def setup_hw(self):
init_hardware = utls.Hardware(parent=self)
# picoboard, realsense, bitalino and brainbit init
if not self.picoboard_status:
threading.Thread(target=init_hardware.start_picoboard).start()
if not self.hardware_status['RealSense']:
threading.Thread(target=init_hardware.start_realsense).start()
if not self.hardware_status['Brainbit']:
threading.Thread(target=init_hardware.start_brainbit).start()
if not self.hardware_status['Bitalino']:
threading.Thread(target=init_hardware.start_bitalino).start()
def setup_ui(self):
record_tab_widget = QWidget()
record_tab_widget.setLayout(self.ui_tab_record_tab_widget())
feedback_tab_widget = QWidget()
feedback_tab_widget.setLayout(self.feedback.ui_tab_feedback_tab_widget())
self.tab_widget.addTab(record_tab_widget, 'Record')
self.tab_widget.addTab(feedback_tab_widget, 'Feedback')
self.tab_widget.currentChanged.connect(self.tab_changed)
# disable the feedback tab
self.tab_widget.setTabEnabled(1, False)
# let's add some margin/breathing space to it!
main_layout = QHBoxLayout()
main_layout.setContentsMargins(20, 25, 20, 20)
main_layout.addWidget(self.tab_widget)
main_widget = QWidget()
main_widget.setLayout(main_layout)
self.setCentralWidget(main_widget)
# connect the record/stop button signal
self.record_stop_button.clicked.connect(self.action_record_stop_button)
def ui_tab_record_tab_widget(self):
# fields & hardware
fields_and_hardware = QGridLayout()
# hardware
hardware_group_box = QGroupBox()
hardware_list = QGridLayout()
hardware_list.setSpacing(5)
# fields
fields_group_box = QGroupBox()
fields = QGridLayout()
fields.setSpacing(8)
# session name
session_name_label = QLabel('Session name: ')
# video file path
video_path_file_label = QLabel('Video/Audio path: ')
folder_browser_button = QPushButton('Browse directories')
# connect the folder_browser_button signal
folder_browser_button.clicked.connect(self.show_folder_browser)
# camera selection
list_cameras_label = QLabel('Available cameras: ')
refresh_cameras_button = QPushButton('Refresh cameras')
# connect the button signal
refresh_cameras_button.clicked.connect(self.refresh_cameras)
# connect the list of cameras signal
self.list_cameras.activated[str].connect(self.change_camera)
# audio input selection
list_audio_label = QLabel('Available audio input: ')
refresh_audio_input_button = QPushButton('Refresh audio input')
# connect the refresh_audio_input_button signal
refresh_audio_input_button.clicked.connect(self.refresh_audio_input)
# backing track selection
list_backing_tracks_label = QLabel('Available backing tracks: ')
# connect the play_stop_backing_track_button signal
self.play_stop_backing_track_button.clicked.connect(self.play_stop_backing_track)
# mic volume slider
mic_volume_slider_label = QLabel('Mic volume: ')
# fields layout
# session name
fields.addWidget(session_name_label, 0, 0)
fields.addWidget(self.session_name, 0, 1)
# video path
fields.addWidget(video_path_file_label, 1, 0)
fields.addWidget(self.video_file_path, 1, 1)
fields.addWidget(folder_browser_button, 1, 2)
# cameras
fields.addWidget(list_cameras_label, 2, 0)
fields.addWidget(self.list_cameras, 2, 1)
fields.addWidget(refresh_cameras_button, 2, 2)
# audio
fields.addWidget(list_audio_label, 3, 0)
fields.addWidget(self.list_audio_devices, 3, 1)
fields.addWidget(refresh_audio_input_button, 3, 2)
# backing tracks
fields.addWidget(list_backing_tracks_label, 4, 0)
fields.addWidget(self.list_backing_tracks, 4, 1)
fields.addWidget(self.play_stop_backing_track_button, 4, 2)
# mic volume slider
fields.addWidget(mic_volume_slider_label, 5, 0)
fields.addWidget(self.volume_slider, 5, 1)
fields.addWidget(self.volume_slider_label, 5, 2)
fields_group_box.setLayout(fields)
# hardware
self.bullet_bitalino_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_idle.png')
self.bullet_brainbit_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_idle.png')
self.bullet_realsense_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_idle.png')
self.bullet_picoboard_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_idle.png')
refresh_hardware_button = QPushButton('Refresh hardware')
# connect the refresh hardware button
refresh_hardware_button.clicked.connect(self.setup_hw)
hardware_list.addWidget(self.bullet_bitalino_label, 0, 0)
hardware_list.addWidget(self.bitalino_label, 0, 1)
hardware_list.addWidget(self.bullet_brainbit_label, 1, 0)
hardware_list.addWidget(self.brainbit_label, 1, 1)
hardware_list.addWidget(self.bullet_realsense_label, 2, 0)
hardware_list.addWidget(self.realsense_label, 2, 1)
hardware_list.addWidget(self.bullet_picoboard_label, 3, 0)
hardware_list.addWidget(self.picoboard_label, 3, 1)
hardware_list.addWidget(refresh_hardware_button, 4, 1, 2, 2)
hardware_list.setRowStretch(5, 1)
hardware_group_box.setLayout(hardware_list)
fields_and_hardware.addWidget(fields_group_box, 0, 0)
fields_and_hardware.addWidget(hardware_group_box, 0, 1)
# viewfinder
view_finder_group_box = QGroupBox()
view_finder_group_box.setMinimumHeight(630)
view_finder_layout = QGridLayout()
view_finder_layout.addWidget(self.view_finder, 1, 1)
view_finder_group_box.setLayout(view_finder_layout)
# record/stop button
record_button_group_box = QGroupBox()
record_button_layout = QHBoxLayout()
# rec image
self.recording_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'gray_rec.png')
record_button_layout.addStretch(1)
record_button_layout.addWidget(self.recording_label)
record_button_layout.addWidget(self.record_stop_button)
record_button_group_box.setLayout(record_button_layout)
# layout
record_tab_layout = QVBoxLayout()
record_tab_layout.addLayout(fields_and_hardware)
record_tab_layout.addWidget(view_finder_group_box)
record_tab_layout.addWidget(record_button_group_box)
return record_tab_layout
def change_value_mic_volume_label(self):
self.volume_slider_label.setText(str(self.volume_slider.value()))
@Slot(dict)
def enable_disable_recording_tab(self, action):
if action['disable']:
self.tab_widget.setTabEnabled(0, False)
else:
self.tab_widget.setTabEnabled(0, True)
@Slot(dict)
def get_complete_session_name(self, complete_session_name):
self.complete_session_name = complete_session_name['session_name']
    @Slot()
def action_record_stop_button(self):
# check if the session name & video path file fields are filled
if not self.session_name.text() or not self.video_file_path.text():
            self.error_dialog('Please fill in both the Session Name and the Video/Audio Path!')
return
# check if the directory exists
if not QDir(self.video_file_path.text()).exists():
self.error_dialog('The directory {} does not exist!'.format(self.video_file_path.text()))
return
# check if the directory is writable
if not os.access(self.video_file_path.text(), os.W_OK):
self.error_dialog('The directory {} is not writable!'.format(self.video_file_path.text()))
return
if self.recording:
# it is already recording
# we will stop the session
self.recording_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'gray_rec.png')
self.record_stop_button.setText('Record session')
# enable fields
self.session_name.setEnabled(True)
self.video_file_path.setEnabled(True)
self.list_cameras.setEnabled(True)
self.list_audio_devices.setEnabled(True)
self.list_backing_tracks.setEnabled(True)
self.volume_slider.setEnabled(True)
# stop session
self.record_session.stop()
# restart camera
self.wait_for_video_process()
else:
# it is not yet recording
# we will start the session
            # On macOS it is possible to keep showing the camera
            # in the GUI while recording from the SAME camera.
            # Unfortunately, that is not possible on Linux or
            # Windows, which is why we stop the camera here and
            # restart it after the recording is finished.
self.camera.stop()
self.recording_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'red_rec.png')
self.record_stop_button.setText('Recording… Press here to stop')
# disable fields
self.session_name.setEnabled(False)
self.video_file_path.setEnabled(False)
self.list_cameras.setEnabled(False)
self.list_audio_devices.setEnabled(False)
self.list_backing_tracks.setEnabled(False)
self.volume_slider.setEnabled(False)
# start session
if self.backing_track_player.state() == QMediaPlayer.State.PlayingState:
self.backing_track_player.stop()
self.record_session.start_recording(self.session_name.text(),
self.video_file_path.text(),
self.list_cameras.currentData().description(),
self.list_audio_devices.currentData(),
self.list_backing_tracks.currentText(),
int(self.volume_slider.value()))
self.recording = not self.recording
@Slot()
def tab_changed(self, i):
if i == 1:
# to reload / update the list of collections in the feedback tab &
# start the thread to read data from the picoboard
self.feedback.setup(self.complete_session_name)
else:
# stop the player & restart the button states if it is playing
            if self.feedback.player.state() == self.feedback.PLAYING:
self.feedback.stop()
# terminate the thread to read data from the picoboard
# exit & quit simply don't work
self.feedback.thread.terminate()
@Slot()
def show_folder_browser(self):
folder_dialog = QFileDialog()
folder_dialog.setOption(QFileDialog.ShowDirsOnly)
folder_dialog.setFileMode(QFileDialog.Directory)
if folder_dialog.exec_():
self.video_file_path.setText(folder_dialog.directory().absolutePath())
@Slot()
def refresh_audio_input(self):
self.list_audio_devices.clear()
self.get_list_audio_devices()
@Slot()
def change_backing_track(self):
self.list_backing_tracks.clear()
self.get_list_backing_tracks()
@Slot()
def refresh_cameras(self):
self.list_cameras.clear()
self.get_list_cameras()
@Slot()
def change_camera(self):
self.camera.stop()
self.camera = QCamera(self.list_cameras.currentData())
self.camera.setCaptureMode(QCamera.CaptureVideo)
self.start_camera()
@Slot()
def play_stop_backing_track(self):
if self.backing_track_player.state() == QMediaPlayer.State.PlayingState:
self.backing_track_player.stop()
self.play_stop_backing_track_button.setText(self.PLAY_BACKING_TRACK)
# disable field
self.list_backing_tracks.setEnabled(True)
else:
backing_track_file = '{}{}'.format(cfg.ASSETS_BACKING_AUDIO_FOLDER, self.list_backing_tracks.currentText())
self.backing_track_player.setup_media(backing_track_file)
self.backing_track_player.play()
self.play_stop_backing_track_button.setText('Stop backing track')
# enable field
self.list_backing_tracks.setEnabled(False)
def wait_for_video_process(self):
loop = asyncio.get_event_loop()
async_function = asyncio.wait([self.check_video_process_terminate()])
loop.run_until_complete(async_function)
    async def check_video_process_terminate(self):
        while True:
            if self.record_session.video_process.poll() is not None:
                # extract audio from video
                self.record_session.extract_audio_from_video()
                break
            # avoid a busy loop while the video process is still running
            await asyncio.sleep(0.5)
self.change_camera()
def start_camera(self):
self.camera.setViewfinder(self.view_finder)
self.camera.start()
def get_list_cameras(self):
# list the available cameras
for camera_info in QCameraInfo.availableCameras():
# do not list RealSense Camera
if 'Intel' not in camera_info.description():
self.list_cameras.addItem(camera_info.description(), camera_info)
def get_list_audio_devices(self):
temp_list = []
# list the available audio devices
for device_info in QAudioDeviceInfo.availableDevices(QAudio.AudioInput):
if device_info.deviceName() not in temp_list:
self.list_audio_devices.addItem(device_info.deviceName(), device_info)
temp_list.append(device_info.deviceName())
def get_list_backing_tracks(self):
        # list the available audio backing tracks
backing_tracks_folder = '{}*wav'.format(cfg.ASSETS_BACKING_AUDIO_FOLDER)
for backing_track in glob(backing_tracks_folder):
trackname = os.path.basename(backing_track)
self.list_backing_tracks.addItem(trackname)
def error_dialog(self, message):
error_dialog = QMessageBox(self)
error_dialog.setText(message)
error_dialog.setWindowTitle('Blue Haze - Error')
error_dialog.setIcon(QMessageBox.Critical)
error_dialog.setStandardButtons(QMessageBox.Ok)
error_dialog.exec_()
# slot to get info from hardware initialization
@Slot(dict)
def hw_init_status(self, status):
"""
        status:
            'from'   : type of hardware
            'result' : True  -> Ok
                       False -> Error
        """
if status['result']:
if status['from'] == 'RealSense':
self.bullet_realsense_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_ok.png')
self.realsense_label.setStyleSheet('QLabel { color: GreenYellow; }')
self.hardware_status['RealSense'] = True
elif status['from'] == 'Bitalino':
self.bullet_bitalino_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_ok.png')
self.bitalino_label.setStyleSheet('QLabel { color: GreenYellow; }')
self.hardware_status['Bitalino'] = True
elif status['from'] == 'BrainBit':
self.bullet_brainbit_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_ok.png')
self.brainbit_label.setStyleSheet('QLabel { color: GreenYellow; }')
self.hardware_status['Brainbit'] = True
elif status['from'] == 'Picoboard':
self.bullet_picoboard_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_ok.png')
self.picoboard_label.setStyleSheet('QLabel { color: GreenYellow; }')
self.tab_widget.setTabEnabled(1, True)
self.picoboard_status = True
if False not in self.hardware_status.values():
self.record_stop_button.setEnabled(True)
        elif not status['result']:
if status['from'] == 'RealSense':
self.bullet_realsense_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_error.png')
self.realsense_label.setStyleSheet('QLabel { color: red; }')
elif status['from'] == 'Bitalino':
self.bullet_bitalino_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_error.png')
self.bitalino_label.setStyleSheet('QLabel { color: red; }')
elif status['from'] == 'BrainBit':
self.bullet_brainbit_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_error.png')
self.brainbit_label.setStyleSheet('QLabel { color: red; }')
elif status['from'] == 'Picoboard':
self.bullet_picoboard_label.setPixmap(cfg.ASSETS_IMAGES_FOLDER + 'hardware_error.png')
self.picoboard_label.setStyleSheet('QLabel { color: red; }')
self.tab_widget.setTabEnabled(1, False)
self.picoboard_status = False
self.error_dialog('Error initializing {}. Please check the connections.'
.format(status['from']))
    @Slot()
def player_track_end(self):
self.play_stop_backing_track_button.setText(self.PLAY_BACKING_TRACK)
if __name__ == '__main__':
    # UI
    # create the application and show the main window
    app = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 21 17:49:21 2013
@author: <NAME>
Minor edits by <NAME>
"""
import numpy as np
from matplotlib import pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.patches import Ellipse
from matplotlib import animation
import matplotlib.gridspec as gridspec
# Font to be used for labels on the plot
font = {'size' : 9}
plt.rc('font', **font)
# Setup figure and subplots
# Size, dpi and Title for Plot window
#fig = plt.figure(num = 'Orbit Simulator', figsize = (12,8.5),dpi = 100)
fig = plt.figure(num = 'Orbit Simulator', figsize = (9.5,6.75),dpi = 100)
# Divide in 3x3 grid, set area to be used on the plot
gs = gridspec.GridSpec(3, 3)
gs.update(left=0.07, right=0.95, wspace=0.15)
#ax = fig.add_subplot(gs[0,:-1], aspect ='equal', projection = '3d') # Maybe use to implement 3D view
# Define the main subplot where orbits are shown
ax = fig.add_subplot(gs[0:,:-1], aspect = 'equal')
ax.set_ylabel('Distance (in AU)')
plt.setp(ax.get_xticklabels(), visible=False) # Set xaxis tick labels to be invisible
ax.text(0.01, 0.01, 'As seen by Observer',
verticalalignment='bottom', horizontalalignment='left',
transform=ax.transAxes,
color='Black', fontsize=12)
# Define the subplot where the velocity profile is shown
ax2 = fig.add_subplot(gs[:-1,-1], aspect = 'auto')
ax2.set_xlabel('Time (in Years)')
ax2.yaxis.tick_right()
ax2.set_ylabel('Velocity (in km/s)')
ax2.locator_params(nbins=6) # limit number of x-ticks
# Define subplot where the Top view of the orbit is shown
ax3 = fig.add_subplot(gs[0,0], aspect = 'equal')
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
ax3.text(0.1, 0.99, 'Orbit Top view',
verticalalignment='top', horizontalalignment='left',
transform=ax.transAxes,
color='Black', fontsize=12)
pause = True # Click to pause functionality
change = False
# Initialize global variables - orbital elements
phase = 0.0 # Angle in the orbit, measured with respect to periastron
timer = 0.0 # Time Counter
comx = 0. # Center of Mass co-ordinates
comy = 0.
m1 = 3.0; # Mass of Obj 1, in Solar mass units
m2 = 1.0; # Mass of Obj 2, in Solar mass units
semi_a = 1.0 # Semi major axis for the orbit, in AU
ecc = 0.3 # Eccentricity
alpha = semi_a*(1-ecc**2)
nodeangle = 0. # Node angle for the orbit
inclination = np.pi/2 # Inclination of the orbit
mu = m1*m2/(m1+m2); # Reduced Mass
semi_b = semi_a*(1-ecc**2)**0.5 # Semi-minor Axis
L = np.sqrt(mu*semi_a*(1-ecc**2)) # Orbital angular momentum: constant for a given orbit
P = ((1/(m1+m2))*semi_a**3)**0.5 # Period of the orbit, in years
tarray = np.zeros(721) # Lookup table mapping time step "i" to phase angle in the orbit
xt = np.zeros(721) # Lookup table mapping time step "i" to actual time in years
xt[:]= [(2*P/720)*x for x in range(721)]
for i in range(721):
tht = np.radians(phase)
tarray[i] = tht
phase += np.absolute((1 + ecc*np.cos(tht))**2 / (1 - ecc**2)**1.5)
phase %= 360
phase = 0.
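# The loop above is, in effect, a finite-difference form of Kepler's second law:
# for equal time steps the true anomaly advances as
#     d(theta)/dt ~ (1 + ecc*cos(theta))**2 / (1 - ecc**2)**1.5
# (the constant 2*pi/P factor is absorbed into the step size), so tarray[i]
# maps animation frame i to the phase angle reached after i equal time steps.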
##################### Show Orbiting Bodies & corresponding orbits
M1 = plt.Circle((0, 0), 0.03, fc='r', clip_on=True, lw = 0); # Draw a circle to represent Body 1
M2 = plt.Circle((0, 0), 0.03, fc='b', clip_on=True, lw = 0); # Draw a circle to represent Body 2
# Try to draw the orbit that the objects will follow
orb1, = ax.plot(0,0,'r-', alpha = 0.33, visible = False) # empty place holder graphs for orbits
orb2, = ax.plot(0,0,'b-', alpha = 0.33, visible = False)
############ Previous attempts for orbits ####
#Ellipse(xy=(-semi_a*(ecc)*(mu/m1), 0), width=2*semi_a*(mu/m1), height=2*semi_b*(mu/m1)*np.cos(inclination),
# edgecolor='r', fc='None', alpha = 0.33, lw=1)
#Ellipse(xy=(semi_a*(ecc)*(mu/m2), 0), width=2*semi_a*(mu/m2), height=2*semi_b*(mu/m2)*np.cos(inclination),
# edgecolor='b', fc='None', alpha = 0.33, lw=1)
##############################################
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.grid(True)
############ Show Velocity of corresponding bodies with respect to time
# Draw circles for showing the instantaneous velocity of the body on the velocity - time graph
#Mv1 = plt.Circle((0, 0), 0.05*P, fc = 'r', ec='r', clip_on=True, lw = 1);
#Mv2 = plt.Circle((0, 0), 0.05*P, fc = 'k', ec='k', clip_on=True, lw = 1);
Mv1 = Ellipse((0, 0), 0.1*P, 0.1*P, fc = 'r', ec='r', clip_on=True, lw = 1);
Mv2 = Ellipse((0, 0), 0.1*P, 0.1*P, fc = 'b', ec='b', clip_on=True, lw = 1);
# 29.87 is velocity of Earth around the Sun, scaled using a^3/(M_sol) = P^2
d3 = 29.87*np.sqrt((m1+m2)/alpha)*(1+ecc)
d4 = 29.87*np.sqrt((m1+m2)/alpha)* ecc
d6 = np.sin(inclination)*np.sqrt((d4**2 + d3**2)) # used to define the axis limit on velocity plot, some number larger than either
v1, = ax2.plot(0,0,'r-', visible = False) # empty place holder graphs for velocity curves
v2, = ax2.plot(0,0,'b-', visible = False)
ax2.set_xlim(0, 2*P) # Plot velocity for two orbits
ax2.set_ylim(-d6-0.1, d6+0.1)
ax2.grid(True)
#ax.get_xaxis().set_animated(True) # enabling it takes away the labels
############### Preferred view of the orbits - from the top, no effect of inclination ####
Mi1 = plt.Circle((0, 0), 0.05, fc='r', clip_on=True, lw = 0);
Mi2 = plt.Circle((0, 0), 0.05, fc='b', clip_on=True, lw = 0);
# Draw orbits as elipses
orbi1 = Ellipse(xy=(-semi_a*(ecc)*(mu/m1), 0), width=2*semi_a*(mu/m1), height=2*semi_b*(mu/m1),
edgecolor='r', fc='None', lw=0.5)
orbi2 = Ellipse(xy=(semi_a*(ecc)*(mu/m2), 0), width=2*semi_a*(mu/m2), height=2*semi_b*(mu/m2),
edgecolor='b', fc='None', lw=0.5)
ax3.set_xlim(-2, 2)
ax3.set_ylim(-2, 2)
ax3.grid(True)
###############################################################################
# pause animation on click
def onClick(event):
global pause
pause ^= True
###############################################################################
def init():
global M1, M2, orb1, orb2, Mv1, Mv2, Mi1, Mi2, orbi1, orbi2, phase, v1, v2
M1.center = (-100, -100) # initialize the patches at a far location
M2.center = (-100, -100)
ax.add_patch(M1)
# art3d.pathpatch_2d_to_3d(M1, z=0, zdir="x")
ax.add_patch(M2)
# art3d.pathpatch_2d_to_3d(M2, z=0, zdir="x")
# orb1.center = (-100, -100)
# ax.add_patch(orb1)
# orb2.center = (-100, -100)
# ax.add_patch(orb2)
#####################################################
Mv1.center = (-100, -100)
Mv2.center = (-100, -100)
ax2.add_patch(Mv1)
ax2.add_patch(Mv2)
#####################################################
Mi1.center = (-100, -100)
Mi2.center = (-100, -100)
ax3.add_patch(Mi1)
ax3.add_patch(Mi2)
orbi1.center = (-100, -100)
ax3.add_patch(orbi1)
orbi2.center = (-100, -100)
ax3.add_patch(orbi2)
######################################################
## return everything that you want to remain visible as the animation runs
return M1,M2, orb1, orb2, Mv1, Mv2, Mi1, Mi2, orbi1, orbi2, v1, v2
###############################################################################
def update(val):
global comx, comy, m1, m2, d6
global semi_a, semi_b, ecc, alpha, nodeangle, inclination
global mu, L, P, r , r1, r2
global M1, M2, orb1, orb2, Mi1, Mi2, orbi1, orbi2, v1, v2, pause
global phase, timer, xt, tarray, change
phase = 0.
timer = 0.
v1.set_visible(False)
v2.set_visible(False)
    orb1.set_visible(False)
    orb2.set_visible(False)
m1 = round(s_m1.val,1)
m2 = round(s_m2.val,1)
semi_a = round(s_a.val,1)
if round(s_ecc.val,1) != ecc :
ecc = round(s_ecc.val,1)
change = True
alpha = semi_a*(1-ecc**2)
nodeangle = np.radians(int(s_node.val))
inclination = np.radians(int(s_inc.val))
mu = ((m1*m2)/(m1+m2));
semi_b = semi_a*(1-ecc**2)**0.5
L = np.sqrt(mu*alpha)
P = ((1/(m1+m2))*semi_a**3)**0.5
if change == True:
for i in range(721):
tht = np.radians(phase)
tarray[i] = tht
phase += np.absolute((1 + ecc*np.cos(tht))**2 / (1 - ecc**2)**1.5)
phase %= 360
phase = 0.
change = False
xt[:]= [(2*P/720)*x for x in range(721)]
r = alpha/(1+ecc);
r1 = r*(mu/m1);
r2 = -r*(mu/m2);
M1.set_radius(0.03*(semi_a))
M2.set_radius(0.03*(semi_a))
orb1.set_xdata(comx + (mu/m1)*(alpha/(1+(ecc*np.cos(tarray[0:361])))) * np.cos(tarray[0:361] + nodeangle));
orb1.set_ydata(comy + (mu/m1)*(alpha/(1+(ecc*np.cos(tarray[0:361])))) * np.cos(inclination) * np.sin(tarray[0:361] + nodeangle));
orb1.set_visible(True)
ax.draw_artist(orb1)
orb2.set_xdata(comx - (mu/m2)*(alpha/(1+(ecc*np.cos(tarray[0:361])))) * np.cos(tarray[0:361] + nodeangle));
orb2.set_ydata(comy - (mu/m2)*(alpha/(1+(ecc*np.cos(tarray[0:361])))) * np.cos(inclination) * np.sin(tarray[0:361] + nodeangle));
orb2.set_visible(True)
ax.draw_artist(orb2)
########### Old orbit plot attempt ####
# orb1.center = (comx + semi_a*(ecc)*(mu/m1)*np.cos(nodeangle+np.pi), comy + np.cos(inclination)*semi_a*(ecc)*(mu/m1)*np.sin(nodeangle+np.pi))
#
# orb1.width = 2*semi_a*(mu/m1)*(np.cos(nodeangle))**2 + 2*semi_b*(mu/m1)*(np.sin(nodeangle))**2
# orb1.height = np.cos(inclination)*(2*semi_a*(mu/m1)*(np.sin(nodeangle))**2 + 2*semi_b*(mu/m1)*(np.cos(nodeangle))**2)
# #orb1.angle = np.rad2deg(nodeangle)
#
# orb2.center = (comx + semi_a*(ecc)*(mu/m2)*np.cos(nodeangle), comy + np.cos(inclination)*semi_a*(ecc)*(mu/m2)*np.sin(nodeangle))
#
# orb2.width = 2*semi_a*(mu/m2)*(np.cos(nodeangle))**2 + 2*semi_b*(mu/m2)*(np.sin(nodeangle))**2
# orb2.height = np.cos(inclination)*(2*semi_a*(mu/m2)*(np.sin(nodeangle))**2 + 2*semi_b*(mu/m2)*(np.cos(nodeangle))**2)
#orb2.angle = np.rad2deg(nodeangle)
ax.set_xlim(-2*semi_a, 2*semi_a)
ax.set_ylim(-2*semi_a, 2*semi_a)
###############################################################
    d3 = 29.87*np.sqrt((m1+m2)/alpha)*(1+ecc)
    d4 = 29.87*np.sqrt((m1+m2)/alpha)* ecc
d6 = np.sin(inclination)*np.sqrt((d4**2 + d3**2))
v1.set_ydata((mu/m1)*np.sin(inclination)*(d4*np.sin(tarray+nodeangle)*np.sin(tarray) + (1/(1+ecc))*d3*np.cos(tarray+nodeangle)*(1+ecc*np.cos(tarray))))
v1.set_xdata(xt)
v1.set_visible(True)
ax2.draw_artist(v1)
v2.set_ydata((-mu/m2)*np.sin(inclination)*(d4*np.sin(tarray+nodeangle)*np.sin(tarray) + (1/(1+ecc))*d3*np.cos(tarray+nodeangle)*(1+ecc*np.cos(tarray))))
v2.set_xdata(xt)
v2.set_visible(True)
ax2.draw_artist(v2)
ax2.set_xlim(0, 2*P)
ax2.set_ylim(-d6-0.1, d6+0.1)
ratio = (d6+0.1)/P #ylim/xlim ratio
#Mv1.set_radius(0.05*(P))
#Mv2.set_radius(0.05*(P))
Mv1.width = 0.1*P
Mv1.height = 0.1*P*ratio
Mv2.width = 0.1*P
Mv2.height = 0.1*P*ratio# / np.sin(inclination)
###############################################################
Mi1.set_radius(0.05*(semi_a))
Mi2.set_radius(0.05*(semi_a))
orbi1.width = 2*semi_a*(mu/m1)
orbi1.height = 2*semi_b*(mu/m1)
orbi1.angle = np.rad2deg(nodeangle)
orbi1.center = (comx + semi_a*(ecc)*(mu/m1)*np.cos(nodeangle+np.pi), comy + semi_a*(ecc)*(mu/m1)*np.sin(nodeangle+np.pi))
orbi2.width = 2*semi_a*(mu/m2)
orbi2.height = 2*semi_b*(mu/m2)
orbi2.angle = np.rad2deg(nodeangle)
orbi2.center = (comx + semi_a*(ecc)*(mu/m2)*np.cos(nodeangle), comy + semi_a*(ecc)*(mu/m2)*np.sin(nodeangle))
ax3.set_xlim(-2*semi_a, 2*semi_a)
ax3.set_ylim(-2*semi_a, 2*semi_a)
##################################################################
pause = False
###############################################################################
def animate(i):
global semi_a, alpha, ecc, inclination, nodeangle
global r, r1, r2, mu, m1, m2, P
global M1, M2, orb1, orb2, Mi1, Mi2, orbi1, orbi2, comx, comy
global phase, tarray, timer, xt
if not pause:
tht = phase
r = alpha/(1+(ecc*np.cos(tht)));
r1 = r*(mu/m1);
r2 = -r*(mu/m2);
#############################################################
#x1, y1 = M1.center
x1 = comx + r1 * np.cos(tht + nodeangle);
y1 = (comy + r1 * np.cos(inclination) * np.sin(tht + nodeangle));
#x2, y2 = M2.center
x2 = comx + r2 * np.cos(tht + nodeangle);
y2 = (comy + r2 * np.cos(inclination) * np.sin(tht + nodeangle));
M1.center = (x1, y1)
M2.center = (x2, y2)
# orb1.center = (comx + semi_a*(ecc)*(mu/m1)*np.cos(nodeangle+np.pi), comy + np.cos(inclination)*semi_a*(ecc)*(mu/m1)*np.sin(nodeangle+np.pi))
#    orb2.center = (comx + semi_a*(ecc)*(mu/m2)*np.cos(nodeangle), comy + np.cos(inclination)*semi_a*(ecc)*(mu/m2)*np.sin(nodeangle))
# Source repository: opcode-eu-org/opcode-blog
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Static blog-aware site generator in Python mostly compatible with Jekyll.
Usage:
obraz (build | serve | new PATH) [options]
obraz -h|--help
Commands:
build Build your site.
serve Serve your site locally.
new Create a new Obraz site scaffold in PATH.
Options:
-s --source=DIR Source directory.
-d --destination=DIR Destination directory.
--force Force overwriting the destination directory.
--safe Disable custom plugins.
-w --watch Watch for changes and rebuild.
-D --drafts Render posts in the _drafts folder.
-H --host=HOSTNAME Listen at the given hostname.
-P --port=PORT Listen at the given port.
-b --baseurl=URL Serve the website from the given base URL.
-q --quiet Be quiet.
-t --trace Display traceback when an error occurs.
-v --version Show version.
-h --help Show help message.
For documentation see <http://obraz.pirx.ru/>.
"""
import errno
import os
import re
import shutil
import sys
import traceback
from datetime import datetime
from glob import glob
from http.server import SimpleHTTPRequestHandler, HTTPServer
from io import BytesIO
from threading import Thread
from time import sleep
from typing import (Collection, Any, Callable, Iterable, Dict, Sequence,
TypeVar, Optional, List)
from urllib.request import pathname2url, url2pathname
import yaml
from docopt import docopt
from jinja2 import Environment, FileSystemLoader
from markdown import markdown
__all__ = [
'file_filter',
'generator',
'loader',
'processor',
'template_filter',
'template_renderer',
]
PAGE_ENCODING = URL_ENCODING = 'UTF-8'
DEFAULT_CONFIG: Dict[str, Any] = {
'source': './',
'destination': './_site',
'include': ['.htaccess'],
'exclude': [],
'exclude_patterns': [
r'^[\.#].*',
r'.*~$',
r'.*\.swp$',
#r'.*\.s[uvw][a-z]$', # *.swp files, etc.
],
'full_build_patterns': [
r'_layouts',
r'_includes',
],
'host': '0.0.0.0',
'port': '8000',
'baseurl': '',
}
_quiet = False
_loaders = []
_processors = []
_file_filters = {}
_template_filters = {}
_render_string = lambda string, context, site: string
_T = TypeVar('_T')
def file_filter(extensions: Collection[str]) -> Any:
"""Register a page content filter for file extensions."""
def wrapper(f):
for ext in extensions:
_file_filters[ext] = f
return f
return wrapper
def template_filter(name: str) -> Any:
"""Register a template filter."""
def wrapper(f):
_template_filters[name] = f
return f
return wrapper
def template_renderer(f: Callable[[str, dict, dict], str]) -> Any:
"""Set a custom template renderer."""
global _render_string
_render_string = f
return f
def loader(f: Callable[[str, dict], dict]) -> Any:
"""Register a site source content loader."""
_loaders.insert(0, f)
return f
def processor(f: Callable[[str], None]) -> Any:
"""Register a site content processor."""
_processors.insert(0, f)
return f
def generator(f: Callable[[str], None]) -> Any:
"""Register a destination files generator for the site."""
_processors.append(f)
return f
def fallback_loader(f: Callable[[str, dict], dict]) -> Any:
_loaders.append(f)
return f
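# A custom plugin would typically use the registration hooks above like this
# (hypothetical example; how plugin modules are discovered is outside this
# excerpt):
#
#     @template_filter('upcase')
#     def upcase_filter(s, config):
#         return s.upper()
#
#     @file_filter(['.txt'])
#     def plain_text_filter(s, config):
#         return '<pre>' + s + '</pre>'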
def load_yaml_mapping(path: str) -> dict:
try:
with open(path, 'rb') as fd:
            mapping = yaml.load(fd, Loader=yaml.FullLoader)
return mapping if mapping else {}
except IOError as e:
        if e.errno == errno.ENOENT:
            return {}
        raise
def merge(x1, x2):
if isinstance(x1, dict) and isinstance(x2, dict):
res = x1.copy()
for k, v in x2.items():
if k in res:
res[k] = merge(res[k], v)
else:
res[k] = v
return res
elif isinstance(x1, list) and isinstance(x2, list):
res = list(x1)
res.extend(x2)
return res
elif x1 == x2:
return x1
else:
raise ValueError(f"Cannot merge '{x1!r}' and '{x2!r}'")
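# A small illustration of merge() on nested mappings (hypothetical values):
#
#     merge({'exclude': ['a'], 'port': '8000'},
#           {'exclude': ['b'], 'baseurl': '/blog'})
#     == {'exclude': ['a', 'b'], 'port': '8000', 'baseurl': '/blog'}
#
# Dicts are merged key by key, lists are concatenated, equal scalars pass
# through unchanged, and conflicting scalars raise ValueError.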
def all_source_files(source: str, destination: str) -> Iterable[str]:
dst_base, dst_name = os.path.split(os.path.realpath(destination))
for source, dirs, files in os.walk(source):
if os.path.realpath(source) == dst_base and dst_name in dirs:
dirs.remove(dst_name)
for filename in files:
yield os.path.join(source, filename)
def changed_files(source: str, destination: str, config: Dict[str, Any],
                  poll_interval: int = 1) -> Iterable[List[str]]:
times = {}
while True:
changed = []
for path in all_source_files(source, destination):
rel_path = os.path.relpath(path, source)
if not is_file_visible(rel_path, config):
continue
new = os.stat(path).st_mtime
old = times.get(path)
if not old or new > old:
times[path] = new
changed.append(path)
if changed:
yield changed
sleep(poll_interval)
def is_file_visible(path: str, config: Dict[str, Any]) -> bool:
"""Check file name visibility according to site settings."""
parts = path.split(os.path.sep)
exclude = config.get('exclude', [])
exclude_patterns = config.get('exclude_patterns', [])
if path in config.get('include', []):
return True
elif any(re.match(pattern, part)
for pattern in exclude_patterns
for part in parts):
return False
elif any(path.startswith(s) for s in exclude):
return False
else:
return True
def is_underscored(path: str) -> bool:
parts = path.split(os.path.sep)
return any(part.startswith('_') for part in parts)
def path2url(path: str) -> str:
m = re.match(r'(.*)[/\\]index.html?$', path)
if m:
path = m.group(1) + os.path.sep
path = os.path.sep + path
return pathname2url(path.encode(URL_ENCODING))
def url2path(url: str) -> str:
if url.endswith('/'):
url += 'index.html'
return url2pathname(url).lstrip(os.path.sep)
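# Rough round-trip examples (POSIX path separators assumed for illustration):
#
#     path2url('posts/index.html')  ->  '/posts/'
#     path2url('about.html')        ->  '/about.html'
#     url2path('/posts/')           ->  'posts/index.html'
#
# Directory-style URLs map to an implicit index.html and back.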
def make_dirs(path: str) -> None:
try:
os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
def remove(path: str) -> None:
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
def info(message: str) -> None:
if not _quiet:
log(message)
def exception(e: BaseException, trace: bool) -> None:
if trace:
traceback.print_tb(e.__traceback__)
log(f'Error: {e}')
def log(message: str) -> None:
sys.stderr.write(f'{message}\n')
sys.stderr.flush()
def progress(msg: str, xs: Sequence[_T]) -> Iterable[_T]:
if _quiet:
for x in xs:
yield x
else:
size = len(xs)
for i, x in enumerate(xs, 1):
yield x
s = f'{msg}: {int(i * 100 / size)}% ({i}/{size})'
sys.stderr.write('\r' + s)
sys.stderr.write('\n')
def file_suffix(path: str) -> str:
_, ext = os.path.splitext(path)
return ext
def object_name(f: Any) -> str:
if f.__doc__:
lines = f.__doc__.splitlines()
for line in lines:
line = line.strip()
if line:
return line.rstrip('.')
return f.__name__
@template_filter('markdownify')
@file_filter(['.md', '.markdown'])
def markdown_filter(s: str, config: Any) -> str:
return markdown(s)
@fallback_loader
def load_file(path: str, config: Dict[str, Any]) -> Optional[Dict[str, Any]]:
if not is_file_visible(path, config) or is_underscored(path):
return None
return {
'files': [{'url': path2url(path), 'path': path}],
}
@template_renderer
def jinja2_render_string(string: str, context: Dict[str, Any],
config: Dict[str, Any]) -> str:
includes = os.path.join(config['source'], '_includes')
env = Environment(loader=FileSystemLoader(includes))
for name, f in _template_filters.items():
        env.filters[name] = lambda s, f=f: f(s, config)
t = env.from_string(string)
return t.render(**context)
def read_template(path: str) -> Optional[Dict[str, Any]]:
with open(path, 'rb') as fd:
if fd.read(3) != b'---':
return None
lines = []
while True:
line = fd.readline()
if re.match(b'^---\r?\n', line):
break
elif line == b'':
return None
lines.append(line)
front_matter = BytesIO(b''.join(lines))
front_matter.name = path
        page = yaml.load(front_matter, Loader=yaml.FullLoader)
if not page:
page = {}
content = fd.read().decode(PAGE_ENCODING)
page['content'] = content
return page
@loader
def load_page(path: str, config: Dict[str, Any]) -> Optional[Dict['str', Any]]:
if not is_file_visible(path, config) or is_underscored(path):
return None
name, suffix = os.path.splitext(path)
if suffix in _file_filters:
dst = f'{name}.html'
else:
dst = path
page = read_template(os.path.join(config['source'], path))
if not page:
return None
page.update({'url': path2url(dst), 'path': path})
return {
'pages': [page]
}
def read_post(path: str, date: datetime, title: str,
config: Dict[str, Any]) -> Optional[Dict[str, Any]]:
page = read_template(os.path.join(config['source'], path))
if not page:
return None
if 'date' in page:
date = page['date']
permalink = config.get('permalink', '/{year}/{month}/{day}/{title}.html')
url_vars = {
'year': f'{date.year:04}',
'month': f'{date.month:02}',
'day': f'{date.day:02}',
'title': title,
}
url = pathname2url(permalink.format(**url_vars))
page.update({'url': url, 'path': path})
if 'date' not in page:
date_str = '{year}-{month}-{day}'.format(**url_vars)
page['date'] = datetime.strptime(date_str, '%Y-%m-%d')
page['id'] = '/{year}/{month}/{day}/{title}'.format(**url_vars)
return {
'posts': [page],
'tags': dict((tag, [page]) for tag in page.get('tags', [])),
}
@loader
def load_post(path: str, config: Dict[str, Any]) -> Optional[Dict[str, Any]]:
post_re = re.compile(r'(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})-'
r'(?P<title>.+)')
parts = path.split(os.path.sep)
if '_posts' not in parts:
return None
if not is_file_visible(path, config):
return None
name, _ = os.path.splitext(os.path.basename(path))
m = post_re.match(name)
if not m:
return None
date = datetime.strptime('{year}-{month}-{day}'.format(**m.groupdict()),
'%Y-%m-%d')
return read_post(path, date, m.group('title'), config)
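# Sketch of the file-name-to-URL mapping performed by load_post() with the
# default permalink template (hypothetical file name):
#
#     _posts/2018-03-01-hello-world.md
#         -> date 2018-03-01, title 'hello-world'
#         -> url  '/2018/03/01/hello-world.html'
#
# A 'permalink' key in the site configuration changes the URL template.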
@loader
def load_draft(path: str, config: Dict[str, Any]) -> Optional[Dict[str, Any]]:
if not config.get('drafts'):
return None
if '_drafts' not in path.split(os.path.sep):
return None
if not is_file_visible(path, config):
return None
title, _ = os.path.splitext(os.path.basename(path))
return read_post(path, config['time'], title, config)
def render_layout(content: str, page: Dict[str, Any],
site: Dict[str, Any]) -> str:
name = page.get('layout', 'nil')
if name == 'nil':
| |
import os
import yaml
import jsonschema
import numpy as np
import math
import json
import copy
import time as ttime
from .xrf_utils import split_compound_mass, generate_eline_list
from .utils import normalize_data_by_scaler, convert_time_to_nexus_string
import logging
logger = logging.getLogger()
# ==========================================================================================
# Functions for operations with YAML files used for keeping descriptions of XRF standards
_xrf_standard_schema = {
"type": "object",
"additionalProperties": False,
"required": ["name", "serial", "description", "compounds"],
"properties": {
"name": {"type": "string"},
"serial": {"type": "string"},
"description": {"type": "string"},
"compounds": {"type": "object",
                      # Chemical formula should always start with a capital letter (Fe2O3)
"patternProperties": {"^[A-Z][A-Za-z0-9]*$": {"type": "number"}},
"additionalProperties": False,
"minProperties": 1
},
"density": {"type": "number"} # Total density is an optional parameter
}
}
_xrf_standard_schema_instructions = """
# The file was automatically generated.
#
# Instructions for editing this file:
#
# Description of each standard starts with '- name: ...'. Every following line
# must be indented by 4 spaces. Each description contains the following items:
# 'name' (name of the standard, arbitrary string), 'serial' (serial number of
# the standard, can be an arbitrary string) and 'description' (string that contains
# description of the standard). Those fields may be filled with arbitrary information,
# best suited to distinguish the standard later. If a string consists of only
# digits (e.g. a serial number), it must be enclosed in quotes.
#
# The field 'compounds' lists all compounds in the standard. The compounds are
# presented in the form <compound_formula>: <concentration>.
# <compound_formula> has to be a valid chemical formula, representing a pure
# element (C, Fe, Ga, etc.) or compound (Fe2O3, GaAs, etc). Element names
# must start with a capital letter followed by a lowercase letter (if present).
# No characters except 'A-Z', 'a-z' and '0-9' are allowed. Lines containing
# compound specifications must be indented by extra 4 spaces.
#
# The optional field 'density' specifies the total density of the sample and is
# used to check the integrity of the data (the sum of the densities of all
# compounds must be equal to the 'density' value).
#
# All density values (for compounds and total density) are specified in ug/cm^2
#
# Example (the lines contain extra '#' character, which is not part of YAML file):
#
#- name: Micromatter 41164
# serial: '41164'
# description: CeF3 21.1 / Au 20.6
# compounds:
# CeF3: 21.1
# Au: 20.6
# density: 41.7
#
# The easiest way to start creating the list of custom standards is to uncomment
# and edit the following example. To create extra records, duplicate and
# edit the example or any existing record.
#- name: Name of the Standard
# serial: '32654'
# description: CeF3 21.1 / Au 20.6 (any convenient description)
# compounds:
# CeF3: 21.1
# Au: 20.6
"""
def save_xrf_standard_yaml_file(file_path, standard_data, *, overwrite_existing=False):
r"""
Save descriptions of of XRF standards to YAML file
Parameters
----------
file_path: str
absolute or relative path to the saved YAML file. If the path does not exist, then
it is created.
standard_data: list(dict)
list of dictionaries, each dictionary is representing the description of one
XRF standard. Sending ``[]`` will create YAML file, which contains only instructions
for manual editing of records. Such file can be read by the function
``load_xrf_standard_yaml_file``, which returns ``[]``.
overwrite_existing: bool
indicates if existing file should be overwritten. Default is False, since
overwriting of an existing parameter file will lead to loss of data.
Returns
-------
no value is returned
Raises
------
IOError if the YAML file already exists and ``overwrite_existing`` is not enabled.
"""
# Make sure that the directory exists
file_path = os.path.expanduser(file_path)
file_path = os.path.abspath(file_path)
flp, _ = os.path.split(file_path)
os.makedirs(flp, exist_ok=True)
if not overwrite_existing and os.path.isfile(file_path):
raise IOError(f"File '{file_path}' already exists")
s_output = _xrf_standard_schema_instructions
if standard_data:
s_output += yaml.dump(standard_data, default_flow_style=False, sort_keys=False, indent=4)
with open(file_path, "w") as f:
f.write(s_output)
def load_xrf_standard_yaml_file(file_path, *, schema=_xrf_standard_schema):
r"""
Load the list of XRF standard descriptions from YAML file and verify the schema.
Parameters
----------
file_path: str
absolute or relative path to YAML file. If file does not exist then IOError is raised.
schema: dict
reference to schema used for validation of the descriptions. If ``schema`` is ``None``,
then validation is disabled (this is not the default behavior).
Returns
-------
list of dictionaries, each dictionary is representing the description of one XRF
standard samples. Empty dictionary is returned if the file contains no data.
Raises
------
IOError is raised if the YAML file does not exist.
jsonschema.ValidationError is raised if schema validation fails.
RuntimeError if the sum of areal densities of all compounds does not match the
total density of the sample for at least one sample. The list of all sample
records for which the data integrity is not confirmed is returned in the
error message. For records that do not contain 'density' field the integrity
check is not performed.
"""
file_path = os.path.expanduser(file_path)
file_path = os.path.abspath(file_path)
if not os.path.isfile(file_path):
raise IOError(f"File '{file_path}' does not exist")
with open(file_path, 'r') as f:
standard_data = yaml.load(f, Loader=yaml.FullLoader)
if standard_data is None:
standard_data = []
if schema is not None:
for data in standard_data:
jsonschema.validate(instance=data, schema=schema)
# Now check if all densities of compounds sums to total density in every record
msg = []
for data in standard_data:
if "density" in data:
# The sum of all densities must be equal to total density
sm = np.sum(list(data["compounds"].values()))
if not math.isclose(sm, data["density"], abs_tol=1e-6):
msg.append(f"Record #{data['serial']} ({data['name']}): "
f"computed {sm} vs total {data['density']}")
if msg:
msg = [f" {_}" for _ in msg]
msg = '\n'.join(msg)
msg = "Sum of areal densities does not match total density:\n" + msg
raise RuntimeError(msg)
return standard_data
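# Minimal usage sketch (hypothetical file path; the record mirrors the example
# given in the instructions string above):
#
#     standards = [{
#         "name": "Micromatter 41164",
#         "serial": "41164",
#         "description": "CeF3 21.1 / Au 20.6",
#         "compounds": {"CeF3": 21.1, "Au": 20.6},
#         "density": 41.7,
#     }]
#     save_xrf_standard_yaml_file("~/xrf_standards.yaml", standards,
#                                 overwrite_existing=True)
#     assert load_xrf_standard_yaml_file("~/xrf_standards.yaml") == standards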
def load_included_xrf_standard_yaml_file():
r"""
Load YAML file with descriptions of XRF standards that is part of the
package.
Returns
-------
List of dictionaries, each dictionary represents description of one XRF standard.
Raises
------
Exceptions may be raised by ``load_xrf_standard_yaml_file`` function
"""
# Generate file name (assuming that YAML file is in the same directory)
file_name = "xrf_quant_standards.yaml"
file_path = os.path.realpath(__file__)
file_path, _ = os.path.split(file_path)
file_path = os.path.join(file_path, file_name)
return load_xrf_standard_yaml_file(file_path)
def compute_standard_element_densities(compounds):
r"""
Computes areal density of each element in the mix of compounds.
Some compounds in the mix may contain the same elements.
Parameters
----------
compounds: dict
dictionary of compound densities: key - compound formula,
value - density (typically ug/cm^2)
Returns
-------
    Dictionary of element densities: key - element name (symbolic),
    value - element density.
"""
element_densities = {}
for key, value in compounds.items():
el_dens = split_compound_mass(key, value)
for el, dens in el_dens.items():
if el in element_densities:
element_densities[el] += dens
else:
element_densities[el] = dens
return element_densities
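# Illustration, assuming split_compound_mass() apportions a compound's areal
# density to its elements by mass fraction: for {"CeF3": 21.1, "Au": 20.6}
# the result has keys 'Ce', 'F' and 'Au', with the 'Ce' and 'F' entries
# summing to 21.1 and 'Au' equal to 20.6.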
# ==========================================================================================
# Functions for operations with JSON files used for keeping quantitative data obtained
# after processing of XRF standard samples. The data is saved after processing
# XRF scan of standard samples and later used for quantitative analysis of
# experimental samples.
_xrf_quant_fluor_schema = {
"type": "object",
"additionalProperties": False,
"required": ["name", "serial", "description", "element_lines",
"incident_energy", "scaler_name", "distance_to_sample",
"creation_time_local", "source_scan_id", "source_scan_uid"],
"properties": {
# 'name', 'serial' and 'description' (optional) are copied
# from the structure used for description of XRF standard samples
"name": {"type": "string"},
"serial": {"type": "string"},
"description": {"type": "string"},
# The list of element lines. The list is not expected to be comprehensive:
# it includes only the lines selected for processing of standard samples.
"element_lines": {
"type": "object",
"additionalProperties": False,
"minProperties": 1,
# Symbolic expression representing an element line:
# Fe - represents all lines, Fe_K - K-lines, Fe_Ka - K alpha lines,
# Fe_Ka1 - K alpha 1 line. Currently only selections that contain
# all K, L or M lines is supported.
"patternProperties": {
r"^[A-Z][a-z]?(_[KLM]([ab]\d?)?)?$": {
"type": "object",
"additionalProperties": False,
"required": ["density", "fluorescence"],
"properties": {
"density": {"type": "number"},
"fluorescence": {"type": ["number", "null"]}
}
}
},
},
# Incident energy used in the processing experiment
"incident_energy": {"type": "number"},
# Selected channel, expected values are 'sum', 'det1', 'det2', 'det3' etc.
"detector_channel": {"type": ["string", "null"]},
        # Name of the scaler (specific to data recorded at the beamline)
"scaler_name": {"type": ["string", "null"]},
# Distance to the sample (number or null)
"distance_to_sample": {"type": ["number", "null"]},
# Time of file creation (NEXUS format), optional, null if not set
"creation_time_local": {"type": ["string", "null"]},
        # Scan ID of the source (scan of the standard), optional, null if not set
# flake8: noqa I201
from Child import Child
from Node import Node
DECL_NODES = [
# type-assignment -> '=' type
Node('TypeInitializerClause', kind='Syntax',
children=[
Child('Equal', kind='EqualToken'),
Child('Value', kind='Type'),
]),
# typealias-declaration -> attributes? access-level-modifier? 'typealias'
# typealias-name generic-parameter-clause?
# type-assignment
# typealias-name -> identifier
Node('TypealiasDecl', kind='Decl', traits=['IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('TypealiasKeyword', kind='TypealiasToken'),
Child('Identifier', kind='IdentifierToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Initializer', kind='TypeInitializerClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
]),
# associatedtype-declaration -> attributes? access-level-modifier?
# 'associatedtype' associatedtype-name
# inheritance-clause? type-assignment?
# generic-where-clause?
# associatedtype-name -> identifier
Node('AssociatedtypeDecl', kind='Decl', traits=['IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('AssociatedtypeKeyword', kind='AssociatedtypeToken'),
Child('Identifier', kind='IdentifierToken'),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('Initializer', kind='TypeInitializerClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
]),
Node('FunctionParameterList', kind='SyntaxCollection',
element='FunctionParameter'),
Node('ParameterClause', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('ParameterList', kind='FunctionParameterList'),
Child('RightParen', kind='RightParenToken'),
]),
# -> Type
Node('ReturnClause', kind='Syntax',
children=[
Child('Arrow', kind='ArrowToken'),
Child('ReturnType', kind='Type'),
]),
# function-signature ->
# '(' parameter-list? ')' (throws | rethrows)? '->'? type?
Node('FunctionSignature', kind='Syntax',
children=[
Child('Input', kind='ParameterClause'),
Child('ThrowsOrRethrowsKeyword', kind='Token',
is_optional=True,
token_choices=[
'ThrowsToken',
'RethrowsToken',
]),
Child('Output', kind='ReturnClause', is_optional=True),
]),
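    # For orientation (interpretation inferred from the grammar comment above):
    # a Swift signature such as
    #     (x: Int, y: Int = 0) throws -> Bool
    # would fill Input with the parenthesized parameter list,
    # ThrowsOrRethrowsKeyword with 'throws', and Output with '-> Bool'.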
# if-config-clause ->
# ('#if' | '#elseif' | '#else') expr? (stmt-list | switch-case-list)
Node('IfConfigClause', kind='Syntax',
children=[
Child('PoundKeyword', kind='Token',
token_choices=[
'PoundIfToken',
'PoundElseifToken',
'PoundElseToken',
]),
Child('Condition', kind='Expr',
is_optional=True),
Child('Elements', kind='Syntax',
node_choices=[
Child('Statements', kind='CodeBlockItemList'),
Child('SwitchCases', kind='SwitchCaseList'),
Child('Decls', kind='MemberDeclList'),
]),
]),
Node('IfConfigClauseList', kind='SyntaxCollection',
element='IfConfigClause'),
# if-config-decl -> '#if' expr stmt-list else-if-directive-clause-list
# else-clause? '#endif'
Node('IfConfigDecl', kind='Decl',
children=[
Child('Clauses', kind='IfConfigClauseList'),
Child('PoundEndif', kind='PoundEndifToken'),
]),
Node('PoundErrorDecl', kind='Decl',
traits=['Parenthesized'],
children=[
Child('PoundError', kind='PoundErrorToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Message', kind='StringLiteralExpr'),
Child('RightParen', kind='RightParenToken')
]),
Node('PoundWarningDecl', kind='Decl',
traits=['Parenthesized'],
children=[
Child('PoundWarning', kind='PoundWarningToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Message', kind='StringLiteralExpr'),
Child('RightParen', kind='RightParenToken')
]),
Node('DeclModifier', kind='Syntax',
children=[
Child('Name', kind='Token',
text_choices=[
'class', 'convenience', 'dynamic', 'final', 'infix',
'lazy', 'optional', 'override', 'postfix', 'prefix',
'required', 'static', 'unowned', 'weak', 'private',
'fileprivate', 'internal', 'public', 'open',
'mutating', 'nonmutating', 'indirect',
]),
Child('DetailLeftParen', kind='LeftParenToken', is_optional=True),
Child('Detail', kind='IdentifierToken', is_optional=True),
Child('DetailRightParen', kind='RightParenToken', is_optional=True),
]),
Node('InheritedType', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('TypeName', kind='Type'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
Node('InheritedTypeList', kind='SyntaxCollection',
element='InheritedType'),
# type-inheritance-clause -> ':' type
Node('TypeInheritanceClause', kind='Syntax',
children=[
Child('Colon', kind='ColonToken'),
Child('InheritedTypeCollection', kind='InheritedTypeList'),
]),
# class-declaration -> attributes? access-level-modifier?
# 'class' class-name
# generic-parameter-clause?
# type-inheritance-clause?
# generic-where-clause?
# '{' class-members '}'
# class-name -> identifier
Node('ClassDecl', kind='Decl',
traits=['DeclGroup', 'IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('ClassKeyword', kind='ClassToken'),
Child('Identifier', kind='IdentifierToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
# struct-declaration -> attributes? access-level-modifier?
# 'struct' struct-name
# generic-parameter-clause?
# type-inheritance-clause?
# generic-where-clause?
# '{' struct-members '}'
# struct-name -> identifier
Node('StructDecl', kind='Decl',
traits=['DeclGroup', 'IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('StructKeyword', kind='StructToken'),
Child('Identifier', kind='IdentifierToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
Node('ProtocolDecl', kind='Decl',
traits=['DeclGroup', 'IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('ProtocolKeyword', kind='ProtocolToken'),
Child('Identifier', kind='IdentifierToken'),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
# extension-declaration -> attributes? access-level-modifier?
# 'extension' extended-type
# type-inheritance-clause?
# generic-where-clause?
# '{' extension-members '}'
# extension-name -> identifier
Node('ExtensionDecl', kind='Decl', traits=['DeclGroup'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('ExtensionKeyword', kind='ExtensionToken'),
Child('ExtendedType', kind='Type'),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
Node('MemberDeclBlock', kind='Syntax', traits=['Braced'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Members', kind='MemberDeclList'),
Child('RightBrace', kind='RightBraceToken'),
]),
# member-decl-list = member-decl member-decl-list?
Node('MemberDeclList', kind='SyntaxCollection',
element='MemberDeclListItem'),
# member-decl = decl ';'?
Node('MemberDeclListItem', kind='Syntax',
description='''
A member declaration of a type consisting of a declaration and an \
optional semicolon;
''',
children=[
Child('Decl', kind='Decl',
description='The declaration of the type member.'),
Child('Semicolon', kind='SemicolonToken', is_optional=True,
description='An optional trailing semicolon.'),
]),
# source-file = code-block-item-list eof
Node('SourceFile', kind='Syntax',
traits=['WithStatements'],
children=[
Child('Statements', kind='CodeBlockItemList'),
Child('EOFToken', kind='EOFToken')
]),
# initializer -> '=' expr
Node('InitializerClause', kind='Syntax',
children=[
Child('Equal', kind='EqualToken'),
Child('Value', kind='Expr'),
]),
# parameter ->
# external-parameter-name? local-parameter-name ':'
# type '...'? '='? expression? ','?
Node('FunctionParameter', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('FirstName', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken',
],
is_optional=True),
             # One of these two names needs to be optional; we choose the second
             # name to avoid backtracking.
Child('SecondName', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken',
],
is_optional=True),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Type', kind='Type',
is_optional=True),
Child('Ellipsis', kind='Token',
is_optional=True),
Child('DefaultArgument', kind='InitializerClause',
is_optional=True),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# declaration-modifier -> access-level-modifier
# | mutation-modifier
# | 'class'
# | 'convenience'
# | 'dynamic'
# | 'final'
# | 'infix'
# | 'lazy'
# | 'optional'
# | 'override'
# | 'postfix'
# | 'prefix'
# | 'required'
# | 'static'
# | 'unowned'
# | 'unowned(safe)'
# | 'unowned(unsafe)'
# | 'weak'
# mutation-modifier -> 'mutating' | 'nonmutating'
Node('ModifierList', kind='SyntaxCollection',
element='DeclModifier',
element_name='Modifier'),
Node('FunctionDecl', kind='Decl', traits=['IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList',
is_optional=True),
Child('FuncKeyword', kind='FuncToken'),
Child('Identifier', kind='Token',
token_choices=[
'IdentifierToken',
'UnspacedBinaryOperatorToken',
'SpacedBinaryOperatorToken',
'PrefixOperatorToken',
'PostfixOperatorToken',
]),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Signature', kind='FunctionSignature'),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
# the body is not necessary inside a protocol definition
Child('Body', kind='CodeBlock', is_optional=True),
]),
Node('InitializerDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList',
is_optional=True),
Child('InitKeyword', kind='InitToken'),
Child('OptionalMark', kind='Token',
token_choices=[
'PostfixQuestionMarkToken',
'InfixQuestionMarkToken',
'ExclamationMarkToken',
],
is_optional=True),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Parameters', kind='ParameterClause'),
Child('ThrowsOrRethrowsKeyword', kind='Token',
is_optional=True,
token_choices=[
'ThrowsToken',
'RethrowsToken',
]),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
# the body is not necessary inside a protocol definition
Child('Body', kind='CodeBlock', is_optional=True),
]),
Node('DeinitializerDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList',
is_optional=True),
Child('DeinitKeyword', kind='DeinitToken'),
Child('Body', kind='CodeBlock'),
]),
Node('SubscriptDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
is_optional=True),
Child('Modifiers', kind='ModifierList',
is_optional=True),
Child('SubscriptKeyword', kind='SubscriptToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Indices', kind='ParameterClause'),
Child('Result', kind='ReturnClause'),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
# the body is not necessary inside a protocol definition
Child('Accessor', kind='AccessorBlock', is_optional=True),
]),
# access-level-modifier -> 'private' | 'private' '(' 'set' ')'
# | 'fileprivate' | 'fileprivate' '(' 'set' ')'
# | 'internal' | 'internal' '(' 'set' ')'
# | 'public' | 'public' '(' 'set' ')'
# | 'open' | 'open' '(' 'set' ')'
Node('AccessLevelModifier', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('LeftParen', kind='LeftParenToken',
is_optional=True),
Child('Modifier', kind='IdentifierToken',
is_optional=True),
Child('RightParen', kind='RightParenToken',
is_optional=True),
]),
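    # Illustrative mapping (editor's sketch): `private(set)` parses with Name='private',
    # LeftParen='(', Modifier='set', RightParen=')'; a bare `public` uses only Name.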
Node('AccessPathComponent', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('TrailingDot', kind='PeriodToken', is_optional=True),
]),
Node('AccessPath', kind='SyntaxCollection', element='AccessPathComponent'),
Node('ImportDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList', is_optional=True),
Child('Modifiers', kind='ModifierList',
is_optional=True),
Child('ImportTok', kind='ImportToken'),
Child('ImportKind', kind='Token', is_optional=True,
token_choices=[
'TypealiasToken', 'StructToken', 'ClassToken',
'EnumToken', 'ProtocolToken', 'VarToken', 'LetToken',
'FuncToken',
]),
Child('Path', kind='AccessPath'),
]),
# (value)
Node('AccessorParameter', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Name', kind='IdentifierToken'),
Child('RightParen', kind='RightParenToken'),
]),
Node('AccessorDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList', is_optional=True),
Child('Modifier', kind='DeclModifier', is_optional=True),
Child('AccessorKind', kind='Token',
text_choices=[
'get', 'set', 'didSet', 'willSet',
]),
Child('Parameter', kind='AccessorParameter', is_optional=True),
Child('Body', kind='CodeBlock', is_optional=True),
]),
Node('AccessorList', kind="SyntaxCollection", element='AccessorDecl'),
Node('AccessorBlock', kind="Syntax", traits=['Braced'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('AccessorListOrStmtList', kind='Syntax',
node_choices=[
Child('Accessors', kind='AccessorList'),
Child('Statements', kind='CodeBlockItemList')]),
Child('RightBrace', kind='RightBraceToken'),
]),
# Pattern: Type = Value { get {} },
Node('PatternBinding', kind="Syntax",
traits=['WithTrailingComma'],
children=[
Child('Pattern', kind='Pattern'),
Child('TypeAnnotation', kind='TypeAnnotation', is_optional=True),
Child('Initializer', kind='InitializerClause', is_optional=True),
Child('Accessor', kind='AccessorBlock', is_optional=True),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
Node('PatternBindingList', kind="SyntaxCollection",
element='PatternBinding'),
Node('VariableDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList', is_optional=True),
Child('Modifiers', kind='ModifierList', is_optional=True),
Child('LetOrVarKeyword', kind='Token',
token_choices=[
'LetToken', 'VarToken',
]),
Child('Bindings', kind='PatternBindingList'),
]),
Node('EnumCaseElement', kind='Syntax',
description='''
An element of an enum case, containing the name of the case and, \
optionally, either associated values or an assignment to a raw value.
''',
traits=['WithTrailingComma'],
children=[
Child('Identifier', kind='IdentifierToken',
description='The name of this case.'),
Child('AssociatedValue', kind='ParameterClause', is_optional=True,
description='The set of associated values of the case.'),
Child('RawValue', kind='InitializerClause', is_optional=True,
description='''
The raw value of this enum element, if present.
'''),
Child('TrailingComma', kind='CommaToken', is_optional=True,
description='''
The trailing comma of this element, if the case has \
multiple elements.
'''),
]),
Node('EnumCaseElementList', kind='SyntaxCollection',
description='A collection of 0 or more `EnumCaseElement`s.',
element='EnumCaseElement'),
Node('EnumCaseDecl', kind='Decl',
description='''
A `case` declaration of a Swift `enum`. It can have 1 or more \
`EnumCaseElement`s inside, each declaring a different case of the
enum.
''',
children=[
Child('Attributes', kind='AttributeList', is_optional=True,
description='''
The attributes applied to the case declaration.
'''),
Child('Modifiers', kind='ModifierList', is_optional=True,
description='''
The declaration modifiers applied to the case declaration.
'''),
Child('CaseKeyword', kind='CaseToken',
description='The `case` keyword for this case.'),
Child('Elements', kind='EnumCaseElementList',
description='The elements this case declares.')
]),
Node('EnumDecl', kind='Decl', traits=['IdentifiedDecl'],
description='A Swift `enum` declaration.',
children=[
Child('Attributes', kind='AttributeList', is_optional=True,
description='''
The attributes applied to the enum declaration.
'''),
Child('Modifiers', kind='ModifierList', is_optional=True,
description='''
The declaration modifiers applied to the enum declaration.
'''),
Child('EnumKeyword', kind='EnumToken',
description='''
The `enum` keyword for this declaration.
'''),
Child('Identifier', kind='IdentifierToken',
description='''
The name of this enum.
'''),
Child('GenericParameters', kind='GenericParameterClause',
is_optional=True,
description='''
The generic parameters, if any, for this enum.
'''),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True,
description='''
The inheritance clause describing conformances or raw \
values for this enum.
'''),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True,
| |
region_l = 0.0
region_r = 1.0
u_true, f, A_eps, u_left, u_right = MS_LaplaceEqs.get_infos2pLaplace_1D_2(
in_dim=input_dim, out_dim=out_dim, intervalL=region_l, intervalR=region_r, index2p=p_index, eps1=epsilon1,
eps2=epsilon2)
elif R['PDE_type'] == 'Possion_Boltzmann':
# Solve the following equation, in which A_eps(x) oscillates strongly and involves multiple scales:
#     - d/dx( A_eps(x) * d/dx u_eps(x) ) + K(x) * u_eps(x) = f(x),   x in R^n
p_index = R['order2pLaplace_operator']
epsilon = R['epsilon']
region_l = 0.0
region_r = 1.0
A_eps, kappa, u_true, u_left, u_right, f = MS_BoltzmannEqs.get_infos2Boltzmann_1D(
in_dim=input_dim, out_dim=out_dim, region_a=region_l, region_b=region_r, index2p=p_index, eps=epsilon,
eqs_name=R['equa_name'])
sd2nn = SD2NN(input_dim=R['input_dim'], out_dim=1, hidden2Normal=R['hidden2normal'],
hidden2Scale1=R['hidden2scale1'], hidden2Scale2=R['hidden2scale2'],
Model_name2Normal=R['model2Normal'], Model_name2Scale1=R['model2Scale1'],
Model_name2Scale2=R['model2Scale2'], actIn_name2Normal=R['actHidden_name2Normal'],
actHidden_name2Normal=R['actHidden_name2Normal'], actOut_name2Normal='linear',
actIn_name2Scale=R['actHidden_name2Scale'], actHidden_name2Scale=R['actHidden_name2Scale'],
actOut_name2Scale='linear', opt2regular_WB='L2', type2numeric='float32', freq2Normal=R['freq2Normal'],
freq2Scale1=R['freq2Scale1'], freq2Scale2=R['freq2Scale2'])
global_steps = tf.compat.v1.Variable(0, trainable=False)
with tf.device('/gpu:%s' % (R['gpuNo'])):
with tf.compat.v1.variable_scope('vscope', reuse=tf.compat.v1.AUTO_REUSE):
X_it = tf.compat.v1.placeholder(tf.float32, name='X_it', shape=[None, input_dim])        # N rows, 1 column
X_left = tf.compat.v1.placeholder(tf.float32, name='X_left', shape=[None, input_dim])    # N rows, 1 column
X_right = tf.compat.v1.placeholder(tf.float32, name='X_right', shape=[None, input_dim])  # N rows, 1 column
bd_penalty = tf.compat.v1.placeholder_with_default(input=1e3, shape=[], name='bd_p')
UdotU_penalty = tf.compat.v1.placeholder_with_default(input=1.0, shape=[], name='p_powU')
in_learning_rate = tf.compat.v1.placeholder_with_default(input=1e-5, shape=[], name='lr')
if R['PDE_type'] == 'general_Laplace':
UNN2train, Loss_it2NNs, UNN_dot_UNN = sd2nn.loss_it2Laplace(
X=X_it, fside=f, loss_type=R['loss_type'], alpha1=using_scale1_orthogonal,
alpha2=using_scale2_orthogonal, opt2orthogonal=R['opt2orthogonal'])
elif R['PDE_type'] == 'pLaplace':
fx = MS_LaplaceEqs.force_sice_3scale2(X_it, eps1=R['epsilon1'], eps2=R['epsilon2'])
UNN2train, Loss_it2NNs, UNN_dot_UNN = sd2nn.loss_it2pLaplace(
X=X_it, Aeps=A_eps, fside=fx, if_lambda2fside=False, loss_type=R['loss_type'],
alpha1=using_scale1_orthogonal, alpha2=using_scale2_orthogonal, opt2orthogonal=R['opt2orthogonal'])
elif R['PDE_type'] == 'Possion_Boltzmann':
UNN2train, Loss_it2NNs, UNN_dot_UNN = sd2nn.loss_it2Possion_Boltzmann()
if R['opt2loss_udotu'] == 'with_orthogonal':
Loss2UNN_dot_UNN = UdotU_penalty * UNN_dot_UNN
else:
Loss2UNN_dot_UNN = tf.constant(0.0)
if R['opt2loss_bd'] == 'unified_boundary':
loss_bd2left = sd2nn.loss_bd2NormalAddScale(X_left, Ubd_exact=u_left, alpha1=R['contrib2scale1'],
alpha2=R['contrib2scale2'])
loss_bd2right = sd2nn.loss_bd2NormalAddScale(X_right, Ubd_exact=u_right, alpha1=R['contrib2scale1'],
alpha2=R['contrib2scale2'])
Loss_bd2NNs = bd_penalty * (loss_bd2left + loss_bd2right)
else:
loss_bd2Normal_left = sd2nn.loss2Normal_bd(X_left, Ubd_exact=u_left)
loss_bd2Normal_right = sd2nn.loss2Normal_bd(X_right, Ubd_exact=u_right)
loss_bd2Normal = loss_bd2Normal_left + loss_bd2Normal_right
loss_bd2Scale1_left = sd2nn.loss2Scale1_bd(X_left, alpha=using_scale1_boundary)
loss_bd2Scale1_right = sd2nn.loss2Scale1_bd(X_right, alpha=using_scale1_boundary)
loss_bd2Scale1 = loss_bd2Scale1_left + loss_bd2Scale1_right
loss_bd2Scale2_left = sd2nn.loss2Scale2_bd(X_left, alpha=using_scale2_boundary)
loss_bd2Scale2_right = sd2nn.loss2Scale2_bd(X_right, alpha=using_scale2_boundary)
loss_bd2Scale2 = loss_bd2Scale2_left + loss_bd2Scale2_right
Loss_bd2NNs = bd_penalty*(loss_bd2Normal + loss_bd2Scale1 + loss_bd2Scale2)
regularSum2WB = sd2nn.get_regularSum2WB()
PWB = penalty2WB * regularSum2WB
Loss2NN = Loss_it2NNs + Loss_bd2NNs + Loss2UNN_dot_UNN + PWB
my_optimizer = tf.train.AdamOptimizer(in_learning_rate)
if R['loss_type'] == 'variational_loss' or R['loss_type'] == 'variational_loss2':
if R['train_model'] == 'training_group4_1':
train_op1 = my_optimizer.minimize(Loss_it2NNs, global_step=global_steps)
train_op2 = my_optimizer.minimize(Loss_bd2NNs, global_step=global_steps)
train_op3 = my_optimizer.minimize(Loss2UNN_dot_UNN, global_step=global_steps)
train_op4 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
train_Loss2NN = tf.group(train_op1, train_op2, train_op3, train_op4)
elif R['train_model'] == 'training_group3':
train_op1 = my_optimizer.minimize(Loss_it2NNs, global_step=global_steps)
train_op2 = my_optimizer.minimize(Loss_bd2NNs, global_step=global_steps)
train_op3 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
train_Loss2NN = tf.group(train_op1, train_op2, train_op3)
elif R['train_model'] == 'training_group4_2':
train_op1 = my_optimizer.minimize(Loss_it2NNs, global_step=global_steps)
train_op2 = my_optimizer.minimize(Loss_bd2NNs, global_step=global_steps)
train_op3 = my_optimizer.minimize(Loss2UNN_dot_UNN, global_step=global_steps)
train_op4 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
train_Loss2NN = tf.group(train_op1, train_op2, train_op3, train_op4)
elif R['train_model'] == 'training_group2':
train_op3 = my_optimizer.minimize(Loss2UNN_dot_UNN, global_step=global_steps)
train_op4 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
train_Loss2NN = tf.group(train_op3, train_op4)
else:
train_Loss2NN = my_optimizer.minimize(Loss2NN, global_step=global_steps)
# error between the exact solution and the network prediction on the training points
U_true = u_true(X_it)
train_mse_NN = tf.reduce_mean(tf.square(U_true - UNN2train))
train_rel_NN = train_mse_NN / tf.reduce_mean(tf.square(U_true))
UNN_Normal2test, UNN_Scale12test, UNN_Scale22test, UNN2test = \
sd2nn.evalue_MscaleDNN(X_points=X_it, alpha1=R['contrib2scale1'], alpha2=R['contrib2scale2'])
t0 = time.time()
loss_it_all, loss_bd_all, loss_all, loss_udu_all, train_mse_all, train_rel_all = [], [], [], [], [], []
test_mse_all, test_rel_all = [], []
test_epoch = []
test_batch_size = 1000
test_x_bach = np.reshape(np.linspace(region_l, region_r, num=test_batch_size), [-1, 1])
saveData.save_testData_or_solus2mat(test_x_bach, dataName='testx', outPath=R['FolderName'])
# With allow_soft_placement=True, ConfigProto lets the graph run on the GPU
config = tf.compat.v1.ConfigProto(allow_soft_placement=True)  # configure the session when it is created
config.gpu_options.allow_growth = True  # let TensorFlow allocate GPU memory on demand instead of reserving it all up front
config.allow_soft_placement = True  # if the requested device does not exist (e.g. no GPU), fall back to an available one such as the CPU
with tf.compat.v1.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
tmp_lr = learning_rate
for i_epoch in range(R['max_epoch'] + 1):
x_it_batch = DNN_data.rand_it(batchsize_it, input_dim, region_a=region_l, region_b=region_r)
xl_bd_batch, xr_bd_batch = DNN_data.rand_bd_1D(batchsize_bd, input_dim, region_a=region_l, region_b=region_r)
tmp_lr = tmp_lr * (1 - lr_decay)
if R['activate_penalty2bd_increase'] == 1:
if i_epoch < int(R['max_epoch'] / 10):
temp_penalty_bd = init_bd_penalty
elif i_epoch < int(R['max_epoch'] / 5):
temp_penalty_bd = 10 * init_bd_penalty
elif i_epoch < int(R['max_epoch'] / 4):
temp_penalty_bd = 50 * init_bd_penalty
elif i_epoch < int(R['max_epoch'] / 2):
temp_penalty_bd = 100 * init_bd_penalty
elif i_epoch < int(3 * R['max_epoch'] / 4):
temp_penalty_bd = 200 * init_bd_penalty
else:
temp_penalty_bd = 500 * init_bd_penalty
else:
temp_penalty_bd = init_bd_penalty
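# Worked example of the staged schedule above (editor's note, assuming init_bd_penalty picks up
# R['init_boundary_penalty'] = 100 set in __main__ below and max_epoch = 200000): temp_penalty_bd
# steps through 100 -> 1000 -> 5000 -> 10000 -> 20000 -> 50000 at epochs 20000/40000/50000/100000/150000.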
if R['activate_powSolus_increase'] == 1:
if i_epoch < int(R['max_epoch'] / 10):
temp_penalty_powU = init_UdotU_penalty
elif i_epoch < int(R['max_epoch'] / 5):
temp_penalty_powU = 10* init_UdotU_penalty
elif i_epoch < int(R['max_epoch'] / 4):
temp_penalty_powU = 50*init_UdotU_penalty
elif i_epoch < int(R['max_epoch'] / 2):
temp_penalty_powU = 100*init_UdotU_penalty
elif i_epoch < int(3 * R['max_epoch'] / 4):
temp_penalty_powU = 200*init_UdotU_penalty
else:
temp_penalty_powU = 500*init_UdotU_penalty
else:
temp_penalty_powU = init_UdotU_penalty
_, loss_it_nn, loss_bd_nn, loss_nn, udu_nn, train_mse_nn, train_rel_nn, pwb = sess.run(
[train_Loss2NN, Loss_it2NNs, Loss_bd2NNs, Loss2NN, UNN_dot_UNN, train_mse_NN, train_rel_NN, PWB],
feed_dict={X_it: x_it_batch, X_left: xl_bd_batch, X_right: xr_bd_batch,
in_learning_rate: tmp_lr, bd_penalty: temp_penalty_bd, UdotU_penalty: temp_penalty_powU})
loss_it_all.append(loss_it_nn)
loss_bd_all.append(loss_bd_nn)
loss_all.append(loss_nn)
loss_udu_all.append(udu_nn)
train_mse_all.append(train_mse_nn)
train_rel_all.append(train_rel_nn)
if i_epoch % 1000 == 0:
run_times = time.time() - t0
DNN_tools.print_and_log_train_one_epoch(
i_epoch, run_times, tmp_lr, temp_penalty_bd, temp_penalty_powU, pwb, loss_it_nn, loss_bd_nn, loss_nn,
udu_nn, train_mse_nn, train_rel_nn, log_out=log_fileout_NN)
# --------------------------- test network ----------------------------------------------
test_epoch.append(i_epoch / 1000)
u_true2test, utest_nn, unn_normal, unn_scale1, unn_scale2 = sess.run(
[U_true, UNN2test, UNN_Normal2test, UNN_Scale12test, UNN_Scale22test],
feed_dict={X_it: test_x_bach})
test_mse2nn = np.mean(np.square(u_true2test - utest_nn))
test_mse_all.append(test_mse2nn)
test_rel2nn = test_mse2nn / np.mean(np.square(u_true2test))
test_rel_all.append(test_rel2nn)
DNN_tools.print_and_log_test_one_epoch(test_mse2nn, test_rel2nn, log_out=log_fileout_NN)
# ----------------------- save training results to mat files, then plot them ---------------------------------
saveData.save_trainLoss2mat_1actFunc(loss_it_all, loss_bd_all, loss_all, actName=act_func2Normal,
outPath=R['FolderName'])
saveData.save_train_MSE_REL2mat(train_mse_all, train_rel_all, actName=act_func2Normal, outPath=R['FolderName'])
plotData.plotTrain_loss_1act_func(loss_it_all, lossType='loss_it', seedNo=R['seed'], outPath=R['FolderName'])
plotData.plotTrain_loss_1act_func(loss_bd_all, lossType='loss_bd', seedNo=R['seed'], outPath=R['FolderName'],
yaxis_scale=True)
plotData.plotTrain_loss_1act_func(loss_all, lossType='loss', seedNo=R['seed'], outPath=R['FolderName'])
plotData.plotTrain_MSE_REL_1act_func(train_mse_all, train_rel_all, actName=act_func2Scale, seedNo=R['seed'],
outPath=R['FolderName'], yaxis_scale=True)
# ---------------------- save testing results to mat files, then plot them --------------------------------
saveData.save_testData_or_solus2mat(u_true2test, dataName='Utrue', outPath=R['FolderName'])
saveData.save_testData_or_solus2mat(utest_nn, dataName=act_func2Normal, outPath=R['FolderName'])
saveData.save_testData_or_solus2mat(unn_normal, dataName='normal', outPath=R['FolderName'])
saveData.save_testData_or_solus2mat(unn_scale1, dataName='scale1', outPath=R['FolderName'])
saveData.save_testData_or_solus2mat(unn_scale2, dataName='scale2', outPath=R['FolderName'])
saveData.save_testMSE_REL2mat(test_mse_all, test_rel_all, actName=act_func2Scale, outPath=R['FolderName'])
plotData.plotTest_MSE_REL(test_mse_all, test_rel_all, test_epoch, actName=act_func2Scale, seedNo=R['seed'],
outPath=R['FolderName'], yaxis_scale=True)
if __name__ == "__main__":
R = {}
R['gpuNo'] = 0
if platform.system() == 'Windows':
os.environ["CDUA_VISIBLE_DEVICES"] = "%s" % (R['gpuNo'])
else:
print('-------------------------------------- linux -----------------------------------------------')
# A Linux terminal has no GUI, so the following line is required; it must appear before importing matplotlib.pyplot, otherwise it has no effect.
matplotlib.use('Agg')
if tf.test.is_gpu_available():
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3" # 设置当前使用的GPU设备仅为第 0,1,2,3 块GPU, 设备名称为'/gpu:0'
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# ------------------------------------------- output path setup ----------------------------------------
store_file = 'pLaplace1D'
# store_file = 'Boltzmann1D'
# store_file = 'Convection1D'
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
OUT_DIR = os.path.join(BASE_DIR, store_file)
if not os.path.exists(OUT_DIR):
print('---------------------- OUT_DIR ---------------------:', OUT_DIR)
os.mkdir(OUT_DIR)
R['seed'] = np.random.randint(1e5)
seed_str = str(R['seed'])  # convert the int seed to a string
FolderName = os.path.join(OUT_DIR, seed_str)  # join the output paths
R['FolderName'] = FolderName
if not os.path.exists(FolderName):
print('--------------------- FolderName -----------------:', FolderName)
os.mkdir(FolderName)
# ---------------------------------------- copy and save the current script -----------------------------------------
if platform.system() == 'Windows':
tf.compat.v1.reset_default_graph()
shutil.copy(__file__, '%s/%s' % (FolderName, os.path.basename(__file__)))
else:
shutil.copy(__file__, '%s/%s' % (FolderName, os.path.basename(__file__)))
# ---------------------------- Setup of laplace equation ------------------------------
# If step_stop_flag is nonzero, a step-based stop condition is activated so the run can be killed early
step_stop_flag = input('please input an integer number to activate step-stop----0:no---!0:yes--:')
R['activate_stop'] = int(step_stop_flag)
R['max_epoch'] = 200000
if 0 != R['activate_stop']:
epoch_stop = input('please input a stop epoch:')
R['max_epoch'] = int(epoch_stop)
if store_file == 'Laplace1D':
R['PDE_type'] = 'general_Laplace'
R['equa_name'] = 'PDE1'
# R['equa_name'] = 'PDE2'
# R['equa_name'] = 'PDE3'
# R['equa_name'] = 'PDE4'
# R['equa_name'] = 'PDE5'
# R['equa_name'] = 'PDE6'
# R['equa_name'] = 'PDE7'
elif store_file == 'pLaplace1D':
R['PDE_type'] = 'pLaplace'
R['equa_name'] = '3scale2'
# R['equa_name'] = '3scale3'
elif store_file == 'Boltzmann1D':
R['PDE_type'] = 'Possion_Boltzmann'
# R['equa_name'] = 'Boltzmann1'
R['equa_name'] = 'Boltzmann2'
if R['PDE_type'] == 'pLaplace' or R['PDE_type'] == 'Possion_Boltzmann':
# scale settings
epsilon1 = input('please input epsilon1 =')  # terminal input is read as a string
R['epsilon1'] = float(epsilon1)  # convert the string to a float
epsilon2 = input('please input epsilon2 =')  # terminal input is read as a string
R['epsilon2'] = float(epsilon2)  # convert the string to a float
# order of the p-Laplace problem
order2pLaplace = input('please input the order (an integer) of the p-laplace operator:')
order = float(order2pLaplace)
R['order2pLaplace_operator'] = order
R['input_dim'] = 1  # input dimension, i.e. the dimension of the problem
R['output_dim'] = 1  # output dimension
R['loss_type'] = 'variational_loss'  # variational form of the PDE
# R['loss_type'] = 'variational_loss2'  # variational form of the PDE
# R['loss_type'] = 'L2_loss' # L2 loss
# R['opt2orthogonal'] = 0 # 0: L2 opt2orthogonal+energy 1: opt2orthogonal 2:energy
R['opt2orthogonal'] = 1 # 0: L2 opt2orthogonal+energy 1: opt2orthogonal 2:energy
# R['opt2orthogonal'] = 2 # 0: L2 opt2orthogonal+energy 1: opt2orthogonal 2:energy
# ---------------------------- Setup of DNN -------------------------------
R['batch_size2interior'] = 3000  # batch size of interior training points
R['batch_size2boundary'] = 500   # batch size of boundary training points
R['regular_wb_model'] = 'L0'
# R['regular_wb_model'] = 'L1'
# R['regular_wb_model'] = 'L2'
R['penalty2weight_biases'] = 0.000 # Regularization parameter for weights
# R['penalty2weight_biases'] = 0.001 # Regularization parameter for weights
# R['penalty2weight_biases'] = 0.0025 # Regularization parameter for weights
R['activate_penalty2bd_increase'] = 1
R['init_boundary_penalty'] = 100 # Regularization parameter for boundary conditions
R['activate_powSolus_increase'] = 0
if R['activate_powSolus_increase'] == 1:
R['init_penalty2orthogonal'] = 5.0
elif R['activate_powSolus_increase'] == 2:
| |
"""
Tests for the algorithms.spectral submodule
"""
import numpy as np
import scipy
from scipy import fftpack
import numpy.testing as npt
import numpy.testing.decorators as dec
import nose.tools as nt
import nitime.algorithms as tsa
import nitime.utils as utils
def test_get_spectra():
"""
Testing spectral estimation
"""
methods = (None,
{"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi},
{"this_method": 'welch', "NFFT": 1024, "Fs": 2 * np.pi})
for method in methods:
avg_pwr1 = []
avg_pwr2 = []
est_pwr1 = []
est_pwr2 = []
arsig1, _, _ = utils.ar_generator(N=2 ** 16) # needs to be that long
# for the answers to converge
arsig2, _, _ = utils.ar_generator(N=2 ** 16)
avg_pwr1.append((arsig1 ** 2).mean())
avg_pwr2.append((arsig2 ** 2).mean())
tseries = np.vstack([arsig1, arsig2])
f, c = tsa.get_spectra(tseries, method=method)
# \sum_{\omega} psd d\omega:
est_pwr1.append(np.sum(c[0, 0]) * (f[1] - f[0]))
est_pwr2.append(np.sum(c[1, 1]) * (f[1] - f[0]))
# Get it right within the order of magnitude:
npt.assert_array_almost_equal(est_pwr1, avg_pwr1, decimal=-1)
npt.assert_array_almost_equal(est_pwr2, avg_pwr2, decimal=-1)
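# Editor's note: the assertions above check a loose Parseval-type identity for the one-sided
# Welch estimate, mean(x**2) ~= sum_k PSD(f_k) * df with df = f[1] - f[0]; decimal=-1 only
# demands agreement to within an order of magnitude.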
def test_get_spectra_complex():
"""
Testing spectral estimation
"""
methods = (None,
{"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi},
{"this_method": 'welch', "NFFT": 1024, "Fs": 2 * np.pi})
for method in methods:
avg_pwr1 = []
avg_pwr2 = []
est_pwr1 = []
est_pwr2 = []
# Make complex signals:
r, _, _ = utils.ar_generator(N=2 ** 16) # It needs to be that long for
# the answers to converge
c, _, _ = utils.ar_generator(N=2 ** 16)
arsig1 = r + c * scipy.sqrt(-1)
r, _, _ = utils.ar_generator(N=2 ** 16)
c, _, _ = utils.ar_generator(N=2 ** 16)
arsig2 = r + c * scipy.sqrt(-1)
avg_pwr1.append((arsig1 * arsig1.conjugate()).mean())
avg_pwr2.append((arsig2 * arsig2.conjugate()).mean())
tseries = np.vstack([arsig1, arsig2])
f, c = tsa.get_spectra(tseries, method=method)
# \sum_{\omega} psd d\omega:
est_pwr1.append(np.sum(c[0, 0]) * (f[1] - f[0]))
est_pwr2.append(np.sum(c[1, 1]) * (f[1] - f[0]))
# Get it right within the order of magnitude:
npt.assert_array_almost_equal(est_pwr1, avg_pwr1, decimal=-1)
npt.assert_array_almost_equal(est_pwr2, avg_pwr2, decimal=-1)
def test_get_spectra_unknown_method():
"""
Test that providing an unknown method to get_spectra raises a ValueError
"""
tseries = np.array([[1, 2, 3], [4, 5, 6]])
npt.assert_raises(ValueError,
tsa.get_spectra, tseries, method=dict(this_method='foo'))
def test_periodogram():
"""Test some of the inputs to periodogram """
arsig, _, _ = utils.ar_generator(N=1024)
Sk = fftpack.fft(arsig)
f1, c1 = tsa.periodogram(arsig)
f2, c2 = tsa.periodogram(arsig, Sk=Sk)
npt.assert_equal(c1, c2)
# Check that providing a complex signal does the right thing
# (i.e. two-sided spectrum):
N = 1024
r, _, _ = utils.ar_generator(N=N)
c, _, _ = utils.ar_generator(N=N)
arsig = r + c * scipy.sqrt(-1)
f, c = tsa.periodogram(arsig)
npt.assert_equal(f.shape[0], N) # Should be N, not the one-sided N/2 + 1
def test_periodogram_csd():
"""Test corner cases of periodogram_csd"""
arsig1, _, _ = utils.ar_generator(N=1024)
arsig2, _, _ = utils.ar_generator(N=1024)
tseries = np.vstack([arsig1, arsig2])
Sk = fftpack.fft(tseries)
f1, c1 = tsa.periodogram_csd(tseries)
f2, c2 = tsa.periodogram_csd(tseries, Sk=Sk)
npt.assert_equal(c1, c2)
# Check that providing a complex signal does the right thing
# (i.e. two-sided spectrum):
N = 1024
r, _, _ = utils.ar_generator(N=N)
c, _, _ = utils.ar_generator(N=N)
arsig1 = r + c * scipy.sqrt(-1)
r, _, _ = utils.ar_generator(N=N)
c, _, _ = utils.ar_generator(N=N)
arsig2 = r + c * scipy.sqrt(-1)
tseries = np.vstack([arsig1, arsig2])
f, c = tsa.periodogram_csd(tseries)
npt.assert_equal(f.shape[0], N) # Should be N, not the one-sided N/2 + 1
def test_dpss_windows():
""" Test a funky corner case of DPSS_windows """
N = 1024
NW = 0  # Setting NW to 0 triggers the weird corner case in which some of
# the symmetric tapers have a negative average
Kmax = 7
# But that's corrected by the algorithm:
d, w = tsa.dpss_windows(N, NW, Kmax)
for this_d in d[0::2]:
npt.assert_equal(this_d.sum(axis=-1) < 0, False)
def test_dpss_properties():
""" Test conventions of Slepian eigenvectors """
N = 2000
NW = 200
d, lam = tsa.dpss_windows(N, NW, 2*NW-2)
# all 2NW-2 lambdas should be > 0.9
nt.assert_true(
(lam > 0.9).all(), 'Eigenvectors show poor spectral concentration'
)
# test orthonormality
err = np.linalg.norm(d.dot(d.T) - np.eye(2*NW-2), ord='fro')
nt.assert_true(err**2 < 1e-16, 'Eigenvectors not numerically orthonormal')
# test positivity of even functions
nt.assert_true(
(d[::2].sum(axis=1) > 0).all(),
'Even Slepian sequences should have positive DC'
)
# test positive initial slope of odd functions
# (this tests the sign of a linear slope)
pk = np.argmax(np.abs(d[1::2, :N // 2]), axis=1)
t = True
for p, f in zip(pk, d[1::2]):
t = t and np.sum( np.arange(1,p+1) * f[:p] ) >= 0
nt.assert_true(t, 'Odd Slepians should begin positive-going')
def test_get_spectra_bi():
"""
Test the bi-variate get_spectra function
"""
methods = (None,
{"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi},
{"this_method": 'welch', "NFFT": 1024, "Fs": 2 * np.pi})
for method in methods:
arsig1, _, _ = utils.ar_generator(N=2 ** 16)
arsig2, _, _ = utils.ar_generator(N=2 ** 16)
avg_pwr1 = (arsig1 ** 2).mean()
avg_pwr2 = (arsig2 ** 2).mean()
avg_xpwr = (arsig1 * arsig2.conjugate()).mean()
tseries = np.vstack([arsig1, arsig2])
f, fxx, fyy, fxy = tsa.get_spectra_bi(arsig1, arsig2, method=method)
# \sum_{\omega} PSD(\omega) d\omega:
est_pwr1 = np.sum(fxx * (f[1] - f[0]))
est_pwr2 = np.sum(fyy * (f[1] - f[0]))
est_xpwr = np.sum(fxy * (f[1] - f[0])).real
# Test that we have the right order of magnitude:
npt.assert_array_almost_equal(est_pwr1, avg_pwr1, decimal=-1)
npt.assert_array_almost_equal(est_pwr2, avg_pwr2, decimal=-1)
npt.assert_array_almost_equal(np.mean(est_xpwr),
np.mean(avg_xpwr),
decimal=-1)
def test_mtm_lin_combo():
"Test the functionality of cross and autospectrum MTM combinations"
spec1 = np.random.randn(5, 100) + 1j * np.random.randn(5, 100)
spec2 = np.random.randn(5, 100) + 1j * np.random.randn(5, 100)
# test on both broadcasted weights and per-point weights
for wshape in ((2, 5, 1), (2, 5, 100)):
weights = np.random.randn(*wshape)
sides = 'onesided'
mtm_cross = tsa.mtm_cross_spectrum(
spec1, spec2, (weights[0], weights[1]), sides=sides
)
nt.assert_true(mtm_cross.dtype in np.sctypes['complex'],
'Wrong dtype for crossspectrum')
nt.assert_true(len(mtm_cross) == 51,
'Wrong length for halfband spectrum')
sides = 'twosided'
mtm_cross = tsa.mtm_cross_spectrum(
spec1, spec2, (weights[0], weights[1]), sides=sides
)
nt.assert_true(len(mtm_cross) == 100,
'Wrong length for fullband spectrum')
sides = 'onesided'
mtm_auto = tsa.mtm_cross_spectrum(
spec1, spec1, weights[0], sides=sides
)
nt.assert_true(mtm_auto.dtype in np.sctypes['float'],
'Wrong dtype for autospectrum')
nt.assert_true(len(mtm_auto) == 51,
'Wrong length for halfband spectrum')
sides = 'twosided'
mtm_auto = tsa.mtm_cross_spectrum(
spec1, spec2, weights[0], sides=sides
)
nt.assert_true(len(mtm_auto) == 100,
'Wrong length for fullband spectrum')
def test_mtm_cross_spectrum():
"""
Test the multi-taper cross-spectral estimation. Based on the example in
doc/examples/multi_taper_coh.py
"""
NW = 4
K = 2 * NW - 1
N = 2 ** 10
n_reps = 10
n_freqs = N
tapers, eigs = tsa.dpss_windows(N, NW, 2 * NW - 1)
est_psd = []
for k in range(n_reps):
data, nz, alpha = utils.ar_generator(N=N)
fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha], n_freqs=n_freqs)
# 'one-sided', so multiply by 2:
psd = 2 * (hz * hz.conj()).real
tdata = tapers * data
tspectra = fftpack.fft(tdata)
L = N / 2 + 1
sides = 'onesided'
w, _ = utils.adaptive_weights(tspectra, eigs, sides=sides)
sxx = tsa.mtm_cross_spectrum(tspectra, tspectra, w, sides=sides)
est_psd.append(sxx)
fxx = np.mean(est_psd, 0)
psd_ratio = np.mean(fxx / psd)
# This is a rather lenient test, making sure that the average ratio is 1 to
# within an order of magnitude. That is, that they are equal on average:
npt.assert_array_almost_equal(psd_ratio, 1, decimal=1)
# Test raising of error in case the inputs don't make sense:
npt.assert_raises(ValueError,
tsa.mtm_cross_spectrum,
tspectra, np.r_[tspectra, tspectra],
(w, w))
@dec.slow
def test_multi_taper_psd_csd():
"""
Test the multi taper psd and csd estimation functions.
Based on the example in
doc/examples/multi_taper_spectral_estimation.py
"""
N = 2 ** 10
n_reps = 10
psd = []
est_psd = []
est_csd = []
for jk in [True, False]:
for k in range(n_reps):
for adaptive in [True, False]:
ar_seq, nz, alpha = utils.ar_generator(N=N, drop_transients=10)
ar_seq -= ar_seq.mean()
fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha],
n_freqs=N)
psd.append(2 * (hz * hz.conj()).real)
f, psd_mt, nu = tsa.multi_taper_psd(ar_seq, adaptive=adaptive,
jackknife=jk)
est_psd.append(psd_mt)
f, csd_mt = tsa.multi_taper_csd(np.vstack([ar_seq, ar_seq]),
adaptive=adaptive)
# Symmetrical in this case, so take one element out:
est_csd.append(csd_mt[0][1])
fxx = np.mean(psd, axis=0)
fxx_est1 = np.mean(est_psd, axis=0)
fxx_est2 = np.mean(est_csd, axis=0)
# Tests the psd:
psd_ratio1 = np.mean(fxx_est1 / fxx)
npt.assert_array_almost_equal(psd_ratio1, 1, decimal=-1)
# Tests the csd:
psd_ratio2 = np.mean(fxx_est2 / fxx)
npt.assert_array_almost_equal(psd_ratio2, 1, decimal=-1)
def test_gh57():
"""
https://github.com/nipy/nitime/issues/57
"""
data = np.random.randn(10, 1000)
for jk in [True, False]:
for adaptive in [True, False]:
f, psd, sigma = tsa.multi_taper_psd(data, adaptive=adaptive,
jackknife=jk)
def test_hermitian_periodogram_csd():
"""
Make sure CSD matrices returned by various methods have
Hermitian symmetry.
"""
sig = np.random.randn(4,256)
_, csd1 | |
This is retrieved
from one of following three URLs (which are stored in nussl.constants):
NUSSL_EFZ_AUDIO_METADATA_URL, NUSSL_EFZ_BENCHMARK_METADATA_URL, or NUSSL_EFZ_MODEL_METADATA_URL.
Args:
url (str): URL for the EFZ server that has metadata. One of these three:
NUSSL_EFZ_AUDIO_METADATA_URL, NUSSL_EFZ_BENCHMARK_METADATA_URL, or
NUSSL_EFZ_MODEL_METADATA_URL.
Returns:
(list): List of dicts with metadata for the desired file type.
"""
request = Request(url)
# Make sure to get the newest data
request.add_header('Pragma', 'no-cache')
request.add_header('Cache-Control', 'max-age=0')
try:
return json.loads(urlopen(request).read())
except Exception:
raise NoConnectivityError("Can't connect to internet")
def _download_metadata_for_file(file_name, file_type):
"""
Downloads the metadata entry for a specific file (:param:`file_name`) on the EFZ server.
Args:
file_name (str): File name as specified on the EFZ server.
file_type (str): 'Type' of file, either 'audio', 'model', or 'benchmark'.
Returns:
(dict) Metadata entry for the specified file, or ``None`` if it could not be located.
"""
metadata_urls = {
'audio': constants.NUSSL_EFZ_AUDIO_METADATA_URL,
'benchmark': constants.NUSSL_EFZ_BENCHMARK_METADATA_URL,
'model': constants.NUSSL_EFZ_MODEL_METADATA_URL,
}
if file_type in metadata_urls:
metadata_url = metadata_urls[file_type]
else:
# wrong file type, return
raise MetadataError(f'Cannot find metadata of type {file_type}.')
metadata = _download_all_metadata(metadata_url)
for file_metadata in metadata:
if file_metadata['file_name'] == file_name:
return file_metadata
raise MetadataError(
f'No matching metadata for file {file_name}'
f' at url {constants.NUSSL_EFZ_AUDIO_METADATA_URL}!'
)
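# Hypothetical usage sketch (editor's illustration; the file name is just an example taken
# from the docstrings below):
#   meta = _download_metadata_for_file('K0140.wav', 'audio')
#   meta['file_hash']   # precomputed hash that the download helpers compare against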
def download_audio_file(audio_file_name, local_folder=None, verbose=True):
"""
Downloads the specified audio file from the `nussl` External File Zoo (EFZ) server. The
downloaded file is stored in :param:`local_folder` if a folder is provided. If a folder is
not provided, `nussl` attempts to save the downloaded file in `~/.nussl/` (expanded) or in
`tmp/.nussl`. If the requested file is already in :param:`local_folder` (or one of the two
aforementioned directories) and the calculated hash matches the precomputed hash from the EFZ
server metadata, then the file will not be downloaded.
Args:
audio_file_name: (str) Name of the audio file to attempt to download.
local_folder: (str) Path to local folder in which to download the file.
If no folder is provided, `nussl` will store the file in `~/.nussl/` (expanded) or in
`/tmp/.nussl`.
verbose (bool): If ``True`` prints the status of the download to the console.
Returns:
(String) Full path to the requested file (whether downloaded or not).
Example:
>>> import nussl
>>> piano_path = nussl.efz_utils.download_audio_file('K0140.wav')
>>> piano_signal = nussl.AudioSignal(piano_path)
"""
file_metadata = _download_metadata_for_file(audio_file_name, 'audio')
file_hash = file_metadata['file_hash']
file_url = urljoin(constants.NUSSL_EFZ_AUDIO_URL, audio_file_name)
result = _download_file(audio_file_name, file_url, local_folder, 'audio',
file_hash=file_hash, verbose=verbose)
return result
def download_trained_model(model_name, local_folder=None, verbose=True):
"""
Downloads the specified trained model from the `nussl` External File Zoo (EFZ) server. The
downloaded file is stored in :param:`local_folder` if a folder is provided. If a folder is
not provided, `nussl` attempts to save the downloaded file in `~/.nussl/` (expanded) or in
`tmp/.nussl`. If the requested file is already in :param:`local_folder` (or one of the two
aforementioned directories) and the calculated hash matches the precomputed hash from the EFZ
server metadata, then the file will not be downloaded.
Args:
model_name: (str) Name of the trained model to attempt to download.
local_folder: (str) Path to local folder in which to download the file.
If no folder is provided, `nussl` will store the file in `~/.nussl/` (expanded) or in
`/tmp/.nussl`.
verbose (bool): If ``True`` prints the status of the download to the console.
Returns:
(String) Full path to the requested file (whether downloaded or not).
Example:
>>> import nussl
>>> model_path = nussl.efz_utils.download_trained_model('deep_clustering_model.h5')
>>> signal = nussl.AudioSignal()
>>> piano_signal = nussl.DeepClustering(signal, model_path=model_path)
"""
file_metadata = _download_metadata_for_file(model_name, 'model')
file_hash = file_metadata['file_hash']
file_url = urljoin(constants.NUSSL_EFZ_MODELS_URL, model_name)
result = _download_file(model_name, file_url, local_folder, 'models',
file_hash=file_hash, verbose=verbose)
return result
def download_benchmark_file(benchmark_name, local_folder=None, verbose=True):
"""
Downloads the specified benchmark file from the `nussl` External File Zoo (EFZ) server. The
downloaded file is stored in :param:`local_folder` if a folder is provided. If a folder is
not provided, `nussl` attempts to save the downloaded file in `~/.nussl/` (expanded) or in
`/tmp/.nussl`. If the requested file is already in :param:`local_folder` (or one of the two
aforementioned directories) and the calculated hash matches the precomputed hash from the EFZ
server metadata, then the file will not be downloaded.
Args:
benchmark_name: (str) Name of the trained model to attempt to download.
local_folder: (str) Path to local folder in which to download the file.
If no folder is provided, `nussl` will store the file in `~/.nussl/` (expanded) or in
`tmp/.nussl`.
verbose (bool): If ``True`` prints the status of the download to the console.
Returns:
(String) Full path to the requested file (whether downloaded or not).
Example:
>>> import nussl
>>> import numpy as np
>>> stm_atn_path = nussl.efz_utils.download_benchmark_file('benchmark_sym_atn.npy')
>>> sym_atm = np.load(stm_atn_path)
"""
file_metadata = _download_metadata_for_file(benchmark_name, 'benchmark')
file_hash = file_metadata['file_hash']
file_url = urljoin(constants.NUSSL_EFZ_BENCHMARKS_URL, benchmark_name)
result = _download_file(benchmark_name, file_url, local_folder, 'benchmarks',
file_hash=file_hash, verbose=verbose)
return result
def _download_file(file_name, url, local_folder, cache_subdir,
file_hash=None, cache_dir=None, verbose=True):
"""
Downloads the specified file from the given url.
Heavily inspired by and lovingly adapted from keras' `get_file` function:
https://github.com/fchollet/keras/blob/afbd5d34a3bdbb0916d558f96af197af1e92ce70/keras/utils/data_utils.py#L109
Args:
file_name: (String) name of the file located on the server
url: (String) url of the file
local_folder: (String) alternate folder in which to download the file
cache_subdir: (String) subdirectory of folder in which to download file
file_hash: (String) expected hash of downloaded file
cache_dir: (String) base cache directory; defaults to `~/.nussl` (expanded) when not provided
Returns:
(String) local path to downloaded file
"""
if local_folder not in [None, '']:
# local folder provided, let's create it if it doesn't exist and use it as datadir
os.makedirs(os.path.expanduser(local_folder), exist_ok=True)
datadir = os.path.expanduser(local_folder)
else:
if cache_dir is None:
cache_dir = os.path.expanduser(os.path.join('~', '.nussl'))
datadir_base = os.path.expanduser(cache_dir)
datadir = os.path.join(datadir_base, cache_subdir)
os.makedirs(datadir, exist_ok=True)
file_path = os.path.join(datadir, file_name)
download = False
if os.path.exists(file_path):
if file_hash is not None:
# compare the provided hash with the hash of the file currently at file_path
current_hash = _hash_file(file_path)
# if the hashes are equal, we already have the file we need, so don't download
if file_hash != current_hash:
if verbose:
warnings.warn(
f'Hash for {file_path} does not match known hash. '
f' Downloading {file_name} from servers...'
)
download = True
elif verbose:
print(f'Matching file found at {file_path}, skipping download.')
else:
download = True
else:
download = True
if download:
if verbose:
print(f'Saving file at {file_path}\nDownloading {file_name} from {url}')
def _dl_progress(count, block_size, total_size):
percent = int(count * block_size * 100 / total_size)
if percent <= 100:
sys.stdout.write(f'\r{file_name}...{percent}%')
sys.stdout.flush()
try:
try:
reporthook = _dl_progress if verbose else None
urlretrieve(url, file_path, reporthook)
if verbose: print() # print a new line after the progress is done.
except HTTPError as e:
raise FailedDownloadError(f'URL fetch failure on {url}: {e.code} -- {e.msg}')
except URLError as e:
raise FailedDownloadError(f'URL fetch failure on {url}: {e.errno} -- {e.reason}')
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(file_path):
os.remove(file_path)
raise e
# check hash of received file to see if it matches the provided hash
if file_hash is not None:
download_hash = _hash_file(file_path)
if file_hash != download_hash:
# the downloaded file is not what it should be. Get rid of it.
os.remove(file_path)
raise MismatchedHashError(
f'Deleted downloaded file ({file_path}) because of a hash mismatch.'
)
return file_path
else:
return file_path
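# Caching behaviour sketch (editor's note): when the file already exists locally and its hash
# matches file_hash, the download is skipped entirely; a mismatching hash triggers a re-download,
# and if the freshly downloaded file still fails the hash check it is deleted and a
# MismatchedHashError is raised.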
def _hash_directory(directory, ext=None):
"""
Calculates the hash of every child file in the given directory using python's built-in SHA256
function (using `os.walk()`, which also searches subdirectories recursively). If :param:`ext`
is specified, this will only look at files with extension provided.
This function is used to verify the integrity of data sets for use with nussl. Pretty much
just makes sure that when we loop through/look at a directory, we understand the structure
because the organization of the data set directories for different data sets are all unique
and thus need to be hard coded by each generator function (below). If we get a hash mismatch
we can throw an error easily.
Args:
directory (str): Directory within which file hashes get calculated. Searches recursively.
ext (str): If provided, this function will only calculate the hash on files with the given
extension.
Returns:
(str): String containing only hexadecimal digits of the hash of the
contents of the given directory.
"""
hash_list = []
for path, sub_dirs, files in os.walk(directory):
if ext is None:
hash_list.extend([_hash_file(os.path.join(path, f)) for f in files
if os.path.isfile(os.path.join(path, f))])
| |
from http import HTTPStatus
from unittest.mock import MagicMock
import jwt
from pytest import fixture
from app import app
from api.errors import INVALID_ARGUMENT
from tests.unit.payloads_for_tests import PRIVATE_KEY
@fixture(scope='session')
def client():
app.rsa_private_key = PRIVATE_KEY
app.testing = True
with app.test_client() as client:
yield client
@fixture(scope='session')
def valid_jwt(client):
def _make_jwt(
key='some_key',
host='exabeam.com',
jwks_host='visibility.amp.cisco.com',
aud='http://localhost',
kid='02B1174234C29F8EFB69911438F597FF3FFEE6B7',
wrong_structure=False,
wrong_jwks_host=False
):
payload = {
'key': key,
'host': host,
'jwks_host': jwks_host,
'aud': aud,
}
if wrong_jwks_host:
payload.pop('jwks_host')
if wrong_structure:
payload.pop('key')
return jwt.encode(
payload, client.application.rsa_private_key, algorithm='RS256',
headers={
'kid': kid
}
)
return _make_jwt
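# Usage sketch (editor's illustration; the Authorization header format is an assumption of
# these tests, not confirmed here):
#   token = valid_jwt(wrong_structure=True)   # inside a test, the fixture injects _make_jwt
#   client.post('/observe/observables', headers={'Authorization': f'Bearer {token}'})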
@fixture(scope='module')
def invalid_json_expected_payload():
def _make_message(message):
return {
'errors': [{
'code': INVALID_ARGUMENT,
'message': message,
'type': 'fatal'
}]
}
return _make_message
def mock_api_response(status_code=HTTPStatus.OK, payload=None, text=None):
mock_response = MagicMock()
mock_response.status_code = status_code
mock_response.ok = status_code == HTTPStatus.OK
mock_response.json = lambda: payload
mock_response.text = text
return mock_response
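# Illustration (editor's sketch): tests can patch the HTTP layer with this helper, e.g.
#   resp = mock_api_response(payload={'responses': []})
#   assert resp.ok and resp.json() == {'responses': []}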
@fixture(scope='module')
def ssl_error_expected_relay_response():
return {
'errors':
[
{
'code': 'unknown',
'message':
'Unable to verify SSL certificate: '
'Self signed certificate',
'type': 'fatal'
}
]
}
@fixture
def mock_exception_for_ssl_error():
mock_response = MagicMock()
mock_response.reason.args.__getitem__().verify_message = 'self signed' \
' certificate'
return mock_response
@fixture(scope='module')
def connection_error_expected_relay_response():
return {
'errors':
[
{
'code': 'connection error',
'message':
'Unable to connect to Exabeam, validate the '
'configured API endpoint: '
'https://exabeam.com',
'type': 'fatal'
}
]
}
@fixture(scope='module')
def invalid_header_error_expected_relay_response():
return {
'errors':
[
{
'code': 'authorization error',
'message':
'Authorization failed: wrong key',
'type': 'fatal'
}
]
}
@fixture(scope='module')
def bad_request_expected_relay_response():
return {
'errors':
[
{
'code': 'Bad Request',
'message': 'Unexpected response from Exabeam: '
'Bad request to Exabeam',
'type': 'fatal'
}
]
}
@fixture(scope='module')
def unknown_response_code_relay_response():
return {
'errors': [
{
'code': '522',
'message': 'Unexpected response from Exabeam: None',
'type': 'fatal'
}
]
}
@fixture(scope='module')
def expected_exabeam_response():
return {
'responses': [
{
'took': 26,
'timed_out': False,
'_shards': {
'total': 1,
'successful': 1,
'skipped': 0,
'failed': 0
},
'hits': {
'total': 39963,
'max_score': None,
'hits': [
{
'_index': 'exabeam-2021.08.11',
'_type': 'logs',
'_id': 'lms.kafka.topic_10_35121_bb3b8a648af1',
'_score': None,
'_routing': 'zwt8HOYI',
'_source': {
'exa_parser_name': 'code42-file-operations-4',
'forwarder': 'gke-tbd2-int-e2e-standard-7c2a2d'
'ba-lsxs.c.ops-dist-tbd2-int-e2e.'
'internal',
'device_name': 'JOHNM-OFFICIAL-',
'@timestamp': '2021-08-11T23:09:49.462Z',
'device_id': '944596934062634167',
'file_ext': 'Executable',
'file_name': 'Pandora.exe',
'exa_adjustedEventTime': '2021-08-11T23:05:28'
'.670Z',
'file_size': 9.2345856E7,
'exa_activity_type': [
'object-access/delete',
'object-access'
],
'domain': 'JOHNM-OFFICIAL-WIN10.qa.code42.com',
'exa_outcome': [
'success'
],
'mime': 'application/x-dosexec',
'accesses': 'DELETED',
'src_ip': '192.168.3.11',
'user_email': '<EMAIL>',
'sha256': '5fc8282e46b6e741b8d6fe2b3e35a21a62a'
'f9f4368a5b94eca90f7e6d527dc6c',
'indexTime': '2021-08-11T23:09:49.668Z',
'Vendor': 'Code42',
'dest_host': 'JOHNM-OFFICIAL-',
'data_type': 'file-operations',
'md5': '762545aa60caa6768542e15ac96ad770',
'port': 4793,
'is_reputation_domain': False,
'exa_rawEventTime': '2021-08-11T23:05:28.670Z',
'file_path': 'C:/Program Files/WindowsApps/Pan'
'doraMediaInc.29680B314EFC2_15.0.'
'3.0_x64__n619g4d5j0fnw/app/',
'message': '<110>1 2021-08-11T23:05:28.670Z '
'bb379a00ba2a Skyformation - 686537'
'8086067993358 - CEF:0|Skyformation'
'|SkyFormation Cloud Apps Security|'
'2.0.0|sk4-resource-deleted|resourc'
'e-deleted|0|cat=application-data c'
's6Label=raw-event destinationServi'
'ceName=Code42 fileType=file flexSt'
'ring1=DELETED msg=Resource [Resour'
'ce: file :: Pandora.exe] was delet'
'ed by [<EMAIL>] outc'
'ome=Executable proto=exe requestCl'
'ientApplication=Code42 - DEMO src='
'192.168.3.11 suid=username suser'
'=<EMAIL> ext_fileCat'
'egoryByExtension=Executable cs6={'
'\'eventId\':\'0_c4b5e830-824a-40a3'
'-a6d9-345664cfbb33_944596934062634'
'167_1020014027375976393_966\'} ',
'file_type': 'Executable',
'user_uid': '920256648733700755',
'time': '2021-08-11T23:05:28.670Z',
'Product': 'Code42',
'file_parent': 'C:/Program Files/WindowsApps/P'
'andoraMediaInc.29680B314EFC2_1'
'5.0.3.0_x64__n619g4d5j0fnw/'
'app/',
'@version': '1',
'exa_category': 'File',
'exa_device_type': [
'application'
],
'is_threat_src_ip': False,
'is_ransomware_src_ip': False,
'is_tor_src_ip': False
},
'fields': {
'indexTime': [
'2021-08-11T23:09:49.668Z'
]
},
'sort': [
1628723389668
]
}
]
}
}
]
}
@fixture(scope='module')
def expected_relay_response(success_observe_body, success_refer_body):
def _make_payload(route):
payload_to_route_match = {
'/observe/observables': success_observe_body,
'/refer/observables': success_refer_body
}
return payload_to_route_match[route]
return _make_payload
@fixture(scope='module')
def success_observe_body():
return {
'data': {
'sightings': {
'count': 1,
'docs': [
{
'confidence': 'High',
'count': 1,
'data': {
'columns': [
{
'name': 'forwarder',
'type': 'string'
},
{
'name': 'device_name',
'type': 'string'
},
{
'name': 'device_id',
'type': 'string'
},
{
'name': 'file_ext',
'type': 'string'
},
{
'name': 'file_name',
'type': 'string'
},
{
'name': 'file_size',
'type': 'string'
},
{
'name': 'domain',
'type': 'string'
},
{
'name': 'mime',
'type': 'string'
},
{
'name': 'accesses',
'type': 'string'
},
{
'name': 'src_ip',
'type': 'string'
},
{
'name': 'user_email',
'type': 'string'
},
{
'name': 'sha256',
'type': 'string'
},
{
'name': 'indexTime',
'type': 'string'
},
{
'name': 'dest_host',
'type': 'string'
},
{
'name': 'data_type',
'type': 'string'
},
{
'name': 'md5',
'type': 'string'
},
{
'name': 'port',
'type': 'string'
},
{
'name': 'file_path',
'type': 'string'
},
{
'name': 'file_type',
'type': 'string'
},
{
'name': 'user_uid',
'type': 'string'
},
{
'name': 'time',
'type': 'string'
},
{
'name': 'file_parent',
'type': 'string'
}
],
'rows': [
[
'gke-tbd2-int-e2e-standard-7c2a2dba-lsxs.c'
'.ops-dist-tbd2-int-e2e.internal',
'JOHNM-OFFICIAL-', '944596934062634167',
'Executable', 'Pandora.exe', 92345856.0,
'JOHNM-OFFICIAL-WIN10.qa.code42.com',
'application/x-dosexec', 'DELETED',
'192.168.3.11', '<EMAIL>',
'5fc8282e46b6e741b8d6fe2b3e35a21a62af9f436'
'8a5b94eca90f7e6d527dc6c',
'2021-08-11T23:09:49.668Z',
'JOHNM-OFFICIAL-', 'file-operations',
'762545aa60caa6768542e15ac96ad770', 4793,
'C:/Program Files/WindowsApps/PandoraMedia'
'Inc.29680B314EFC2_15.0.3.0_x64__n619g4d5j'
'0fnw/app/', 'Executable',
'920256648733700755',
'2021-08-11T23:05:28.670Z',
'C:/Program Files/WindowsApps/PandoraMedia'
'Inc.29680B314EFC2_15.0.3.0_x64__n619g4d5j'
'0fnw/app/'
]
]
},
'description': '```\n<110>1 2021-08-11T23:05:28.670Z '
'bb379a00ba2a Skyformation - 686537808'
'6067993358 - CEF:0|Skyformation|SkyFo'
'rmation Cloud Apps Security|2.0.0|sk4'
'-resource-deleted|resource-deleted|0|'
'cat=application-data cs6Label=raw-eve'
'nt destinationServiceName=Code42 file'
'Type=file flexString1=DELETED msg=Res'
'ource [Resource: file :: Pandora.exe]'
' was deleted by [<EMAIL>'
'm] outcome=Executable proto=exe reque'
'stClientApplication=Code42 - DEMO src'
'=192.168.3.11 suid=username suser=j'
'<EMAIL> ext_fileCategory'
'ByExtension=Executable cs6={\'eventId'
'\':\'0_c4b5e830-824a-40a3-a6d9-345664'
'cfbb33_944596934062634167_10200140273'
'75976393_966\'} \n```',
'external_ids': [
'lms.kafka.topic_10_35121_bb3b8a648af1'
],
'id': 'transient:sighting-f34e127d-a696-5e1a-8868-afa'
'fc4541eec',
'internal': True,
'observables': [
{
'type': 'domain', 'value': 'cisco.com'
}
],
'observed_time': {
'start_time': '2021-08-11T23:05:28.670Z'
},
'schema_version': '1.1.7',
'short_description': 'Exabeam received a log from gke'
'-tbd2-int-e2e-standard-7c2a2dba'
'-lsxs.c.ops-dist-tbd2-int-e2e.i'
'nternal containing the observab'
'le',
'source': 'Exabeam',
'source_uri': 'https://exabeam.com/data/app/dataui#/di'
'scover?_g=(time:(from:now-30d))&_a=(int'
'erval:(text:Auto,val:auto),query:(query'
'_string:(default_field:message,query:\''
'_id:%22lms.kafka.topic_10_35121_bb3b8a6'
'48af1%22\')),queryString:\'_id:%22lms.k'
'afka.topic_10_35121_bb3b8a648af1%22\',s'
'earchExecuted:!t,sort:!(indexTime,desc)'
',uiState:(vis:(colors:(Count:%23139df2)'
')))',
'title': 'Log message received by Exabeam in last 30 '
'days contains observable',
'type': 'sighting'
}
]
}
}
}
@fixture(scope='module')
def success_refer_body():
return {
'data':
[
{
'categories':
[
'Search',
'Exabeam'
],
'description': 'Search for this domain in '
'Exabeam Data Lake',
'id': 'ref-exabeam-search-domain-cisco.com',
'title': 'Search for this domain',
'url': 'https://exabeam.com/data/app/dataui#/discover?_g=('
'time:(from:now-30d))&_a=(interval:(text:Auto,val:a'
'uto),query:(query_string:(default_field:message,qu'
'ery:\'%22cisco.com%22%20AND%20NOT%20(event_sub'
'type:%22Exabeam%20Audit%20Event%22)\')),queryStrin'
'g:\'%22cisco.com%22%20AND%20NOT%20(event_subty'
'pe:%22Exabeam%20Audit%20Event%22)\',searchExecuted'
':!t,sort:!(indexTime,desc),uiState:(vis:(colors:(C'
'ount:%23139df2))))'
}
]
}
def exabeam_response_tile(tile_id):
tile_id_map = {
'affected_ips': exabeam_response_affected_ips(),
'activity_types': exabeam_response_activity_types(),
'categories': exabeam_response_categories(),
'activity_types_per_day': exabeam_response_activity_types_per_day(),
'categories_per_day': exabeam_response_categories_per_day()
}
return tile_id_map[tile_id]
def exabeam_response_affected_ips():
return {
'aggregations': {
'is_threat_src_ip': {
'doc_count_error_upper_bound': 0,
'sum_other_doc_count': 0,
'buckets': [
{
'key': 0,
'key_as_string': 'false',
'doc_count': 495736
}
]
},
'is_ransomware_src_ip': {
'doc_count_error_upper_bound': 0,
'sum_other_doc_count': 0,
'buckets': [
{
'key': 0,
'key_as_string': 'false',
'doc_count': 495736
}
]
},
'is_tor_src_ip': {
'doc_count_error_upper_bound': 0,
'sum_other_doc_count': 0,
'buckets': [
{
'key': 0,
'key_as_string': 'false',
'doc_count': 495736
}
]
}
}
}
def exabeam_response_activity_types():
return {
'aggregations': {
'exa_activity_type.keyword': {
'doc_count_error_upper_bound': 0,
'sum_other_doc_count': 0,
'buckets': [
{
'key': 'object-access',
'doc_count': 303061
},
{
'key': 'object-access/write',
'doc_count': 156235
},
{
'key': 'object-access/delete',
'doc_count': 146194
}
]
}
}
}
def exabeam_response_categories():
return {
'aggregations': {
'exa_category.keyword': {
'doc_count_error_upper_bound': 0,
'sum_other_doc_count': 0,
'buckets': [
{
'key': 'File',
'doc_count': 302998
},
{
'key': 'DLP',
'doc_count': 213888
},
{
'key': 'Application',
'doc_count': 664
}
]
}
}
}
def exabeam_response_activity_types_per_day():
return {
'aggregations': {
'activity_types_per_day': {
'buckets': [
{
'key_as_string': '<KEY>',
'key': 1632182400000,
'doc_count': 15078,
'exa_activity_type.keyword': {
'doc_count_error_upper_bound': 0,
'sum_other_doc_count': 0,
'buckets': [
{
'key': 'dlp-alert',
'doc_count': 8083
},
{
'key': 'authentication',
'doc_count': 4
}
]
}
},
{
'key_as_string': '<KEY>',
'key': 1632268800000,
'doc_count': 16190,
'exa_activity_type.keyword': {
'doc_count_error_upper_bound': 0,
'sum_other_doc_count': 0,
'buckets': [
{
'key': 'dlp-alert',
'doc_count': 8717
},
{
'key': 'authentication',
'doc_count': 14
}
]
}
}
]
}
}
}
def exabeam_response_categories_per_day():
return {
'aggregations': {
'categories_per_day': {
'buckets': [
{
'key_as_string': '<KEY>',
'key': 1632182400000,
'doc_count': 15078,
'exa_category.keyword': {
'doc_count_error_upper_bound': 0,
'sum_other_doc_count': 0,
'buckets': [
{
'key': 'DLP',
'doc_count': 15073
},
{
'key': 'Application',
'doc_count': 4
},
{
'key': 'Print Activity',
'doc_count': 1
}
]
}
},
{
'key_as_string': '<KEY>',
'key': 1632268800000,
'doc_count': 16190,
'exa_category.keyword': {
'doc_count_error_upper_bound': 0,
'sum_other_doc_count': 0,
'buckets': [
{
'key': 'DLP',
'doc_count': 16176
},
{
'key': 'Application',
'doc_count': 14
}
]
}
}
]
}
}
}
def relay_response_tile_data(tile_id):
tile_id_map = {
'affected_ips': relay_response_affected_ips(),
'activity_types': relay_response_activity_types(),
'categories': relay_response_categories(),
'activity_types_per_day': relay_response_activity_types_per_day(),
'categories_per_day': relay_response_categories_per_day()
}
return tile_id_map[tile_id]
def relay_response_affected_ips():
return [
{
'icon': 'warning',
'label': 'Ransomware IPs',
'link_uri': 'https://exabeam.com/data/app/dataui#/discover?_g=(ti'
'me:(from:now-30d))&_a=(interval:(text:Auto,val:auto)'
',query:(query_string:(default_field:message,query:\''
'is_ransomware_src_ip:"true"%20AND%20NOT%20(event_sub'
'type:%22Exabeam%20Audit%20Event%22)\')),queryString:'
'\'is_ransomware_src_ip:"true"%20AND%20NOT%20(event_s'
'ubtype:%22Exabeam%20Audit%20Event%22)\',searchExecut'
'ed:!t,sort:!(indexTime,desc),uiState:(vis:(colors:(C'
'ount:%23139df2))))',
'value': 0,
'value_unit': 'integer'
},
{
'icon': 'warning',
'label': 'Threat IPs',
'link_uri': 'https://exabeam.com/data/app/dataui#/discover?_g=(ti'
'me:(from:now-30d))&_a=(interval:(text:Auto,val:auto)'
',query:(query_string:(default_field:message,query:\''
'is_threat_src_ip:"true"%20AND%20NOT%20(event_subtype'
':%22Exabeam%20Audit%20Event%22)\')),queryString:\'is'
'_threat_src_ip:"true"%20AND%20NOT%20(event_subtype:%'
'22Exabeam%20Audit%20Event%22)\',searchExecuted:!t,so'
'rt:!(indexTime,desc),uiState:(vis:(colors:(Count:%23'
'139df2))))',
'value': 0,
'value_unit': 'integer'
},
{
'icon': 'warning',
'label': 'Tor IPs',
'link_uri': 'https://exabeam.com/data/app/dataui#/discover?_g=(ti'
'me:(from:now-30d))&_a=(interval:(text:Auto,val:auto)'
',query:(query_string:(default_field:message,query:\''
'is_tor_src_ip:"true"%20AND%20NOT%20(event_subtype:%2'
'2Exabeam%20Audit%20Event%22)\')),queryString:\'is_to'
'r_src_ip:"true"%20AND%20NOT%20(event_subtype:%22Exab'
'eam%20Audit%20Event%22)\',searchExecuted:!t,sort:!(i'
'ndexTime,desc),uiState:(vis:(colors:(Count:%23139df2'
'))))',
'value': 0,
'value_unit': 'integer'
}
]
def relay_response_activity_types():
return [
{
'key': 0,
'link_uri': 'https://exabeam.com/data/app/dataui#/discover?_g=(ti'
'me:(from:now-30d))&_a=(interval:(text:Auto,val:auto)'
',query:(query_string:(default_field:message,query:\''
'exa_activity_type:"object-access"%20AND%20NOT%20(eve'
'nt_subtype:%22Exabeam%20Audit%20Event%22)\')),queryS'
'tring:\'exa_activity_type:"object-access"%20AND%20NO'
'T%20(event_subtype:%22Exabeam%20Audit%20Event%22)\','
'searchExecuted:!t,sort:!(indexTime,desc),uiState:(vi'
's:(colors:(Count:%23139df2))))',
'value': 303061
},
{
'key': 1,
'link_uri': 'https://exabeam.com/data/app/dataui#/discover?_g=(ti'
'me:(from:now-30d))&_a=(interval:(text:Auto,val:auto)'
',query:(query_string:(default_field:message,query:\''
'exa_activity_type:"object-access/write"%20AND%20NOT%'
'20(event_subtype:%22Exabeam%20Audit%20Event%22)\')),'
'queryString:\'exa_activity_type:"object-access/write'
'"%20AND%20NOT%20(event_subtype:%22Exabeam%20Audit%20'
'Event%22)\',searchExecuted:!t,sort:!(indexTime,desc)'
',uiState:(vis:(colors:(Count:%23139df2))))',
'value': 156235},
{
'key': 2,
'link_uri': 'https://exabeam.com/data/app/dataui#/discover?_g=(ti'
'me:(from:now-30d))&_a=(interval:(text:Auto,val:auto)'
',query:(query_string:(default_field:message,query:\''
'exa_activity_type:"object-access/delete"%20AND%20NOT'
'%20(event_subtype:%22Exabeam%20Audit%20Event%22)\'))'
',queryString:\'exa_activity_type:"object-access/dele'
'te"%20AND%20NOT%20(event_subtype:%22Exabeam%20Audit%'
'20Event%22)\',searchExecuted:!t,sort:!(indexTime,des'
'c),uiState:(vis:(colors:(Count:%23139df2))))',
'value': 146194
}
]
def relay_response_categories():
return [
{
'key': 0,
'link_uri': 'https://exabeam.com/data/app/dataui#/discover?_g=(ti'
'me:(from:now-30d))&_a=(interval:(text:Auto,val:auto)'
',query:(query_string:(default_field:message,query:\''
'exa_category:"File"%20AND%20NOT%20(event_subtype:%22'
'Exabeam%20Audit%20Event%22)\')),queryString:\'exa_ca'
'tegory:"File"%20AND%20NOT%20(event_subtype:%22Exabea'
'm%20Audit%20Event%22)\',searchExecuted:!t,sort:!(ind'
'exTime,desc),uiState:(vis:(colors:(Count:%23139df2))'
'))',
'value': 302998},
{
'key': 1,
'link_uri': 'https://exabeam.com/data/app/dataui#/discover?_g=(ti'
'me:(from:now-30d))&_a=(interval:(text:Auto,val:auto)'
',query:(query_string:(default_field:message,query:\''
'exa_category:"DLP"%20AND%20NOT%20(event_subtype:%22E'
'xabeam%20Audit%20Event%22)\')),queryString:\'exa_cat'
| |
'SAME' or 'VALID'
data_format : string
An optional string from "NWC", "NCW". Defaults to "NWC", the data is stored in the order of
[batch, in_width, in_channels]. The "NCW" format stores data as [batch, in_channels, in_width].
dilations : int or list
An int or list of ints that has length 1 or 3 which defaults to 1.
The dilation factor for each dimension of input. If set to k > 1,
there will be k-1 skipped cells between each filter element on that dimension.
Dilations in the batch and depth dimensions must be 1.
name : string
A name for the operation (optional).
Returns
-------
A Tensor. Has the same type as input.
"""
return Conv1D(stride=stride, padding=padding, data_format=data_format, dilations=dilations)(input, filters)
def same_padding(input, weight, strides, dilations):
    # Torch convolution output size (per spatial dimension):
    #   H_out = floor( (H_in + 2*padding[0] - dilation[0]*(kernel_size[0] - 1) - 1) / stride[0] + 1 )
if isinstance(weight, torch.Tensor):
if len(input.shape) == 3:
filter_rows = weight.size(2)
if len(input.shape) == 4:
filter_rows = weight.size(2)
filter_cols = weight.size(3)
elif len(input.shape) == 5:
filter_rows = weight.size(2)
filter_cols = weight.size(3)
filter_depth = weight.size(4)
else:
if len(input.shape) == 3:
filter_rows = weight[0]
elif len(input.shape) == 4:
filter_rows = weight[0]
filter_cols = weight[1]
elif len(input.shape) == 5:
filter_rows = weight[0]
filter_cols = weight[1]
filter_depth = weight[2]
if len(input.shape) == 3:
input_rows = input.size(2)
out_rows = (input_rows + strides - 1) // strides
padding_rows = max(0, (out_rows - 1) * strides + (filter_rows - 1) * dilations + 1 - input_rows)
rows_odd = (padding_rows % 2 != 0)
return rows_odd, padding_rows
if len(input.shape) == 4:
input_rows = input.size(2)
input_cols = input.size(3)
# filter_rows = weight.size(2)
# filter_cols = weight.size(3)
out_rows = (input_rows + strides[0] - 1) // strides[0]
out_cols = (input_cols + strides[1] - 1) // strides[1]
padding_rows = max(0, (out_rows - 1) * strides[0] + (filter_rows - 1) * dilations[0] + 1 - input_rows)
padding_cols = max(0, (out_cols - 1) * strides[1] + (filter_cols - 1) * dilations[1] + 1 - input_cols)
rows_odd = (padding_rows % 2 != 0)
cols_odd = (padding_cols % 2 != 0)
return rows_odd, cols_odd, padding_rows, padding_cols
if len(input.shape) == 5:
input_rows = input.size(2)
input_cols = input.size(3)
input_depth = input.size(4)
# filter_rows = weight.size(2)
# filter_cols = weight.size(3)
# filter_depth = weight.size(4)
out_rows = (input_rows + strides[0] - 1) // strides[0]
out_cols = (input_cols + strides[1] - 1) // strides[1]
out_depth = (input_depth + strides[2] - 1) // strides[2]
padding_rows = max(0, (out_rows - 1) * strides[0] + (filter_rows - 1) * dilations[0] + 1 - input_rows)
padding_cols = max(0, (out_cols - 1) * strides[1] + (filter_cols - 1) * dilations[1] + 1 - input_cols)
padding_depth = max(0, (out_depth - 1) * strides[2] + (filter_depth - 1) * dilations[2] + 1 - input_depth)
rows_odd = (padding_rows % 2 != 0)
cols_odd = (padding_cols % 2 != 0)
depth_odd = (padding_depth % 2 != 0)
return rows_odd, cols_odd, depth_odd, padding_rows, padding_cols, padding_depth
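# --- Illustrative note (not part of the original module) ---
# Worked example of the 'same' padding arithmetic above, assuming a 4-D input with
# H_in = W_in = 5, a 3x3 filter, strides = (2, 2) and dilations = (1, 1):
#   out_rows     = (5 + 2 - 1) // 2                        = 3
#   padding_rows = max(0, (3 - 1)*2 + (3 - 1)*1 + 1 - 5)   = 2
#   rows_odd     = (2 % 2 != 0)                            = False
# so conv2d_same_padding() below pads symmetrically by padding_rows // 2 = 1 per side
# and only calls F.pad() for the extra pixel when one of the *_odd flags is True.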
class Conv2D(object):
def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None, groups=1):
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
        if self.data_format == 'NHWC':
self.strides = (strides[1], strides[2])
self.dilations = (dilations[1], dilations[2])
        elif self.data_format == 'NCHW':
self.strides = (strides[2], strides[3])
self.dilations = (dilations[2], dilations[3])
self.groups = groups
def __call__(self, input, filters):
if self.data_format == 'NHWC':
input = nhwc_to_nchw(input)
if self.padding == 'same':
output = self.conv2d_same_padding(input, filters)
else:
output = F.conv2d(input, filters, stride=self.strides, padding=self.padding,
dilation=self.dilations, groups=self.groups)
if self.data_format == 'NHWC':
output = nchw_to_nhwc(output)
return output
def conv2d_same_padding(self, input, weight, bias=None):
rows_odd, cols_odd, padding_rows, padding_cols = same_padding(input, weight, self.strides, self.dilations)
if rows_odd or cols_odd:
input = F.pad(input, [0, int(cols_odd), 0, int(rows_odd)])
return F.conv2d(
input, weight, bias, self.strides, padding=(padding_rows // 2, padding_cols // 2), dilation=self.dilations,
groups=self.groups
)
def conv2d(input, filters, strides, padding, data_format='NHWC', dilations=None):
"""
Computes a 2-D convolution given 4-D input and filters tensors.
Parameters
----------
input : tensor
Must be one of the following types: half, bfloat16, float32, float64. A 4-D tensor.
The dimension order is interpreted according to the value of data_format, see below for details.
filters : tensor
Must have the same type as input. A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels]
    strides : int or list
The stride of the sliding window for each dimension of input. If a single value is given it is replicated in the H and W dimension.
By default the N and C dimensions are set to 1. The dimension order is determined by the value of data_format, see below for details.
padding : string
"SAME" or "VALID"
data_format : string
"NHWC", "NCHW". Defaults to "NHWC".
    dilations : list of ints
        A list of ints that has length 1, 2 or 4, defaults to 1. The dilation factor for each dimension of input.
name : string
A name for the operation (optional).
Returns
-------
A Tensor. Has the same type as input.
"""
if data_format == 'NHWC':
input = nhwc_to_nchw(input)
output = F.conv2d(input, filters, stride=strides, padding=padding, dilation=dilations)
if data_format == 'NHWC':
output = nchw_to_nhwc(output)
return output
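# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the Conv2D wrapper above with NHWC data. It assumes that
# preprocess_2d_format() maps 'SAME' to the lower-case 'same' checked in __call__, and
# that `filters` is already in torch's (out_ch, in_ch, kH, kW) layout, since it is
# passed straight to F.conv2d.
def _conv2d_same_padding_example():
    x = torch.randn(1, 8, 8, 3)                  # NHWC: batch, height, width, channels
    w = torch.randn(16, 3, 3, 3)                 # (out_channels, in_channels, kH, kW)
    conv = Conv2D(strides=(1, 1, 1, 1), padding='SAME', data_format='NHWC',
                  dilations=(1, 1, 1, 1))
    y = conv(x, w)                               # SAME padding keeps the 8x8 spatial size
    return y.shape                               # expected: (1, 8, 8, 16)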
class Conv3D(object):
def __init__(self, strides, padding, data_format='NDHWC', dilations=None, out_channel=None, k_size=None):
self.data_format, self.padding = preprocess_3d_format(data_format, padding)
        if self.data_format == 'NDHWC':
self._strides = (strides[1], strides[2], strides[3])
self._dilations = (dilations[1], dilations[2], dilations[3])
        elif self.data_format == 'NCDHW':
self._strides = (strides[2], strides[3], strides[4])
self._dilations = (dilations[2], dilations[3], dilations[4])
def __call__(self, input, filters):
if self.data_format == 'NDHWC':
input = nhwc_to_nchw(input)
if self.padding == 'same':
out = self.conv3d_same_padding(input, weight=filters)
else:
out = F.conv3d(input, weight=filters, stride=self._strides, padding=self.padding, dilation=self._dilations)
if self.data_format == 'NDHWC':
out = nchw_to_nhwc(out)
return out
def conv3d_same_padding(self, input, weight, bias=None, groups=1):
rows_odd, cols_odd, depth_odd, padding_rows, padding_cols, padding_depth = same_padding(input, weight,
self._strides, self._dilations)
if rows_odd or cols_odd or depth_odd:
input = F.pad(input, [0, int(cols_odd), 0, int(rows_odd), 0, int(depth_odd)])
return F.conv3d(
input, weight, bias, self._strides, padding=(padding_rows // 2, padding_cols // 2, padding_depth//2),
dilation=self._dilations, groups=groups
)
def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None):
"""
Computes a 3-D convolution given 5-D input and filters tensors.
Parameters
----------
input : tensor
Must be one of the following types: half, bfloat16, float32, float64.
Shape [batch, in_depth, in_height, in_width, in_channels].
filters : tensor
Must have the same type as input. Shape [filter_depth, filter_height, filter_width, in_channels, out_channels].
in_channels must match between input and filters.
strides : list of ints
A list of ints that has length >= 5. 1-D tensor of length 5.
The stride of the sliding window for each dimension of input.
Must have strides[0] = strides[4] = 1.
padding : string
A string from: "SAME", "VALID". The type of padding algorithm to use.
data_format : string
An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data.
With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width].
dilations : list of ints
Defaults to [1, 1, 1, 1, 1]. 1-D tensor of length 5. The dilation factor for each dimension of input.
If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of data_format, see above for details.
Dilations in the batch and depth dimensions must be 1.
name : string
A name for the operation (optional).
Returns
-------
A Tensor. Has the same type as input.
"""
return Conv3D(strides=strides, padding=padding, data_format=data_format, dilations=dilations)(input, filters)
def lrn(inputs, depth_radius, bias, alpha, beta):
"""
Local Response Normalization.
Parameters
----------
inputs : tensor
Must be one of the following types: half, bfloat16, float32. 4-D.
depth_radius : int
Defaults to 5. 0-D. Half-width of the 1-D normalization window.
bias : float
Defaults to 1. An offset (usually positive to avoid dividing by 0).
alpha : float
Defaults to 1. A scale factor, usually positive.
beta : float
Defaults to 0.5. An exponent.
Returns
-------
A Tensor. Has the same type as input.
"""
    # Hedged completion sketch: delegate to torch's LocalResponseNorm (channels-first
    # input assumed). Torch's `size` is the full window width while TF-style
    # `depth_radius` is a half-width, so this mapping is an approximation.
    lrn_obj = torch.nn.LocalResponseNorm(size=depth_radius, alpha=alpha, beta=beta, k=bias)
    return lrn_obj(inputs)
<reponame>domenic/test262
#!/usr/bin/python
# Copyright 2009 the Sputnik authors. All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.
import logging
import optparse
import os
from os import path
import platform
import re
import subprocess
import sys
import tempfile
import time
class SputnikError(Exception):
def __init__(self, message):
self.message = message
def ReportError(s):
raise SputnikError(s)
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--command", default=None, help="The command-line to run")
result.add_option("--tests", default=path.abspath('.'), help="Path to the tests")
result.add_option("--cat", default=False, action="store_true",
help="Print test source code")
result.add_option("--summary", default=False, action="store_true",
help="Print summary after running tests")
result.add_option("--full-summary", default=False, action="store_true",
help="Print summary and test output after running tests")
result.add_option("--enable-strict-mode", default=False, action="store_true",
help="Run the mode also in ES5 strict mode")
return result
def ValidateOptions(options):
if not options.command:
ReportError("A --command must be specified.")
if not path.exists(options.tests):
ReportError("Couldn't find test path '%s'" % options.tests)
_PLACEHOLDER_PATTERN = re.compile(r"\{\{(\w+)\}\}")
_INCLUDE_PATTERN = re.compile(r"\$INCLUDE\(\"(.*)\"\);")
_SPECIAL_CALL_PATTERN = re.compile(r"\$([A-Z]+)(?=\()")
_SPECIAL_CALLS = {
'ERROR': 'testFailed',
'FAIL': 'testFailed',
'PRINT': 'testPrint'
}
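# Illustrative note (not part of the original runner): the patterns above drive the
# source rewriting done later in TestCase.GetSource and InstantiateTemplate, e.g.
#   _SPECIAL_CALL_PATTERN.sub(...)  rewrites  '$ERROR("boom")'              to 'testFailed("boom")'
#   _INCLUDE_PATTERN.sub(...)       replaces  '$INCLUDE("environment.js");' with that library file
#   _PLACEHOLDER_PATTERN.sub(...)   expands   'd8 {{path}}'                 to 'd8 /tmp/sputnik-xyz.js'
# ('d8' and the temp-file name are made-up placeholders for illustration).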
def IsWindows():
p = platform.system()
return (p == 'Windows') or (p == 'Microsoft')
def StripHeader(str):
while str.startswith('//') and "\n" in str:
str = str[str.index("\n")+1:]
return str.lstrip()
class TempFile(object):
def __init__(self, suffix="", prefix="tmp", text=False):
self.suffix = suffix
self.prefix = prefix
self.text = text
self.fd = None
self.name = None
self.is_closed = False
self.Open()
def Open(self):
(self.fd, self.name) = tempfile.mkstemp(
suffix = self.suffix,
prefix = self.prefix,
text = self.text
)
def Write(self, str):
os.write(self.fd, str)
def Read(self):
f = file(self.name)
result = f.read()
f.close()
return result
def Close(self):
if not self.is_closed:
self.is_closed = True
os.close(self.fd)
def Dispose(self):
try:
self.Close()
os.unlink(self.name)
except OSError, e:
logging.error("Error disposing temp file: %s", str(e))
class TestResult(object):
def __init__(self, exit_code, stdout, stderr, case):
self.exit_code = exit_code
self.stdout = stdout
self.stderr = stderr
self.case = case
def ReportOutcome(self, long_format):
name = self.case.GetName()
if self.HasUnexpectedOutcome():
if self.case.IsNegative():
print "%s was expected to fail but didn't" % name
elif (self.case.strict_mode and self.case.IsStrictModeNegative()):
print "%s was expected to fail in strict mode, but didn't" % name
else:
if long_format:
print "=== %s failed ===" % name
else:
print "%s: " % name
out = self.stdout.strip()
if len(out) > 0:
print "--- output ---"
print out
err = self.stderr.strip()
if len(err) > 0:
print "--- errors ---"
print err
if long_format:
print "==="
elif self.case.IsNegative():
print "%s failed as expected" % name
elif self.case.strict_mode:
if self.case.IsStrictModeNegative():
print "%s failed in strict mode as expected" % name
else:
print "%s passed in strict mode" % name
else:
print "%s passed" % name
def HasFailed(self):
return self.exit_code != 0
def HasUnexpectedOutcome(self):
if self.case.IsNegative():
return not self.HasFailed()
if self.case.IsStrictModeNegative():
return not self.HasFailed()
else:
return self.HasFailed()
class TestCase(object):
def __init__(self, suite, name, full_path, strict_mode=False):
self.suite = suite
self.name = name
self.full_path = full_path
self.contents = None
self.is_negative = None
self.strict_mode = strict_mode
self.is_strict_mode_negative = None
def GetName(self):
return path.join(*self.name)
def GetPath(self):
return self.name
def GetRawContents(self):
if self.contents is None:
f = open(self.full_path)
self.contents = f.read()
f.close()
return self.contents
def IsNegative(self):
if self.is_negative is None:
self.is_negative = ("@negative" in self.GetRawContents())
return self.is_negative
def IsStrictModeNegative(self):
if self.strict_mode and self.is_strict_mode_negative is None:
self.is_strict_mode_negative = ("@strict_mode_negative" in self.GetRawContents())
return self.is_strict_mode_negative
def GetSource(self):
source = self.suite.GetInclude("framework.js", False)
source += StripHeader(self.GetRawContents())
def IncludeFile(match):
return self.suite.GetInclude(match.group(1))
source = _INCLUDE_PATTERN.sub(IncludeFile, source)
def SpecialCall(match):
key = match.group(1)
return _SPECIAL_CALLS.get(key, match.group(0))
if self.strict_mode:
source = '"use strict";\nvar strict_mode = true;\n' + _SPECIAL_CALL_PATTERN.sub(SpecialCall, source)
else:
source = "var strict_mode = false; \n" + _SPECIAL_CALL_PATTERN.sub(SpecialCall, source)
return source
def InstantiateTemplate(self, template, params):
def GetParameter(match):
key = match.group(1)
return params.get(key, match.group(0))
return _PLACEHOLDER_PATTERN.sub(GetParameter, template)
def RunTestIn(self, command_template, tmp):
tmp.Write(self.GetSource())
tmp.Close()
command = self.InstantiateTemplate(command_template, {
'path': tmp.name
})
(code, out, err) = self.Execute(command)
return TestResult(code, out, err, self)
def Execute(self, command):
if IsWindows():
args = '"%s"' % command
else:
args = command.split(" ")
stdout = TempFile(prefix="sputnik-out-")
stderr = TempFile(prefix="sputnik-err-")
try:
logging.info("exec: %s", str(args))
process = subprocess.Popen(
args,
shell = IsWindows(),
stdout = stdout.fd,
stderr = stderr.fd
)
code = process.wait()
out = stdout.Read()
err = stderr.Read()
finally:
stdout.Dispose()
stderr.Dispose()
return (code, out, err)
def Run(self, command_template):
tmp = TempFile(suffix=".js", prefix="sputnik-", text=True)
try:
result = self.RunTestIn(command_template, tmp)
finally:
tmp.Dispose()
return result
def Print(self):
print self.GetSource()
class ProgressIndicator(object):
def __init__(self, count):
self.count = count
self.succeeded = 0
self.failed = 0
self.failed_tests = []
def HasRun(self, result):
result.ReportOutcome(True)
if result.HasUnexpectedOutcome():
self.failed += 1
self.failed_tests.append(result)
else:
self.succeeded += 1
def MakePlural(n):
if (n == 1):
return (n, "")
else:
return (n, "s")
class TestSuite(object):
  def __init__(self, root, strict_mode):
    self.test_root = path.join(root, 'tests', 'Conformance')
    self.lib_root = path.join(root, 'lib')
    self.strict_mode = strict_mode
self.include_cache = { }
def Validate(self):
if not path.exists(self.test_root):
ReportError("No test repository found")
if not path.exists(self.lib_root):
ReportError("No test library found")
def IsHidden(self, path):
return path.startswith('.') or path == 'CVS'
def IsTestCase(self, path):
return path.endswith('.js')
def ShouldRun(self, rel_path, tests):
if len(tests) == 0:
return True
for test in tests:
if test in rel_path:
return True
return False
def GetTimeZoneInfoInclude(self):
dst_attribs = GetDaylightSavingsAttribs()
if not dst_attribs:
return None
lines = []
for key in sorted(dst_attribs.keys()):
lines.append('var $DST_%s = %s;' % (key, str(dst_attribs[key])))
localtz = time.timezone / -3600
lines.append('var $LocalTZ = %i;' % localtz)
return "\n".join(lines)
def GetSpecialInclude(self, name):
if name == "environment.js":
return self.GetTimeZoneInfoInclude()
else:
return None
def GetInclude(self, name, strip_header=True):
key = (name, strip_header)
if not key in self.include_cache:
value = self.GetSpecialInclude(name)
if value:
self.include_cache[key] = value
else:
static = path.join(self.lib_root, name)
if path.exists(static):
f = open(static)
contents = f.read()
if strip_header:
contents = StripHeader(contents)
self.include_cache[key] = contents + "\n"
f.close()
else:
self.include_cache[key] = ""
return self.include_cache[key]
def EnumerateTests(self, tests):
logging.info("Listing tests in %s", self.test_root)
cases = []
for root, dirs, files in os.walk(self.test_root):
for f in [x for x in dirs if self.IsHidden(x)]:
dirs.remove(f)
dirs.sort()
for f in sorted(files):
if self.IsTestCase(f):
full_path = path.join(root, f)
if full_path.startswith(self.test_root):
rel_path = full_path[len(self.test_root)+1:]
else:
logging.warning("Unexpected path %s", full_path)
rel_path = full_path
if self.ShouldRun(rel_path, tests):
basename = path.basename(full_path)[:-3]
name = rel_path.split(path.sep)[:-1] + [basename]
cases.append(TestCase(self, name, full_path, False))
if self.strict_mode:
cases.append(TestCase(self, name, full_path, True))
logging.info("Done listing tests")
return cases
def PrintSummary(self, progress):
print
print "=== Summary ==="
count = progress.count
succeeded = progress.succeeded
failed = progress.failed
print " - Ran %i test%s" % MakePlural(count)
if progress.failed == 0:
print " - All tests succeeded"
else:
percent = ((100.0 * succeeded) / count,)
print " - Passed %i test%s (%.1f%%)" % (MakePlural(succeeded) + percent)
percent = ((100.0 * failed) / count,)
print " - Failed %i test%s (%.1f%%)" % (MakePlural(failed) + percent)
positive = [c for c in progress.failed_tests if not c.case.IsNegative()]
negative = [c for c in progress.failed_tests if c.case.IsNegative()]
if len(positive) > 0:
print
print "Failed tests"
for result in positive:
print " %s" % result.case.GetName()
if len(negative) > 0:
print
print "Expected to fail but passed ---"
for result in negative:
print " %s" % result.case.GetName()
def PrintFailureOutput(self, progress):
for result in progress.failed_tests:
print
result.ReportOutcome(False)
def Run(self, command_template, tests, print_summary, full_summary):
if not "{{path}}" in command_template:
command_template += " {{path}}"
cases = self.EnumerateTests(tests)
if len(cases) == 0:
ReportError("No tests to run")
progress = ProgressIndicator(len(cases))
for case in cases:
result = case.Run(command_template)
progress.HasRun(result)
if print_summary:
self.PrintSummary(progress)
if full_summary:
self.PrintFailureOutput(progress)
else:
print
print "Use --full-summary to see output from failed tests"
print
def Print(self, tests):
cases = self.EnumerateTests(tests)
if len(cases) > 0:
cases[0].Print()
def GetDaylightSavingsTimes():
# Is the given floating-point time in DST?
def IsDst(t):
return time.localtime(t)[-1]
# Binary search to find an interval between the two times no greater than
# delta where DST switches, returning the midpoint.
def FindBetween(start, end, delta):
while end - start > delta:
middle = (end + start) / 2
if IsDst(middle) == IsDst(start):
start = middle
else:
end = middle
return (start + end) / 2
now = time.time()
one_month = (30 * 24 * 60 * 60)
# First find a date with different daylight savings. To avoid corner cases
# we try four months before and after today.
after = now + 4 * one_month
before = now - 4 * one_month
if IsDst(now) == IsDst(before) and IsDst(now) == IsDst(after):
logging.warning("Was unable to determine DST info.")
return None
# Determine when the change occurs between now | |
<reponame>derezin/DPPy
# coding: utf8
""" Implementation of finite DPP exact samplers derived from:
- the raw **projection** correlation :math:`K` kernel (no need for eigendecomposition)
- the eigendecomposition of the correlation :math:`K` kernel
.. seealso::
`Documentation on ReadTheDocs <https://dppy.readthedocs.io/en/latest/finite_dpps/exact_sampling.html>`_
"""
import numpy as np
import scipy.linalg as la
from dppy.utils import inner1d, check_random_state
#####################
# Projection kernel #
#####################
# Sample projection DPP from kernel
def proj_dpp_sampler_kernel(kernel, mode='GS', size=None, random_state=None):
"""
.. seealso::
- :func:`proj_dpp_sampler_kernel_GS <proj_dpp_sampler_kernel_GS>`
- :func:`proj_dpp_sampler_kernel_Schur <proj_dpp_sampler_kernel_Schur>`
- :func:`proj_dpp_sampler_kernel_Chol <proj_dpp_sampler_kernel_Chol>`
"""
rng = check_random_state(random_state)
if size:
rank = np.rint(np.trace(kernel)).astype(int)
if size > rank:
raise ValueError('size k={} > rank={}'. format(size, rank))
# Sample from orthogonal projection kernel K = K^2 = K.H K
if mode == 'GS': # Gram-Schmidt equiv Cholesky
sampl = proj_dpp_sampler_kernel_GS(kernel, size, rng)
elif mode == 'Chol': # Cholesky updates of Pou19
sampl = proj_dpp_sampler_kernel_Chol(kernel, size, rng)[0]
elif mode == 'Schur': # Schur complement
sampl = proj_dpp_sampler_kernel_Schur(kernel, size, rng)
else:
str_list = ['Invalid sampling mode, choose among:',
'- "GS (default)',
'- "Chol"',
'- "Schur"',
'Given "{}"'.format(mode)]
raise ValueError('\n'.join(str_list))
return sampl
def proj_dpp_sampler_kernel_Chol(K, size=None, random_state=None):
""" Sample from:
- :math:`\\operatorname{DPP}(K)` with orthogonal projection **correlation** kernel :math:`K` if ``size`` is not provided
    - :math:`\\operatorname{k-DPP}` with orthogonal projection **likelihood** kernel :math:`K` with :math:`k=` ``size``
Chain rule is applied by performing Cholesky updates of :math:`K`.
:param K:
Orthogonal projection kernel.
:type K:
array_like
    :param size:
        Size of the sample.
        Default is :math:`k=\\operatorname{trace}(K)=\\operatorname{rank}(K)`.
    :type size:
int
:return:
If ``size`` is not provided (None),
A sample :math:`\\mathcal{X}` from :math:`\\operatorname{DPP}(K)`.
If ``size`` is provided,
A sample :math:`\\mathcal{X}` from :math:`\\operatorname{k-DPP}(K)`.
along with in-place Cholesky factorization of :math:`\\mathbf{K}_{\\mathcal{X} }`
:rtype:
list and array_like
.. caution::
The current implementation is an attempt of @guilgautier to reproduce the original C implementation of `catamari <https://gitlab.com/hodge_star/catamari>`_
.. seealso::
- :cite:`Pou19` Algorithm 3 and :ref:`catamari code <https://gitlab.com/hodge_star/catamari/blob/38718a1ea34872fb6567e019ece91fbeb5af5be1/include/catamari/dense_dpp/elementary_hermitian_dpp-impl.hpp#L37>`_ for the Hermitian swap routine.
- :func:`proj_dpp_sampler_kernel_GS <proj_dpp_sampler_kernel_GS>`
- :func:`proj_dpp_sampler_kernel_Schur <proj_dpp_sampler_kernel_Schur>`
"""
rng = check_random_state(random_state)
hermitian = True if K.dtype.kind == 'c' else False
N, rank = len(K), np.rint(np.trace(K)).astype(int)
if size is None: # full projection DPP
size = rank
# else: k-DPP with k = size
A = K.copy()
d = np.diagonal(A).astype(float)
orig_indices = np.arange(N)
for j in range(size):
# Sample from pivot index and permute
t = rng.choice(range(j, N), p=np.abs(d[j:]) / (rank - j))
# Hermitian swap of indices j and t of A (may be written in a function)
# bottom swap
A[t + 1:, [j, t]] = A[t + 1:, [t, j]]
# inner swap
tmp = A[j + 1:t, j].copy()
np.conj(A[t, j + 1:t], out=A[j + 1:t, j])
np.conj(tmp, out=A[t, j + 1:t])
# corner swap
A[t, j] = A[t, j].conj()
# diagonal swap
A[[j, t], [j, t]] = A[[t, j], [t, j]].real
# left swap
A[[j, t], :j] = A[[t, j], :j]
# Swap positions j and t of orig_indices and d
orig_indices[[j, t]] = orig_indices[[t, j]]
d[[j, t]] = d[[t, j]]
A[j, j] = np.sqrt(d[j])
if j == size - 1:
break
# Form new column and update diagonal
A[j + 1:, j] -= A[j + 1:, :j].dot(A[j, :j].conj())
A[j + 1:, j] /= A[j, j]
if hermitian:
d[j + 1:] -= A[j + 1:, j].real**2 + A[j + 1:, j].imag**2
else:
d[j + 1:] -= A[j + 1:, j]**2
return orig_indices[:size].tolist(), A[:size, :size]
def proj_dpp_sampler_kernel_GS(K, size=None, random_state=None):
""" Sample from:
- :math:`\\operatorname{DPP}(K)` with orthogonal projection **correlation** kernel :math:`K` if ``size`` is not provided
    - :math:`\\operatorname{k-DPP}` with orthogonal projection **likelihood** kernel :math:`K` with :math:`k=` ``size``
Chain rule is applied by performing sequential Gram-Schmidt orthogonalization or equivalently Cholesky decomposition updates of :math:`K`.
:param K:
Orthogonal projection kernel.
:type K:
array_like
    :param size:
        Size of the sample.
        Default is :math:`k=\\operatorname{trace}(K)=\\operatorname{rank}(K)`.
    :type size:
int
:return:
If ``size`` is not provided (None),
A sample from :math:`\\operatorname{DPP}(K)`.
If ``size`` is provided,
A sample from :math:`\\operatorname{k-DPP}(K)`.
:rtype:
array_like
.. seealso::
        - :cite:`TrBaAm18` Algorithm 3, :cite:`Gil14` Algorithm 2
- :func:`proj_dpp_sampler_kernel_Schur <proj_dpp_sampler_kernel_Schur>`
- :func:`proj_dpp_sampler_kernel_Chol <proj_dpp_sampler_kernel_Chol>`
"""
rng = check_random_state(random_state)
# Initialization
# ground set size / rank(K) = Tr(K)
N, rank = len(K), np.rint(np.trace(K)).astype(int)
if size is None: # full projection DPP
size = rank
# else: k-DPP with k = size
ground_set = np.arange(N)
sampl = np.zeros(size, dtype=int) # sample list
avail = np.ones(N, dtype=bool) # available items
c = np.zeros((N, size))
norm_2 = K.diagonal().copy() # residual norm^2
for it in range(size):
j = rng.choice(ground_set[avail],
p=np.abs(norm_2[avail]) / (rank - it))
sampl[it] = j
if it == size - 1:
break
# Update the Cholesky factor
avail[j] = False
c[avail, it] = (K[avail, j] - c[avail, :it].dot(c[j, :it]))\
/ np.sqrt(norm_2[j])
norm_2[avail] -= c[avail, it]**2
return sampl
def proj_dpp_sampler_kernel_Schur(K, size=None, random_state=None):
""" Sample from:
- :math:`\\operatorname{DPP}(K)` with orthogonal projection **correlation** kernel :math:`K` if ``size`` is not provided
- :math:`\\operatorname{k-DPP}` with orthogonal projection **likelihood** kernel :math:`K` with :math:`k=` ``size``
Chain rule is applied by computing the Schur complements.
:param K:
Orthogonal projection kernel.
:type K:
array_like
:param size:
Size of the sample.
Default is :math:`k=\\operatorname{trace}(K)=\\operatorname{rank}(K)`.
:type size:
int
:return:
If ``size`` is not provided (None),
A sample from :math:`\\operatorname{DPP}(K)`.
If ``size`` is provided,
A sample from :math:`\\operatorname{k-DPP}(K)`.
:rtype:
array_like
.. seealso::
- :func:`proj_dpp_sampler_kernel_GS <proj_dpp_sampler_kernel_GS>`
- :func:`proj_dpp_sampler_kernel_Chol <proj_dpp_sampler_kernel_Chol>`
"""
rng = check_random_state(random_state)
# Initialization
# ground set size / rank(K) = Tr(K)
N, rank = len(K), np.rint(np.trace(K)).astype(int)
if size is None: # full projection DPP
size = rank
# else: k-DPP with k = size
ground_set = np.arange(N)
sampl = np.zeros(size, dtype=int) # sample list
avail = np.ones(N, dtype=bool) # available items
# Schur complement list i.e. residual norm^2
schur_comp = K.diagonal().copy()
K_inv = np.zeros((size, size))
for it in range(size):
# Pick a new item proportionally to residual norm^2
j = rng.choice(ground_set[avail],
p=np.abs(schur_comp[avail]) / (rank - it))
# store the item and make it unavailable
sampl[it], avail[j] = j, False
# Update Schur complements K_ii - K_iY (K_Y)^-1 K_Yi
#
# 1) use Woodbury identity to update K[Y,Y]^-1 to K[Y+j,Y+j]^-1
# K[Y+j,Y+j]^-1 =
# [ K[Y,Y]^-1 + (K[Y,Y]^-1 K[Y,j] K[j,Y] K[Y,Y]^-1)/schur_j,
# -K[Y,Y]^-1 K[Y,j]/schur_j]
# [ -K[j,Y] K[Y,Y]^-1/schur_j,
# 1/schur_j]
if it == 0:
K_inv[0, 0] = 1.0 / K[j, j]
elif it == 1:
i = sampl[0]
K_inv[:2, :2] = np.array([[K[j, j], -K[j, i]],
[-K[j, i], K[i, i]]])\
/ (K[i, i] * K[j, j] - K[j, i]**2)
elif it < size - 1:
temp = K_inv[:it, :it].dot(K[sampl[:it], j]) # K_Y^-1 K_Yj
# K_jj - K_jY K_Y^-1 K_Yj
schur_j = K[j, j] - K[j, sampl[:it]].dot(temp)
K_inv[:it, :it] += np.outer(temp, temp / schur_j)
K_inv[:it, it] = - temp / schur_j
K_inv[it, :it] = K_inv[:it, it]
K_inv[it, it] = 1.0 / schur_j
else: # it == size-1
break # no need to update for nothing
# 2) update Schur complements
# K_ii - K_iY (K_Y)^-1 K_Yi for Y <- Y+j
K_iY = K[np.ix_(avail, sampl[:it + 1])]
schur_comp[avail] = K[avail, avail]\
- inner1d(K_iY.dot(K_inv[:it+1, :it+1]), K_iY, axis=1)
return sampl
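# --- Illustrative usage sketch (not part of DPPy's source) ---
# A minimal, hedged example of drawing a sample with the projection samplers above.
# The rank-2 kernel built here is an assumption purely for illustration.
def _proj_dpp_example(random_state=0):
    rng = check_random_state(random_state)
    N, r = 6, 2
    eig_vecs, _ = la.qr(rng.randn(N, r), mode='economic')  # N x r orthonormal columns
    K = eig_vecs.dot(eig_vecs.T)                            # orthogonal projection kernel, rank 2
    # Full projection DPP sample; its size equals rank(K) = trace(K) = 2
    return proj_dpp_sampler_kernel(K, mode='GS', random_state=rng)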
##################
# Generic kernel #
##################
# Directly from correlation kernel, without spectral decomposition
##################################################################
def dpp_sampler_generic_kernel(K, random_state=None):
    """ Sample from generic :math:`\\operatorname{DPP}(\\mathbf{K})` with potentially non-hermitian correlation kernel :math:`\\mathbf{K}`, based on an :math:`LU` factorization procedure.
:param K:
Correlation kernel (potentially non hermitian).
:type K:
array_like
:return:
A sample :math:`\\mathcal{X}` from :math:`\\operatorname{DPP}(K)` and
        the in-place :math:`LU` factorization of :math:`K − I_{\\mathcal{X}^{c}}`, where :math:`I_{\\mathcal{X}^{c}}` is the diagonal indicator matrix for the entries not in the sample :math:`\\mathcal{X}`.
:rtype:
list and array_like
.. seealso::
- :cite:`Pou19` Algorithm 1
"""
rng = check_random_state(random_state)
A = K.copy()
sample = []
for j in range(len(A)):
if rng.rand() < A[j, j]:
sample.append(j)
else:
A[j, j] -= 1
A[j + 1:, j] /= A[j, j]
A[j + 1:, j + 1:] -= np.outer(A[j + 1:, j], A[j, j + 1:])
# A[j+1:, j+1:] -= np.einsum('i,j', A[j+1:, j], A[j, j+1:])
return sample, A
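# Illustrative note (not part of DPPy's source): a typical call to the sampler above is
#   sample, A = dpp_sampler_generic_kernel(K)
# where K is a valid (possibly non-hermitian) correlation kernel; `sample` is a plain
# Python list of selected indices and `A` holds the in-place LU factors of K - I_{X^c}.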
# From spectral decomposition
#############################
# Phase 1: subsample eigenvectors by drawing independent Bernoulli variables with parameter the eigenvalues of the correlation kernel K.
def dpp_eig_vecs_selector(ber_params, eig_vecs,
random_state=None):
""" Phase 1 of exact sampling procedure. Subsample eigenvectors :math:`V` of the | |
except ValueError:
showwarning("An unknown error occured")
return 0
def apply(self):
# Build list of tiles in this group
tile_out = []
tile_list = list(App.tile_ids.items())
del tile_list[0]
for i in self.tile_value_list:
t_id = tile_list[i.get()][0]
            # If the current value was left at the default (missing.png), don't include it in the output
if t_id != 'tiles/missing.png':
tile_out.append(t_id)
# Build list of decos in this group
deco_out = []
deco_list = list(App.deco_ids.items())
del deco_list[0]
for i in self.deco_value_list:
d_id = deco_list[i.get()][0]
            # If the current value was left at the default (box.png), don't include it in the output
if d_id != 'tiles/box.png':
deco_out.append(d_id)
self.group_dict[self.current_group] = tile_out + deco_out
self.result = self.group_dict
class App:
tiles = []
decos = []
translate_tk2f = {0: 0}
translate_f2tk = {0: 0}
load_tiles = {}
try:
with open('assets/image_config.config', 'r') as rf:
tile_ids = literal_eval(rf.readline())
deco_ids = literal_eval(rf.readline())
groups = literal_eval(rf.readline())
except FileNotFoundError:
with open('assets/image_config.config', 'w') as wf:
            wf.write('{0: 0, "tiles/missing.png": 0, "tiles/block.png": 1}\n')
            wf.write('{0: 0, "tiles/box.png": 0}\n')
            wf.write('{"All": ["tiles/missing.png", "tiles/block.png"]}\n')
tile_ids = {0: 0, "tiles/missing.png": 0, "tiles/block.png": 1}
deco_ids = {0: 0, "tiles/box.png": 0}
groups = {"All": ["tiles/missing.png", "tiles/block.png"]}
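    # Illustrative note (not part of the original tool): image_config.config is read
    # with three readline() calls, so it must hold exactly three Python literals, one
    # per line and in this order (matching the defaults written above):
    #   {0: 0, "tiles/missing.png": 0, "tiles/block.png": 1}    <- tile_ids
    #   {0: 0, "tiles/box.png": 0}                               <- deco_ids
    #   {"All": ["tiles/missing.png", "tiles/block.png"]}        <- groups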
def __init__(self, master):
master.title("Worldbuilder")
master.iconbitmap('assets/hammer.ico')
master.state('zoomed')
frame = Frame(master)
frame.pack(fill=NONE, expand=0)
saved = IntVar(master) # 0: Not saved, 1: Saved
saved.set(1)
selected_image = IntVar(master) # Currently selected base-layer tile
selected_deco = IntVar(master) # Currently selected decoration tile
selected_load = IntVar(master) # Currently selected loading zone tile
selected_light = IntVar(master) # Currently selected lightmap tile
cursor_mode = IntVar(master) # 0: Regular mode, 1: Pan mode, 2: Busy mode
cursor_mode.set(0)
view_mode = IntVar(master) # 0: View ground layer, 1: View decoration layer, 2: View loading zones
force_grid = IntVar(master) # 0: Do not force grid, 1: Force grid
catagories = StringVar(master)
self.tilemap = build_matrix(16, 9)
self.decomap = build_matrix(16, 9)
self.directory = "no_file"
self.colliders = []
self.loading_zones = {}
self.light_sources = []
self.default_start = (0, 0)
self.copied_load_settings = None
# Frame setup + coordinate indicator setup
self.menu_frame = Frame(frame)
self.menu_frame.pack(side=TOP, anchor=N+W)
        self.coords_label = Label(frame, text=r"¯\_(ツ)_/¯")
self.coords_label.pack(side=BOTTOM, anchor=W)
self.map_frame = Frame(frame, bd=2, relief=SUNKEN, bg="WHITE", width=64*16, height=64*9)
self.map_frame.pack(padx=10, pady=10, side=LEFT, anchor=CENTER, expand=0)
self.tile_frame = Frame(frame, bd=2, relief=SUNKEN)
self.tile_frame.pack(padx=5, pady=5, side=RIGHT, anchor=E, expand=0)
# Additional Options Panel
self.pointer = PhotoImage(file="assets/pointer_cursor.png")
self.mover = PhotoImage(file="assets/movement_cursor.png")
self.forcegrid = PhotoImage(file="assets/grid.png")
self.menu_selection = Radiobutton(self.menu_frame, image=self.pointer, variable=cursor_mode, value=0, indicatoron=0)
self.menu_selection.grid(row=0, column=0)
self.menu_selection = Radiobutton(self.menu_frame, image=self.mover, variable=cursor_mode, value=1, indicatoron=0)
self.menu_selection.grid(row=0, column=1)
self.menu_selection = Checkbutton(self.menu_frame, image=self.forcegrid, variable=force_grid, indicatoron=0, offvalue=0, onvalue=1)
self.menu_selection.grid(row=0, column=2)
self.menu_spacing = Frame(self.menu_frame, width=80, height=40, bd=2)
self.menu_spacing.grid(row=0, column=3)
# Layer control panel initialization
self.ground = PhotoImage(file="assets/ground.png")
self.decoration = PhotoImage(file="assets/decoration.png")
self.loadzone = PhotoImage(file="assets/loading_zone.png")
self.lightmap = PhotoImage(file="assets/lightbulb.png")
self.view_selection = Radiobutton(self.menu_frame, image=self.ground, variable=view_mode, value=0, indicatoron=0)
self.view_selection.grid(row=0, column=4)
self.view_selection = Radiobutton(self.menu_frame, image=self.decoration, variable=view_mode, value=1, indicatoron=0)
self.view_selection.grid(row=0, column=5)
self.view_selection = Radiobutton(self.menu_frame, image=self.loadzone, variable=view_mode, value=2, indicatoron=0)
self.view_selection.grid(row=0, column=6)
self.view_selection = Radiobutton(self.menu_frame, image=self.lightmap, variable=view_mode, value=3, indicatoron=0)
self.view_selection.grid(row=0, column=7)
self.menu_spacing2 = Frame(self.menu_frame, width=80, height=40, bd=2)
self.menu_spacing2.grid(row=0, column=8)
# Category panel initialization
options = list(i[0] for i in App.groups.items())
catagories.set(options[0])
self.groups_menu = OptionMenu(self.menu_frame, catagories, *options)
self.groups_menu.grid(row=0, column=9)
# Changed palette group action
def set_group(event, something, var_mode):
redraw_panels()
catagories.trace('w', set_group)
# Selected image action
def set_cursor_icon(event, something, var_mode):
if cursor_mode.get() == 0:
self.map_canvas.config(cursor="")
elif cursor_mode.get() == 1:
self.map_canvas.config(cursor="fleur")
elif cursor_mode.get() == 2:
self.map_canvas.config(cursor="wait")
else:
pass
cursor_mode.trace('w', set_cursor_icon)
# Selected mode action
def set_view_mode(event, something, var_mode):
if view_mode.get() == 0:
self.deco_canvas.grid_remove()
self.deco_vbar.grid_remove()
self.load_canvas.grid_remove()
self.load_vbar.grid_remove()
self.light_canvas.grid_remove()
self.light_vbar.grid_remove()
self.tile_canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.img_vbar.grid(row=0, column=1, sticky=N+S)
elif view_mode.get() == 1:
self.tile_canvas.grid_remove()
self.img_vbar.grid_remove()
self.load_canvas.grid_remove()
self.load_vbar.grid_remove()
self.light_canvas.grid_remove()
self.light_vbar.grid_remove()
self.deco_canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.deco_vbar.grid(row=0, column=1, sticky=N+S)
elif view_mode.get() == 2:
self.deco_canvas.grid_remove()
self.deco_vbar.grid_remove()
self.tile_canvas.grid_remove()
self.img_vbar.grid_remove()
self.light_canvas.grid_remove()
self.light_vbar.grid_remove()
self.load_canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.load_vbar.grid(row=0, column=1, sticky=N+S)
else:
self.deco_canvas.grid_remove()
self.deco_vbar.grid_remove()
self.tile_canvas.grid_remove()
self.img_vbar.grid_remove()
self.load_canvas.grid_remove()
self.load_vbar.grid_remove()
self.light_canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.light_vbar.grid(row=0, column=1, sticky=N+S)
redraw_map_canvas()
view_mode.trace('w', set_view_mode)
# Toggled force grid action
def toggle_grid(event, something, var_mode):
redraw_map_canvas()
force_grid.trace('w', toggle_grid)
# Change heading in accordance to whether or not the file is saved
def save_update(event, something, var_mode):
if saved.get() == 1:
master.title("Worldbuilder")
else:
master.title("*Worldbuilder*")
saved.trace('w', save_update)
def img_setup():
'''Function to set up images for tile/deco panels'''
App.translate_tk2f = {0: 0}
App.translate_f2tk = {0: 0}
App.tiles = []
App.decos = []
App.load_tiles = {}
for tile in list(App.tile_ids.items()):
if tile[0] != 0:
img = PhotoImage(file=tile[0]).zoom(64).subsample(16)
App.tiles.append(img)
App.translate_tk2f[img] = tile[0]
App.translate_f2tk[tile[0]] = img
for deco in list(App.deco_ids.items()):
if deco[0] != 0:
img = PhotoImage(file=deco[0]).zoom(64).subsample(16)
App.decos.append(img)
App.translate_tk2f[img] = deco[0]
App.translate_f2tk[deco[0]] = img
index = -1
for load in ["assets/inactive_zone.png", "assets/reserved_zone.png", "assets/active_zone.png"]:
index += 1
App.load_tiles[index] = PhotoImage(file=load).zoom(64).subsample(16)
def tile_panel_setup():
self.tile_canvas = Canvas(self.tile_frame, width=72*3, height=72*8, bd=0)
self.tile_canvas.grid(row=0, column=0, sticky=N+S+E+W)
self.tile_canvas.grid_propagate(False)
self.img_vbar = Scrollbar(self.tile_frame)
self.img_vbar.config(command=self.tile_canvas.yview)
self.img_vbar.grid(row=0, column=1, sticky=N+S)
self.img_vbar.activate("slider")
self.tile_x = -1
self.tile_y = 0
index = -1
for tile in App.tiles:
index += 1
if App.translate_tk2f[tile] in App.groups[catagories.get()]:
self.tile_x += 1
if self.tile_x > 2:
self.tile_x = 0
self.tile_y += 1
radiobutton = Radiobutton(self.tile_canvas, image=tile, variable=selected_image, value=index, indicatoron=0)
self.tile_canvas.create_window(self.tile_x * 72 + 36, self.tile_y * 72 + 36, window=radiobutton)
self.tile_canvas.config(scrollregion=self.tile_canvas.bbox("all"), yscrollcommand=self.img_vbar.set)
def deco_panel_setup():
'''Function to set up deco panel'''
self.deco_canvas = Canvas(self.tile_frame, width=72*3, height=72*8, bd=0)
self.deco_canvas.grid_propagate(False)
self.deco_vbar = Scrollbar(self.tile_frame)
self.deco_vbar.config(command=self.deco_canvas.yview)
self.deco_vbar.activate("slider")
self.deco_x = -1
self.deco_y = 0
index = -1
for deco in App.decos:
self.deco_x += 1
index += 1
if self.deco_x > 2:
self.deco_x = 0
self.deco_y += 1
radiobutton = Radiobutton(self.deco_canvas, image=deco, variable=selected_deco, value=index, indicatoron=0)
self.deco_canvas.create_window(self.deco_x * 72 + 36, self.deco_y * 72 + 36, window=radiobutton)
self.deco_canvas.config(scrollregion=self.deco_canvas.bbox("all"), yscrollcommand=self.deco_vbar.set)
def load_panel_setup():
'''Function to set up loading zone panel'''
self.load_canvas = Canvas(self.tile_frame, width=72*3, height=72*8, bd=0)
self.load_canvas.grid_propagate(False)
self.load_vbar = Scrollbar(self.tile_frame)
self.load_vbar.config(command=self.load_canvas.yview)
self.load_vbar.activate("slider")
self.load_x = -1
self.load_y = 0
index = -1
options = ["assets/delete_zone.png", "assets/new_zone.png", "assets/set_zone_destination.png", "assets/copy_zone_settings.png", "assets/paste_zone_settings.png"]
self.load_imgs = []
for i in options:
self.load_imgs.append(PhotoImage(file=i).zoom(64).subsample(16))
for img in self.load_imgs:
self.load_x += 1
index += 1
if self.load_x > 2:
self.load_x = 0
self.load_y += 1
radiobutton = Radiobutton(self.load_canvas, image=img, variable=selected_load, value=index, indicatoron=0)
self.load_canvas.create_window(self.load_x * 72 + 36, self.load_y * 72 + 36, window=radiobutton)
self.load_canvas.config(scrollregion=self.load_canvas.bbox("all"), yscrollcommand=self.load_vbar.set)
def light_panel_setup():
'''Function to set up loading zone panel'''
self.light_canvas = Canvas(self.tile_frame, width=72*3, height=72*8, bd=0)
self.light_canvas.grid_propagate(False)
self.light_vbar = Scrollbar(self.tile_frame)
self.light_vbar.config(command=self.light_canvas.yview)
self.light_vbar.activate("slider")
self.light_x = -1
self.light_y = 0
index = -1
options = ["assets/delete_light.png", "assets/3x3_light_source.png"]
self.light_imgs = []
for i in options:
self.light_imgs.append(PhotoImage(file=i).zoom(64).subsample(16))
for img in self.light_imgs:
self.light_x += 1
index += 1
if self.light_x > 2:
self.light_x = 0
self.light_y += 1
radiobutton = Radiobutton(self.light_canvas, image=img, variable=selected_light, value=index, indicatoron=0)
self.light_canvas.create_window(self.light_x * 72 + 36, self.light_y * 72 + 36, window=radiobutton)
self.light_canvas.config(scrollregion=self.light_canvas.bbox("all"), yscrollcommand=self.light_vbar.set)
def redraw_panels():
'''Redraw tile/deco panels if needed'''
original_cursor = cursor_mode.get()
cursor_mode.set(2)
master.update()
self.tile_canvas.destroy()
self.deco_canvas.destroy()
self.load_canvas.destroy()
self.light_canvas.destroy()
img_setup()
tile_panel_setup()
deco_panel_setup()
load_panel_setup()
light_panel_setup()
redraw_map_canvas()
cursor_mode.set(original_cursor)
# Actually set up tile and deco panels
img_setup()
tile_panel_setup()
deco_panel_setup()
load_panel_setup()
light_panel_setup()
# Tilemap window setup
def draw_map(matrix):
y = -1
for i in matrix:
y += 1
x = -1
for j in i:
x += 1
if matrix[y][x] != 0:
try:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=App.translate_f2tk[matrix[y][x]])
except KeyError:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=App.translate_f2tk['tiles/missing.png'])
print("An error occured while loading tilemap, \"{}\" was not found".format(matrix[y][x]))
def draw_grid():
new_height = 64 * len(self.tilemap)
new_width = 64 * len(self.tilemap[0])
self.map_canvas.config(scrollregion=(0, 0, new_width, new_height))
for i in range(0, new_height + 1):
self.map_canvas.create_line(0, 64 * i, new_width, 64 * i)
for j in range(0, new_width + 1):
self.map_canvas.create_line(64 * j, 0, 64 * j, new_height)
def redraw_map_canvas():
self.map_canvas.delete("all")
if force_grid.get() == 0:
draw_grid()
draw_map(self.tilemap)
draw_map(self.decomap)
if view_mode.get() == 2:
for i in list(self.loading_zones.items()):
if i[1] != []:
self.map_canvas.create_image((32 + 64 * i[0][0], 32 + 64 * i[0][1]), image=App.load_tiles[2])
else:
self.map_canvas.create_image((32 + 64 * i[0][0], 32 + 64 * i[0][1]), image=App.load_tiles[0])
elif view_mode.get() == 3:
for i, j in self.light_sources:
self.map_canvas.create_image((32 + 64 * i, 32 + 64 * j), image=self.light_imgs[1])
if force_grid.get() == 1:
draw_grid()
self.start = None
def mark_start(event):
'''Marks starting position of mouse for canvas dragging'''
self.start = (event.x, event.y)
callback(event)
def callback(event):
saved.set(0)
if cursor_mode.get() == 0:
# Canvas painting mode
try:
dx = self.map_canvas.xview()[0] * len(self.tilemap[0])
| |
#!python3
import os
import sys
import struct
import argparse
import yaml
import asyncio
import traceback
from signal import SIGINT, SIGTERM
from bleak import BleakClient, BleakError, BleakScanner
import atexit
import pickle
from pickle import UnpicklingError
from aiohttp import web
import ssl
import json
import logging
from typing import Awaitable, Callable, Union
logging.basicConfig(level=logging.INFO)
IS_LINUX = os.name == 'posix'
IS_WINDOWS = os.name == 'nt'
# HELPER FUNCTIONS
def mmToRaw(mm: float) -> float:
return (mm - BASE_HEIGHT) * 10
def rawToMM(raw: float) -> float:
return (raw / 10) + BASE_HEIGHT
def rawToSpeed(raw: float) -> float:
return (raw / 100)
# GATT CHARACTERISTIC AND COMMAND DEFINITIONS
UUID_HEIGHT = '99fa0021-338a-1024-8a49-009c0215f78a'
UUID_COMMAND = '99fa0002-338a-1024-8a49-009c0215f78a'
UUID_REFERENCE_INPUT = '99fa0031-338a-1024-8a49-009c0215f78a'
COMMAND_UP = bytearray(struct.pack("<H", 71))
COMMAND_DOWN = bytearray(struct.pack("<H", 70))
COMMAND_STOP = bytearray(struct.pack("<H", 255))
COMMAND_REFERENCE_INPUT_STOP = bytearray(struct.pack("<H", 32769))
COMMAND_REFERENCE_INPUT_UP = bytearray(struct.pack("<H", 32768))
COMMAND_REFERENCE_INPUT_DOWN = bytearray(struct.pack("<H", 32767))
# CONFIGURATION SETUP
# Height of the desk at its lowest (in mm)
# I assume this is the same for all Idasen desks
BASE_HEIGHT = 620
MAX_HEIGHT = 1270 # 6500
# Default config
config = {
"mac_address": None,
"stand_height": BASE_HEIGHT + 420,
"sit_height": BASE_HEIGHT + 63,
"height_tolerance": 2.0,
"adapter_name": 'hci0',
"scan_timeout": 5,
"connection_timeout": 10,
"sit": False,
"stand": False,
"monitor": False,
"move_to": None
}
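# Illustrative note (not part of the original script): a config.yaml next to this file
# simply overrides any of the keys above, e.g. (the MAC address is a placeholder):
#
#   mac_address: "AA:BB:CC:DD:EE:FF"
#   stand_height: 1040
#   sit_height: 683
#   adapter_name: hci0
#   scan_timeout: 5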
# Overwrite from config.yaml
config_file = {}
config_file_path = os.path.join(os.path.dirname(
os.path.realpath(__file__)), 'config.yaml')
if (config_file_path):
with open(config_file_path, 'r') as stream:
try:
config_file = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print("Reading config.yaml failed")
exit(1)
config.update(config_file)
# Overwrite from command line args
parser = argparse.ArgumentParser(description='')
parser.add_argument('--mac-address', dest='mac_address',
type=str, help="Mac address of the Idasen desk")
parser.add_argument('--stand-height', dest='stand_height', type=int,
help="The height the desk should be at when standing (mm)")
parser.add_argument('--sit-height', dest='sit_height', type=int,
help="The height the desk should be at when sitting (mm)")
parser.add_argument('--height-tolerance', dest='height_tolerance', type=float,
help="Distance between reported height and target height before ceasing move commands (mm)")
parser.add_argument('--adapter', dest='adapter_name', type=str,
help="The bluetooth adapter device name")
parser.add_argument('--scan-timeout', dest='scan_timeout', type=int,
                    help="The timeout for bluetooth scan (seconds)")
parser.add_argument('--connection-timeout', dest='connection_timeout', type=int,
help="The timeout for bluetooth connection (seconds)")
cmd = parser.add_mutually_exclusive_group()
cmd.add_argument('--sit', dest='sit', action='store_true',
help="Move the desk to sitting height")
cmd.add_argument('--stand', dest='stand', action='store_true',
help="Move the desk to standing height")
cmd.add_argument('--monitor', dest='monitor', action='store_true',
help="Monitor desk height and speed")
cmd.add_argument('--move-to', dest='move_to', type=int,
help="Move desk to specified height (mm)")
cmd.add_argument('--scan', dest='scan_adapter', action='store_true',
help="Scan for devices using the configured adapter")
cmd.add_argument('--web', dest='web', action='store_true',
help="Run WebServer")
parser.add_argument('--port', dest='web_port', action='store', type=int)
args = {k: v for k, v in vars(parser.parse_args()).items() if v is not None}
config.update(args)
if not config['mac_address']:
parser.error("Mac address must be provided")
if config['sit_height'] >= config['stand_height']:
parser.error("Sit height must be less than stand height")
if config['sit_height'] < BASE_HEIGHT:
parser.error("Sit height must be greater than {}".format(BASE_HEIGHT))
if config['stand_height'] > MAX_HEIGHT:
parser.error("Stand height must be less than {}".format(MAX_HEIGHT))
config['stand_height_raw'] = mmToRaw(config['stand_height'])
config['sit_height_raw'] = mmToRaw(config['sit_height'])
config['height_tolerance_raw'] = 10 * config['height_tolerance']
if config['move_to']:
config['move_to_raw'] = mmToRaw(config['move_to'])
if 'IDASEN_SHARED_KEY' in os.environ:
config['shared_key'] = os.environ['IDASEN_SHARED_KEY']
if IS_WINDOWS:
    # Windows doesn't use this parameter, so rename it so it looks nice in the logs
config['adapter_name'] = 'default adapter'
# MAIN PROGRAM
def print_height_data(sender, data):
height, speed = struct.unpack("<Hh", data)
print(
"Height: {:4.0f}mm Speed: {:2.0f}mm/s".format(rawToMM(height), rawToSpeed(speed)))
def has_reached_target(height, target):
# The notified height values seem a bit behind so try to stop before
# reaching the target value to prevent overshooting
return (abs(height - target) <= config['height_tolerance_raw'])
async def move_up(client):
await client.write_gatt_char(UUID_COMMAND, COMMAND_UP)
async def move_down(client):
await client.write_gatt_char(UUID_COMMAND, COMMAND_DOWN)
async def stop(client):
# This emulates the behaviour of the app. Stop commands are sent to both
# Reference Input and Command characteristics.
await client.write_gatt_char(UUID_COMMAND, COMMAND_STOP)
if IS_LINUX:
# It doesn't like this on windows
await client.write_gatt_char(UUID_REFERENCE_INPUT, COMMAND_REFERENCE_INPUT_STOP)
stop_flag = False
def asked_to_stop():
global stop_flag
return stop_flag
def ask_to_stop():
global stop_flag
logging.warning('ASK TO STOP')
stop_flag = True
def reset_stop_flag():
global stop_flag
stop_flag = False
class CancelToken:
isCanceled = False
def IsCancelled(self) -> bool:
return self.isCanceled
def cancel(self):
self.isCanceled = True
async def subscribe(client: BleakClient, uuid: str, callback: Callable[[int, bytearray], None], cancelationToken: CancelToken = None) -> Awaitable[None]:
    """Listen for notifications on a characteristic until the cancelation token (or the global stop flag) is set"""
    await client.start_notify(uuid, callback)
    try:
        # Fall back to the module-level stop flag when no CancelToken is supplied
        while not (cancelationToken.IsCancelled() if cancelationToken else asked_to_stop()):
await asyncio.sleep(0.1)
except asyncio.CancelledError:
pass
finally:
await client.stop_notify(uuid)
async def move_to(client, target):
"""Move the desk to a specified height"""
logger = logging.getLogger('move_to')
initial_height, speed = struct.unpack("<Hh", await client.read_gatt_char(UUID_HEIGHT))
# Initialise by setting the movement direction
direction = "UP" if target > initial_height else "DOWN"
# Set up callback to run when the desk height changes. It will resend
# movement commands until the desk has reached the target height.
count = 0
c_token = CancelToken()
def _move_to(sender, data):
nonlocal count, c_token
height, speed = struct.unpack("<Hh", data)
count = count + 1
logger.debug("Height: {:4.0f}mm Target: {:4.0f}mm Speed: {:2.0f}mm/s".format(
rawToMM(height), rawToMM(target), rawToSpeed(speed)))
# Stop if we have reached the target
if has_reached_target(height, target):
asyncio.create_task(stop(client))
logger.info('has reached target')
c_token.cancel()
# Or resend the movement command if we have not yet reached the
# target.
# Each movement command seems to run the desk motors for about 1
# second if uninterrupted and the height value is updated about 16
# times.
# Resending the command on the 6th update seems a good balance
        # between helping to avoid overshoots and preventing stuttering
# (the motor seems to slow if no new move command has been sent)
elif direction == "UP" and count == 6:
asyncio.create_task(move_up(client))
count = 0
elif direction == "DOWN" and count == 6:
asyncio.create_task(move_down(client))
count = 0
    # Listen for changes to desk height and send the first move command (if we
    # are not already at the target height).
if not has_reached_target(initial_height, target):
logger.info('moving to {:4.0f}mm from {:4.0f}mm in direction {}'.format(
rawToMM(target), rawToMM(initial_height), direction))
sub_task = subscribe(client, UUID_HEIGHT, _move_to, c_token)
tasks = [sub_task]
if direction == "UP":
tasks.append(move_up(client))
elif direction == "DOWN":
tasks.append(move_down(client))
await asyncio.gather(*[task for task in tasks])
else:
logger.info('not moving to {:4.0f}mm from {:4.0f}mm'.format(
rawToMM(target), rawToMM(initial_height)))
def unpickle_desk():
"""Load a Bleak device config from a pickle file and check that it is the correct device"""
try:
if not IS_WINDOWS:
with open("desk.pickle", 'rb') as f:
desk = pickle.load(f)
if desk.address == config['mac_address']:
return desk
except Exception:
pass
return None
def pickle_desk(desk):
"""Attempt to pickle the desk"""
if not IS_WINDOWS:
with open('desk.pickle', 'wb') as f:
pickle.dump(desk, f)
async def scan(mac_address=None):
"""Scan for a bluetooth device with the configured address and return it or return all devices if no address specified"""
print('Scanning\r', end="")
scanner = BleakScanner()
devices = await scanner.discover(device=config['adapter_name'], timeout=config['scan_timeout'])
if not mac_address:
return devices
for device in devices:
if (device.address == mac_address):
print('Scanning - Desk Found')
return device
print('Scanning - Desk {} Not Found'.format(mac_address))
return None
async def connect(desk):
"""Attempt to connect to the desk"""
try:
print('Connecting\r', end="")
client = BleakClient(desk, device=config['adapter_name'])
await client.connect(timeout=config['connection_timeout'])
return client
except BleakError as e:
print('Connecting failed')
os._exit(1)
raise e
client = None
async def run():
"""Begin the action specified by command line arguments and config"""
global client
try:
# Scanning doesn't require a connection so do it first and exit
if config['scan_adapter']:
devices = await scan()
print('Found {} devices using {}'.format(
len(devices), config['adapter_name']))
for device in devices:
print(device)
os._exit(0)
# Attempt to load and connect to the pickled desk
desk = unpickle_desk()
if not desk:
# If that fails then rescan for the desk
desk = await scan(config['mac_address'])
if not desk:
print('Could not find desk {}'.format(config['mac_address']))
os._exit(1)
client = await connect(desk)
        # Cache the Bleak device config to connect more quickly in the future
pickle_desk(desk)
def disconnect_callback(client):
if not asked_to_stop():
print("Lost connection with {}".format(client.address))
ask_to_stop()
client.set_disconnected_callback(disconnect_callback)
print("Connected {}".format(config['mac_address']))
# Always print current height
initial_height, speed = struct.unpack("<Hh", await client.read_gatt_char(UUID_HEIGHT))
print("Height: {:4.0f}mm".format(rawToMM(initial_height)))
target = None
if config['monitor']:
# Print changes to height data
await subscribe(client, UUID_HEIGHT, print_height_data)
elif config['sit']:
# Move to configured sit height
target = config['sit_height_raw']
await move_to(client, target)
elif config['stand']:
# Move to configured stand height
target = config['stand_height_raw']
await move_to(client, target)
elif config['move_to']:
# Move to custom height
target = config['move_to_raw']
await move_to(client, target)
if target:
# If we were moving to a target height, wait, then print the actual final height
await asyncio.sleep(1)
final_height, speed = struct.unpack("<Hh", await client.read_gatt_char(UUID_HEIGHT))
print("Final height: {:4.0f}mm Target: {:4.0f}mm)".format(
rawToMM(final_height), rawToMM(target)))
except BleakError as e:
print(e)
except Exception as e:
traceback.print_exc()
# HTTP SERVER STUFF
async def start_background_tasks(app: web.Application):
desk = unpickle_desk()
def disconnect_callback(*args, **kwargs):
if not asked_to_stop():
print(args, kwargs)
print("Lost connection with {}".format(client.address))
ask_to_stop()
if desk is not None:
app['bt_client'] = await connect(desk)
app['bt_client'].set_disconnected_callback(disconnect_callback)
print("Connected {}".format(config['mac_address']))
else:
print(
'Could not find desk {} - please run without web at least once'.format(config['mac_address']))
os._exit(1)
async def cleanup_background_tasks(app: web.Application):
ask_to_stop()
await app['bt_client'].disconnect()
async def validateToken(request: web.Request) -> Union[web.Response, None]:
    if request.content_type !=
import numpy as np
import tensorflow as tf
from cuda.upfirdn_2d import *
from cuda.fused_bias_act import fused_bias_act
##################################################################################
# Layers
##################################################################################
class Conv2D(tf.keras.layers.Layer):
def __init__(self, fmaps, kernel, resample_kernel, up, down, gain, lrmul, **kwargs):
super(Conv2D, self).__init__(**kwargs)
self.fmaps = fmaps
self.kernel = kernel
self.gain = gain
self.lrmul = lrmul
self.up = up
self.down = down
self.k, self.pad0, self.pad1 = compute_paddings(resample_kernel, self.kernel, up, down, is_conv=True)
def build(self, input_shape):
weight_shape = [self.kernel, self.kernel, input_shape[1], self.fmaps]
init_std, self.runtime_coef = compute_runtime_coef(weight_shape, self.gain, self.lrmul)
# [kernel, kernel, fmaps_in, fmaps_out]
w_init = tf.random.normal(shape=weight_shape, mean=0.0, stddev=init_std)
self.w = tf.Variable(w_init, name='w', trainable=True)
def call(self, inputs, training=None, mask=None):
x = inputs
w = self.runtime_coef * self.w
# actual conv
if self.up:
x = upsample_conv_2d(x, w, self.kernel, self.kernel, self.pad0, self.pad1, self.k)
elif self.down:
x = conv_downsample_2d(x, w, self.kernel, self.kernel, self.pad0, self.pad1, self.k)
else:
x = tf.nn.conv2d(x, w, data_format='NCHW', strides=[1, 1, 1, 1], padding='SAME')
return x
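# (Added illustration) Example of constructing the layer above; the resample
# kernel value is only an assumed typical choice, not taken from this file:
#   conv = Conv2D(fmaps=512, kernel=3, resample_kernel=[1, 3, 3, 1],
#                 up=True, down=False, gain=1.0, lrmul=1.0, name='conv_up')
#   y = conv(x)  # x: [N, C, H, W] -> y: [N, 512, 2*H, 2*W]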
class ModulatedConv2D(tf.keras.layers.Layer):
def __init__(self, fmaps, style_fmaps, kernel, resample_kernel, up, down, demodulate, fused_modconv, gain, lrmul, **kwargs):
super(ModulatedConv2D, self).__init__(**kwargs)
assert not (up and down)
self.fmaps = fmaps
self.style_fmaps = style_fmaps
self.kernel = kernel
self.demodulate = demodulate
self.up = up
self.down = down
self.fused_modconv = fused_modconv
self.gain = gain
self.lrmul = lrmul
self.k, self.pad0, self.pad1 = compute_paddings(resample_kernel, self.kernel, up, down, is_conv=True)
# self.factor = 2
self.mod_dense = Dense(self.style_fmaps, gain=1.0, lrmul=1.0, name='mod_dense')
self.mod_bias = BiasAct(lrmul=1.0, act='linear', name='mod_bias')
def build(self, input_shape):
x_shape, w_shape = input_shape[0], input_shape[1]
in_fmaps = x_shape[1]
weight_shape = [self.kernel, self.kernel, in_fmaps, self.fmaps]
init_std, self.runtime_coef = compute_runtime_coef(weight_shape, self.gain, self.lrmul)
# [kkIO]
w_init = tf.random.normal(shape=weight_shape, mean=0.0, stddev=init_std)
self.w = tf.Variable(w_init, name='w', trainable=True)
def scale_conv_weights(self, w):
# convolution kernel weights for fused conv
weight = self.runtime_coef * self.w # [kkIO]
weight = weight[np.newaxis] # [BkkIO]
# modulation
style = self.mod_dense(w) # [BI]
style = self.mod_bias(style) + 1.0 # [BI]
weight *= style[:, np.newaxis, np.newaxis, :, np.newaxis] # [BkkIO]
# demodulation
d = None
if self.demodulate:
d = tf.math.rsqrt(tf.reduce_sum(tf.square(weight), axis=[1, 2, 3]) + 1e-8) # [BO]
weight *= d[:, np.newaxis, np.newaxis, np.newaxis, :] # [BkkIO]
return weight, style, d
def call(self, inputs, training=None, mask=None):
x, y = inputs
# height, width = tf.shape(x)[2], tf.shape(x)[3]
# prepare weights: [BkkIO] Introduce minibatch dimension
        # prepare convolution kernel weights
weight, style, d = self.scale_conv_weights(y)
if self.fused_modconv:
# Fused => reshape minibatch to convolution groups
x = tf.reshape(x, [1, -1, x.shape[2], x.shape[3]])
# weight: reshape, prepare for fused operation
new_weight_shape = [tf.shape(weight)[1], tf.shape(weight)[2], tf.shape(weight)[3], -1] # [kkI(BO)]
weight = tf.transpose(weight, [1, 2, 3, 0, 4]) # [kkIBO]
weight = tf.reshape(weight, shape=new_weight_shape) # [kkI(BO)]
else:
# [BIhw] Not fused => scale input activations
x *= style[:, :, tf.newaxis, tf.newaxis]
# Convolution with optional up/downsampling.
if self.up:
x = upsample_conv_2d(x, weight, self.kernel, self.kernel, self.pad0, self.pad1, self.k)
elif self.down:
x = conv_downsample_2d(x, weight, self.kernel, self.kernel, self.pad0, self.pad1, self.k)
else:
x = tf.nn.conv2d(x, weight, data_format='NCHW', strides=[1, 1, 1, 1], padding='SAME')
# Reshape/scale output
if self.fused_modconv:
# Fused => reshape convolution groups back to minibatch
x_shape = tf.shape(x)
x = tf.reshape(x, [-1, self.fmaps, x_shape[2], x_shape[3]])
elif self.demodulate:
# [BOhw] Not fused => scale output activations
x *= d[:, :, tf.newaxis, tf.newaxis]
return x
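# (Added illustration) Minimal NumPy sketch of the modulate/demodulate math used
# in ModulatedConv2D.scale_conv_weights: the style vector scales the kernel's
# input channels, and demodulation rescales each output channel back to unit norm.
def _modconv_weight_demo(w_kkIO, style_BI, eps=1e-8):
    # w_kkIO: [k, k, I, O] kernel, style_BI: [B, I] style (already offset by +1.0)
    weight = w_kkIO[np.newaxis]                                          # [B, k, k, I, O]
    weight = weight * style_BI[:, np.newaxis, np.newaxis, :, np.newaxis]
    d = 1.0 / np.sqrt(np.sum(np.square(weight), axis=(1, 2, 3)) + eps)   # [B, O]
    return weight * d[:, np.newaxis, np.newaxis, np.newaxis, :]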
class Dense(tf.keras.layers.Layer):
def __init__(self, fmaps, gain=1.0, lrmul=1.0, **kwargs):
super(Dense, self).__init__(**kwargs)
self.fmaps = fmaps
self.gain = gain
self.lrmul = lrmul
def build(self, input_shape):
fan_in = tf.reduce_prod(input_shape[1:])
weight_shape = [fan_in, self.fmaps]
init_std, self.runtime_coef = compute_runtime_coef(weight_shape, self.gain, self.lrmul)
w_init = tf.random.normal(shape=weight_shape, mean=0.0, stddev=init_std)
self.w = tf.Variable(w_init, name='w', trainable=True)
def call(self, inputs, training=None, mask=None):
weight = self.runtime_coef * self.w
c = tf.reduce_prod(tf.shape(inputs)[1:])
x = tf.reshape(inputs, shape=[-1, c])
x = tf.matmul(x, weight)
return x
class LabelEmbedding(tf.keras.layers.Layer):
def __init__(self, embed_dim, **kwargs):
super(LabelEmbedding, self).__init__(**kwargs)
self.embed_dim = embed_dim
def build(self, input_shape):
weight_shape = [input_shape[1], self.embed_dim]
        # mean=0.0, stddev=1.0 matches the tf 1.15 default of tf.initializers.random_normal()
w_init = tf.random.normal(shape=weight_shape, mean=0.0, stddev=1.0)
self.w = tf.Variable(w_init, name='w', trainable=True)
def call(self, inputs, training=None, mask=None):
x = tf.matmul(inputs, self.w)
return x
##################################################################################
# etc
##################################################################################
class PixelNorm(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(PixelNorm, self).__init__(**kwargs)
def call(self, inputs, training=None, mask=None):
x = inputs * tf.math.rsqrt(tf.reduce_mean(tf.square(inputs), axis=1, keepdims=True) + 1e-8)
return x
class BiasAct(tf.keras.layers.Layer):
def __init__(self, lrmul, act, **kwargs):
super(BiasAct, self).__init__(**kwargs)
self.lrmul = lrmul
self.act = act
def build(self, input_shape):
b_init = tf.zeros(shape=(input_shape[1],), dtype=tf.float32)
self.b = tf.Variable(b_init, name='b', trainable=True)
def call(self, inputs, training=None, mask=None):
b = self.lrmul * self.b
x = fused_bias_act(inputs, b=b, act=self.act, alpha=None, gain=None)
return x
class Noise(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(Noise, self).__init__(**kwargs)
def build(self, input_shape):
self.noise_strength = tf.Variable(initial_value=0.0, dtype=tf.float32, trainable=True, name='w')
def call(self, inputs, noise=None, training=None, mask=None):
x_shape = tf.shape(inputs)
# noise: [1, 1, x_shape[2], x_shape[3]] or None
if noise is None:
noise = tf.random.normal(shape=(x_shape[0], 1, x_shape[2], x_shape[3]), dtype=tf.float32)
x = inputs + noise * self.noise_strength
return x
class MinibatchStd(tf.keras.layers.Layer):
def __init__(self, group_size, num_new_features, **kwargs):
super(MinibatchStd, self).__init__(**kwargs)
self.group_size = group_size
self.num_new_features = num_new_features
def call(self, inputs, training=None, mask=None):
s = tf.shape(inputs)
group_size = tf.minimum(self.group_size, s[0])
y = tf.reshape(inputs, [group_size, -1, self.num_new_features, s[1] // self.num_new_features, s[2], s[3]])
y = tf.cast(y, tf.float32)
y -= tf.reduce_mean(y, axis=0, keepdims=True)
y = tf.reduce_mean(tf.square(y), axis=0)
y = tf.sqrt(y + 1e-8)
y = tf.reduce_mean(y, axis=[2, 3, 4], keepdims=True)
y = tf.reduce_mean(y, axis=[2])
y = tf.cast(y, inputs.dtype)
y = tf.tile(y, [group_size, 1, s[2], s[3]])
x = tf.concat([inputs, y], axis=1)
return x
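# (Added note) MinibatchStd appends `num_new_features` extra channels whose value
# is the per-group standard deviation averaged over feature and spatial axes, so
# an [N, C, H, W] input becomes [N, C + num_new_features, H, W].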
def compute_runtime_coef(weight_shape, gain, lrmul):
fan_in = tf.reduce_prod(weight_shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
fan_in = tf.cast(fan_in, dtype=tf.float32)
he_std = gain / tf.sqrt(fan_in)
init_std = 1.0 / lrmul
runtime_coef = he_std * lrmul
return init_std, runtime_coef
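# (Added note) compute_runtime_coef implements the equalized learning rate trick:
# weights are initialised with std 1/lrmul and multiplied at runtime by
# he_std * lrmul, so the effective weight matches He initialisation while
# lrmul acts as a per-layer learning-rate multiplier.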
def lerp(a, b, t):
out = a + (b - a) * t
return out
def lerp_clip(a, b, t):
out = a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
return out
def get_coords(batch_size, height, width):
    # build coordinate grids of shape [batch, height, width, 1] for x and y
    x = tf.linspace(-1.0, 1.0, width)
    x = tf.reshape(x, shape=[1, 1, width, 1])
    x = tf.tile(x, multiples=[batch_size, height, 1, 1])
    y = tf.linspace(-1.0, 1.0, height)
    y = tf.reshape(y, shape=[1, height, 1, 1])
    y = tf.tile(y, multiples=[batch_size, 1, width, 1])
coords = tf.concat([x, y], axis=-1)
coords = tf.transpose(coords, perm=[0, 3, 1, 2])
coords = tf.cast(coords, tf.float32)
return coords
def grid_sample_tf(img, coords, align_corners=False, padding='border'):
"""
:param img: [B, C, H, W]
:param coords: [B, C, H, W]
:return: [B, C, H, W]
"""
def get_pixel_value(img, x, y):
"""
Utility function to get pixel value for coordinate
vectors x and y from a 4D tensor image.
Input
-----
- img: tensor of shape (B, H, W, C)
- x: flattened tensor of shape (B*H*W,)
- y: flattened tensor of shape (B*H*W,)
Returns
-------
- output: tensor of shape (B, H, W, C)
"""
shape = tf.shape(x)
batch_size = shape[0]
height = shape[1]
width = shape[2]
batch_idx = tf.range(0, batch_size)
batch_idx = tf.reshape(batch_idx, (batch_size, 1, 1))
b = tf.tile(batch_idx, (1, height, width))
indices = tf.stack([b, y, x], 3)
return tf.gather_nd(img, indices)
# rescale x and y to [0, W-1/H-1]
img = tf.transpose(img, perm=[0, 2, 3, 1]) # -> [N, H, W, C]
coords = tf.transpose(coords, perm=[0, 2, 3, 1]) # -> [N, H, W, C]
x, y = coords[:, ..., 0], coords[:, ..., 1]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
side = tf.cast(tf.shape(img)[1], tf.int32)
side_f = tf.cast(side, tf.float32)
if align_corners:
x = ((x + 1) / 2) * (side_f - 1)
y = ((y + 1) / 2) * (side_f - 1)
else:
x = 0.5 * ((x + 1.0) * side_f - 1)
y = 0.5 * ((y + 1.0) * side_f - 1)
if padding == 'border':
x = tf.clip_by_value(x, 0, side_f - 1)
y = tf.clip_by_value(y, 0, side_f - 1)
# -------------- Changes above --------------------
# grab 4 nearest corner points for each (x_i, y_i)
x0 = tf.floor(x)
x1 = x0 + 1
y0 = tf.floor(y)
y1 = y0 + 1
# recast as float for delta calculation
x0 = tf.cast(x0, 'float32')
x1 = tf.cast(x1, 'float32')
y0 = tf.cast(y0, 'float32')
y1 = tf.cast(y1, 'float32')
# calculate deltas
wa = (x1 - x) * (y1 - y)
wb = (x1 - x) * (y - y0)
wc = (x - x0) * (y1 - y)
wd = (x - x0) * (y - y0)
# recast as int for img boundaries
x0 = tf.cast(x0, 'int32')
    x1 = tf.cast(x1,
<reponame>etjoa003/gpt
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import os, time, PIL
import numpy as np
import matplotlib.pyplot as plt
from . import utils
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def entry(args):
print('pipeline.entry() mode: %s'%(str(args['mode'])))
if args['mode'] == 'training':
training(args)
elif args['mode'] == 'sample':
sample(args)
elif args['mode'] == 'plot_results':
plot_results(args)
else:
print('invalid --mode argument? Use --h to check available modes.')
def sample(args):
print('sample()')
rcon = Reconstructor()
rcon.sample(args)
def training(args):
print('training()')
tr = Trainer()
args = tr.run_training(args)
print_settings(args)
class Trainer(utils.FastPickleClient):
def __init__(self, ):
super(Trainer, self).__init__()
# inherited from FastPickleClient
# def pickle_data(self, save_data, save_dir, tv=(0,0,None), text=None):
# def load_pickled_data(self, pickled_dir, tv=(0,0,None), text=None):
def run_training(self, args):
print('Trainer.run_training() on %s dataset'%(str(args['dataset'])))
OBSERVE = utils.parse_bool_from_string(args['OBSERVE'])
EVALUATE = utils.parse_bool_from_string(args['EVALUATE'])
DO_DEBUG = utils.parse_bool_from_string(args['debug_mode'])
REALTIME_PRINT = utils.parse_bool_from_string(args['realtime_print'])
N_EPOCH = args['N_EPOCH']
PROJECT_ID = args['PROJECT_ID']
batch_size = args['batch_size']
LEARNING_RATE = args['learning_rate']
DATA_DIR = 'data' if not 'DATA_DIR' in args else args['DATA_DIR']
ROOT_DIR, PROJECT_DIR, MODEL_DIR, OUTPUT_DIR = utils.manage_directories(os.getcwd(), PROJECT_ID)
AVG_LOSS_EVERY_N_ITER = args['AVG_LOSS_EVERY_N_ITER']
RESULT_DATA_DIR = OUTPUT_DIR + '.data'
BEST_MODEL_DIR = MODEL_DIR + '.best'
SAVE_IMG_EVERY_N_ITER = args['SAVE_IMG_EVERY_N_ITER']
LOSS_IMG_DIR = OUTPUT_DIR + '.loss.jpg'
if DO_DEBUG:
args['N_EPOCH'] = 2
args['n'] = 24
STOP_AT_N_ITER = args['STOP_AT_N_ITER']
# loading data
trainloader, evalloader = self.get_data_loader(args)
evaliter = iter(evalloader)
IMG_SHAPE, is_black_and_white = self.get_image_shape(args)
output_data_dict = self.get_result_dictionary(RESULT_DATA_DIR)
# prepare model
if os.path.exists(BEST_MODEL_DIR): net = self.init_or_load_model(BEST_MODEL_DIR, args)
else: net = self.init_or_load_model(MODEL_DIR, args)
net.to(device=device)
print('n params:', utils.count_parameters(net))
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE, betas=(0.5,0.999), weight_decay=0)
rcon = Reconstructor()
if EVALUATE:
print('preparing for FID with n:%s'%(str(args['n'])))
prepare_data_for_FID(args)
start = time.time()
print('\nStart training...')
for epoch in range(N_EPOCH):
len_data = len(trainloader)
for i, data in enumerate(trainloader):
net.total_iter+=1
net.train()
net.zero_grad()
x, labels = data
x = x.to(torch.float).to(device=device)
# tensor_batch_to_display(x,raise_exception=True)
y = net(x)
total_loss = criterion(x,y)
loss = total_loss
realtime_print_options = {}
total_loss.backward()
optimizer.step()
# ignore the fact that it may slow down the process a fair bit in the initial stage.
output_data_dict['iters'].append(net.total_iter)
output_data_dict['losses'].append(loss.item())
if net.total_iter > AVG_LOSS_EVERY_N_ITER and net.total_iter>100:
running_avg_loss = np.mean(output_data_dict['losses'][-AVG_LOSS_EVERY_N_ITER:])
if net.best_avg_loss > running_avg_loss:
net.best_avg_loss = running_avg_loss
avg_loss_iter, avg_loss = utils.average_every_n(output_data_dict['losses'],iter_list=output_data_dict['iters'], n=AVG_LOSS_EVERY_N_ITER)
torch.save(net, BEST_MODEL_DIR)
save_loss_image(avg_loss_iter, avg_loss, dir=LOSS_IMG_DIR)
self.pickle_data(output_data_dict, RESULT_DATA_DIR, tv=(0,0,100), text=None)
save_reconstructed_images(x, net, OUTPUT_DIR, model=args['model'], IMG_SHAPE=IMG_SHAPE, best_loss_recon=True)
if SAVE_IMG_EVERY_N_ITER>0:
if (i+1)%SAVE_IMG_EVERY_N_ITER==0 or (i+1)==len_data:
save_reconstructed_images(x, net, OUTPUT_DIR,model=args['model'], IMG_SHAPE=IMG_SHAPE, )
save_reconstructed_images(x, net, OUTPUT_DIR,model=args['model'], IMG_SHAPE=IMG_SHAPE, difference=True)
if REALTIME_PRINT:
if (i+1)%4==0 or (i+1)>=len_data:
update_str = self.make_update_text(epoch, N_EPOCH, i, len_data, net.best_avg_loss,**realtime_print_options)
print('%-96s'%(str(update_str)),end='\r')
if DO_DEBUG:
if (i+1)%STOP_AT_N_ITER==0: break
x, _ = next(evaliter)
x = x.to(torch.float).to(device=device)
save_reconstructed_images(x, net, OUTPUT_DIR,model=args['model'], IMG_SHAPE=IMG_SHAPE, black_and_white=is_black_and_white)
torch.save(net, MODEL_DIR)
if EVALUATE:
rcon.reconstruct_images_into_folder(args, do_compute_SSIM=True, realtime_print=REALTIME_PRINT)
fid_value = rcon.compute_fid(args, net)
print('\ntraining ended at net.total_iter=%s.'%(str(int(net.total_iter))))
end = time.time()
elapsed = end - start
print(' time taken %s[s] = %s [min] = %s [hr]'%(str(round(elapsed,1)), str(round(elapsed/60.,1)), str(round(elapsed/3600.,1))))
return args
def make_update_text(self, epoch, N_EPOCH, i, len_data, best_avg_loss, **kwargs):
update_str = 'epoch:%s/%s iter: %s/%s best_avg_recon_loss:%s'%(str(epoch+1),str(N_EPOCH),str(i+1),
str(len_data),str(np.round(best_avg_loss,5)))
for item_name, update_items in kwargs.items():
update_val = update_items['value']
if update_items['rounding'] is not None:
update_val = round(update_val,update_items['rounding'])
update_str += ' %s:%s'%(str(item_name),str(update_val))
return update_str
def get_data_loader(self, args):
DATA_DIR = args['DATA_DIR']
batch_size = args['batch_size']
if args['dataset'] == 'cifar10':
from .data import prepare_cifarloader
trainloader = prepare_cifarloader(train=True, root_dir=DATA_DIR, batch_size=batch_size, shuffle=True, demo=0, download=0)
evalloader = prepare_cifarloader(train=False, root_dir=DATA_DIR, batch_size=16, shuffle=True, demo=0, download=0)
return trainloader, evalloader
elif args['dataset'] == 'celeba64':
from .data import prepare_celebaloader
# Put celeba/img_align_celeba.zip folder in the "data" folder if you set DATA_DIR to 'data'
if DATA_DIR == 'data':
DATA_DIR = os.path.join(DATA_DIR, 'celeba', 'img_align_celeba.zip')
trainloader = prepare_celebaloader(img_size=(64,64),train=True, root_dir=DATA_DIR, batch_size=batch_size, shuffle=True)
evalloader = prepare_celebaloader(img_size=(64,64),train=False, root_dir=DATA_DIR, batch_size=16, shuffle=True)
return trainloader, evalloader
else:
raise RuntimeError('Invalid dataset?')
def get_result_dictionary(self, RESULT_DATA_DIR):
if os.path.exists(RESULT_DATA_DIR):
output_data_dict = self.load_pickled_data(RESULT_DATA_DIR, tv=(0,0,None), text=None)
else:
output_data_dict = {'iters':[],'losses':[]}
return output_data_dict
def init_or_load_model(self, MODEL_DIR, args):
OBSERVE = utils.parse_bool_from_string(args['OBSERVE'])
if os.path.exists(MODEL_DIR):
net = torch.load(MODEL_DIR)
print('loading model...%s at %s iter'%(str(type(net)),str(net.total_iter)))
net.OBSERVE = OBSERVE
else:
if args['dataset'] == 'cifar10' and args['model']=='SimpleGrowth':
from .model import SimpleGrowth
net = SimpleGrowth(img_shape=(3,32,32), hidden_layer=3, OBSERVE=OBSERVE)
elif args['dataset'] == 'celeba64' and args['model']=='SimpleGrowth':
from .model import SimpleGrowth
net = SimpleGrowth(img_shape=(3,64,64), hidden_layer=4, OBSERVE=OBSERVE)
else:
raise RuntimeError('Model not found. Check --dataset or --model')
print('initiating new model...%s'%(str(type(net))))
return net
def get_image_shape(self, args):
if args['dataset'] == 'cifar10':
IMG_SHAPE = (3,32,32) # (C,H,W)
is_black_and_white=False
elif args['dataset'] == 'celeba64':
IMG_SHAPE = (3,64,64) # (C,H,W)
is_black_and_white=False
else:
raise RuntimeError('Invalid dataset?')
return IMG_SHAPE, is_black_and_white
#######################
# Some utils
#######################
def save_reconstructed_images(x, net, OUTPUT_DIR,model='SimpleGrowth', IMG_SHAPE=(1,28,28), black_and_white=False, difference=False, best_loss_recon=False):
by_batch = True
if IMG_SHAPE == (3,218,178):
by_batch = False
C, H, W = IMG_SHAPE
net.eval()
if by_batch:
y = net(x)
y = y.clone().detach().cpu().numpy()
x = x.clone().detach().cpu().numpy()
if difference:
y = np.clip((y-x)**2,0,1.)**0.5
n_batch = y.shape[0]
# print(n_batch)
if n_batch>16:
y_reconstruct = y[:16]
x_compare = x[:16]
else:
x_compare = np.zeros(shape=(16,C,H,W))
y_reconstruct = np.zeros(shape=(16,C,H,W))
y_reconstruct[:n_batch] = y
x_compare[:n_batch] = x
else:
x_compare = np.zeros(shape=(16,C,H,W))
y_reconstruct = np.zeros(shape=(16,C,H,W))
batch_size = x.shape[0]
        for i in range(batch_size):
            if i >= 16:
                break  # only the first 16 images fit in the 8x4 figure grid
            y = net(x[i:i+1])
            y = y.clone().detach().cpu().numpy()
            x1 = x[i:i+1].clone().detach().cpu().numpy()
            y_reconstruct[i] = y
            x_compare[i] = x1
plt.figure(figsize=(5,10))
for i in range(16):
plt.gcf().add_subplot(8,4,i+1)
if black_and_white:
plt.gca().imshow(y_reconstruct[i][0], vmin=0,vmax=1, cmap='gray')
else:
plt.gca().imshow(y_reconstruct[i].transpose(1,2,0))
set_figure_setting(i, n_last=16, set_title='reconstructed')
for i in range(16):
plt.gcf().add_subplot(8,4,16 + i+1)
if black_and_white:
plt.gca().imshow(x_compare[i][0], vmin=0,vmax=1, cmap='gray')
else:
plt.gca().imshow(x_compare[i].transpose(1,2,0))
set_figure_setting(i, n_last=16, set_title='original')
plt.tight_layout()
if best_loss_recon:
img_name = 'recons_best.jpeg'
else:
if not difference:
img_name = 'recons_%s.jpeg'%(str(1000000+net.total_iter))[1:]
else:
img_name = 'recons_%s_diff.jpeg'%(str(1000000+net.total_iter))[1:]
IMG_DIR = os.path.join(OUTPUT_DIR, img_name)
plt.savefig(IMG_DIR)
plt.close()
def set_figure_setting(i, n_last, set_title=None):
if i==0:
plt.gca().set_xticks([])
if set_title:
plt.gca().set_title(set_title)
elif i+1==n_last:
plt.gca().set_yticks([])
else:
plt.gca().set_xticks([])
plt.gca().set_yticks([])
def manage_dir_for_FID(dataset_name, ROOT_DIR):
if ROOT_DIR is None: ROOT_DIR = os.getcwd()
ckpt_dir = os.path.join(ROOT_DIR,'checkpoint')
fid_folder_dir = os.path.join(ckpt_dir,'for_fid')
fid_data_dir = os.path.join(fid_folder_dir,dataset_name)
for x in [ckpt_dir, fid_folder_dir, fid_data_dir]:
if not os.path.exists(x):
os.mkdir(x)
return fid_data_dir
def manage_directories2(args):
PROJECT_ID = args['PROJECT_ID']
ROOT_DIR, PROJECT_DIR, MODEL_DIR, OUTPUT_DIR = utils.manage_directories(os.getcwd(), PROJECT_ID, verbose=0)
RECON_DIR = os.path.join(PROJECT_DIR,'imgs_for_FID')
RESULT_DIR = os.path.join(PROJECT_DIR,'recons.result')
if not os.path.exists(RECON_DIR): os.mkdir(RECON_DIR)
return ROOT_DIR, PROJECT_DIR, MODEL_DIR, OUTPUT_DIR, RECON_DIR, RESULT_DIR
def print_settings(args):
print('='*64)
for x, y in args.items():
print('%s:%s'%(str(x),str(y)))
def save_loss_image(avg_loss_iter, avg_loss, dir):
plt.figure()
plt.plot(avg_loss_iter, avg_loss)
plt.savefig(dir)
plt.close()
##################################
# Prep for evaluation
##################################
def prepare_data_for_FID(args):
print('prepare_data_for_FID()')
ROOT_DIR = os.getcwd()
FID_DATA_DIR = manage_dir_for_FID(args['dataset'], ROOT_DIR)
n_data = args['n'] if args['n']>8 else 8
print('preparing FID comparison dataset for %s'%(str(args['dataset'])))
print('FID_DATA_DIR:%s'%(str(FID_DATA_DIR)))
print('n_data: %s'%(str(n_data)))
if args['dataset'] == 'cifar10':
from .data import CIFAR10Dataset
ds = CIFAR10Dataset(train=False, root_dir=args['DATA_DIR'],download=False)
n_channel = 3
elif args['dataset'] == 'celeba64':
from .data import CelebADataset
DATA_DIR = args['DATA_DIR']
if DATA_DIR == 'data':
DATA_DIR = os.path.join(DATA_DIR, 'celeba', 'img_align_celeba.zip')
ds = CelebADataset(DATA_DIR, img_size=(64,64))
n_channel = 3
else:
raise RuntimeError('Invalid --dataset?')
import random
n_available = ds.__len__()
indices = np.array(range(n_available))
random.shuffle(indices)
if n_data>n_available: n_data = n_available
import PIL
for i in range(n_data):
x,y0 = ds.__getitem__(indices[i]) # min, max range is [0,1]
        if isinstance(x, torch.Tensor):
x = x.clone().detach().numpy()
x = (x.transpose(1,2,0) * 255.).astype(np.uint8)
filename = os.path.join(FID_DATA_DIR, '%s.%s.jpg'%(str(args['dataset']),str(i)))
if n_channel==3:
img = PIL.Image.fromarray(x)
elif n_channel==1:
x = np.concatenate((x,x,x),axis=2)
img = PIL.Image.fromarray(x)
else:
raise RuntimeError('wrong channel number?!')
img.save(filename)
# print(x.shape, y0, '[%s,%s]'%(str(np.min(x)),str(np.max(x))))
class Reconstructor(Trainer):
def __init__(self):
super(Reconstructor, self).__init__()
def sample(self, args):
DATA_DIR = args['DATA_DIR']
ROOT_DIR, PROJECT_DIR, MODEL_DIR, OUTPUT_DIR, RECON_DIR, RESULT_DIR = manage_directories2(args)
net = self.init_or_load_model(MODEL_DIR, args)
net.to(device=device)
net.eval()
if args['model']=='SimpleGrowth' and args['dataset']=='cifar10':
latent_dim = 15
latent_img_shape = (4,4)
elif args['model']=='SimpleGrowth' and args['dataset']=='celeba64':
latent_dim = 15
latent_img_shape = (4,4)
else:
            raise RuntimeError('Invalid dataset or model?')
img_folder = os.path.join(OUTPUT_DIR,'sample')
if not os.path.exists(img_folder): os.mkdir(img_folder)
sampling_mode = args['sampling_mode']
img_samples, img_dir = self.make_img_samples(net, latent_dim, latent_img_shape, img_folder, args)
save_sixteen_images(img_samples, img_dir, title=None)
def make_img_samples(self, net, latent_dim , latent_img_shape, img_folder, args):
batch_size = 16
mode= args['sampling_mode']
H, W = latent_img_shape
if mode is None:
feature_vec = 2*torch.rand(size=(batch_size, latent_dim*H*W))-1
feature_vec = feature_vec.to(torch.float).to(device=device)
# print(feature_vec.shape)
# raise Exception('gg')
img_samples = net.sample( (batch_size,latent_dim,H,W), latent_vec=feature_vec).clone().detach().cpu().numpy().transpose(0,2,3,1)
img_dir = os.path.join(img_folder,'samples.jpeg')
elif mode == 'transit':
args['batch_size'] = 2
trainloader, _ = self.get_data_loader(args)
i,data = next(enumerate(trainloader))
x, _ = data
x = x.to(torch.float).to(device=device)
b, C, H0, W0 = x.shape
latents = net.en(net.conv1(x)).reshape(b,-1)
_,ldim = latents.shape
diff = (latents[1]-latents[0])/15
z = torch.zeros(size=(16,ldim))
for i in range(16):
z[i] = latents[0] + diff *i
z = z.to(torch.float).to(device=device)
img_samples = net.sample( (batch_size,latent_dim,H,W), latent_vec=z).clone().detach().cpu().numpy().transpose(0,2,3,1)
img_dir = os.path.join(img_folder,'samples_transit.jpeg')
return img_samples, img_dir
def compute_fid(self, args, model):
print('compute_fid()')
ROOT_DIR, PROJECT_DIR, _ , OUTPUT_DIR, RECON_DIR, RESULT_DIR = manage_directories2(args)
from .pipeline import manage_dir_for_FID
FID_DATA_DIR = manage_dir_for_FID(args['dataset'], ROOT_DIR)
if not os.path.exists(RESULT_DIR):
RESULT_DICT = {}
else:
TAB_LEVEL = 1
            RESULT_DICT = self.load_pickled_data(RESULT_DIR, tv=(TAB_LEVEL,0,100),
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import annotations
import enum
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Callable, List, Tuple
if TYPE_CHECKING:
from minihack import MiniHack
from nle.nethack import Command, CompassDirection
Y_cmd = CompassDirection.NW
class EventType(enum.IntEnum):
MESSAGE = 0
LOC_ACTION = 1
COORD = 2
LOC = 3
COMESTIBLES = [
"orange",
"meatball",
"meat ring",
"meat stick",
"kelp frond",
"eucalyptus leaf",
"clove of garlic",
"sprig of wolfsbane",
"carrot",
"egg",
"banana",
"melon",
"candy bar",
"lump of royal jelly",
]
class Event(ABC):
"""An event which can occur in a MiniHack episode.
This is the base class of all other events.
"""
def __init__(
self,
reward: float,
repeatable: bool,
terminal_required: bool,
terminal_sufficient: bool,
):
"""Initialise the Event.
Args:
reward (float):
                The reward for the event occurring
            repeatable (bool):
                Whether the event can occur repeatedly (i.e. whether the
                reward can be collected multiple times)
terminal_required (bool):
Whether this event is required for the episode to terminate.
terminal_sufficient (bool):
Whether this event causes the episode to terminate on its own.
"""
self.reward = reward
self.repeatable = repeatable
self.terminal_required = terminal_required
self.terminal_sufficient = terminal_sufficient
self.achieved = False
@abstractmethod
def check(self, env, previous_observation, action, observation) -> float:
"""Check whether the environment is in the state such that this event
has occured.
Args:
env (MiniHack):
The MiniHack environment in question.
previous_observation (tuple):
The previous state observation.
action (int):
The action taken.
observation (tuple):
The current observation.
Returns:
float: The reward.
"""
pass
def reset(self):
"""Reset the event, if there is any state necessary."""
self.achieved = False
def _set_achieved(self) -> float:
if not self.repeatable:
self.achieved = True
return self.reward
def _standing_on_top(env, location):
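    # (Added note) NetHack does not draw an object's glyph while the agent is
    # standing on top of it, so the location counts as reached exactly when its
    # glyph is no longer present on the screen.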
return not env.screen_contains(location)
class LocActionEvent(Event):
"""An event which checks whether an action is performed at a specified
location.
"""
def __init__(
self,
*args,
loc: str,
action: Command,
):
"""Initialise the Event.
Args:
loc (str):
The name of the location to reach.
            action (Command):
                The action to perform.
            reward (float):
                The reward for the event occurring
            repeatable (bool):
                Whether the event can occur repeatedly (i.e. whether the
                reward can be collected multiple times)
terminal_required (bool):
Whether this event is required for the episode to terminate.
terminal_sufficient (bool):
Whether this event causes the episode to terminate on its own.
"""
super().__init__(*args)
self.loc = loc
self.action = action
self.status = False
def check(self, env, previous_observation, action, observation) -> float:
del previous_observation, observation
if env._actions[action] == self.action and _standing_on_top(
env, self.loc
):
self.status = True
elif env._actions[action] == Y_cmd and self.status:
return self._set_achieved()
else:
self.status = False
return 0
def reset(self):
super().reset()
self.status = False
class LocEvent(Event):
"""An event which checks whether a specified location is reached."""
def __init__(self, *args, loc: str):
        """Initialise the Event.
        Args:
            loc (str):
                The name of the location to reach.
            reward (float):
                The reward for the event occurring
            repeatable (bool):
                Whether the event can occur repeatedly (i.e. whether the
                reward can be collected multiple times)
            terminal_required (bool):
                Whether this event is required for the episode to terminate.
            terminal_sufficient (bool):
                Whether this event causes the episode to terminate on its own.
        """
        super().__init__(*args)
        self.loc = loc
def check(self, env, previous_observation, action, observation) -> float:
del previous_observation, action, observation
if _standing_on_top(env, self.loc):
return self._set_achieved()
return 0.0
class CoordEvent(Event):
"""An event which occurs when reaching certain coordinates."""
def __init__(self, *args, coordinates: Tuple[int, int]):
"""Initialise the Event.
Args:
coordinates (tuple):
The coordinates to reach for the event.
reward (float):
                The reward for the event occurring
            repeatable (bool):
                Whether the event can occur repeatedly (i.e. whether the
                reward can be collected multiple times)
terminal_required (bool):
Whether this event is required for the episode to terminate.
terminal_sufficient (bool):
Whether this event causes the episode to terminate on its own.
"""
super().__init__(*args)
self.coordinates = coordinates
def check(self, env, previous_observation, action, observation) -> float:
coordinates = tuple(observation[env._blstats_index][:2])
if self.coordinates == coordinates:
return self._set_achieved()
return 0.0
class MessageEvent(Event):
"""An event which occurs when any of the `messages` appear."""
def __init__(self, *args, messages: List[str]):
"""Initialise the Event.
Args:
messages (list):
The messages to be seen to trigger the event.
reward (float):
                The reward for the event occurring
            repeatable (bool):
                Whether the event can occur repeatedly (i.e. whether the
                reward can be collected multiple times)
terminal_required (bool):
Whether this event is required for the episode to terminate.
terminal_sufficient (bool):
Whether this event causes the episode to terminate on its own.
"""
super().__init__(*args)
self.messages = messages
def check(self, env, previous_observation, action, observation) -> float:
del previous_observation, action
curr_msg = (
observation[env._original_observation_keys.index("message")]
.tobytes()
.decode("utf-8")
)
for msg in self.messages:
if msg in curr_msg:
return self._set_achieved()
return 0.0
class AlwaysEvent(Event):
def __init__(self, *args):
super().__init__(*args)
def check(self, env, previous_observation, action, observation) -> float:
return self._set_achieved()
class AbstractRewardManager(ABC):
"""This is the abstract base class for the ``RewardManager`` that is used
for defining custom reward functions.
"""
def __init__(self):
self.terminal_sufficient = None
self.terminal_required = None
@abstractmethod
def collect_reward(self) -> float:
"""Return reward calculated and accumulated in check_episode_end_call,
and then reset it.
Returns:
            float: The reward.
"""
raise NotImplementedError
@abstractmethod
def check_episode_end_call(
self, env, previous_observation, action, observation
) -> bool:
"""Check if the task has ended, and accumulate any reward from the
transition in ``self._reward``.
Args:
env (MiniHack):
The MiniHack environment in question.
previous_observation (tuple):
The previous state observation.
action (int):
The action taken.
observation (tuple):
The current observation.
Returns:
bool: Boolean whether the episode has ended.
"""
raise NotImplementedError
@abstractmethod
def reset(self) -> None:
"""Reset all events, to be called when a new episode occurs."""
raise NotImplementedError
class RewardManager(AbstractRewardManager):
"""This class is used for managing rewards, events and termination for
MiniHack tasks.
    Some notes on the ordering of calls in the MiniHack/NetHack base class:
- ``step(action)`` is called on the environment
- Within ``step``, first a copy of the last observation is made, and then the
underlying NetHack game is stepped
    - Then ``_is_episode_end(observation)`` is called to check whether the
    episode has ended (this is overridden if we've gone over our max_steps,
    or the underlying NetHack game says we're done, i.e. we died)
- Then ``_reward_fn(last_observation, observation)`` is called to calculate
the reward at this time-step
- if ``end_status`` tells us the game is done, we quit the game
- then ``step`` returns the observation, calculated reward, done, and some
statistics.
All this means that we need to check whether an observation is terminal in
``_is_episode_end`` before we're calculating the reward function.
The call of ``_is_episode_end`` in ``MiniHack`` will call
``check_episode_end_call`` in this class, which checks for termination and
accumulates any reward, which is returned and zeroed in ``collect_reward``.
"""
def __init__(self):
self.events: List[Event] = []
self.custom_reward_functions: List[
Callable[[MiniHack, Any, int, Any], float]
] = []
self._reward = 0.0
# Only used for GroupedRewardManager
self.terminal_sufficient = None
self.terminal_required = None
def add_custom_reward_fn(
self, reward_fn: Callable[[MiniHack, Any, int, Any], float]
) -> None:
"""Add a custom reward function which is called every after step to
calculate reward.
The function should be a callable which takes the environment, previous
observation, action and current observation and returns a float reward.
Args:
reward_fn (Callable[[MiniHack, Any, int, Any], float]):
A reward function which takes an environment, previous
observation, action, next observation and returns a reward.
"""
self.custom_reward_functions.append(reward_fn)
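    # (Added illustration) A custom reward function takes
    # (env, previous_observation, action, observation) and returns a float, e.g.:
    #
    #   def small_step_penalty(env, prev_obs, action, obs):
    #       return -0.01  # constant per-step penalty
    #
    #   reward_manager.add_custom_reward_fn(small_step_penalty)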
def add_event(self, event: Event):
"""Add an event to be managed by the reward manager.
Args:
event (Event):
The event to be added.
"""
self.events.append(event)
def _add_message_event(
self, msgs, reward, repeatable, terminal_required, terminal_sufficient
):
self.add_event(
MessageEvent(
reward,
repeatable,
terminal_required,
terminal_sufficient,
messages=msgs,
)
)
def _add_loc_action_event(
self,
loc,
action,
reward,
repeatable,
terminal_required,
terminal_sufficient,
):
try:
action = Command[action.upper()]
except KeyError:
raise KeyError(
"Action {} is not in the action space.".format(action.upper())
)
self.add_event(
LocActionEvent(
reward,
repeatable,
terminal_required,
terminal_sufficient,
loc=loc.lower(),
action=action,
)
)
def add_eat_event(
self,
name: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add an event which is triggered when `name` is eaten.
Args:
name (str):
The name of the object being eaten.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
                Whether this
from django import template
from django.utils.safestring import mark_safe
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.conf import settings
register = template.Library()
# read setting file
try:
import_by_developer = not bool(settings.C3_IMPORT)
except AttributeError:
import_by_developer = False
###############################################################################
def import_c3():
"""Generates 'script' tags to import C3 files.
Uses 'static' function and creates urls of C3 static files, then
uses them in 'script' and 'link' HTML elements.
Returns:
        A string that contains two script and one link HTML element.
"""
# checks setting and returns empty string if user
# imports static files himself (no import occurs)
if import_by_developer:
return str()
import_c3_css = '<link type="text/css" rel="stylesheet" href="%s"/>' \
% static('django_c3/css/c3.min.css')
import_js_d3 = '<script type="text/javascript" src="%s"></script>' % \
static('django_c3/js/d3.v3.min.js')
import_js_c3 = '<script type="text/javascript" src="%s"></script>' % \
static('django_c3/js/c3.min.js')
return '%s\n%s\n%s' % (import_c3_css, import_js_d3, import_js_c3)
###############################################################################
@register.simple_tag(takes_context=True)
def step(
context, bind_to, data, title='', area=False, x_is_category=False,
labels=False, vertical_grid_line=False, horizontal_grid_line=False,
show_legend=True, zoom=False, group_tooltip=True, height=None,
width=None
):
"""Generates javascript code to show a 'step' chart.
Args:
context: Context of template.
        bind_to: A string that specifies an HTML element (eg: id or class)
            that the chart will be shown in. (like: '#chart')
        data: It is a dictionary that contains the chart data, some
            information about extra lines, grouping of data and
            chart axis labels. eg:
{
'x': ['2017-5-19', '2017-5-20', '2017-5-21', '2017-5-22'],
'horizontal_lines': [40],
# 'vertical_lines': [40],
'data': [
{'title': 'A', 'values': [26, 35, 52, 34, 45, 74],
'color': '#FF34FF'},
# {'title': 'B', 'values': [54, 25, 52, 26, 20, 89]},
],
# 'groups': [('A', 'B')]
}
            vertical_lines only works if x_is_category is set to False.
title: A string that will be shown on top of the chart.
area: It's a boolean option. If true, the area under the curve
will be colored.
        x_is_category: It's a boolean option. If false, labels of the X axis
            will be treated as real numbers and sorted automatically.
        labels: It's a boolean option. If true, the value of each record will
            be shown on the chart.
        vertical_grid_line: It's a boolean option. If true, vertical grid
            lines will be drawn in the chart.
        horizontal_grid_line: It's a boolean option. If true, horizontal grid
            lines will be drawn in the chart.
        show_legend: It's a boolean option. If false, the chart legend
            will be hidden.
        zoom: It's a boolean option. If true, the end user can scroll on the
            chart to zoom in and out.
        group_tooltip: It's a boolean option. If true, the data of all records
            at that point will be shown together.
        height: It's an integer option that determines the height of the chart
            in pixels.
        width: It's an integer option that determines the width of the chart
            in pixels.
Returns:
A string contains chart js code and import code of C3 static files, if
it did not imported yet.
You can see structure of chart in chart_structur variable.
"""
# step chart structure in JS
chart_structur = (
'\n<script type="text/javascript">'
'\n var chart = c3.generate({'
'\n bindto: "%s",'
'\n data: {'
'\n x: %s,'
'\n columns: ['
'\n %s'
'\n ],'
'\n type : "%s",'
'\n colors: {'
'\n %s'
'\n },'
'\n groups: ['
'\n %s'
'\n ],'
'\n labels : %s'
'\n },'
'\n title: { text: "%s"},'
'\n axis: { x: { type: "%s" } },'
'\n grid: {'
'\n x: { show: %s ,lines: [%s] },'
'\n y: { show: %s ,lines: [%s] },'
'\n },'
'\n legend: { show: %s },'
'\n zoom: { enabled: %s },'
'\n tooltip: { grouped: %s },'
'\n size: { height: %s, width: %s }'
'\n });'
'\n</script>'
)
# convert parameters to strings to be acceptable in JS and C3 syntax.
if area:
_type = 'area-step'
else:
_type = 'step'
if x_is_category:
x_type = 'category'
else:
x_type = ''
if labels:
labels = 'true'
else:
labels = 'false'
if vertical_grid_line:
vertical_grid_line = 'true'
else:
vertical_grid_line = 'false'
if horizontal_grid_line:
horizontal_grid_line = 'true'
else:
horizontal_grid_line = 'false'
if show_legend:
show_legend = 'true'
else:
show_legend = 'false'
if zoom:
zoom = 'true'
else:
zoom = 'false'
if group_tooltip:
group_tooltip = 'true'
else:
group_tooltip = 'false'
if height is not None:
height = int(height)
else:
height = 'null'
if width is not None:
width = int(width)
else:
width = 'null'
# read horizontal line points from data
horizontal_lines = str()
if 'horizontal_lines' in data.keys():
for line in data['horizontal_lines']:
horizontal_lines = ''.join([horizontal_lines,
'{ value: %s}' % line, ','])
# read vertical line points from data
# raise an exception if x_is_category set to true and vertical_lines exists
vertical_lines = str()
if 'vertical_lines' in data.keys():
if x_is_category:
raise Exception(
"It's meaningless to use vertical_lines with x_is_category."
)
for line in data['vertical_lines']:
vertical_lines = ''.join(
[vertical_lines, '{ value: %s}' % line, ','])
# reads 'x' field of data and creates X axis labels.
# a hash is used to naming X axis labels
x_labels = str()
if 'x' in data.keys():
if x_is_category:
x_labels = data['x']
else:
x_labels = list(filter(lambda x: int(x), data['x']))
x_labels = ','.join([repr(str(label)) for label in x_labels])
x_labels = '["2d2014226823e74c2accfcce8e0ca141", %s],' % x_labels
x_label_list_name = '"2d2014226823e74c2accfcce8e0ca141"'
else:
x_labels = ''
x_label_list_name = "null"
# read records points to draw on chart
data_title_list = list()
chart_data = str()
for item in data['data']:
values = ','.join([str(v) for v in item['values']])
item_data = '["%s", %s], ' % (item['title'], values)
chart_data = ' '.join([chart_data, item_data])
data_title_list.append(item['title'])
# add X axis labels to chart data
chart_data = ''.join([chart_data, x_labels])
# read colors of data
chart_color = str()
for item in data['data']:
if 'color' in item.keys():
item_color = '"%s": "%s", ' % (item['title'], item['color'])
chart_color = ' '.join([chart_color, item_color])
# read grouping details of data
total_group_string = str()
if 'groups' in data.keys():
for group in data['groups']:
group_string = str()
for item in group:
# raise an exception if mentioned key were not exist in data
if item not in data_title_list:
raise ValueError("%s is not exists in your data!" % item)
group_string = ''.join([group_string, ',', repr(item)])
total_group_string = ''.join(
[total_group_string, '[', group_string, ']', ','])
# pass arguments to chart structure
chart = chart_structur % (
bind_to, x_label_list_name,
chart_data, _type, chart_color, total_group_string, labels,
title, x_type, vertical_grid_line, vertical_lines,
horizontal_grid_line, horizontal_lines, show_legend, zoom,
group_tooltip, height, width
)
    # add C3 import elements to it if they have not been imported yet, and return it.
if not ('import_js_c3' in context and context['import_js_c3']):
context['import_js_c3'] = True
return mark_safe('%s\n%s' % (import_c3(), chart))
else:
return mark_safe(chart)
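# (Added illustration) Example of using the tag above in a Django template; the
# load name `django_c3` is assumed from the static paths used in import_c3():
#
#   {% load django_c3 %}
#   {% step '#chart' data title='Visits' x_is_category=True %}
#
# where the view passes e.g.:
#   data = {
#       'x': ['2017-5-19', '2017-5-20', '2017-5-21'],
#       'data': [{'title': 'A', 'values': [26, 35, 52], 'color': '#FF34FF'}],
#   }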
###############################################################################
@register.simple_tag(takes_context=True)
def line_xy(
context, bind_to, data, title='', angle=True, area=False,
labels=False, vertical_grid_line=False, horizontal_grid_line=False,
show_legend=True, zoom=False, show_points=True, group_tooltip=True,
height=None, width=None
):
"""Generates javascript code to show a 'bar' chart.
Args:
context: Context of template.
        bind_to: A string that specifies an HTML element (eg: id or class)
            that the chart will be shown in. (like: '#chart')
        data: It is a dictionary that contains the chart data, some
            information about extra lines, grouping of data and
            chart axis labels. eg:
{
'horizontal_lines': [5],
'vertical_lines': [5],
'data': [
{'title': 'A', 'values': [
(1, 2), (2, 4), (3, 9),
(4, 4), (5, 3), (6, 2), (7, 1)]},
{'title': 'B', 'values': [
(3, 6), (5, 5), (7, 9), (6, 4),
(3, 3), (1, 2), (2, 7)],
'color': '#CCCCC'},
],
# 'groups': [('A', 'B')]
}
title: A string that will be shown on top of the chart.
area: It's a boolean option. If true, the area under the curve
will be colored.
angle: It's a boolean option. If false, chart type will be spline.
        labels: It's a boolean option. If true, the value of each record will
            be shown on the chart.
        vertical_grid_line: It's a boolean option. If true, vertical grid
            lines will be drawn in the chart.
        horizontal_grid_line: It's a boolean option. If true, horizontal grid
            lines will be drawn in the chart.
        show_legend: It's a boolean option. If false, the chart legend
            will be hidden.
        zoom: It's a boolean option. If true, the end user can scroll on the
            chart to zoom in and out.
        group_tooltip: It's a boolean option. If true, the data of all records
            at that point will be shown together.
I11i % II111iiii % O0 . i1IIi / I1Ii111
if 100 - 100: I1ii11iIi11i * i11iIiiIii % oO0o / Oo0Ooo / ooOoO0o + I1ii11iIi11i
if 59 - 59: I1Ii111 - IiII
if 14 - 14: iIii1I11I1II1 - iIii1I11I1II1
if 5 - 5: IiII
if ( os . path . exists ( "lisp.py" ) and os . path . exists ( "lisp.pyo" ) == False ) :
lisp . lprint ( "In manual mode, ignoring 'lisp enable' command" )
return ( clause )
if 84 - 84: II111iiii * oO0o * II111iiii % IiII / I1IiiI
if 100 - 100: IiII . Ii1I - iIii1I11I1II1 . i11iIiiIii / II111iiii
OO0OO00oo0 = clause . split ( " " )
OO0OO00oo0 = OO0OO00oo0 [ 0 ] + " " + OO0OO00oo0 [ 1 ]
if 71 - 71: I1Ii111 * Oo0Ooo . I11i
I1IiIiiIiIII = lisp_core_commands [ "lisp enable" ]
I1IiIiiIiIII = I1IiIiiIiIII [ 1 ]
if 49 - 49: IiII * O0 . IiII
if 19 - 19: II111iiii - IiII
if 59 - 59: o0oOOo0O0Ooo * OoO0O00 - Ii1I . OOooOOo
if 89 - 89: OOooOOo
if 69 - 69: ooOoO0o - OoooooooOO * O0
ooo0 , IIiIiiii , I1IiIiiIiIII = lisp_syntax_check ( I1IiIiiIiIII , clause )
if 84 - 84: ooOoO0o + i11iIiiIii - OOooOOo * ooOoO0o
if 33 - 33: ooOoO0o % i1IIi - oO0o . O0 / O0
if 96 - 96: OoooooooOO + IiII * O0
if 86 - 86: Ii1I
if ( ooo0 == True ) : return ( IIiIiiii )
if 29 - 29: iIii1I11I1II1 - OoO0O00 + I1IiiI % iIii1I11I1II1 % OOooOOo
O0OOO00 = { "itr" : "lisp-itr" , "etr" : "lisp-etr" , "rtr" : "lisp-rtr" ,
"map-resolver" : "lisp-mr" , "map-server" : "lisp-ms" ,
"ddt-node" : "lisp-ddt" }
if 62 - 62: i11iIiiIii + OoOoOO00 + i1IIi
if 69 - 69: OoOoOO00
if 63 - 63: OoO0O00 / OoOoOO00 * iIii1I11I1II1 . I1Ii111
if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
for OooOo in list ( O0OOO00 . keys ( ) ) :
oOo0 = True if I1IiIiiIiIII [ OooOo ] == "yes" else False
lisp_start_stop_process ( O0OOO00 [ OooOo ] , oOo0 )
if 30 - 30: OOooOOo + II111iiii - IiII * OoooooooOO
return ( IIiIiiii )
if 19 - 19: IiII - o0oOOo0O0Ooo . iIii1I11I1II1 . OoOoOO00 / OOooOOo
if 87 - 87: OoOoOO00 - ooOoO0o - OOooOOo + Oo0Ooo % iIii1I11I1II1 / i11iIiiIii
if 12 - 12: ooOoO0o
if 86 - 86: oO0o - OoO0O00
if 63 - 63: I1IiiI / OoOoOO00 + OoooooooOO . I11i . ooOoO0o
if 48 - 48: i1IIi - iII111i - i11iIiiIii . I11i - iII111i * I11i
if 60 - 60: OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
def lisp_debug_command ( lisp_socket , clause , single_process ) :
OO0OO00oo0 = clause . split ( " " )
OO0OO00oo0 = OO0OO00oo0 [ 0 ] + " " + OO0OO00oo0 [ 1 ]
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
I1IiIiiIiIII = lisp_core_commands [ "lisp debug" ]
I1IiIiiIiIII = I1IiIiiIiIII [ 1 ]
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
if 80 - 80: oO0o * I11i / iIii1I11I1II1 % oO0o / iIii1I11I1II1
if 42 - 42: i1IIi / i11iIiiIii . Oo0Ooo * iII111i . i11iIiiIii * O0
if 44 - 44: i1IIi . I1IiiI / i11iIiiIii + IiII
ooo0 , IIiIiiii , I1IiIiiIiIII = lisp_syntax_check ( I1IiIiiIiIII , clause )
if 27 - 27: OOooOOo
if 52 - 52: I1Ii111 % OoOoOO00 + iIii1I11I1II1 * oO0o . Ii1I
if 95 - 95: iIii1I11I1II1 . IiII - OoooooooOO * OoO0O00 / o0oOOo0O0Ooo
if 74 - 74: oO0o
if ( ooo0 == True ) : return ( IIiIiiii )
if 34 - 34: iII111i
O0OOO00 = { "itr" : "lisp-itr" , "etr" : "lisp-etr" , "rtr" : "lisp-rtr" ,
"map-resolver" : "lisp-mr" , "map-server" : "lisp-ms" ,
"ddt-node" : "lisp-ddt" , "core" : "" }
if 44 - 44: i1IIi % I1IiiI % o0oOOo0O0Ooo
for iIIi1Ii1III in I1IiIiiIiIII :
oOoo0 = O0OOO00 [ iIIi1Ii1III ]
if ( single_process and single_process != oOoo0 ) : continue
if 86 - 86: i11iIiiIii + i11iIiiIii . I1Ii111 % I1IiiI . ooOoO0o
oo0i1iIIi1II1iiI = I1IiIiiIiIII [ iIIi1Ii1III ]
OO0OO00oo0 = ( "lisp debug {\n" + " {} = {}\n" . format ( iIIi1Ii1III , oo0i1iIIi1II1iiI ) + "}\n" )
if 17 - 17: Ii1I
if 67 - 67: O0 * I11i - o0oOOo0O0Ooo - II111iiii
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if 45 - 45: Ii1I - OOooOOo
if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
if ( iIIi1Ii1III == "core" ) :
lisp_process_command ( None , None , OO0OO00oo0 , None , [ None ] )
continue
if 10 - 10: Ii1I - i11iIiiIii . I1ii11iIi11i % i1IIi
if 78 - 78: iIii1I11I1II1 * Oo0Ooo . Oo0Ooo - OOooOOo . iIii1I11I1II1
OO0OO00oo0 = lisp . lisp_command_ipc ( OO0OO00oo0 , "lisp-core" )
lisp . lisp_ipc ( OO0OO00oo0 , lisp_socket , oOoo0 )
if ( single_process ) : break
if 30 - 30: ooOoO0o + ooOoO0o % IiII - o0oOOo0O0Ooo - I1ii11iIi11i
return ( IIiIiiii )
if 36 - 36: I11i % OOooOOo
if 72 - 72: I1IiiI / iII111i - O0 + I11i
if 83 - 83: O0
if 89 - 89: Oo0Ooo + I1ii11iIi11i - o0oOOo0O0Ooo
if 40 - 40: OoO0O00 + OoO0O00
if 94 - 94: iII111i * iIii1I11I1II1 . I11i
if 13 - 13: iIii1I11I1II1 * OoOoOO00 / I1Ii111 % ooOoO0o + oO0o
def lisp_replace_password_in_clause ( clause , keyword_string ) :
O0OO0O = clause . find ( keyword_string )
if 41 - 41: I1ii11iIi11i
if 5 - 5: Oo0Ooo
if 100 - 100: Ii1I + iIii1I11I1II1
if 59 - 59: IiII
if ( O0OO0O == - 1 ) : return ( clause )
if 89 - 89: OoOoOO00 % iIii1I11I1II1
if 35 - 35: I1ii11iIi11i + I1Ii111 - OoOoOO00 % oO0o % o0oOOo0O0Ooo % OoOoOO00
if 45 - 45: I1IiiI * OOooOOo % OoO0O00
if 24 - 24: ooOoO0o - I11i * oO0o
O0OO0O += len ( keyword_string )
o0o0O0O00oOOo = clause [ O0OO0O : : ] . find ( "\n" )
o0o0O0O00oOOo += O0OO0O
IIo00ooo = clause [ O0OO0O : o0o0O0O00oOOo ] . replace ( " " , "" )
if 87 - 87: Ii1I - I1ii11iIi11i % I1ii11iIi11i . oO0o / I1ii11iIi11i
if ( len ( IIo00ooo ) != 0 and IIo00ooo [ 0 ] == "=" ) : return ( clause )
if 6 - 6: OoOoOO00 / iIii1I11I1II1 * OoooooooOO * i11iIiiIii
if 79 - 79: IiII % OoO0O00
if 81 - 81: i11iIiiIii + i11iIiiIii * OoO0O00 + IiII
if 32 - 32: O0 . OoooooooOO
if 15 - 15: I1IiiI . OoO0O00
if 17 - 17: i11iIiiIii / Oo0Ooo . OoO0O00 / I1IiiI
IIo00ooo = IIo00ooo . replace ( " " , "" )
IIo00ooo = IIo00ooo . replace ( "\t" , "" )
IIo00ooo = lisp_hash_password ( IIo00ooo )
clause = clause [ 0 : O0OO0O ] + " =" + IIo00ooo + clause [ o0o0O0O00oOOo : : ]
if 38 - 38: i1IIi . I1ii11iIi11i % Ii1I + iIii1I11I1II1 + O0
if 47 - 47: OoO0O00 + IiII / II111iiii
if 97 - 97: I1ii11iIi11i / I1IiiI % O0 + i1IIi - ooOoO0o
if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii
return ( clause )
if 94 - 94: iII111i - Oo0Ooo + oO0o
if 59 - 59: I11i . I1IiiI - iIii1I11I1II1 +
from numpy import zeros
from numpy.linalg import eigvals
import time
import numpy as np
import scipy as sp
import scipy.signal as signal
import scipy.sparse as sparse
import osqp
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from .controller import Controller
from ..learning.edmd import Edmd
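# (Added sketch) `block_diag(M, n)` is used below but is not imported above; it is
# assumed here to repeat the sparse matrix M n times along the diagonal:
def block_diag(M, n):
    """Create a sparse block-diagonal matrix by repeating M n times (assumed helper)."""
    return sparse.block_diag([M for _ in range(n)])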
class MPCControllerDense(Controller):
"""
    Class for MPC controllers.
    The MPC problem is solved using OSQP.
    Use lifting=True to solve the MPC in the lifted space.
"""
def __init__(self, linear_dynamics, N, dt, umin, umax, xmin, xmax,
Q, R, QN, xr, plotMPC=False, plotMPC_filename="",lifting=False, edmd_object=None, name="noname", soft=False, D=None):
"""__init__ [summary]
Arguments:
            linear_dynamics {dynamical system} -- it contains the A and B matrices in continuous time
N {integer} -- number of timesteps
dt {float} -- time step in seconds
umin {numpy array [Nu,]} -- minimum control bound
umax {numpy array [Nu,]} -- maximum control bound
xmin {numpy array [Ns,]} -- minimum state bound
xmax {numpy array [Ns,]} -- maximum state bound
Q {numpy array [Ns,Ns]} -- state cost matrix
R {numpy array [Nu,Nu]} -- control cost matrix
QN {numpy array [Ns,]} -- final state cost
xr {numpy array [Ns,]} -- reference trajectory
Keyword Arguments:
plotMPC {bool} -- flag to plot results (default: {False})
plotMPC_filename {str} -- plotting filename (default: {""})
lifting {bool} -- flag to use state lifting (default: {False})
edmd_object {edmd object} -- lifting object. It contains projection matrix and lifting function (default: {Edmd()})
name {str} -- name for all saved files (default: {"noname"})
soft {bool} -- flag to enable soft constraints (default: {False})
D {[type]} -- cost matrix for the soft variables (default: {None})
"""
Controller.__init__(self, linear_dynamics)
# Load arguments
Ac, Bc = linear_dynamics.linear_system()
[nx, nu] = Bc.shape
ns = xr.shape[0]
#Discretize dynamics:
self.dt = dt
if lifting:
self.C = edmd_object.C
self.edmd_object = edmd_object
else:
self.C = sparse.eye(ns)
lin_model_d = signal.cont2discrete((Ac,Bc,self.C,zeros((ns,1))),dt)
Ad = sparse.csc_matrix(lin_model_d[0]) #TODO: If bad behavior, delete this
Bd = sparse.csc_matrix(lin_model_d[1]) #TODO: If bad behavior, delete this
self.plotMPC = plotMPC
self.plotMPC_filename = plotMPC_filename
self.q_d = xr
self.Q = Q
self.R = R
self.lifting = lifting
self.nu = nu
self.nx = nx
self.ns = ns
self.soft = soft
# Total desired path
if self.q_d.ndim==2:
self.Nqd = self.q_d.shape[1]
xr = self.q_d[:,:N]
# Prediction horizon
self.N = N
x0 = np.zeros(nx)
self.run_time = np.zeros([0,])
Rbd = sparse.kron(sparse.eye(N), R)
Qbd = sparse.kron(sparse.eye(N), Q)
Bbd = block_diag(Bd,nu).tocoo()
# Check Xmin and Xmax
if xmin.shape[0]==ns and xmin.ndim==1: # it is a single vector we tile it
x_min_flat = np.kron(np.ones(N), xmin)
x_max_flat = np.kron(np.ones(N), xmax)
elif xmin.shape[0]==ns*N: # if it is a long vector it is ok
x_min_flat = xmin
x_max_flat = xmax
elif xmin.shape[0] == ns and xmin.shape[1] == N: # if it is a block we flatten it
x_min_flat = np.reshape(xmin,(N*ns,),order='F')
x_max_flat = np.reshape(xmax,(N*ns,),order='F')
else:
raise ValueError('xmin has wrong dimensions. xmin shape={}'.format(xmin.shape))
self.x_min_flat = x_min_flat
self.x_max_flat = x_max_flat
# Check Umin and Umax
if umin.shape[0]==nu and umin.ndim==1:
u_min_flat = np.kron(np.ones(N), umin)
u_max_flat = np.kron(np.ones(N), umax)
elif umin.shape[0]==nu*N:
u_min_flat = umin
u_max_flat = umax
elif umin.shape[0] == nu and umin.shape[1] == N:
u_min_flat = np.reshape(umin,(N*nu,),order='F')
u_max_flat = np.reshape(umax,(N*nu,),order='F')
else:
raise ValueError('umin has wrong dimensions. Umin shape={}'.format(umin.shape))
self.u_min_flat = u_min_flat
self.u_max_flat = u_max_flat
#! GET a & b
# Write B:
diag_AkB = Bd
data_list = Bbd.data
row_list = Bbd.row
col_list = Bbd.col
B = sparse.coo_matrix((N*nx, N*nu))  # empty placeholder; rebuilt below from the accumulated COO data
for i in range(N):
if i<N-1:
AkB_bd_temp = block_diag(diag_AkB,N-i)
else:
AkB_bd_temp = diag_AkB.tocoo()
data_list = np.hstack([data_list,AkB_bd_temp.data])
row_list = np.hstack([row_list,AkB_bd_temp.row+np.full((AkB_bd_temp.row.shape[0],),nx*i)])
col_list = np.hstack([col_list,AkB_bd_temp.col])
diag_AkB = Ad.dot(diag_AkB)
B = sparse.coo_matrix((data_list, (row_list, col_list)), shape=(N*nx, N*nu))
a = Ad.copy()
Ak = Ad.copy()
for i in range(N-1):
Ak = Ak.dot(Ad)
a = sparse.vstack([a,Ak])
self.a = a
self.B = B
check_ab = True
if check_ab:
x0 = np.linspace(-5,40,nx)
x00 = np.linspace(-5,40,nx)
# Store data Init
nsim = N
xst = np.zeros((nx,nsim))
ust = np.zeros((nu,nsim))
# Simulate in closed loop
for i in range(nsim):
# Fake pd controller
ctrl = np.zeros(nu,) #np.random.rand(nu,)
x0 = Ad.dot(x0) + Bd.dot(ctrl)
# Store Data
xst[:,i] = x0
ust[:,i] = ctrl
x_dense = np.reshape(a @ x00 + B @ (ust.flatten('F')),(N,nx)).T
plt.figure()
plt.subplot(2,1,1)
for i in range(nx):
plt.plot(range(nsim),xst[i,:],'d',label="sim "+str(i))
plt.plot(range(nsim),x_dense[i,:],'d',label="ax+bu "+str(i))
plt.xlabel('Time(s)')
plt.grid()
plt.legend()
plt.subplot(2,1,2)
for i in range(nu):
plt.plot(range(nsim),ust[i,:],label=str(i))
plt.xlabel('Time(s)')
plt.grid()
plt.legend()
plt.savefig("AB_check_for_"+name+".pdf",bbox_inches='tight',format='pdf', dpi=2400)
plt.close()
# Cast MPC problem to a QP in dense form: the decision variable is u = (u(0),...,u(N-1))
# Compute Block Diagonal elements
self.Cbd = sparse.kron(sparse.eye(N), self.C)
CQCbd = self.Cbd.T @ Qbd @ self.Cbd
self.CtQ = self.C.T @ Q
Cbd = self.Cbd
P = Rbd + B.T @ CQCbd @ B
self.BTQbda = B.T @ CQCbd @ a
Aineq_x = Cbd @ B
xrQB = B.T @ np.reshape(self.CtQ.dot(xr),(N*nx,),order='F')
l = np.hstack([x_min_flat - Cbd @ a @ x0, u_min_flat])
u = np.hstack([x_max_flat - Cbd @ a @ x0, u_max_flat])
x0aQb = self.BTQbda @ x0
q = x0aQb - xrQB
Aineq_u = sparse.eye(N*nu)
A = sparse.vstack([Aineq_x, Aineq_u]).tocsc()
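# Recap of the dense QP assembled above (comment added for clarity, schematic only):
#   predicted states:  x = a*x0 + B*u,  with u = (u(0),...,u(N-1)) stacked
#   objective:         0.5*u'*P*u + q'*u,  P = Rbd + B'*Cbd'*Qbd*Cbd*B,
#                      q built from the current x0 and the reference xr
#   constraints:       l <= A*u <= u,  A = [Cbd*B; I], bounds from x_min/x_max and u_min/u_max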
if soft:
Pdelta = sparse.kron(sparse.eye(N), D)
P = sparse.block_diag([P,Pdelta])
qdelta = np.zeros(N*ns)
q = np.hstack([q,qdelta])
Adelta = sparse.csc_matrix(np.vstack([np.eye(N*ns),np.zeros((N*nu,N*ns))]))
A = sparse.hstack([A, Adelta])
plot_matrices = False
if plot_matrices:
#! Visualize Matrices
fig = plt.figure()
fig.suptitle("QP Matrices to solve MP in dense form. N={}, nx={}, nu={}".format(N,nx,nu),fontsize=20)
plt.subplot(2,4,1,xlabel="nx", ylabel="N*nx")
plt.imshow(a.toarray(), interpolation='nearest', cmap=cm.Greys_r)
plt.title("a in $x=ax_0+bu$")
plt.subplot(2,4,2,xlabel="N*nu", ylabel="N*nx")
plt.imshow(B.toarray(), interpolation='nearest', cmap=cm.Greys_r)
plt.title("b in $x=ax_0+bu$")
plt.subplot(2,4,3,xlabel="N*nu", ylabel="N*ns+N*nu")
plt.imshow(A.toarray(), interpolation='nearest', cmap=cm.Greys_r)
plt.title("A total in $l\\leq Ax \\leq u$")
plt.subplot(2,4,4)
plt.imshow(P.toarray(), interpolation='nearest', cmap=cm.Greys_r)
plt.title("P in $J=u^TPu+q^Tu$")
plt.subplot(2,4,5)
plt.imshow(Qbd.toarray(), interpolation='nearest', cmap=cm.Greys_r)
plt.title("Qbd")
#! Visualize Vectors
plt.subplot(2,4,6)
plt.plot(l)
plt.title('l in $l\\leq Ax \\leq u$')
plt.grid()
plt.subplot(2,4,7)
plt.plot(u)
plt.title("l in $l\\leq Ax \\geq u$")
plt.grid()
plt.subplot(2,4,8)
plt.plot(q)
plt.title("q in $J=u^TPu+q^Tu$")
plt.grid()
plt.tight_layout()
plt.savefig("MPC_matrices_for_"+name+".pdf",bbox_inches='tight',format='pdf', dpi=2400)
plt.close()
#plt.show()
# Create an OSQP object
self.prob = osqp.OSQP()
# Setup workspace
self.prob.setup(P=P.tocsc(), q=q, A=A, l=l, u=u, warm_start=True, verbose=False)
if self.plotMPC:
# Figure to plot MPC thoughts
self.fig, self.axs = plt.subplots(self.ns+self.nu)
if nx==4:
ylabels = ['$x$', '$\\theta$', '$\\dot{x}$', '$\\dot{\\theta}$']
else:
ylabels = [str(i) for i in range(nx)]
for ii in range(self.ns):
self.axs[ii].set(xlabel='Time(s)',ylabel=ylabels[ii])
self.axs[ii].grid()
for ii in range(self.ns,self.ns+self.nu):
self.axs[ii].set(xlabel='Time(s)',ylabel='u')
self.axs[ii].grid()
def eval(self, x, t):
'''
Args:
- x, numpy 1d array [ns,]
- time, t, float
'''
time_eval0 = time.time()
N = self.N
nu = self.nu
nx = self.nx
ns = self.ns
tindex = int(np.ceil(t/self.dt)) #TODO: Remove ceil and add back +1 if bad performance
#print("Eval at t={:.2f}, x={}".format(t,x))
# Update the local reference trajectory
if (tindex+N) < self.Nqd: # if we haven't reached the end of q_d yet
xr = self.q_d[:,tindex:tindex+N]
else: # we fill xr with copies of the last q_d
xr = np.hstack( [self.q_d[:,tindex:],np.transpose(np.tile(self.q_d[:,-1],(N-self.Nqd+tindex,1)))])
# Construct the new _osqp_q objects
if (self.lifting):
x = np.transpose(self.edmd_object.lift(x.reshape((x.shape[0],1)),xr[:,0].reshape((xr.shape[0],1))))[:,0]
#print("Eval at t={:.2f}, z={}".format(t,x))
#x = self.edmd_object.lift(x,xr[:,0])
BQxr = self.B.T @ np.reshape(self.CtQ.dot(xr),(N*nx,),order='F')
l = np.hstack([self.x_min_flat - self.Cbd @ self.a @ x, self.u_min_flat])
u = np.hstack([self.x_max_flat - self.Cbd @ self.a @ x, self.u_max_flat])
else:
BQxr = self.B.T @ np.reshape(self.Q.dot(xr),(N*nx,),order='F')
l = np.hstack([self.x_min_flat - self.a @ x, self.u_min_flat])
u = np.hstack([self.x_max_flat - self.a @ x, self.u_max_flat])
# Update initial state
BQax0 = self.BTQbda @ x
q = BQax0 - BQxr
if self.soft:
q = np.hstack([q,np.zeros(N*ns)])
self.prob.update(q=q,l=l,u=u)
#print('Time Setup {:.2f}ms'.format(1000*(time.time()-time_eval0)))
time_eval0 = time.time()
## Solve MPC Instance
self._osqp_result = self.prob.solve()
#print('Time Solve {:.2f}ms'.format(1000*(time.time()-time_eval0)))
time_eval0 = time.time()
# Check solver status
if self._osqp_result.info.status != 'solved':
print('ERROR: MPC DENSE could not be solved at t = {}, x = {}'.format(t, x))
raise ValueError('OSQP did not solve the problem!')
if self.plotMPC:
self.plot_MPC(t, x, xr, tindex)
self.run_time = np.append(self.run_time,self._osqp_result.info.run_time)
return self._osqp_result.x[:nu]
def parse_result(self,x,u):
"""parse_result obtain state from MPC optimization
Arguments:
x {numpy array [Ns,]} -- initial state
u {numpy array [Nu*N]} -- control action
Returns:
numpy array [Ns,N] -- state in the MPC optimization
"""
return np.transpose(np.reshape(self.a @ x + self.B @ u, (self.N, self.nx)))  # a@x + B@u has N*nx entries
def get_control_prediction(self):
"""get_control_prediction parse control command from MPC optimization
Returns:
numpy array [Nu,N] -- control command along the MPC optimization horizon
"""
return np.transpose(np.reshape( self._osqp_result.x[-self.N*self.nu:], (self.N,self.nu)))
def plot_MPC(self, current_time, x0, xr, tindex):
"""plot_MPC Plot MPC thoughts
Arguments:
current_time {float} -- current time
x0 {numpy array [Ns,]} | |
import filecmp
import os
import base64
import copy
import logging
import time
import jinja2
import pykube
import docker
import requests
from toscaparser.tosca_template import ToscaTemplate
from submitter.abstracts import base_adaptor as abco
from submitter.abstracts.exceptions import AdaptorCritical
from submitter import utils
logger = logging.getLogger("adaptor."+__name__)
SUPPORTED_CLOUDS = (
"ec2",
"nova",
"cloudsigma",
"cloudbroker"
)
RUNCMD_PLACEHOLDER = "echo micado runcmd placeholder"
class OccopusAdaptor(abco.Adaptor):
def __init__(self, adaptor_id, config, dryrun, validate=False, template=None):
super().__init__()
"""
Constructor method of the Adaptor
"""
if template and not isinstance(template, ToscaTemplate):
raise AdaptorCritical("Template is not a valid TOSCAParser object")
self.status = "init"
self.dryrun = dryrun
self.config = config
self.validate = validate
self.node_prefix = "node_def:"
self.node_name = ""
self.worker_infra_name = "micado_worker_infra"
self.min_instances = 1
self.max_instances = 1
self.ID = adaptor_id
self.template = template
self.auth_data_submitter = "/var/lib/micado/submitter/auth/auth_data.yaml"
self.node_path = "{}{}.yaml".format(self.config['volume'], self.ID)
self.node_path_tmp = "{}tmp_{}.yaml".format(self.config['volume'], self.ID)
self.infra_def_path_output = "{}{}-infra.yaml".format(self.config['volume'], self.ID)
self.infra_def_path_output_tmp = "{}{}-infra.tmp.yaml".format(self.config['volume'], self.ID)
self.infra_def_path_input = "./system/infrastructure_descriptor.yaml"
self.cloudinit_path = "./system/cloud_init_worker.yaml"
self.node_data = {}
self.node_def = {}
self.created = False
self.client = None
self.occopus = None
if not self.dryrun:
self._init_docker()
self.occopus_address = "occopus:5000"
self.auth_data_file = "/var/lib/micado/occopus/auth/auth_data.yaml"
self.occo_node_path = "/var/lib/micado/occopus/submitter/{}.yaml".format(self.ID)
self.occo_infra_path = "/var/lib/micado/occopus/submitter/{}-infra.yaml".format(self.ID)
logger.info("Occopus Adaptor initialised")
def translate(self, tmp=False, to_dict=False):
"""
Translate the TOSCA template subset handled by this adaptor to the Occopus node definition and infrastructure descriptor format.
The adaptor creates a mapping between the TOSCA and Occopus template descriptors.
"""
self.node_def = {}
logger.info("Starting OccoTranslation")
self.status = "translating"
for node in self.template.nodetemplates:
if '_' in node.name:
raise AdaptorCritical("Underscores in node {} not allowed".format(node.name))
self.node_name = node.name
self.node_data = {}
node = copy.deepcopy(node)
occo_interface = self._node_data_get_interface(node)
if not occo_interface:
continue
self._node_resolve_interface_data(occo_interface, "resource")
cloud_type = utils.get_cloud_type(node, SUPPORTED_CLOUDS)
if cloud_type == "cloudsigma":
logger.info("CloudSigma resource detected")
self._node_data_get_cloudsigma_host_properties(node, "resource")
elif cloud_type == "ec2":
logger.info("EC2 resource detected")
self._node_data_get_ec2_host_properties(node, "resource")
elif cloud_type == "cloudbroker":
logger.info("CloudBroker resource detected")
self._node_data_get_cloudbroker_host_properties(node, "resource")
elif cloud_type == "nova":
logger.info("Nova resource detected")
self._node_data_get_nova_host_properties(node, "resource")
self._get_policies(node)
self._get_infra_def(tmp)
node_type = self.node_prefix + self.node_name
self.node_def.setdefault(node_type, [])
self.node_def[node_type].append(self.node_data)
if not self.node_def:
self.status = "no occopus nodes found"
return
if to_dict:
return self.node_def
if tmp:
utils.dump_order_yaml(self.node_def, self.node_path_tmp)
elif self.validate is False:
if not self.dryrun:
self.prepare_auth_file()
utils.dump_order_yaml(self.node_def, self.node_path)
self.status = "translated"
def execute(self):
"""
Import Occopus node definition, and build up the infrastructure
through occopus container.
"""
logger.info("Starting Occopus execution {}".format(self.ID))
self.status = "executing"
if not self._config_files_exists():
logger.info("No config generated during translation, nothing to execute")
self.status = "Skipped"
return
if self.dryrun:
logger.info("DRY-RUN: Occopus execution in dry-run mode...")
self.status = "DRY-RUN Deployment"
return
else:
if self.created:
run = False
i = 0
while not run and i < 5:
try:
logger.debug("Occopus import starting...")
result = self.occopus.exec_run("occopus-import {0}".format(self.occo_node_path))
logger.debug("Occopus import has been successful")
run = True
except Exception as e:
i += 1
logger.debug("{0}. Try {1} of 5.".format(str(e), i))
time.sleep(5)
logger.debug(result)
if "Successfully imported" in result[1].decode("utf-8"):
try:
logger.debug("Occopus build starting...")
exit_code, out = self.occopus.exec_run("occopus-build {} -i {} --auth_data_path {} --parallelize"
.format(self.occo_infra_path,
self.worker_infra_name,
self.auth_data_file))
if exit_code == 1:
raise AdaptorCritical(out)
occo_api_call = requests.post("http://{0}/infrastructures/{1}/attach"
.format(self.occopus_address, self.worker_infra_name))
if occo_api_call.status_code != 200:
raise AdaptorCritical("Cannot submit infra to Occopus API!")
logger.debug("Occopus build has been successful")
except docker.errors.APIError as e:
logger.error("{0}. Error caught in calling Docker container".format(str(e)))
except requests.exceptions.RequestException as e:
logger.error("{0}. Error caught in call to occopus API".format(str(e)))
else:
logger.error("Occopus import was unsuccessful!")
raise AdaptorCritical("Occopus import was unsuccessful!")
else:
logger.error("Not connected to Occopus container!")
raise AdaptorCritical("Occopus container connection was unsuccessful!")
logger.info("Occopus executed")
self.status = "executed"
def undeploy(self):
"""
Undeploy Occopus infrastructure through Occopus rest API
"""
self.status = "undeploying"
logger.info("Undeploy {} infrastructure".format(self.ID))
if not self._config_files_exists():
logger.info("No config generated during translation, nothing to undeploy")
self.status = "Skipped"
return
elif self.dryrun:
logger.info("DRY-RUN: deleting infrastructure...")
self.status = "DRY-RUN Delete"
else:
requests.delete("http://{0}/infrastructures/{1}".format(self.occopus_address, self.worker_infra_name))
# self.occopus.exec_run("occopus-destroy --auth_data_path {0} -i {1}"
# .format(self.auth_data_file, self.worker_infra_name))
self.status = "undeployed"
def cleanup(self):
"""
Remove the generated files under "files/output_configs/"
"""
logger.info("Cleanup config for ID {}".format(self.ID))
if not self._config_files_exists():
logger.info("No config generated during translation, nothing to cleanup")
self.status = "Skipped"
return
try:
os.remove(self.node_path)
os.remove(self.infra_def_path_output)
except OSError as e:
logger.warning(e)
# Flush the occopus-redis db
try:
redis = self.client.containers.list(filters={'label':'io.kubernetes.container.name=occopus-redis'})[0]
if redis.exec_run("redis-cli FLUSHALL").exit_code != 0:
raise AdaptorCritical
except AdaptorCritical:
logger.warning("FLUSH in occopus-redis container failed")
except IndexError:
logger.warning("Could not find occopus-redis container for FLUSH")
except Exception:
logger.warning("Could not connect to Docker for FLUSH")
def update(self):
"""
Check whether there is any change in the node definition or in the infrastructure descriptor.
If only the infrastructure descriptor changed, rerun the Occopus build to refresh it.
If the node definition changed, detach the running infrastructure from Occopus first,
then rebuild it with the modified parameters.
"""
self.status = "updating"
self.min_instances = 1
self.max_instances = 1
logger.info("Updating the component config {}".format(self.ID))
self.translate(True)
if not self.node_def and os.path.exists(self.node_path):
logger.debug("No nodes in ADT, removing running nodes")
self._remove_tmp_files()
self.undeploy()
self.cleanup()
self.status = "Updated - removed all nodes"
elif not self.node_def:
logger.debug("No nodes found to be orchestrated with Occopus")
self._remove_tmp_files()
self.status = "Updated - no Occopus nodes"
elif not os.path.exists(self.node_path):
logger.debug("No running infrastructure, starting from new")
os.rename(self.node_path_tmp, self.node_path)
os.rename(self.infra_def_path_output_tmp, self.infra_def_path_output)
self.execute()
self.status = "updated"
elif not self._differentiate(self.node_path,self.node_path_tmp):
logger.debug("Node def file different, replacing old config and executing")
os.rename(self.node_path_tmp, self.node_path)
os.rename(self.infra_def_path_output_tmp, self.infra_def_path_output)
# Detach from the infra and rebuild
detach = requests.post("http://{0}/infrastructures/{1}/detach"
.format(self.occopus_address, self.worker_infra_name))
if detach.status_code != 200:
raise AdaptorCritical("Cannot detach infra from Occopus API!")
self.execute()
self.status = "updated"
elif not self._differentiate(self.infra_def_path_output, self.infra_def_path_output_tmp):
logger.debug("Infra tmp file different, replacing old config and executing")
os.rename(self.infra_def_path_output_tmp, self.infra_def_path_output)
self._remove_tmp_files()
# Rerun Occopus build to refresh infra definition
self.execute()
self.status = "updated"
else:
self.status = 'updated (nothing to update)'
logger.info("there are no changes in the Occopus files")
self._remove_tmp_files()
def _node_data_get_interface(self, node):
"""
Get interface for node from tosca
"""
interfaces = utils.get_lifecycle(node, "Occopus")
if not interfaces:
logger.debug("No interface for Occopus in {}".format(node.name))
return interfaces
def _node_resolve_interface_data(self, interfaces, key):
"""
Get cloud relevant information from tosca
"""
cloud_inputs = interfaces.get("create")
# TODO DEPRECATE 'endpoint_cloud' in favour of 'endpoint'
endpoint = cloud_inputs.get("endpoint", cloud_inputs.get("endpoint_cloud"))
self.node_data.setdefault(key, {}).setdefault("endpoint", endpoint)
def _node_data_get_context_section(self, properties):
"""
Create the context section in node definition
"""
self.node_data.setdefault("contextualisation", {}).setdefault(
"type", "cloudinit"
)
context = properties.get("context", {})
base_cloud_init = context.get("path") or self.cloudinit_path
cloud_config = context.get("cloud_config")
if not context:
logger.debug("The adaptor will use a default cloud-config")
self.node_data["contextualisation"].setdefault(
"context_template", self._get_cloud_init(None, base_cloud_init)
)
elif not cloud_config:
logger.debug("No cloud-config provided... using default cloud-config")
self.node_data["contextualisation"].setdefault(
"context_template", self._get_cloud_init(None, base_cloud_init)
)
elif context.get("insert"):
logger.debug("Insert the TOSCA cloud-config in the default config")
self.node_data["contextualisation"].setdefault(
"context_template", self._get_cloud_init(cloud_config, base_cloud_init, "insert")
)
elif context.get("append"):
logger.debug("Append the TOSCA cloud-config to the default config")
self.node_data["contextualisation"].setdefault(
"context_template", self._get_cloud_init(cloud_config, base_cloud_init, "append")
)
else:
logger.debug("Overwrite the default cloud-config")
self.node_data["contextualisation"].setdefault(
"context_template", self._get_cloud_init(cloud_config, base_cloud_init, "overwrite")
)
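# Hedged example of the TOSCA 'context' property interpreted above (the keys are the ones
# checked in this method; the cloud_config payload itself is illustrative):
#
#   context:
#     insert: true            # or 'append: true'; omit both to overwrite the default
#     cloud_config:
#       runcmd:
#         - echo "extra worker setup"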
def _node_data_get_cloudsigma_host_properties(self, node, key):
"""
Get CloudSigma properties and create node definition
"""
properties = self._get_host_properties(node)
nics = dict()
self.node_data.setdefault(key, {}).setdefault("type", "cloudsigma")
self.node_data.setdefault(key, {})\
.setdefault("libdrive_id", properties["libdrive_id"])
self.node_data.setdefault(key, {})\
.setdefault("description", {})\
.setdefault("cpu", properties["num_cpus"])
self.node_data.setdefault(key, {}) \
.setdefault("description", {}) \
.setdefault("mem", properties["mem_size"])
self.node_data.setdefault(key, {})\
.setdefault("description", {})\
.setdefault("vnc_password", properties["vnc_password"])
if properties.get("public_key_id") is not None:
pubkeys = list()
pubkeys.append(properties["public_key_id"])
self.node_data[key]["description"]["pubkeys"] = pubkeys
if properties.get("hv_relaxed") is not None:
self.node_data.setdefault(key, {})\
.setdefault("description", {})\
.setdefault("hv_relaxed", properties["hv_relaxed"])
if properties.get("hv_tsc") is not None:
self.node_data.setdefault(key, {})\
.setdefault("description", {})\
.setdefault("hv_tsc", properties["hv_tsc"])
nics=properties.get("nics")
self.node_data[key]["description"]["nics"] = nics
self._node_data_get_context_section(properties)
self.node_data.setdefault("health_check", {}) \
.setdefault("ping",False)
def _node_data_get_ec2_host_properties(self, node, key):
"""
Get EC2 properties and create node definition
"""
properties = self._get_host_properties(node)
self.node_data.setdefault(key, {}).setdefault("type", "ec2")
self.node_data.setdefault(key, {}) \
.setdefault("regionname", properties["region_name"])
self.node_data.setdefault(key, {}) \
.setdefault("image_id", properties["image_id"])
self.node_data.setdefault(key, {}) \
.setdefault("instance_type", properties["instance_type"])
self._node_data_get_context_section(properties)
if properties.get("key_name") is not None:
self.node_data.setdefault(key, {}) \
.setdefault("key_name", properties["key_name"])
if properties.get("subnet_id") is not None:
self.node_data.setdefault(key, {}) \
.setdefault("subnet_id", properties["subnet_id"])
if properties.get("security_group_ids") is not None:
security_groups = list()
security_groups = properties["security_group_ids"]
self.node_data[key]["security_group_ids"] = security_groups
if properties.get("tags") is not None:
tags = properties["tags"]
self.node_data[key]["tags"] = tags
self.node_data.setdefault("health_check", {}) \
.setdefault("ping",False)
def _node_data_get_cloudbroker_host_properties(self, node, key):
"""
Get CloudBroker properties and create node definition
"""
properties = self._get_host_properties(node)
self.node_data.setdefault(key, {}).setdefault("type", "cloudbroker")
self.node_data.setdefault(key, {}) \
.setdefault("description", {}) \
.setdefault("deployment_id", properties["deployment_id"])
self.node_data.setdefault(key, {}) \
.setdefault("description", {}) \
.setdefault("instance_type_id", properties["instance_type_id"])
self.node_data.setdefault(key, {}) \
.setdefault("description", {}) \
.setdefault("key_pair_id", properties["key_pair_id"])
if properties.get("opened_port") is not None:
self.node_data.setdefault(key, {}) \
.setdefault("description", {}) \
.setdefault("opened_port", properties["opened_port"])
if properties.get("infrastructure_component_id") is not None:
self.node_data.setdefault(key,{}) \
.setdefault("description", {}) \
.setdefault("infrastructure_component_id", properties["infrastructure_component_id"])
if properties.get("dynamic_domain_name_ids") is not None:
self.node_data.setdefault(key,{}) \
.setdefault("description", {}) \
.setdefault("dynamic_domain_name_ids", {}) \
.setdefault("dynamic_domain_name_id", properties["dynamic_domain_name_ids"][0])
self._node_data_get_context_section(properties)
self.node_data.setdefault("health_check", {}) \
.setdefault("ping",False)
def _node_data_get_nova_host_properties(self, node, key):
"""
Get NOVA properties and create node | |
@property
def back_side_beamdiffuse_visible_solar_reflectance(self):
"""field `Back Side Beam-Diffuse Visible Solar Reflectance`
| Used only when Optical Data Type = SpectralAverage
| Back Side is side closest to zone air
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Back Side Beam-Diffuse Visible Solar Reflectance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `back_side_beamdiffuse_visible_solar_reflectance` or None if not set
"""
return self["Back Side Beam-Diffuse Visible Solar Reflectance"]
@back_side_beamdiffuse_visible_solar_reflectance.setter
def back_side_beamdiffuse_visible_solar_reflectance(self, value=None):
""" Corresponds to IDD field `Back Side Beam-Diffuse Visible Solar Reflectance`
"""
self["Back Side Beam-Diffuse Visible Solar Reflectance"] = value
@property
def diffusediffuse_solar_transmittance(self):
"""field `Diffuse-Diffuse Solar Transmittance`
| Used only when Optical Data Type = SpectralAverage
| If this field is autocalculate, then the diffuse-diffuse solar
| transmittance is automatically estimated from other inputs and used
| in subsequent calculations. If this field is zero or positive, then
| the value entered here will be used.
| Units: dimensionless
| Default value: "autocalculate"
| value <= 1.0
Args:
value (float or "Autocalculate"): value for IDD Field `Diffuse-Diffuse Solar Transmittance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autocalculate": the value of `diffusediffuse_solar_transmittance` or None if not set
"""
return self["Diffuse-Diffuse Solar Transmittance"]
@diffusediffuse_solar_transmittance.setter
def diffusediffuse_solar_transmittance(self, value="autocalculate"):
""" Corresponds to IDD field `Diffuse-Diffuse Solar Transmittance`
"""
self["Diffuse-Diffuse Solar Transmittance"] = value
@property
def front_side_diffusediffuse_solar_reflectance(self):
"""field `Front Side Diffuse-Diffuse Solar Reflectance`
| Used only when Optical Data Type = SpectralAverage
| If this field is autocalculate, then the front diffuse-diffuse solar
| reflectance is automatically estimated from other inputs and used in
| subsequent calculations. If this field is zero or positive, then the value
| entered here will be used. Front Side is side closest to outdoor air.
| Units: dimensionless
| Default value: "autocalculate"
| value <= 1.0
Args:
value (float or "Autocalculate"): value for IDD Field `Front Side Diffuse-Diffuse Solar Reflectance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autocalculate": the value of `front_side_diffusediffuse_solar_reflectance` or None if not set
"""
return self["Front Side Diffuse-Diffuse Solar Reflectance"]
@front_side_diffusediffuse_solar_reflectance.setter
def front_side_diffusediffuse_solar_reflectance(
self,
value="autocalculate"):
""" Corresponds to IDD field `Front Side Diffuse-Diffuse Solar Reflectance`
"""
self["Front Side Diffuse-Diffuse Solar Reflectance"] = value
@property
def back_side_diffusediffuse_solar_reflectance(self):
"""field `Back Side Diffuse-Diffuse Solar Reflectance`
| Used only when Optical Data Type = SpectralAverage
| If this field is autocalculate, then the back diffuse-diffuse solar
| reflectance is automatically estimated from other inputs and used in
| subsequent calculations. If this field is zero or positive, then the value
| entered here will be used. Back side is side closest to indoor air.
| Units: dimensionless
| Default value: "autocalculate"
| value <= 1.0
Args:
value (float or "Autocalculate"): value for IDD Field `Back Side Diffuse-Diffuse Solar Reflectance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autocalculate": the value of `back_side_diffusediffuse_solar_reflectance` or None if not set
"""
return self["Back Side Diffuse-Diffuse Solar Reflectance"]
@back_side_diffusediffuse_solar_reflectance.setter
def back_side_diffusediffuse_solar_reflectance(
self,
value="autocalculate"):
""" Corresponds to IDD field `Back Side Diffuse-Diffuse Solar Reflectance`
"""
self["Back Side Diffuse-Diffuse Solar Reflectance"] = value
@property
def diffusediffuse_visible_solar_transmittance(self):
"""field `Diffuse-Diffuse Visible Solar Transmittance`
| Used only when Optical Data Type = SpectralAverage
| This input field is not used currently.
| Units: dimensionless
| Default value: "autocalculate"
| value <= 1.0
Args:
value (float or "Autocalculate"): value for IDD Field `Diffuse-Diffuse Visible Solar Transmittance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autocalculate": the value of `diffusediffuse_visible_solar_transmittance` or None if not set
"""
return self["Diffuse-Diffuse Visible Solar Transmittance"]
@diffusediffuse_visible_solar_transmittance.setter
def diffusediffuse_visible_solar_transmittance(
self,
value="autocalculate"):
""" Corresponds to IDD field `Diffuse-Diffuse Visible Solar Transmittance`
"""
self["Diffuse-Diffuse Visible Solar Transmittance"] = value
@property
def front_side_diffusediffuse_visible_solar_reflectance(self):
"""field `Front Side Diffuse-Diffuse Visible Solar Reflectance`
| Used only when Optical Data Type = SpectralAverage
| This input field is not used currently.
| Units: dimensionless
| Default value: "autocalculate"
| value <= 1.0
Args:
value (float or "Autocalculate"): value for IDD Field `Front Side Diffuse-Diffuse Visible Solar Reflectance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autocalculate": the value of `front_side_diffusediffuse_visible_solar_reflectance` or None if not set
"""
return self["Front Side Diffuse-Diffuse Visible Solar Reflectance"]
@front_side_diffusediffuse_visible_solar_reflectance.setter
def front_side_diffusediffuse_visible_solar_reflectance(
self,
value="autocalculate"):
""" Corresponds to IDD field `Front Side Diffuse-Diffuse Visible Solar Reflectance`
"""
self["Front Side Diffuse-Diffuse Visible Solar Reflectance"] = value
@property
def back_side_diffusediffuse_visible_solar_reflectance(self):
"""field `Back Side Diffuse-Diffuse Visible Solar Reflectance`
| Used only when Optical Data Type = SpectralAverage
| This input field is not used currently.
| Units: dimensionless
| Default value: "autocalculate"
| value <= 1.0
Args:
value (float or "Autocalculate"): value for IDD Field `Back Side Diffuse-Diffuse Visible Solar Reflectance`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autocalculate": the value of `back_side_diffusediffuse_visible_solar_reflectance` or None if not set
"""
return self["Back Side Diffuse-Diffuse Visible Solar Reflectance"]
@back_side_diffusediffuse_visible_solar_reflectance.setter
def back_side_diffusediffuse_visible_solar_reflectance(
self,
value="autocalculate"):
""" Corresponds to IDD field `Back Side Diffuse-Diffuse Visible Solar Reflectance`
"""
self["Back Side Diffuse-Diffuse Visible Solar Reflectance"] = value
@property
def infrared_transmittance_applies_to_front_and_back(self):
"""field `Infrared Transmittance (applies to front and back)`
| The long-wave hemispherical transmittance of the glazing.
| Assumed to be the same for both sides of the glazing.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Infrared Transmittance (applies to front and back)`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `infrared_transmittance_applies_to_front_and_back` or None if not set
"""
return self["Infrared Transmittance (applies to front and back)"]
@infrared_transmittance_applies_to_front_and_back.setter
def infrared_transmittance_applies_to_front_and_back(self, value=None):
"""Corresponds to IDD field `Infrared Transmittance (applies to front
and back)`"""
self["Infrared Transmittance (applies to front and back)"] = value
@property
def front_side_infrared_emissivity(self):
"""field `Front Side Infrared Emissivity`
| The front side long-wave hemispherical emissivity of the glazing.
| Units: dimensionless
| Default value: 0.84
| value < 1.0
Args:
value (float): value for IDD Field `Front Side Infrared Emissivity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `front_side_infrared_emissivity` or None if not set
"""
return self["Front Side Infrared Emissivity"]
@front_side_infrared_emissivity.setter
def front_side_infrared_emissivity(self, value=0.84):
"""Corresponds to IDD field `Front Side Infrared Emissivity`"""
self["Front Side Infrared Emissivity"] = value
@property
def back_side_infrared_emissivity(self):
"""field `Back Side Infrared Emissivity`
| The back side long-wave hemispherical emissivity of the glazing.
| Units: dimensionless
| Default value: 0.84
| value < 1.0
Args:
value (float): value for IDD Field `Back Side Infrared Emissivity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `back_side_infrared_emissivity` or None if not set
"""
return self["Back Side Infrared Emissivity"]
@back_side_infrared_emissivity.setter
def back_side_infrared_emissivity(self, value=0.84):
"""Corresponds to IDD field `Back Side Infrared Emissivity`"""
self["Back Side Infrared Emissivity"] = value
class WindowMaterialGapEquivalentLayer(DataObject):
""" Corresponds to IDD object `WindowMaterial:Gap:EquivalentLayer`
Gas material properties that are used in Windows Equivalent Layer
References only WindowMaterial:Gas properties
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'gas type',
{'name': u'Gas Type',
'pyname': u'gas_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'AIR',
u'ARGON',
u'KRYPTON',
u'XENON',
u'CUSTOM'],
'autocalculatable': False,
'type': 'alpha'}),
(u'thickness',
{'name': u'Thickness',
'pyname': u'thickness',
'minimum>': 0.0,
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm'}),
(u'gap vent type',
{'name': u'Gap Vent Type',
'pyname': u'gap_vent_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Sealed',
u'VentedIndoor',
u'VentedOutdoor'],
'autocalculatable': False,
'type': 'alpha'}),
(u'conductivity coefficient a',
{'name': u'Conductivity Coefficient A',
'pyname': u'conductivity_coefficient_a',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'W/m-K'}),
(u'conductivity coefficient b',
{'name': u'Conductivity Coefficient B',
'pyname': u'conductivity_coefficient_b',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'W/m-K2'}),
(u'conductivity coefficient c',
{'name': u'Conductivity Coefficient C',
'pyname': u'conductivity_coefficient_c',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'W/m-K3'}),
(u'viscosity coefficient a',
{'name': u'Viscosity Coefficient A',
'pyname': u'viscosity_coefficient_a',
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'kg/m-s'}),
(u'viscosity coefficient b',
{'name': | |
:param val_c_UnitID: If op_UnitID is specified, this value will be compared to the value in UnitID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_UnitID must be specified if op_UnitID is specified.
:type val_c_UnitID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_Vendor: The operator to apply to the field Vendor. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Vendor: The vendor devices against which to try this credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_Vendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_Vendor: If op_Vendor is specified, the field named in this input will be compared to the value in Vendor using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Vendor must be specified if op_Vendor is specified.
:type val_f_Vendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_Vendor: If op_Vendor is specified, this value will be compared to the value in Vendor using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Vendor must be specified if op_Vendor is specified.
:type val_c_Vendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` PasswordID
:param sort: The data field(s) to use for sorting the output. Default is PasswordID. Valid values are PasswordID, UnitID, Protocol, Type, Origination, HitCount, Vendor, SNMPAuthProto, SNMPPrivProto, Priority, PasswordSecure, SNMPAuthPWSecure, SNMPPrivPWSecure, SecureVersion.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each SNMPCredential. Valid values are PasswordID, UnitID, Protocol, Type, Origination, HitCount, Vendor, SNMPAuthProto, SNMPPrivProto, Priority, PasswordSecure, SNMPAuthPWSecure, SNMPPrivPWSecure, SecureVersion. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return snmp_credentials: An array of the SNMPCredential objects that match the specified input criteria.
:rtype snmp_credentials: Array of SNMPCredential
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
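# Hedged usage sketch (the client/broker construction is illustrative; only the broker
# methods documented above are taken from this module):
#
#   broker = client.get_broker('SNMPCredential')                      # assumed helper
#   creds = broker.find(op_Vendor='=', val_c_Vendor='Cisco', limit=50)
#   detail = broker.show(PasswordID=creds[0].PasswordID)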
def show(self, **kwargs):
"""Shows the details for the specified snmp credential.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param PasswordID: The internal NetMRI identifier for this credential.
:type PasswordID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return snmp_credential: The snmp credential identified by the specified PasswordID.
:rtype snmp_credential: SNMPCredential
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def create(self, **kwargs):
"""Creates a new snmp credential.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param Origination: Indicates the source and use of the credential. 'User' indicates this is a user-entered password. 'Default' indicates that these are used by the Weak Password issue, and may be modified or removed on upgrade. 'Vendor' indicates a password tested in the Vendor Default Credential Guessing, and may be modified or removed on upgrade. 'Vendor (User Add)' is a user-added vendor default credential.
:type Origination: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param PasswordSecure: (alias Password) Is the community string.
:type PasswordSecure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param Priority: The priority order in which to attempt this credential.
:type Priority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param Protocol: The protocol for which to use this password.
:type Protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param SNMPAuthPWSecure: (alias SNMPAuthPW) Is the SNMPv3 authentication protocol password.
:type SNMPAuthPWSecure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param SNMPAuthProto: The SNMPv3 authentication protocol to use with this credential.
:type SNMPAuthProto: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param SNMPPrivPWSecure: (alias SNMPPrivPW) Is the SNMPv3 privacy protocol password.
:type SNMPPrivPWSecure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param SNMPPrivProto: The SNMPv3 privacy protocol to use with this credential.
:type SNMPPrivProto: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param Type: Whether this is a read or read/write community.
:type Type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param UnitID: The internal NetMRI identifier for the NetMRI collector on which the credential is configured.
:type UnitID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param Vendor: The vendor devices against which to try this credential.
:type Vendor: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the newly created snmp credential.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the newly created snmp credential.
:rtype | |
#---Graph plotting script is written inline in data processing function
#---Author: <NAME>
import re, calendar, csv, datetime #re is for regex
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches #for matching colour of legend and dictionary
from matplotlib.ticker import ScalarFormatter #classes for configuring tick locating and formatting
from datetime import datetime
from datetime import timedelta
from pathlib import Path #Note to self: library Path is capitalized, import path instead of Path would incur error
budget = [ #Change this list manually in code if need arises
180000, #July 2020
360000, #Aug 2020
520000, #Sep 2020
700000, #Oct 2020
900000, #Nov 2020
110000, #Dec 2020
130000, #Jan 2021
70000, #Feb 2021
100000, #Mar 2021
130000, #Apr 2021
100000, #May 2021
800000, #June 2021
]
#---Dictionary for sale person and responsible region mapping
region_dict = {
"Adam":"Europe",
"Betty":"India & MENA",
"Charlie":"China”,
"David":"Japan",
"Elsa":"ANZ",
"Fiona":"North America"
}
all_stages = ["Prospect", "Quote Sent", "Quote Accepted", "Contract in Negotiation", "Contract Executed"]
selection = ['person', 'region', 'stage', 'product']
selection_text = ["0 for sale person", "1 for region", "2 for deal stage", "3 for product category"]
avail_colours = {'ANZ': '#b3e0ff', 'China':'#A72B2A', 'Europe':'#34657F', 'India & MENA': '#D7D2CB', 'Japan': '#382F2D', 'North America':'#A9794D'}
pct_colours = {'ANZ': '#000000', 'China': '#FFFFFF', 'Europe': '#FFFFFF', 'India & MENA': '#000000', 'Japan': '#FFFFFF', 'North America':'#FFFFFF'}
#---On screen display prettifier
space, star, hash, arrow = " ","*", "#", ">"
space, star, hash, arrow = (x *80 for x in [space, star, hash, arrow])
blankline = [""]
params = {'legend.fontsize': 'small',
'legend.labelspacing': 0.35,
'legend.title_fontsize': 'small',
'axes.labelsize': 'small',
'axes.titlesize':'large',
'axes.titleweight':'bold',
'axes.titlecolor':'slategrey',
'font.size': 7,
'figure.facecolor': 'w',
'savefig.dpi': 300,
}
plt.rcParams.update(params)
#Create lists of all months in FY
m1, m2 = list(range(7, 13)), list(range(1, 7))
fy2 = input("\nEnter the Fiscal Year in concern: \n>>")
fy1 = str(int(fy2)-1)
yearmonth = [] #yearmonth list contains all months in this FY in yyyymm format as strings
month_end = [] #month end list contains all month-end-date in this FY in dd-mm-yyyy format as strings
#---Use calendar module to find the last day of each month of this FY
for month in m1: #---Purpose of this is to prevent excel from changing June 2020 to 1/6/2020 by default
mm = str(month).zfill(2) #zfill pads the string with leading zeros to the requested width
yearmonth.append(fy1 + mm)
findday = calendar.monthrange(int(fy1),int(mm)) #find the last day of the month
me = str(findday[1]) + "-" + str(mm) + "-" + str(fy1)
month_end.append(me)
for month in m2:
mm = str(month).zfill(2)
yearmonth.append(fy2 + str(month).zfill(2))
findday = calendar.monthrange(int(fy2),int(mm))
me = str(findday[1]) + "-" + str(mm) + "-" + str(fy2)
month_end.append(me)
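#---Example: for fy2 = "2021" the lists built above are
#--- yearmonth = ["202007", "202008", ..., "202106"]
#--- month_end = ["31-07-2020", "31-08-2020", ..., "30-06-2021"]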
#---Definition of function StackedBarChart
def StackedBarChart(plot_data, title, patches, fc, gc, graph_name):
#---Convert source data to pandas DataFrame
df = pd.DataFrame(data=plot_data)
#---Convert monthend to mmm-yyyy format for axis labels
for ym in yearmonth:
df = df.rename(index={ym: datetime.strptime(str(ym), '%Y%m').strftime('%b %Y')})
fig, axs = plt.subplots(figsize=(7,5))
x = df.index #month labels (e.g. 'Jul 2020', ..., 'Jun 2021') used as x-axis categories
indexes = np.argsort(df.values).T #.T means tranpose, argsort returns the index in value sorted order
heights = np.sort(df.values).T
order = -1
bottoms = heights[::order].cumsum(axis=0) #bottoms contain the start point in y-axis after each value is added
bottoms = np.insert(bottoms, 0, np.zeros(len(bottoms[0])), axis=0) #add a row contains all zero, numpy.insert(arr, obj, values, axis=None)[source]
for btms, (idxs, vals) in enumerate(list(zip(indexes, heights))[::order]):
mps = np.take(np.array(df.columns), idxs) #df.columns contains the headers of the original dataframe
#---np.take returns elements from an array at the given indices,
#---so mps holds the column (region) names in value-sorted order and is used below to pick matching colours
axs.bar(x, height=vals, bottom=bottoms[btms], width=0.65, color=[avail_colours[m] for m in mps])
axs.set_title(title, y=1.01)
axs.legend(handles=patches)
axs.set_facecolor(fc)
axs.grid(axis='y', color=gc)
axs.set_axisbelow(True) #put gridline behind the bars instead of in front of them
axs.set_ylim(bottom=0, top=2200000) #y axis range
axs.yaxis.set_major_formatter(matplotlib.ticker.StrMethodFormatter('US${x:,.0f}')) #convert y-axis values to currency format
plt.xticks(rotation=45) #rotate label by 45 deg
fig.subplots_adjust(left=0.145, right=0.949, top=0.863, bottom=0.152)
fig.savefig(graph_name, transparent=False)
#---End of definition of StackedBarChart
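#---Hedged call sketch for StackedBarChart (plot_data, title and file name are illustrative;
#---patches would normally be built from avail_colours with mpatches.Patch):
# patches = [mpatches.Patch(color=c, label=r) for r, c in avail_colours.items()]
# StackedBarChart(plot_data={"Europe": dict(zip(yearmonth, [0]*12)), "Japan": dict(zip(yearmonth, [0]*12))},
#                 title="Revenue by Region", patches=patches, fc="white", gc="lightgrey",
#                 graph_name="region_view.png")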
#---Function definition of Monthlyaverage
def MonthlyAverage(sc, yearmonth):
output = []
m_total, rwm_total = dict(), dict()
for ym in yearmonth:
m_total[ym], rwm_total[ym] = 0, 0
for rec in sc:
row_monthly = []
edyyyy, sdyyyy, edmm, sdmm = int(rec['edyyyy']), int(rec['sdyyyy']), int(rec['edmm']), int(rec['sdmm'])
for thisym in yearmonth:
thisyear = thisym[0:4]
thismonth = thisym[4:]
if edyyyy < int(thisyear) or (edyyyy == int(thisyear) and edmm < int(thismonth)):#past deals
income, riskincome = 0, 0
elif sdyyyy > int(thisyear) or (sdyyyy == int(thisyear) and sdmm > int(thismonth)): #future deals
income, riskincome = 0, 0
else:
income = rec['spc']
riskincome = rec['rspc']
m_total[thisym] += income
rwm_total[thisym] += riskincome
row_monthly.append(income) #list contains averaged monthly sale from this deal for the entire FY
row_monthly.append(sum(row_monthly)) #total income from this deal in this FY
output.append(row_monthly) #add calculated results to final output by row
global monthly_total, rwmonthly_total
monthly_total, rwmonthly_total = [], []
monthly_total = list(m_total.values())
monthly_total.append(sum(monthly_total)) #monthly total of all deals based on averaged income i.e. vertical total
rwmonthly_total = list(rwm_total.values())
rwmonthly_total.append(sum(rwmonthly_total)) #rw-monthly total of all deals based on averaged rw-income
return output
#End of function by_Monthlyaverage
#---Function definition of MonthlyAveragePrint
def MonthlyAveragePrint(source, m_avg, m_avg_head):
keep_keys = ['id', 'deal', 'person', 'region', 'stage', 'company', 'product', 'quote', 'potential', 'amt', 'likelihood', 'sd', 'ed']
count = 0
m_avg_head.extend(month_end)
m_avg_head.append("FY Total")
output = [m_avg_head]
while count < len(m_avg):
line = []
for key, value in source[count].items(): #get key value pairs of each row
for item in keep_keys: #add value to current row of output if the key is in the keep list
if item == key:
line.append(value)
line.extend(m_avg[count]) #combine data of this row in the source file with calculated monthly averaged sale amount/income
output.append(line) #add this row of output to the list for output
count += 1
return output
#End of function MonthlyAveragePrint
#---Function defintion of ByFiscalYear
def ByFiscalYear(sc):
counter, rwsale, sale = (dict(),dict(),dict())
for year in years_found:
counter[year], rwsale[year], sale[year] = 0, 0, 0
for rec in sc:
if int(rec['sdmm']) < 7:
counter[rec['sdyyyy']] += 1
rwsale[rec['sdyyyy']] += rec['rw_amt']
sale[rec['sdyyyy']] += rec['amt']
else:
counter[str(int(rec['sdyyyy'])+1)] += 1
rwsale[str(int(rec['sdyyyy'])+1)] += rec['rw_amt']
sale[str(int(rec['sdyyyy'])+1)] += rec['amt']
ct, rws, s, avg = (["Count"], ["Risk Weighted$"], ["$"], ["Average deal size"])
for year in years_found:
ct.append(counter[year])
rws.append(rwsale[year])
s.append(sale[year])
if sale[year] != 0:
avg.append(sale[year]/counter[year])
else:
avg.append(0)
title = "By Fiscal Year (All Deals)"
cy_head = years_found[:]
cy_head.insert(0,"")
cy_head.append("Total")
t_ct = sum(ct[1:]) #the first element in list ct is the row header, so we need to slice the list, if we want to get the sum of all elements
t_s = sum(s[1:]) #t_ct is total count, t_s is total sale amount. Defining as new variables as it wil be reused for calculation of avg size of deal
ct.append(t_ct)
rws.append(sum(rws[1:]))
s.append(t_s)
avg.append(t_s/t_ct)
FiscalChart(ct[1:-1], avg[1:-1], t_s, sum(rws[1:-1]), rws[1:-1], s[1:-1], years_found)
output = [blankline, [title], cy_head, ct, rws, s, avg]
return output
#---End of function ByFiscalYear
def FiscalChart(count, average, sum, rw_sum, weighted, non_weighted, years_found):
width = 0.5
data = [weighted, non_weighted]
df = pd.DataFrame(data=data, columns=years_found, index=['Risk Weighted US$', 'Revenue US$']).T
ax = df[['Risk Weighted US$', 'Revenue US$']].plot(kind='bar', width=width, color=['#D7D2CB', '#A9794D'])
ax.set_title('Revenue View by Fiscal Year', pad=15)
ax.legend(loc='best')
ax.grid(axis='y')
ax.set_axisbelow(True) #put gridline behind the bars instead of in front of them
ax.yaxis.set_major_formatter(matplotlib.ticker.StrMethodFormatter('US${x:,.0f}')) #convert y-axis values to currency format
plt.xticks(rotation=45) #rotate label by 45 deg
plt.tight_layout(pad=3, h_pad=None, w_pad=None)
plt.savefig('fiscalyear_view.png')
return
#---Function definition of ByLikelihood
def ByLikelihood(sc, lp, hp):
output = []
low_count, mid_count, high_count = 0, 0, 0
low_sum, mid_sum, high_sum = 0, 0, 0
low_rw_sum, mid_rw_sum, high_rw_sum = 0, 0, 0
for rec in sc:
if rec['likelihood'] < float(lp):
low_count += 1
low_sum += rec['amt']
low_rw_sum += rec['rw_amt']
elif rec['likelihood'] > float(hp):
high_count += 1
high_sum += rec['amt']
high_rw_sum += rec['rw_amt']
else:
mid_count += 1
mid_sum += rec['amt']
mid_rw_sum += rec['rw_amt']
total_count = low_count + mid_count + high_count
total_sum = low_sum + mid_sum + high_sum
total_rw_sum = low_rw_sum + mid_rw_sum + high_rw_sum
#---Formatting of data for output
lper = "<" + str(int(float(lp) * 100)) + "%"
hper = ">" + str(int(float(hp) * 100)) + "%"
mper = str(int(float(lp) * 100)) + "-" + str(int(float(hp) * 100)) + "%"
title = "By Likelihood (All Deals)"
output = [blankline,[title]]
output.append(["", lper, mper, hper, "Total"])
output.append(["Count", low_count, mid_count, high_count, total_count])
output.append(["Risk Weighted$", low_rw_sum, mid_rw_sum, high_rw_sum, total_rw_sum])
output.append(["$", low_sum, mid_sum, high_sum, total_sum])
return output
#---End of function ByLikelihood
#---Function definition of ByDealStage
def ByDealStage(sc):
count_row, rwm_row, m_row = ["Count"],["Risk Weighted$"],["$"] #row header
ds_count, ds_m, ds_rwm = dict(), dict(), dict()
title = "By Deal Stage (All Deals)"
for rec in sc:
stage = rec['stage']
if stage not in | |
return True
return False
class Text(Control):
"""Display some text in the UI."""
def __init__(self, value, **kwargs):
super().__init__(**kwargs)
self.value = value
self.can_focus = False
@property
def text(self):
return self.value
def render(self, app):
return Display(self.value, fg=self.fg, bg=self.bg)
def propagate_focus(ev, controls, layer, keys_back, keys_fwd):
"""Propagate focus events forwards and backwards through a list of controls."""
if ev.type == 'key' and ev.key in keys_back + keys_fwd:
back = ev.key in keys_back
current = ev.app.find_ancestor(ev.last, controls)
if not current:
return False
i = controls.index(current)
while 0 <= i < len(controls) and ev.propagating:
i += -1 if back else 1
if 0 <= i < len(controls) and controls[i].enter_focus(ev.key, ev.app):
ev.stop()
return True
return False
class Panel(Control):
"""Contains other controls vertically, surrounds them with a box."""
def __init__(self, controls, caption=None, under_script=None, **kwargs):
super().__init__(**kwargs)
self.controls = controls
self.caption = caption
self.under_script = under_script
def render(self, app):
return Box(Vertical([c.render(app) for c in self.controls]),
caption=self.caption.render(app) if self.caption else None,
under_script=self.under_script.render(app) if self.under_script else None)
def on_event(self, ev):
propagate_focus(ev, self.controls, ev.app.layer(self),
[curses.KEY_UP, SHIFT_TAB],
[curses.KEY_DOWN, curses.ascii.TAB])
class Stacked(Control):
"""Just lays out other controls vertically, no decoration."""
def __init__(self, controls, **kwargs):
super().__init__(**kwargs)
self.controls = controls
def render(self, app):
return Vertical([c.render(app) for c in self.controls])
def on_event(self, ev):
propagate_focus(ev, self.controls, ev.app.layer(self),
[curses.KEY_UP, SHIFT_TAB],
[curses.KEY_DOWN, curses.ascii.TAB])
class Option(object):
"""Helper class to attach data to a string.
This object contains one value, but stringifies to a user-chosen
string, making it ideal for separating symbolic values from human
representation in selection lists/combo-boxes.
"""
def __init__(self, value, caption=None):
self.value = value
self.caption = caption or str(value)
def __str__(self):
return self.caption
def __eq__(self, other):
if not isinstance(other, Option):
return self.value == other
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.value)
def __repr__(self):
return 'Option(%r, %r)' % (self.value, self.caption)
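# Hedged example of Option inside a selection control (values are illustrative):
#
#   choices = [Option('us-east-1', 'US East (N. Virginia)'),
#              Option('eu-west-1', 'EU (Ireland)')]
#   lst = SelectList(choices, index=0)
#   lst.value   # -> 'us-east-1', while the UI shows the human-readable caption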
class Labeled(Control):
"""Applies an offset to a control, fill it with a text label."""
def __init__(self, label, control, **kwargs):
super().__init__(**kwargs)
assert control
self.label = label
self.control = control
def render(self, app):
fg = white if app.contains_focus(self) else green
attr = curses.A_BOLD if app.contains_focus(self) else 0
return Horizontal([Display(self.label, min_width=16, fg=fg, attr=attr),
self.control.render(app)])
def children(self):
return [self.control]
class SelectList(Control):
"""Selection list.
Displays a set of values with an initial selected index, and lets the user
change the selection.
The `selectList.value` property contains the selected value.
"""
def __init__(self, choices, index=0, width=30, height=-1, show_captions_at=0, **kwargs):
super().__init__(**kwargs)
if height == -1:
height = len(choices)
self.choices = choices
self.index = index
self.width = width
self.height = height
self.scroll_offset = max(0, min(self.index, len(self.choices) - height))
self.can_focus = True
self.show_captions_at = show_captions_at
def adjust(self, d):
"""Scroll by the given delta through the options."""
if len(self.choices) > 1:
self.index = (self.index + d + len(self.choices)) % len(self.choices)
self.scroll_offset = min(self.scroll_offset, self.index)
self.scroll_offset = max(self.scroll_offset, self.index - self.height + 1)
def sanitize_index(self):
self.index = min(max(0, self.index), len(self.choices) - 1)
return 0 <= self.index < len(self.choices)
@property
def value(self):
"""Return the currently selected value."""
return get_value(self.choices[self.index])
@value.setter
def value(self, value):
"""Set the currently selected value.
Reset to 0 if not in the list.
"""
self.index = max(0, self.choices.index(value))
def _render_line(self, line, selected):
attr = curses.A_STANDOUT if selected else 0
if self.show_captions_at and isinstance(line, Option):
rem = self.width - self.show_captions_at
return Horizontal([
Display(str(line.value)[:self.show_captions_at], min_width=self.show_captions_at, attr=attr),
Display(str(line.caption)[:rem], min_width=rem, attr=attr, fg=cyan if not selected else white)
])
return Display(line, min_width=self.width, attr=attr)
def render(self, app):
self.sanitize_index()
lines = self.choices[self.scroll_offset:self.scroll_offset + self.height]
lines.extend([''] * (self.height - len(lines)))
self.last_render = Vertical(
[self._render_line(l, i + self.scroll_offset == self.index) for i, l in enumerate(lines)])
# FIXME: Scroll bar
return self.last_render
def on_event(self, ev):
if ev.type == 'key':
change, self.index, self.scroll_offset = handle_scroll_key(ev.key, self.index, len(self.choices),
self.scroll_offset, self.last_render.rect.h)
if change:
ev.stop()
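# Sketch of typical SelectList usage (values are hypothetical): choices can be
# plain strings or Option objects; with show_captions_at set, the value is shown
# in a left column and the caption in a right one.
#
#   sizes = SelectList([Option('s', 'small'), Option('m', 'medium'),
#                       Option('l', 'large')], index=1, show_captions_at=4)
#   sizes.value      # 'm'
#   sizes.adjust(1)  # move the selection down one entry (wraps around)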
class SelectDate(Control):
"""A Calendar control for selecting a date.
`.value` contains the date as a datetime.datetime.
    `.date` contains the date as a datetime.date.
"""
def __init__(self, value=None, **kwargs):
super().__init__(**kwargs)
self.can_focus = True
self.value = value or datetime.datetime.now()
self.controls = []
def _render_month_cell(self, cell):
if not cell:
return Display('')
attr = 0
if cell == self.value.day:
attr = curses.A_STANDOUT if self.has_focus else curses.A_UNDERLINE
return Display(str(cell), attr=attr)
@property
def date(self):
return self.value.date()
def render(self, app):
self.has_focus = app.contains_focus(self)
cal_data = calendar.monthcalendar(self.value.year, self.value.month)
cal_header = [[Display(t, fg=green) for t in calendar.weekheader(3).split(' ')]]
assert (len(cal_data[0]) == len(cal_header[0]))
cells = [[self._render_month_cell(cell) for cell in row]
for row in cal_data]
month_name = Display('%s, %s' % (self.value.strftime('%B'), self.value.year))
grid = Grid(cal_header + cells, align_right=True)
return Vertical([month_name, grid])
def on_event(self, ev):
if ev.type == 'key':
if ev.what == ord('t'):
self.value = datetime.datetime.now()
ev.stop()
if ev.what == curses.KEY_LEFT:
self.value += datetime.timedelta(days=-1)
ev.stop()
if ev.what == curses.KEY_RIGHT:
self.value += datetime.timedelta(days=1)
ev.stop()
if ev.what == curses.KEY_UP:
self.value += datetime.timedelta(weeks=-1)
ev.stop()
if ev.what == curses.KEY_DOWN:
self.value += datetime.timedelta(weeks=1)
ev.stop()
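# Key bindings handled above: 't' jumps to today, Left/Right move the selection
# by one day, Up/Down by one week. The selected day is rendered standout while
# the control has focus and underlined otherwise.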
class Composite(Control):
"""Horizontal composition of other controls."""
def __init__(self, controls, margin=0, **kwargs):
super().__init__(**kwargs)
self.controls = controls
self.margin = margin
def render(self, app):
m = Display(' ' * self.margin)
xs = [c.render(app) for c in self.controls]
rendered = list(itertools.chain(*list(zip(xs, itertools.repeat(m)))))
return Horizontal(rendered)
def on_event(self, ev):
propagate_focus(ev, self.controls, ev.app.layer(self),
[curses.KEY_LEFT, SHIFT_TAB],
[curses.KEY_RIGHT, curses.ascii.TAB])
def _focus_order(self, key):
"""If we enter the control from the bottom, still focus the first element."""
return (reversed
if key in [curses.KEY_LEFT, SHIFT_TAB] else
ident)
class Popup(Control):
"""Show a modal popup that contains another control.
After instantiating this object, call `popup.show(app)`.
The popup is automatically removed when an ENTER or ESC
keypress escapes the focused control, but on_close will
only be called if ENTER was used to remove the popup.
"""
def __init__(self, inner, on_close, x=-1, y=-1, caption='', under_script='', **kwargs):
super().__init__(**kwargs)
self.x = x
self.y = y
self.inner = inner
self.on_close = on_close
self.caption = caption
self.under_script = under_script
def render(self, app):
inner = Box(self.inner.render(app),
x_fill=False,
caption=Display(self.caption),
under_script=Display(self.under_script))
if self.x == -1 or self.y == -1:
return Centered(inner)
return Positioned(inner, x=self.x, y=self.y)
def children(self):
return [self.inner]
def show(self, app):
self.layer = app.push_layer(self)
def on_event(self, ev):
if ev.type == 'key':
if ev.key == curses.ascii.ESC:
self.layer.remove()
ev.stop()
if is_enter(ev):
self.on_close(self, ev.app)
self.layer.remove()
ev.stop()
def edit_popup(app, on_close, value='', caption=''):
"""Show a popup with to edit a value."""
Popup(Edit(value=value, min_size=30, fg=cyan), caption=caption, on_close=on_close).show(app)
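# Illustrative callback for edit_popup (names are hypothetical): on_close receives
# the popup and the app, and the edited text can be read back from the popup's
# inner Edit control (the exact attribute depends on Edit's implementation).
#
#   def rename_done(popup, app):
#       new_name = popup.inner.value  # assumes Edit exposes .value
#       ...
#
#   edit_popup(app, rename_done, value='old name', caption='Rename')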
class Combo(Control):
"""A SelectList in a popup."""
def __init__(self, choices, on_change=None, index=0, **kwargs):
super().__init__(**kwargs)
self._choices = choices
self.on_change = on_change
self.index = index
self.can_focus = True
self.last_combo = None
def sanitize_index(self):
self.index = min(max(0, self.index), len(self.choices) - 1)
return 0 <= self.index < len(self.choices)
@property
def choices(self):
if callable(self._choices):
return self._choices()
return self._choices
@property
def value(self):
if not self.sanitize_index():
return None
return get_value(self.choices[self.index])
@value.setter
def value(self, value):
try:
self.index = max(0, self.choices.index(value))
except ValueError:
self.index = 0
@property
def caption(self):
if not self.sanitize_index():
return '-unset-'
return str(self.choices[self.index])
def render(self, app):
attr = curses.A_STANDOUT if app.contains_focus(self) else 0
self.last_combo = Display(self.caption, attr=attr)
return self.last_combo
def on_event(self, ev):
if ev.type == 'key' and is_enter(ev):
x = max(0, self.last_combo.rect.x - 2)
y = max(0, self.last_combo.rect.y - 1)
Popup(SelectList(self.choices, self.index), self.on_popup_close, x=x, y=y).show(ev.app)
ev.stop()
def on_popup_close(self, popup, app):
if self.index != popup.inner.index:
was = str(self.choices[self.index])
tobe = str(self.choices[popup.inner.index])
self.index = popup.inner.index
if callable(self.on_change):
self.on_change(app, was, tobe)
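# Sketch of Combo usage (illustrative): choices may be a list or a callable that
# returns a list, and on_change is invoked as on_change(app, was, tobe) with the
# stringified old and new selections.
#
#   def level_changed(app, was, tobe):
#       ...
#
#   level = Combo(['debug', 'info', 'warning'], on_change=level_changed, index=1)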
class Toasty(Control):
def __init__(self, text, duration=datetime.timedelta(seconds=3), border=True, **kwargs):
super().__init__(**kwargs)
self.text = text
self.duration = duration
self.border = border
def render(self, app):
inner = Display(self.text, fg=self.fg)
if self.border:
inner = Box(inner, x_fill=False)
return AlignRight(inner)
def show(self, app):
self.layer = app.push_layer(self, modal=False)
app.enqueue(self.duration, self._done)
def _done(self, app):
self.layer.remove()
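# Illustrative: Toasty('Saved', duration=datetime.timedelta(seconds=2)).show(app)
# pushes a non-modal layer that removes itself once the duration has elapsed.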
class DateCombo(Control):
"""A SelectDate in a popup."""
def __init__(self, value=None, **kwargs):
super().__init__(**kwargs)
self.value = value or datetime.datetime.now()
self.can_focus = True
@property
def date(self):
return self.value.date()
def render(self, app):
attr = curses.A_STANDOUT if app.contains_focus(self) else 0
visual = self.value.strftime('%B %d, %Y')
self.last_combo = Display(visual, attr=attr)
return self.last_combo
def on_event(self, ev):
if ev.type == 'key' and is_enter(ev):
x = max(0, self.last_combo.rect.x - 2)
y = max(0, self.last_combo.rect.y - 1)
Popup(SelectDate(self.value), self.on_popup_close, x=x, y=y).show(ev.app)
ev.stop()
def on_popup_close(self, popup, app):
self.value = popup.inner.value
class Time(Composite):
"""A time selection control."""
def __init__(self, value=None, **kwargs):
self.value = value or datetime.datetime.utcnow().time()
now_h = self.value.strftime('%H')
now_m = '%02d' % (int(self.value.minute / 5) * 5)
hours = ['%02d' % h for h in range(0, 24)]
        minutes = ['%02d' % m for m in range(0, 60, 5)]
# -*- coding: utf-8 -*-
"""
This enables parameterizing the contributivity measurements to be performed.
"""
from __future__ import print_function
import bisect
import datetime
from itertools import combinations
from math import factorial
from timeit import default_timer as timer
import numpy as np
from loguru import logger
from scipy.stats import norm
from sklearn.linear_model import LinearRegression
from . import multi_partner_learning, constants
class KrigingModel:
def __init__(self, degre, covariance_func):
self.X = np.array([[]])
self.Y = np.array([[]])
self.cov_f = covariance_func
self.degre = degre
self.beta = np.array([[]])
self.H = np.array([[]])
self.K = np.array([[]])
self.invK = np.array([[]])
def fit(self, X, Y):
self.X = X
self.Y = Y
K = np.zeros((len(X), len(X)))
H = np.zeros((len(X), self.degre + 1))
for i, d in enumerate(X):
for j, b in enumerate(X):
K[i, j] = self.cov_f(d, b)
for j in range(self.degre + 1):
H[i, j] = np.sum(d) ** j
self.H = H
        self.K = K
self.invK = np.linalg.inv(K)
Ht_invK_H = H.transpose().dot(self.invK).dot(H)
self.beta = np.linalg.inv(Ht_invK_H).dot(H.transpose()).dot(self.invK).dot(self.Y)
def predict(self, x):
gx = []
for i in range(self.degre + 1):
gx.append(np.sum(x) ** i)
gx = np.array(gx)
cx = []
for i in range(len(self.X)):
cx.append([self.cov_f(self.X[i], x)])
cx = np.array(cx)
pred = gx.transpose().dot(self.beta) + cx.transpose().dot(self.invK).dot(
self.Y - self.H.dot(self.beta)
)
return pred
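# Minimal usage sketch (illustrative, not from the original code): the kriging
# model needs a covariance function over design points; a squared-exponential
# kernel is a common choice.
#
#   def cov(x1, x2):
#       return np.exp(-np.sum((np.asarray(x1) - np.asarray(x2)) ** 2))
#
#   km = KrigingModel(degre=1, covariance_func=cov)
#   km.fit(np.array([[0.0], [0.5], [1.0]]), np.array([[0.0], [0.4], [1.1]]))
#   km.predict(np.array([0.25]))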
class Contributivity:
def __init__(self, scenario, name=""):
self.name = name
self.scenario = scenario
nb_partners = len(self.scenario.partners_list)
self.contributivity_scores = np.zeros(nb_partners)
self.scores_std = np.zeros(nb_partners)
self.normalized_scores = np.zeros(nb_partners)
self.computation_time_sec = 0.0
self.first_charac_fct_calls_count = 0
self.charac_fct_values = {(): 0}
self.increments_values = [{} for _ in self.scenario.partners_list]
def __str__(self):
computation_time_sec = str(datetime.timedelta(seconds=self.computation_time_sec))
output = "\n" + self.name + "\n"
output += "Computation time: " + computation_time_sec + "\n"
output += (
"Number of characteristic function computed: "
+ str(self.first_charac_fct_calls_count)
+ "\n"
)
output += f"Contributivity scores: {np.round(self.contributivity_scores, 3)}\n"
output += f"Std of the contributivity scores: {np.round(self.scores_std, 3)}\n"
output += f"Normalized contributivity scores: {np.round(self.normalized_scores, 3)}\n"
return output
def not_twice_characteristic(self, subset):
if len(subset) > 0:
subset = np.sort(subset)
if tuple(subset) not in self.charac_fct_values:
# Characteristic_func(permut) has not been computed yet...
# ... so we compute, store, and return characteristic_func(permut)
self.first_charac_fct_calls_count += 1
small_partners_list = np.array([self.scenario.partners_list[i] for i in subset])
if len(small_partners_list) > 1:
mpl = self.scenario._multi_partner_learning_approach(self.scenario,
partners_list=small_partners_list,
is_early_stopping=True,
save_folder=None,
**self.scenario.mpl_kwargs
)
else:
mpl = multi_partner_learning.SinglePartnerLearning(self.scenario,
partner=small_partners_list[0],
is_early_stopping=True,
save_folder=None,
**self.scenario.mpl_kwargs
)
mpl.fit()
self.charac_fct_values[tuple(subset)] = mpl.history.score
# we add the new increments
for i in range(len(self.scenario.partners_list)):
if i in subset:
subset_without_i = np.delete(subset, np.argwhere(subset == i))
if (
tuple(subset_without_i) in self.charac_fct_values
): # we store the new known increments
self.increments_values[i][tuple(subset_without_i)] = (
self.charac_fct_values[tuple(subset)]
- self.charac_fct_values[tuple(subset_without_i)]
)
else:
subset_with_i = np.sort(np.append(subset, i))
if (
tuple(subset_with_i) in self.charac_fct_values
): # we store the new known increments
self.increments_values[i][tuple(subset)] = (
self.charac_fct_values[tuple(subset_with_i)]
- self.charac_fct_values[tuple(subset)]
)
        # else we simply return the characteristic_func(permut) that was already computed
return self.charac_fct_values[tuple(subset)]
# %% Generalization of Shapley Value computation
def compute_SV(self):
start = timer()
logger.info("# Launching computation of Shapley Value of all partners")
# Initialize list of all players (partners) indexes
partners_count = len(self.scenario.partners_list)
partners_idx = np.arange(partners_count)
# Define all possible coalitions of players
coalitions = [
list(j) for i in range(len(partners_idx)) for j in combinations(partners_idx, i + 1)
]
# For each coalition, obtain value of characteristic function...
# ... i.e.: train and evaluate model on partners part of the given coalition
characteristic_function = []
for coalition in coalitions:
characteristic_function.append(self.not_twice_characteristic(coalition))
# Compute Shapley Value for each partner
# We are using this python implementation: https://github.com/susobhang70/shapley_value
# It requires coalitions to be ordered - see README of https://github.com/susobhang70/shapley_value
list_shapley_value = shapley_value(partners_count, characteristic_function)
# Return SV of each partner
self.name = "Shapley"
self.contributivity_scores = np.array(list_shapley_value)
self.scores_std = np.zeros(len(list_shapley_value))
self.normalized_scores = list_shapley_value / np.sum(list_shapley_value)
end = timer()
self.computation_time_sec = end - start
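    # For reference, the Shapley value computed above is, for partner i among the n partners
    # in N with characteristic function v:
    #   SV_i = sum over S subset of N\{i} of |S|! * (n - |S| - 1)! / n! * (v(S U {i}) - v(S)),
    # i.e. the marginal contribution of partner i averaged over all orderings of the partners.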
# %% compute independent raw scores
def compute_independent_scores(self):
start = timer()
logger.info(
"# Launching computation of perf. scores of models trained independently on each partner"
)
# Initialize a list of performance scores
performance_scores = []
# Train models independently on each partner and append perf. score to list of perf. scores
for i in range(len(self.scenario.partners_list)):
performance_scores.append(self.not_twice_characteristic(np.array([i])))
self.name = "Independent scores raw"
self.contributivity_scores = np.array(performance_scores)
self.scores_std = np.zeros(len(performance_scores))
self.normalized_scores = performance_scores / np.sum(performance_scores)
end = timer()
self.computation_time_sec = end - start
# %% compute Shapley values with the truncated Monte-carlo method
def truncated_MC(self, sv_accuracy=0.01, alpha=0.9, truncation=0.05):
"""Return the vector of approximated Shapley value corresponding to a list of partner and
a characteristic function using the truncated monte-carlo method."""
start = timer()
n = len(self.scenario.partners_list)
# Characteristic function on all partners
characteristic_all_partners = self.not_twice_characteristic(np.arange(n))
if n == 1:
self.name = "<NAME>"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
contributions = np.array([[]])
permutation = np.zeros(n) # Store the current permutation
t = 0
q = norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
# Check if the length of the confidence interval
# is below the value of sv_accuracy*characteristic_all_partners
while (
t < 100 or t < q ** 2 * v_max / sv_accuracy ** 2
):
t += 1
if t == 1:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
permutation = np.random.permutation(n) # Store the current permutation
char_partnerlists = np.zeros(
n + 1
) # Store the characteristic function on each ensemble built with the first elements of the permutation
char_partnerlists[-1] = characteristic_all_partners
for j in range(n):
# here we suppose the characteristic function is 0 for the empty set
if abs(characteristic_all_partners - char_partnerlists[j]) < truncation:
char_partnerlists[j + 1] = char_partnerlists[j]
else:
char_partnerlists[j + 1] = self.not_twice_characteristic(
permutation[: j + 1]
)
contributions[-1][permutation[j]] = (
char_partnerlists[j + 1] - char_partnerlists[j]
)
v_max = np.max(np.var(contributions, axis=0))
sv = np.mean(contributions, axis=0)
self.name = "TMC Shapley"
self.contributivity_scores = sv
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
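    # Note on the stopping rule above: with t sampled permutations and v_max the largest
    # per-partner variance of the contributions, the half-width of the normal confidence
    # interval is roughly q * sqrt(v_max / t); requiring it to be below sv_accuracy yields
    # t >= q**2 * v_max / sv_accuracy**2, with a minimum of 100 permutations enforced.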
# %% compute Shapley values with the truncated Monte-carlo method with a small bias correction
def interpol_TMC(self, sv_accuracy=0.01, alpha=0.9, truncation=0.05):
"""Return the vector of approximated Shapley value corresponding to a list of partner and a characteristic
function using the interpolated truncated monte-carlo method."""
start = timer()
n = len(self.scenario.partners_list)
# Characteristic function on all partners
characteristic_all_partners = self.not_twice_characteristic(np.arange(n))
if n == 1:
self.name = "ITMCS"
self.contributivity_scores = np.array([characteristic_all_partners])
self.scores_std = np.array([0])
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
else:
contributions = np.array([[]])
permutation = np.zeros(n) # Store the current permutation
t = 0
q = norm.ppf((1 - alpha) / 2, loc=0, scale=1)
v_max = 0
while (
t < 100 or t < q ** 2 * v_max / (sv_accuracy) ** 2
): # Check if the length of the confidence interval
# is below the value of sv_accuracy*characteristic_all_partners
t += 1
if t == 1:
contributions = np.array([np.zeros(n)])
else:
contributions = np.vstack((contributions, np.zeros(n)))
permutation = np.random.permutation(n) # Store the current permutation
char_partnerlists = np.zeros(
n + 1
) # Store the characteristic function on each ensemble built with the first elements of the permutation
char_partnerlists[-1] = characteristic_all_partners
first = True
for j in range(n):
# here we suppose the characteristic function is 0 for the empty set
if abs(characteristic_all_partners - char_partnerlists[j]) < truncation:
if first:
size_of_rest = 0
for i in range(j, n):
size_of_rest += len(self.scenario.partners_list[i].y_train)
a = (characteristic_all_partners - char_partnerlists[j]) / size_of_rest
first = False
size_of_S = len(self.scenario.partners_list[j].y_train)
char_partnerlists[j + 1] = char_partnerlists[j] + a * size_of_S
else:
char_partnerlists[j + 1] = self.not_twice_characteristic(
permutation[: j + 1]
)
contributions[-1][permutation[j]] = (
char_partnerlists[j + 1] - char_partnerlists[j]
)
v_max = np.max(np.var(contributions, axis=0))
sv = np.mean(contributions, axis=0)
self.name = "ITMCS"
self.contributivity_scores = sv
self.scores_std = np.std(contributions, axis=0) / np.sqrt(t - 1)
self.normalized_scores = self.contributivity_scores / np.sum(self.contributivity_scores)
end = timer()
self.computation_time_sec = end - start
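    # Note on the truncation step above: once the remaining marginal gain falls below the
    # truncation threshold, the residual v(N) - v(S_j) is spread over the remaining partners
    # proportionally to their number of training labels:
    #   v(S_{j+1}) ~ v(S_j) + a * size_j,  with  a = (v(N) - v(S_j)) / (sum of remaining sizes).
    # This linear interpolation is the small bias correction that distinguishes ITMCS from TMC.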
# # %% compute Shapley values with the importance sampling method
def IS_lin(self, sv_accuracy=0.01, alpha=0.95):
"""Return the vector of approximated Shapley value corresponding to a list of partner and \
a characteristic function using the importance sampling method and a linear interpolation model."""
start = timer()
        n = len(self.scenario.partners_list)
interaction",
u"detector: fabrication",
u"gravitation: spinor",
u"vector meson: decay modes",
u"velocity: expansion",
u"pi: density",
u"gauge field theory: higher-dimensional",
u"p p",
u"chargino: exchange",
u"B-: rare decay",
u"nonpolynomial",
u"n nucleus: elastic scattering",
u"space-time: complex",
u"Grenoble ESRF",
u"J/psi(3100): mass",
u"dark matter: spatial distribution",
u"Lambda/c+: electroproduction",
u"pi1(1600)",
u"perturbation theory: nonlinear",
u"black hole: deformation",
u"electromagnetic interaction: effect",
u"D*(2010): pair production",
u"star: orbit",
u"calorimeter: satellite",
u"baryon: hybrid",
u"cross section: helicity",
u"gravitation: Einstein-Hilbert",
u"fermion: quasiparticle",
u"entropy: Hawking",
u"neutrino: momentum",
u"damage: time dependence",
u"neutrino p: deep inelastic scattering",
u"superconductivity: string",
u"tau: jet",
u"star: velocity",
u"approximation: thermodynamical",
u"Delta(1232): propagator",
u"graviton: absorption",
u"pressure: transverse",
u"astrophysics: wave function",
u"electron: interaction",
u"bottom: width",
u"invariance: SU(2)",
u"fermion: Kaluza-Klein",
u"sum rule: momentum",
u"analyzing power: angular dependence",
u"sphericity",
u"alignment: error",
u"mass spectrum: discrete",
u"spinor: chiral",
u"surface: effect",
u"psi(3685): width",
u"event shape analysis: planarity",
u"dilaton: massless",
u"eta: production",
u"operator: lattice",
u"coupling constant: pseudoscalar",
u"electroweak interaction: dynamical symmetry breaking",
u"scaling: transformation",
u"meson: coupling constant",
u"gluon: constituent",
u"DIRAC",
u"geometry: Euclidean",
u"recoil: polarization",
u"parity: spontaneous symmetry breaking",
u"attractor: de Sitter",
u"fluid: collapse",
u"stau: LSP",
u"algebra: SL(2,R)",
u"dispersion relation: linear",
u"galaxy: density",
u"iodine",
u"quantum mechanics: Yang-Mills",
u"W-: hadroproduction",
u"momentum: finite",
u"baryon: mass formula",
u"Z0: final state",
u"background field: tensor",
u"quark antiquark: elastic scattering",
u"trigger: upgrade",
u"photofission",
u"fluctuation: random",
u"three-body problem: relativistic",
u"chromaticity",
u"solution: semiclassical",
u"magnetic field: dependence",
u"charged particle: capture",
u"neutrino: mass ratio",
u"algebra: Moyal",
u"WIMP nucleus: inelastic scattering",
u"photon: time delay",
u"Gross-Neveu model: chiral",
u"hypernucleus: binding energy",
u"space: S(N)",
u"charge conjugation: operator",
u"symmetry breaking: SU(2) x U(1)",
u"quasiparticle: width",
u"tau: lifetime",
u"hypernucleus: lifetime",
u"soliton: charge",
u"Beltrami",
u"emission",
u"beam transport: colliding beams",
u"muon-: pair production",
u"neodymium",
u"meson resonance: mass spectrum",
u"slepton: mixing",
u"neutrino/tau: mass",
u"J/psi(3100): decay",
u"jet: hadronization",
u"model: kinetic",
u"quantum gravity: fluctuation",
u"beam-beam: interaction",
u"gluon: gas",
u"zirconium",
u"conservation law: strangeness",
u"Drinfeld double",
u"parton: elastic scattering",
u"symmetry: SO(8)",
u"space-time: boundary condition",
u"Higgs particle: exclusive production",
u"curvature: induced",
u"pi: superfluid",
u"Skyrme model: SU(2)",
u"charmed particle: decay",
u"potential: chiral",
u"bolometer: crystal",
u"energy: gravitation",
u"deuterium: mesic atom",
u"matter: viscosity",
u"dissociation: temperature",
u"B*(5320)",
u"Klein-Gordon equation: massless",
u"Kerr-Newman",
u"nucleus: magnetic moment",
u"two-point function: scalar",
u"axion: flux",
u"tracks: resolution",
u"laser: polarization",
u"K+: width",
u"cosmological constant: decay",
u"charmonium: hybrid",
u"renormalization: algebra",
u"photon deuteron: Compton scattering",
u"LSP: production",
u"charge: correlation function",
u"energy: decay",
u"hadron: energy",
u"parton: momentum",
u"total cross section: correction",
u"atom: transition",
u"lepton: universality: violation",
u"sfermion: mixing angle",
u"multiplicity: scaling",
u"antideuteron: cosmic radiation",
u"neutrino p: inclusive reaction",
u"rho(770): form factor",
u"inflaton: axion",
u"lepton: photoproduction",
u"B0 anti-B0",
u"astrophysics: matter",
u"baryon: density: high",
u"gauge field theory: SO(5)",
u"cavity: microwaves",
u"space-time: oscillation",
u"halo: formation",
u"graviton: background",
u"hadron: particle identification",
u"charmed particle: photoproduction",
u"differential cross section: rapidity",
u"charged particle: angular distribution",
u"pi nucleon: coupling",
u"beam: energy loss",
u"rho(770): photoproduction",
u"channel cross section: transverse momentum",
u"photon: interference",
u"pi: correlation function",
u"quantum gravity: validity test",
u"Type I",
u"charged particle: heavy",
u"muon: acceleration",
u"B: B-parameter",
u"approximation: fluid",
u"symmetry breaking: SU(4)",
u"solenoid: design",
u"boson: heavy",
u"model: triplet",
u"pi0: multiple production",
u"baryon resonance: decay",
u"n: superfluid",
u"heavy quark: diffusion",
u"supersymmetry: 5",
u"resolution: impact parameter",
u"plasma: expansion",
u"field theory: Yang-Mills",
u"neutrino antineutrino: asymmetry",
u"Pauli-Villars",
u"radioactivity: induced",
u"length",
u"electron: photoproduction",
u"algebra: Yangian",
u"horizontal symmetry: SU(3)",
u"curvaton: mass",
u"antineutrino: mass difference",
u"membrane: rotation",
u"ion: capture",
u"helium: irradiation",
u"GNO",
u"foam",
u"top: right-handed",
u"invariance: relativistic",
u"X-ray: background",
u"attenuation: length",
u"f0(600): propagator",
u"model: weak interaction",
u"wormhole: mass",
u"neural network: Bayesian",
u"neutralino p: elastic scattering",
u"bound state: mass spectrum",
u"polarization: angular dependence",
u"pi: charge radius",
u"D anti-D: molecule",
u"asymmetry: Collins",
u"spectrum: x-dependence",
u"K- p: exclusive reaction",
u"interpretation of experiments: PHOBOS",
u"Goldstone particle: multiplet",
u"B/s: excited state",
u"interferometer: sensitivity",
u"calorimeter: imaging",
u"omega(783): electroproduction",
u"flux: U(1)",
u"string: bound state",
u"scattering amplitude: eikonal",
u"anti-B0: width",
u"electron: annihilation",
u"scalar particle: composite",
u"hydrodynamics: nonlinear",
u"family: 1",
u"interference: Bethe-Heitler",
u"f0(980): model",
u"muon nucleus: interaction",
u"K0(S): radiative decay",
u"mass spectrum: moment",
u"atmosphere: monitoring",
u"Hartree approximation: relativistic",
u"Darmstadt Lab",
u"Laplace transformation",
u"turbulence: hydrodynamics",
u"pi+ p: interaction",
u"p-brane: 8",
u"baryon: coupling",
u"algebra: lattice",
u"pi: charge",
u"saxion: decay",
u"anomaly: effect",
u"dilaton: massive",
u"Higgs particle: decoupling",
u"hyperon: coupling",
u"cosmic radiation: atmosphere",
u"Hamiltonian formalism: light front",
u"Uppsala CELSIUS Stor",
u"gauge field theory: nonlinear",
u"postulated particle: lifetime",
u"cosmic radiation: temperature",
u"positronium: decay",
u"scattering amplitude: singularity",
u"pi- p: interaction",
u"detector: alignment",
u"particle: mechanics",
u"tau: showers",
u"color spin locked phase",
u"symmetry breaking: SO(3)",
u"transverse energy: density",
u"nucleon nucleon: bremsstrahlung",
u"factorization: approximation",
u"invariance: SL(2,R)",
u"duality: cascade",
u"matrix model: Yang-Mills",
u"p: injection",
u"entropy: ratio",
u"Z0: penguin",
u"gluon: current",
u"cavity: rotation",
u"Antilambda: hadroproduction",
u"stop: NLSP",
u"soliton: string",
u"billiard",
u"wire: quantum",
u"inflation: tachyon",
u"J/psi(3100): elliptic flow",
u"quantum group: SL(2)",
u"dark energy: anisotropy",
u"background: thermal",
u"strong interaction: new interaction",
u"coupling: magnetic",
u"dimension: space-time",
u"plasma: model",
u"dimension: dependence",
u"flavor: universality",
u"lead: target",
u"field equations: nonlocal",
u"pressure: high",
u"gravitational radiation: effect",
u"effect: surface",
u"electric moment: multipole",
u"Sunyaev-Zel'dovich effect",
u"meson nucleon: coupling",
u"bottom meson: charmed meson",
u"K0: branching ratio",
u"particle: exchange",
u"symmetry breaking: SU(5)",
u"entropy: density: ratio",
u"rubidium",
u"Skyrmion: mass",
u"gravitation: metric",
u"defect: formation",
u"quantum mechanics: scattering",
u"strangeness: hadroproduction",
u"field theory: coupling",
u"Z': effect",
u"momentum: difference",
u"mass: position dependence",
u"CELLO",
u"fibre bundle: spin",
u"spin: 4",
u"neutrino: superfield",
u"muon nucleus: deep inelastic scattering",
u"orbit: circle",
u"gravitation: magnetic field",
u"SO(32)",
u"gravitational radiation: shock waves",
u"effect: anisotropy",
u"field theory: geometrical",
u"orientifold: torus",
u"time: operator",
u"quantum electrodynamics: perturbation theory",
u"quark: magnetic moment",
u"meson resonance: exotic",
u"potential: relativistic",
u"bound state: pole",
u"particle: energy spectrum",
u"pseudoscalar particle: mass",
u"decoherence: time",
u"space: embedding",
u"tachyon: field theory",
u"ion: cosmic radiation",
u"dimension: fractional",
u"monitoring: gas",
u"stop: decay modes",
u"Upsilon(9460): photoproduction",
u"decay: vertex",
u"bottom: production",
u"group: affine",
u"antimatter: asymmetry",
u"interaction: pseudoscalar",
u"eta/c(2980): associated production",
u"resonance: cavity",
u"algebra: E(11)",
u"quantum mechanics: validity test",
u"operator: Weinberg",
u"nucleon: three-body problem",
u"magnetic monopole: nonabelian",
u"algebra: Becchi-Rouet-Stora",
u"Higgsino: NLSP",
u"neutrino: anisotropy",
u"space-time: Nariai",
u"Sp(4)",
u"Sigma+",
u"Sigma0",
u"charge: confinement",
u"fermion: neutral particle",
u"magnetic monopole: density",
u"photon: angular momentum",
u"Kerr-Schild",
u"Klebanov-Strassler model",
u"transition: Hagedorn",
u"photon: statistics",
u"DESY XFEL",
u"pi0: leptonic decay",
u"particle: propagation",
u"symmetry: SU(1,1)",
u"quantum chromodynamics: bound state",
u"antinucleus",
u"correlation: two-photon",
u"space-time: causality",
u"SO(12)",
u"geometry: induced",
u"Nahm transformation",
u"p n",
u"renormalon: infrared",
u"operator: Weyl",
u"Upsilon(10020): radiative decay",
u"cross section: dissociation",
u"helium: energy spectrum",
u"dark matter: strong interaction",
u"B-: leptonic decay",
u"gauge field theory: discrete",
u"membrane model: higher-dimensional",
u"model: Veneziano",
u"holonomy: flux",
u"K: potential",
u"graviton: polarization",
u"symmetry: SU(3) x SU(3) x U(1)",
u"selenium",
u"resonance: exotic",
u"orientifold: Z(2) x Z(2)",
u"sneutrino: leptonic decay",
u"effect: viscosity",
u"pi: massless",
u"particle: wave function",
u"quark: scattering",
u"Z': associated production",
u"Y(2175)",
u"transition: photon axion",
u"electron: structure function",
u"saxion",
u"Pauli principle: violation",
u"electron electron: scattering",
u"neutrino: showers",
u"Sigma(1385): hadronic decay",
u"J/psi(3100): momentum spectrum",
u"zinc",
u"lepton nucleon: exclusive reaction",
u"D/s1(2536)",
u"gluino: effect",
u"neutrino nucleus",
u"p: leading particle",
u"quantum algebra: representation",
u"defect: integrability",
u"fixed point: orbifold",
u"electric field: static",
u"pi nucleon: inclusive reaction",
u"black hole: bound state",
u"meson nucleon: elastic scattering",
u"Sigma+: semileptonic decay",
u"electron: bremsstrahlung",
u"eta/c(3590): hadronic decay",
u"Z(3930)",
u"microwaves: coupling",
u"cosmological model: oscillation",
u"KASCADE",
u"proton synchrotron: booster",
u"tau: neutrinoproduction",
u"lepton: mass: hierarchy",
u"B/c: rare decay",
u"tunneling: effect",
u"charmonium: decay modes",
u"potential: D-term",
u"nucleus: scattering",
u"pi: fragmentation function",
u"quark: Regge",
u"tungsten: target",
u"invariance: SL(2,C)",
u"inflaton: trajectory",
u"K anti-K: molecule",
u"perturbation theory: string",
u"anti-K: condensation",
u"W W: elastic scattering",
u"Dirac equation: massive",
u"Los Alamos Lab",
u"interaction: topological",
u"magnetic monopole: flux",
u"K nucleon: scattering amplitude",
u"NA60",
u"W: photoproduction",
u"non-Gaussianity: scale dependence",
u"algebra: Calabi-Yau",
u"form factor: calculated",
u"horizon: mass",
u"correction: screening",
u"energy levels: width",
u"atom: ionization",
u"charmed particle: production",
u"fibre bundle: torus",
u"interpretation of experiments: Juelich COSY PS",
u"approximation: diffusion",
u"drift chamber: liquid argon",
u"meson: oscillation",
u"polarization: monitoring",
u"fermion: string model",
u"Delta(1232): magnetic moment",
u"dimuon: charge: asymmetry",
u"charmed particle: decay modes",
u"gluon: fluctuation",
u"mechanics: relativistic",
u"neutralino p: interaction",
u"gamma ray: pulsed",
u"operator: higher-order",
u"approximation: effective range",
u"space-time: Kaluza-Klein",
u"Dalitz plot: slope",
u"neutrino: associated production",
u"quantum chromodynamics: weak coupling",
u"gravitation: emission",
u"neutrino: bremsstrahlung",
u"geometry: thermodynamical",
u"radiation: absorption",
u"coupling: energy dependence",
u"flavor: correlation",
u"algebra: SU(4)",
u"scale: transformation",
u"superpotential: coupling",
u"deep inelastic scattering: neutral current",
u"O(N,N)",
u"wave function: Dirac",
u"neon: nuclide",
u"electron: synchrotron radiation",
u"correction: semiclassical",
u"rho(770)0: propagator",
u"colliding beam detector: proposed",
u"plasma: formation",
u"lepton: left-handed",
u"gluon: scalar",
u"inflation: de Sitter",
u"fermion: pole",
u"cosmic radiation: absorption",
u"nucleon: distribution | |
means sigma
h = torch.mul(o, torch.tanh(c)) + torch.mul(o_c, torch.tanh(self.W_c_b(sememe_h)))
return (c, h)
def forward(self, sent, sent_len, sememe_data):
# hx: (child_c, child_h)
sememe_h = self.sememe_sum(sememe_data)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda()
sent = sent.index_select(1, idx_sort)
sememe_h = sememe_h.index_select(1, idx_sort)
max_time, batch_size, _ = sent.size()
        pack_length = np.zeros([max_time, 1], dtype = np.int64)
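        # pack_length[t] holds the number of sequences (in the length-sorted batch) that are
        # still active at time step t, analogous to the batch_sizes produced by
        # torch.nn.utils.rnn.pack_padded_sequence; it is filled in by the loop below.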
time_point = batch_size-1
last_point = 0
while(True):
pack_length[last_point: sent_len_sorted[time_point]] = time_point+1
last_point = sent_len_sorted[time_point]
if(sent_len_sorted[time_point] == max_time):
break
time_point = time_point-1
pack_length = torch.from_numpy(pack_length).cuda()
output_forward = []
hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward, sememe_h[time, 0:pack_length[time]])
output_forward.append(torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0))
if(time < max_time-1):
hx_forward = (next_hx[0][0:pack_length[time+1]], next_hx[1][0:pack_length[time+1]])
output_backward = [[] for i in range(max_time)]
hx_backward = (sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_backward(sent[max_time-time-1, 0:pack_length[max_time-time-1]], hx_backward, sememe_h[max_time-time-1, 0:pack_length[max_time-time-1]])
output_backward[max_time-time-1] = torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0)
if(time < max_time-1):
hx_backward = (torch.cat([next_hx[0], torch.zeros([pack_length[max_time-time-2]-next_hx[0].size()[0], self.mem_dim]).cuda()], dim = 0), \
torch.cat([next_hx[1], torch.zeros([pack_length[max_time-time-2]-next_hx[1].size()[0], self.mem_dim]).cuda()], dim = 0))
a = torch.stack(output_forward, dim = 0)
b = torch.stack(output_backward, dim = 0)
idx_unsort = torch.from_numpy(idx_unsort).cuda()
sent_output_forward = a.index_select(1, idx_unsort)
sent_output_backward = b.index_select(1, idx_unsort)
final_output_forward = torch.stack([sent_output_forward[sent_len[i]-1][i] for i in range(batch_size)], dim = 0)
final_output = torch.cat([final_output_forward, sent_output_backward[0]], dim = 1)
return final_output
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float().cuda(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
return input_sememe
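# Note on sememe_sum above: input_s is expected to be a (time, batch, sememe_vocab)
# multi-hot tensor; multiplying each time step by the sememe embedding matrix sums the
# embeddings of the sememes attached to every token, giving (time, batch, sememe_dim)
# features that the gates consume as sememe_h.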
class BILSTM_cell_baseline(nn.Module):
def __init__(self, config):
super(BILSTM_cell_baseline, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememesumlstm = SememeSumLstm(512, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(512, self.enc_lstm_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
#self.pool_type = config['pool_type']
        # times 3: there are three gate matrices (i, o, u), separated later with torch.split
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ioux_b = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # ious specifically handles the c and h passed in from the sememes; both are mem_dim-dimensional
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ious_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_b = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s_b = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
        # fs specifically handles the c and h passed in from the sememes
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.fs_b = nn.Linear(self.mem_dim, self.mem_dim)
self.max_pad = True
self.reset_parameters()
def reset_parameters(self):
layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.ious, self.ious_b, self.fx, self.fx_b, self.fx_s, self.fx_s_b, self.fh, self.fh_b, self.fs, self.fs_b]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx, sememe_c, sememe_h):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
f_s = torch.sigmoid(
self.fs(sememe_h) + self.fx_s(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
fc_s = torch.mul(f_s, sememe_c)
c = torch.mul(i, u) + fc + fc_s#sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def node_backward(self, inputs, hx, sememe_c, sememe_h):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux_b(inputs) + self.iouh_b(child_h) + self.ious_b(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh_b(child_h) + self.fx_b(inputs)
)
f_s_b = torch.sigmoid(
self.fs_b(sememe_h) + self.fx_s_b(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
fc_s = torch.mul(f_s_b, sememe_c)
c = torch.mul(i, u) + fc + fc_s #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, sent, sent_len, sememe_vecs, device):
# hx: (child_c, child_h)
sememe_c, sememe_h = self.sememe_sum(sememe_vecs)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).to(device)
sent = sent.index_select(1, idx_sort)
sememe_h = sememe_h.index_select(1, idx_sort)
sememe_c = sememe_c.index_select(1, idx_sort)
max_time, batch_size, _ = sent.size()
        pack_length = np.zeros([max_time, 1], dtype = np.int64)
time_point = batch_size-1
last_point = 0
while(True):
pack_length[last_point: sent_len_sorted[time_point]] = time_point+1
last_point = sent_len_sorted[time_point]
if(sent_len_sorted[time_point] == max_time):
break
time_point = time_point-1
pack_length = torch.from_numpy(pack_length).to(device)
output_forward = []
hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward, sememe_c[time, 0:pack_length[time]], sememe_h[time, 0:pack_length[time]])
output_forward.append(torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = device)], dim = 0))
if(time < max_time-1):
hx_forward = (next_hx[0][0:pack_length[time+1]], next_hx[1][0:pack_length[time+1]])
output_backward = [[] for i in range(max_time)]
hx_backward = (sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_backward(sent[max_time-time-1, 0:pack_length[max_time-time-1]], hx_backward, sememe_c[max_time-time-1, 0:pack_length[max_time-time-1]], sememe_h[max_time-time-1, 0:pack_length[max_time-time-1]])
output_backward[max_time-time-1] = torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = device)], dim = 0)
if(time < max_time-1):
hx_backward = (torch.cat([next_hx[0], torch.zeros([pack_length[max_time-time-2]-next_hx[0].size()[0], self.mem_dim]).to(device)], dim = 0), \
torch.cat([next_hx[1], torch.zeros([pack_length[max_time-time-2]-next_hx[1].size()[0], self.mem_dim]).to(device)], dim = 0))
a = torch.stack(output_forward, dim = 0)
b = torch.stack(output_backward, dim = 0)
idx_unsort = torch.from_numpy(idx_unsort).to(device)
sent_output_forward = a.index_select(1, idx_unsort)
sent_output_backward = b.index_select(1, idx_unsort)
final_output_forward = torch.stack([sent_output_forward[sent_len[i]-1][i] for i in range(batch_size)], dim = 0)
final_output = torch.cat([final_output_forward, sent_output_backward[0]], dim = 1)
return final_output
def sememe_sum(self, input_s):
# emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(input_s[i].float())
input_sememe = torch.stack(input_sememe, dim = 0)
sememe_c, sememe_h = self.sememesumlstm(input_sememe)
return sememe_c, sememe_h
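# Rough usage sketch (the config values are hypothetical): the baseline cell takes word
# embeddings of shape (time, batch, word_emb_dim) plus precomputed 512-d sememe vectors
# per token, and returns the concatenated final forward/backward states, i.e. a tensor of
# shape (batch, 2 * enc_lstm_dim).
#
#   config = {'enc_lstm_dim': 300, 'word_emb_dim': 300}
#   cell = BILSTM_cell_baseline(config)
#   # out = cell(sent, sent_len, sememe_vecs, device)   # out: (batch, 600)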
class BILSTM_cell(nn.Module):
def __init__(self, config):
super(BILSTM_cell, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
#self.pool_type = config['pool_type']
        # times 3: there are three gate matrices (i, o, u), separated later with torch.split
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ioux_b = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # ious specifically handles the c and h passed in from the sememes; both are mem_dim-dimensional
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ious_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_b = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s_b = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
        # fs specifically handles the c and h passed in from the sememes
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.fs_b = nn.Linear(self.mem_dim, self.mem_dim)
self.max_pad = True
self.reset_parameters()
def reset_parameters(self):
layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.ious, self.ious_b, self.fx, self.fx_b, self.fx_s, self.fx_s_b, self.fh, self.fh_b, self.fs, self.fs_b]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx, sememe_c, sememe_h):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
f_s = torch.sigmoid(
self.fs(sememe_h) + self.fx_s(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
fc_s = torch.mul(f_s, sememe_c)
c = torch.mul(i, u) + fc + fc_s#sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def node_backward(self, inputs, hx, sememe_c, sememe_h):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux_b(inputs) + self.iouh_b(child_h) + self.ious_b(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh_b(child_h) + self.fx_b(inputs)
)
f_s_b = torch.sigmoid(
self.fs_b(sememe_h) + self.fx_s_b(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
fc_s = torch.mul(f_s_b, sememe_c)
c = torch.mul(i, u) + fc + fc_s #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, sent, sent_len, sememe_data):
# hx: (child_c, child_h)
sememe_c, sememe_h = self.sememe_sum(sememe_data)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda()
sent = sent.index_select(1, idx_sort)
sememe_h = sememe_h.index_select(1, idx_sort)
sememe_c = sememe_c.index_select(1, idx_sort)
max_time, batch_size, _ = sent.size()
        pack_length = np.zeros([max_time, 1], dtype = np.int64)
time_point = batch_size-1
last_point = 0
while(True):
pack_length[last_point: sent_len_sorted[time_point]] = time_point+1
last_point = sent_len_sorted[time_point]
if(sent_len_sorted[time_point] == max_time):
break
time_point = time_point-1
pack_length = torch.from_numpy(pack_length).cuda()
output_forward = []
hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward, sememe_c[time, 0:pack_length[time]], sememe_h[time, 0:pack_length[time]])
output_forward.append(torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0))
if(time < max_time-1):
hx_forward = (next_hx[0][0:pack_length[time+1]], next_hx[1][0:pack_length[time+1]])
output_backward = [[] for i in range(max_time)]
hx_backward = (sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_backward(sent[max_time-time-1, 0:pack_length[max_time-time-1]], hx_backward, sememe_c[max_time-time-1, 0:pack_length[max_time-time-1]], sememe_h[max_time-time-1, 0:pack_length[max_time-time-1]])
output_backward[max_time-time-1] = torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0)
if(time < max_time-1):
hx_backward = (torch.cat([next_hx[0], torch.zeros([pack_length[max_time-time-2]-next_hx[0].size()[0], self.mem_dim]).cuda()], dim = 0), \
torch.cat([next_hx[1], torch.zeros([pack_length[max_time-time-2]-next_hx[1].size()[0], self.mem_dim]).cuda()], dim = 0))
a = torch.stack(output_forward, dim = 0)
b = torch.stack(output_backward, dim = 0)
idx_unsort = torch.from_numpy(idx_unsort).cuda()
sent_output_forward = a.index_select(1, idx_unsort)
sent_output_backward = b.index_select(1, idx_unsort)
        final_output_forward = torch.stack([sent_output_forward[sent_len[i]-1][i] for i in range(batch_size)], dim = 0)
obj['canFillWithLiquid'] and not obj['isFilledWithLiquid']:
empty_objects.add(obj['objectId'])
empty_object_types.add(obj['objectType'])
#
# for ot in empty_object_types:
# env.controller.step(action='SetObjectStates', SetObjectStates={
# 'objectType': ot,
# 'stateChange': 'canFillWithLiquid',
# 'isFilledWithLiquid': True,
# })
for obj in empty_objects:
liquid = random.choice(['coffee', 'wine', 'water'])
env.controller.step({'action': 'FillObjectWithLiquid', 'objectId': obj, 'fillLiquid': liquid,
'forceVisible': True})
def close_everything(env: AI2ThorEnvironment):
object_types_open = set()
for obj in env.all_objects():
if obj['openable'] and obj['isOpen']:
object_types_open.add(obj['objectType'])
for x in object_types_open:
env.controller.step(action='SetObjectStates', SetObjectStates={
'objectType': x,
'stateChange': 'openable',
'isOpen': False,
})
def retract_hand(env: AI2ThorEnvironment):
"""
    Move the hand back so it's not super visible. This will be reset.
:param env:
:return:
"""
oih = env.object_in_hand()
if oih is None:
return
# current_rotation = clip_angle_to_nearest(env.get_agent_location()['rotation'])
env.controller.step(action='MoveHandDown', moveMagnitude=0.2)
env.controller.step(action='MoveHandBack', moveMagnitude=0.1)
def group_knobs_and_burners(env: AI2ThorEnvironment):
"""
We'll group together stoves and knobs that go together
:param env:
:return:
"""
stoveburners = {x['name']: x for x in env.all_objects_with_properties({'objectType': 'StoveBurner'})}
knobs = {x['name']: x for x in env.all_objects_with_properties({'objectType': 'StoveKnob'})}
knobs_and_burners = []
for k, v in knobs.items():
if k not in KNOB_TO_BURNER:
print(f"{k} not found???")
continue
bn = KNOB_TO_BURNER[k]
if bn in stoveburners:
knobs_and_burners.append((v, stoveburners[bn]))
else:
print("Oh no burner {} not found?".format(bn), flush=True)
return knobs_and_burners
class RecordingEnv(object):
"""
    Records an agent's path. This class records:
    1. actions
    2. images (frames)
    3. bounding boxes, including object attributes
"""
def __init__(self, env: AI2ThorEnvironment, text='', main_object_ids=(), ):
self.env = env
# Things we're recording
# "inputs"
self.meta_info = None
self.frames = []
self.object_id_to_states = {} # objid -> {t} -> obj. Default prior is that states don't change
self.bboxes = []
self.agent_states = []
# "outputs"
self.output_actions = []
self.output_action_results = []
self.alias_object_id_to_old_object_id = {}
self.meta_info = {
'scene_name': self.env.scene_name,
'text': text,
'main_object_ids': main_object_ids,
'width': env._start_player_screen_width,
'height': env._start_player_screen_height,
}
self.log_observations()
def __len__(self):
return len(self.frames)
@property
def new_items(self):
return {k: v[len(self) - 1] for k, v in self.object_id_to_states.items() if (len(self) - 1) in v}
def step(self, action_dict, action_can_fail=False, record_failure=True):
"""
:param action_dict:
:param action_can_fail: Whether we're OK with the action failing
:return:
"""
res = self.env.step(action_dict)
if (not self.env.last_action_success) and (not action_can_fail):
raise ValueError("Action {} failed, {}".format(action_dict, self.env.last_event.metadata['errorMessage']))
if (not record_failure) and (not self.env.last_action_success):
return False
self.output_actions.append(action_dict)
self.output_action_results.append({
'action_success': self.env.last_action_success,
'action_err_msg': self.env.last_event.metadata['errorMessage'],
})
self.log_observations()
return self.env.last_action_success
def log_observations(self):
t = len(self.frames)
# frames
self.frames.append(self.env.last_event.frame)
# Update object states
for obj in self.env.all_objects():
# We must have added a new object
if obj['objectId'] not in self.object_id_to_states:
# if (t > 0) and (not 'Slice' in obj['name']):
# import ipdb
# ipdb.set_trace()
self.object_id_to_states[obj['objectId']] = {t: obj}
else:
last_t = sorted(self.object_id_to_states[obj['objectId']].keys())[-1]
# Object changed
if not objects_equal(self.object_id_to_states[obj['objectId']][last_t], obj):
self.object_id_to_states[obj['objectId']][t] = obj
# Bounding boxes
self.bboxes.append({k: v.tolist() for k, v in self.env.last_event.instance_detections2D.items()})
self.agent_states.append(self.env.get_agent_location())
def save(self, fn='temp.h5'):
writer = GCSH5Writer(fn)
# Get object
object_ids = sorted(self.object_id_to_states)
object_id_to_ind = {id: i for i, id in enumerate(object_ids)}
# No need to store identities
for k, v in sorted(self.alias_object_id_to_old_object_id.items()):
if k == v:
self.alias_object_id_to_old_object_id.pop(k)
writer.create_dataset('alias_object_id_to_old_object_id',
data=np.string_(json.dumps(self.alias_object_id_to_old_object_id).encode('utf-8')))
        writer.create_dataset('object_ids', data=np.string_(object_ids))
writer.create_dataset('frames', data=np.stack(self.frames), compression='gzip', compression_opts=9)
# Get bounding box -> ind
# [ind, box_x1, box_y1, box_x2, box_y2]
bbox_group = writer.create_group('bboxes')
for t, bbox_dict_t in enumerate(self.bboxes):
# NOTE: This filters out some objects.
bbox_list = [[object_id_to_ind[k]] + v for k, v in bbox_dict_t.items() if k in object_id_to_ind]
bbox_array = np.array(sorted(bbox_list, key=lambda x: x[0]), dtype=np.uint16) ###### oh no I was saving this as uint8 earlier and things were clipping. UGH
bbox_group.create_dataset(f'{t}', data=bbox_array)
# Position / Rotation / center / size
xyz_coords = writer.create_group('pos3d')
for k in sorted(self.object_id_to_states):
xyz_coords_k = xyz_coords.create_group(k)
for t in sorted(self.object_id_to_states[k]):
coords_to_use = [self.object_id_to_states[k][t].pop('position'),
self.object_id_to_states[k][t].pop('rotation'),
self.object_id_to_states[k][t]['axisAlignedBoundingBox'].pop('center'),
self.object_id_to_states[k][t]['axisAlignedBoundingBox'].pop('size')]
xyz_coords_kt_np = np.array([[p[k2] for k2 in 'xyz'] for p in coords_to_use], dtype=np.float32)
xyz_coords_k.create_dataset(f'{t}', data=xyz_coords_kt_np)
self.object_id_to_states[k][t].pop('axisAlignedBoundingBox')
self.object_id_to_states[k][t].pop('objectOrientedBoundingBox')
# print("{} total object things".format(sum([len(v) for v in self.object_id_to_states.values()])))
writer.create_dataset('object_id_to_states', data=np.string_(
json.dumps(self.object_id_to_states).encode('utf-8'),
))
writer.create_dataset('agent_states', data=np.array([[s[k] for k in ['x', 'y', 'z', 'rotation', 'horizon']]
for s in self.agent_states], dtype=np.float32))
writer.create_dataset('output_actions', data=np.string_(json.dumps(self.output_actions).encode('utf-8')))
writer.create_dataset('output_action_results',
data=np.string_(json.dumps(self.output_action_results).encode('utf-8')))
writer.create_dataset('meta_info', data=np.string_(json.dumps(self.meta_info).encode('utf-8')))
writer.close()
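# Rough usage sketch (illustrative; the path and text are hypothetical, and env is an
# already-constructed AI2ThorEnvironment):
#
#   renv = RecordingEnv(env, text='put the mug in the fridge', main_object_ids=(mug_id,))
#   renv.step({'action': 'MoveAhead'}, action_can_fail=True)
#   renv.save('episode_0001.h5')   # writes frames, boxes, object states and actions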
#############################################################################
def inch_closer_path(renv: RecordingEnv, target_receptacle):
"""
Assuming the receptacle is open, scoot a bit closer. Keep existing horizon and rotation.
:param env:
:param target_receptacle:
:return:
"""
env.recompute_reachable()
reachable_pts = renv.env.currently_reachable_points
start_location = renv.env.get_agent_location()
start_location['horizon'] = clip_angle_to_nearest(start_location['horizon'], [-30, 0, 30, 60])
start_location['rotation'] = clip_angle_to_nearest(start_location['rotation'])
start_dist = renv.env.position_dist(start_location, target_receptacle['position'])
closer_pts = []
for pt in reachable_pts:
pt_dist = renv.env.position_dist(pt, target_receptacle['position'])
if pt_dist >= (start_dist - 0.05):
continue
pt2 = {k: v for k, v in pt.items()}
pt2['dist'] = pt_dist
pt2['dist_to_me'] = renv.env.position_dist(start_location, pt)
pt2['horizon'] = start_location['horizon']
if pt2['horizon'] != clip_angle_to_nearest(horizon_angle_to_object(target_receptacle, pt), [-30, 0, 30, 60]):
continue
pt2['rotation'] = start_location['rotation']
if pt2['rotation'] != clip_angle_to_nearest(rotation_angle_to_object(target_receptacle, pt)):
continue
closer_pts.append(pt2)
if len(closer_pts) == 0:
return []
dists = np.array([(pt2['dist'], pt2['dist_to_me']) for pt2 in closer_pts])
score = 20.0 * dists[:, 0] + dists[:, 1] + 0.5 * np.random.randn(dists.shape[0])
pt = closer_pts[int(np.argmin(score))]
forward_path = env.get_fancy_shortest_path(start_location, pt, fix_multi_moves=False, num_inner_max=4)
if forward_path is None:
return []
forward_path = [x for x in forward_path if not x['action'].startswith('Look')]
if any([x['action'] in ('JumpAhead', 'RotateLeft', 'RotateRight') for x in forward_path]):
forward_path = []
# Now we can have a reverse path too
antonyms = {'MoveAhead': 'MoveBack', 'MoveLeft': 'MoveRight', 'MoveRight': 'MoveLeft'}
reverse_path = []
for p in forward_path[::-1]:
reverse_path.append({
'action': antonyms[p['action']],
'_start_state_key': p['_end_state_key'],
'_end_state_key': p['_start_state_key'],
})
for i, p in enumerate(forward_path):
good = renv.step(p, action_can_fail=True, record_failure=False)
if not good:
my_key = env.get_key(env.get_agent_location())
key_matches = np.array([rp['_start_state_key'] == my_key for rp in reverse_path])
if not np.any(key_matches):
reverse_path = []
else:
ki = int(np.where(key_matches)[0][0])
reverse_path = reverse_path[ki:]
break
return reverse_path
def pickup_object(renv: RecordingEnv, target_object_id, navigate=True,
action_can_fail=False, force_visible=False):
"""
:param renv:
:param target_object_id:
:param navigate: Whether to navigate there
:param action_can_fail: Whether the final pickup action can fail
:return:
"""
target_object = renv.env.get_object_by_id(target_object_id)
if target_object is None:
raise ValueError("No target object")
# Check if any objects are held, drop if so
held_object = renv.env.object_in_hand()
if held_object is not None:
raise ValueError("held object??")
must_open = False
if target_object['parentReceptacles'] is not None and len(target_object['parentReceptacles']) > 0:
pr = env.get_object_by_id(target_object['parentReceptacles'][0])
if pr['openable'] and not pr['isOpen']:
must_open = True
else:
pr = None
if navigate:
d2op = (random.random()*0.5 - 0.25) if must_open else random.random()
fgsp = 10.0 if random.random() > 0.5 else 0.0
for p in path_to_object(renv.env, target_object=target_object, dist_to_obj_penalty=d2op, faces_good_side_penalty=fgsp):
renv.step(p, action_can_fail=False)
# Inch closer
if must_open and (pr is not None):
renv.step({'action': 'OpenObject', 'objectId': pr['objectId']}, action_can_fail=False)
reverse_path = inch_closer_path(renv, pr)
else:
pr = None
reverse_path = []
renv.step({'action': 'PickupObject', 'objectId': target_object_id, 'forceVisible': force_visible}, action_can_fail=action_can_fail)
for p in reverse_path:
good = renv.step(p, action_can_fail=True, record_failure=False)
if not good:
break
if (pr is not None) and (pr['openable'] and not pr['isOpen']):
renv.step({'action': 'CloseObject', 'objectId': pr['objectId']}, action_can_fail=False)
return True
def put_object_in_receptacle(renv: RecordingEnv, target_receptacle_id, navigate=True, close_receptacle=True,
action_can_fail=False):
"""
:param renv:
:param target_receptacle:
:return:
"""
target_receptacle = renv.env.get_object_by_id(target_receptacle_id)
# Sanity check we're holding an object
held_object = renv.env.object_in_hand()
if held_object is None:
if action_can_fail:
return False
raise ValueError("No held object")
if target_receptacle is None:
raise ValueError("No receptacle?")
# Don't go too close to the object if it's open
must_open = target_receptacle['openable'] and not target_receptacle['isOpen']
d2op = (random.random() * 0.5 - 0.25) if must_open else random.random()
if navigate:
fgsp = 10.0 if random.random() > 0.5 else 0.0
path = path_to_object(renv.env, target_object=target_receptacle, dist_to_obj_penalty=d2op, faces_good_side_penalty=fgsp)
for p in path:
renv.step(p, action_can_fail=False)
# Open if needed
if must_open:
renv.step({'action': 'OpenObject', 'objectId': target_receptacle_id}, action_can_fail=False)
reverse_path = inch_closer_path(renv, renv.env.get_object_by_id(target_receptacle_id))
else:
reverse_path = []
# Move hand back
if not renv.env.get_object_by_id(held_object['objectId'])['visible']:
retract_hand(renv.env)
succeeds = renv.step(
{'action': 'PutObject', 'objectId': held_object['objectId'], 'receptacleObjectId': target_receptacle_id},
action_can_fail=action_can_fail)
for p in reverse_path:
good = renv.step(p, action_can_fail=True, record_failure=False)
if not good:
break
if succeeds and close_receptacle and target_receptacle['openable']:
succeeds = renv.step({'action': 'CloseObject', 'objectId': target_receptacle_id}, action_can_fail=False)
return succeeds
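# Illustrative sketch (not part of the original source): a minimal pick-and-place
# routine built from the two helpers above. The object and receptacle ids are
# hypothetical, and the RecordingEnv is assumed to be set up elsewhere in this
# module.
def _example_pick_and_place(renv: RecordingEnv, object_id, receptacle_id):
    # Navigate to the object and pick it up (allowed to fail silently here).
    pickup_object(renv, object_id, navigate=True, action_can_fail=True)
    # Carry it over and drop it into the receptacle, closing it afterwards.
    return put_object_in_receptacle(renv, receptacle_id, navigate=True,
                                    close_receptacle=True, action_can_fail=True)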
def pour_out_liquid(renv: RecordingEnv, action_can_fail=True):
"""
Assuming we're holding stuff -- pour it out
:param renv:
:param action_can_fail:
:return:
"""
held_obj = renv.env.object_in_hand()
if held_obj is None:
if not action_can_fail:
raise ValueError("NO held obj")
return False
# Option 1: Rotate 180 degrees upside down
if random.random() < 0.5:
renv.step({'action': 'RotateHand', 'x': 180}, action_can_fail=action_can_fail)
renv.step({'action': 'RotateHand', 'x': 0}, action_can_fail=action_can_fail)
else:
if held_obj['isFilledWithLiquid']:
renv.step({'action': 'EmptyLiquidFromObject', 'objectId': held_obj['objectId']},
action_can_fail=action_can_fail)
return True
#########################
def use_water_source_to_fill_or_clean_held(renv: RecordingEnv, water_source,
skip_turning_on_water=False,
empty_afterwards=False,
action_can_fail=True):
"""
Start with a held object. Go to the water source and put it in. Water will
# general libraries
import warnings
import numpy as np
# image processing libraries
from scipy import ndimage
from skimage.transform import radon
from skimage.measure.fit import _dynamic_max_trials
from ..generic.test_tools import \
construct_phase_plane, cross_spectrum_to_coordinate_list
from ..generic.data_tools import gradient_descent, secant
from ..preprocessing.shadow_transforms import pca
from .matching_tools_frequency_filters import \
raised_cosine, thresh_masking, normalize_power_spectrum, \
make_fourier_grid
from .matching_tools_frequency_metrics import local_coherence
def phase_jac(Q, m, W=np.array([]),
F1=np.array([]), F2=np.array([]), rank=2): # wip
"""
Parameters
----------
Q : numpy.array, size=(_,_), dtype=complex
cross spectrum
m : numpy.array, size=(2,1), dtype=float
displacement estimate, in pixel coordinate system
W : numpy.array, size=(m,n), dtype=float | boolean
weighting matrix, in a range of 0...1
F1 : numpy.array, size=(m,n), dtype=integer
coordinate of the first axis from the Fourier spectrum.
F2 : numpy.array, size=(m,n), dtype=integer
coordinate of the second axis from the Fourier spectrum
rank : integer, optional
exponent of the norm used in the cost function. The default is 2.
Returns
-------
dQdm : numpy.array, size=(m,n)
Jacobian of phase estimate
"""
# metric system: Fourier-based flip
# y +------><------+
# ^ | |
# | | |
# | v v
# <------+-------> x
# | ^ ^
# | | |
# v +------><------+
#
# indexing | indexing ^ y
# system 'ij'| system 'xy' |
# | |
# | i | x
# --------+--------> --------+-------->
# | |
# | |
# | j |
# v |
assert type(Q)==np.ndarray, ("please provide an array")
if Q.shape[0]==Q.shape[1]: # if Q is a cross-spectral matrix
if W.size==0: # if W is not given
W = np.ones((Q.shape[0], Q.shape[1]), dtype=float)
if F1.size==0:
F1,F2 = make_fourier_grid(Q, indexing='ij')
else: # list format
F1,F2 = Q[:,0], Q[:,1]
Q = Q[:,-1]
if W.size==0:
W = np.ones_like(Q, dtype=float)
if rank==2: # default
dXY = 1 - np.multiply(np.real(Q), +np.cos(F1*m[0]+F2*m[1])) \
- np.multiply(np.imag(Q), -np.sin(F1*m[0]+F2*m[1]))
else:
C_hat = construct_phase_plane(Q, m[0], m[1], indexing='ij')
QC = Q-C_hat # convert complex vector difference to metric
dXY = np.abs(np.multiply(W, QC)**rank)
dQdm = np.array([np.multiply(2*W.flatten()*F1.flatten(),dXY.flatten()), \
np.multiply(2*W.flatten()*F2.flatten(),dXY.flatten())]).T
return dQdm
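# Illustrative sketch (not part of the original source): evaluating the Jacobian
# above on a synthetic, noise-free cross spectrum. With the sign convention
# chosen here the residual dXY vanishes at the true shift, so the summed
# gradient should be (numerically) zero there.
def _example_phase_jac():
    di, dj = 1.5, -0.75                              # true sub-pixel shift
    F1, F2 = make_fourier_grid(np.zeros((32, 32)), indexing='ij')
    Q = np.exp(-1j*(F1*di + F2*dj))                  # ideal phase plane
    dQdm = phase_jac(Q, np.array([di, dj]))          # size=(32*32, 2)
    return np.sum(dQdm, axis=0)                      # approximately [0, 0]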
def phase_secant(data, W=np.array([]), x_0=np.zeros((2))): # wip
"""get phase plane of cross-spectrum through secant
find slope of the phase plane through secant method (or Newton's method)
in multiple dimensions it is known as the Broyden's method.
Parameters
----------
data : numpy.array, size=(m,n), dtype=complex
normalized cross spectrum
or numpy.array, size=(m*n,3), dtype=complex
coordinate list with the complex cross-spectrum in the last column
W : numpy.array, size=(m,n), dtype=boolean
index of data that is correct
or numpy.array, size=(m*n,1), dtype=boolean
list with classification of correct data
Returns
-------
di,dj : float
sub-pixel displacement
See Also
--------
phase_gradient_descend
References
----------
.. [1] <NAME>. "A class of methods for solving nonlinear simultaneous
equations" Mathematics and computation. vol.19(92) pp.577--593, 1965.
Example
-------
>>> import numpy as np
>>> from ..generic.test_tools import create_sample_image_pair
>>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
>>> Q = phase_corr(im1, im2)
>>> di,dj,_,_ = phase_secant(Q)
>>> assert(np.isclose(ti, di, atol=.2))
>>> assert(np.isclose(tj, dj, atol=.2))
"""
assert type(data)==np.ndarray, ("please provide an array")
data = cross_spectrum_to_coordinate_list(data, W)
J = phase_jac(data, x_0)
x_hat,_ = secant(data[:,:-1], data[:,-1], J, x_0, \
n_iters=10)
di,dj = 2*x_hat[0], 2*x_hat[1]
return di,dj
def phase_gradient_descend(data, W=np.array([]), x_0=np.zeros((2))): # wip
"""get phase plane of cross-spectrum through principle component analysis
find slope of the phase plane through
principle component analysis
Parameters
----------
data : numpy.array, size=(m,n), dtype=complex
normalized cross spectrum
data : numpy.array, size=(m*n,3), dtype=complex
coordinate list with the complex cross-spectrum in the last column
W : numpy.array, size=(m,n), dtype=boolean
index of data that is correct
W : numpy.array, size=(m*n,1), dtype=boolean
list with classification of correct data
Returns
-------
di,dj : float
sub-pixel displacement
See Also
--------
phase_lsq
Example
-------
>>> import numpy as np
>>> from ..generic.test_tools import create_sample_image_pair
>>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
>>> Q = phase_corr(im1, im2)
>>> di,dj,_,_ = phase_gradient_descend(Q)
>>> assert(np.isclose(ti, di, atol=.2))
>>> assert(np.isclose(tj, dj, atol=.2))
"""
assert type(data)==np.ndarray, ("please provide an array")
assert type(W)==np.ndarray, ("please provide an array")
data = cross_spectrum_to_coordinate_list(data, W)
x_hat,_ = gradient_descent(data[:,:-1], data[:,-1], x_0, \
learning_rate=1, n_iters=50)
di,dj = x_hat[1], x_hat[0]
return di,dj
def phase_tpss(Q, W, m, p=1e-4, l=4, j=5, n=3): #wip
"""get phase plane of cross-spectrum through two point step size iteration
find slope of the phase plane through
two point step size for phase correlation minimization
Parameters
----------
Q : numpy.array, size=(_,_), dtype=complex
cross spectrum
W : numpy.array, size=(_,_), dtype=float
weighting matrix
m : numpy.array, size=(2,1)
initial displacement estimate
p : float, default=1e-4
closing error threshold
l : integer, default=4
number of refinements in iteration
j : integer, default=5
number of sub routines during an estimation
n : integer, default=3
mask convergence factor
Returns
-------
m : numpy.array, size=(2,1)
sub-pixel displacement
snr: float
signal-to-noise ratio
See Also
--------
phase_svd, phase_radon, phase_difference, phase_jac
References
----------
.. [1] Barzilai & Borwein. "Two-point step size gradient methods", IMA
journal of numerical analysis. vol.8 pp.141--148, 1988.
.. [2] Leprince, et al. "Automatic and precise orthorectification,
coregistration, and subpixel correlation of satellite images,
application to ground deformation measurements", IEEE Transactions on
geoscience and remote sensing vol.45(6) pp.1529-1558, 2007.
"""
m = np.squeeze(m)
s = 1.
Q = normalize_power_spectrum(Q)
#W = W/np.sum(W) # normalize weights
Fx,Fy = make_fourier_grid(Q)
# initialize
m_min = m.copy().ravel()
m_min += np.array([-.1, -.1])
J_min = phase_jac(Q, m_min, W=W)
g_min = np.sum(J_min, axis=0)
#print('di:{:+.4f}'.format(m[0])+' dj:{:+.4f}'.format(m[1]))
for i in range(l):
k = 1
while True:
J = phase_jac(Q, m, W=W)
g = np.sum(J, axis=0)
# difference
dm,dg = m - m_min, g - g_min
#alpha = np.dot(dm,dg)/np.dot(dg,dg)
alpha = np.dot(dm,dm)/(s*np.dot(dm,dg))
if (np.all(np.abs(m - m_min)<=p)) or (k>=j):
break
# update
m_min, g_min = np.copy(m), np.copy(g)
m -= alpha*dg
print('di:{:+.4f}'.format(m[0])+' dj:{:+.4f}'.format(m[1]))
k += 1
# optimize weighting matrix
#phi = np.abs(QC*np.conjugate(QC))/2
C = 1j*-np.sin(Fx*m[1] + Fy*m[0])
C += np.cos(Fx*m[1] + Fy*m[0])
QC = (Q-C)**2 # np.abs(Q-C)#np.abs(Q-C)
dXY = np.abs(np.multiply(W, QC))
W = W*(1-(dXY/4))**n
# phi = np.multiply(2*W,\
# (1 - np.multiply(np.real(Q),
# +np.cos(Fx*m[1] + Fy*m[0])) - \
# np.multiply(np.imag(Q),
# -np.sin(Fx*m[1] + Fy*m[0]))))
#W = np.multiply(W, (1-(phi/4))**n)
# snr = 1 - (np.sum(phi)/(4*np.sum(W)))
snr = 0
m = -1*m
return (m, snr)
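# Illustrative sketch (not part of the original source): the two-point step size
# (Barzilai-Borwein) rule used above, demonstrated on a toy quadratic. The step
# length is taken from the last two iterates and gradients as
#     alpha = (dm . dm) / (dm . dg)
# instead of a line search.
def _example_two_point_step_size():
    A = np.diag([1., 10.])                       # ill-conditioned toy problem
    grad = lambda x: A.dot(x)                    # gradient of f(x) = .5 x^T A x
    x_old, x = np.array([4., 4.]), np.array([3.5, 3.5])
    for _ in range(25):
        dm, dg = x - x_old, grad(x) - grad(x_old)
        alpha = np.dot(dm, dm) / np.dot(dm, dg)
        x_old, x = x, x - alpha*grad(x)
    return x                                     # should end up near [0, 0]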
def phase_slope_1d(t, rad=.1):
""" estimate the slope and intercept for one-dimensional signal
Parameters
----------
t : numpy.array, size=(m,1), dtype=complex
angle values.
rad : float, range=(0.0,0.5)
radial inclusion, seen from the center
Returns
-------
x_hat : numpy.array, size=(2,1)
estimated slope and intercept.
See also
--------
phase_svd
"""
assert type(t)==np.ndarray, ("please provide an array")
idx_sub = np.arange(np.ceil((0.5-rad)*len(t)), \
np.ceil((0.5+rad)*len(t))+1).astype(int)
y_ang = np.unwrap(np.angle(t[idx_sub]),axis=0)
A = np.vstack([np.transpose(idx_sub-1), np.ones((len(idx_sub)))]).T
x_hat = np.linalg.lstsq(A, y_ang, rcond=None)[0]
return x_hat
def phase_svd(Q, W, rad=0.1):
"""get phase plane of cross-spectrum through single value decomposition
find slope of the phase plane through
single value decomposition
Parameters
----------
Q : numpy.array, size=(m,n), dtype=complex
cross spectrum
W : numpy.array, size=(m,n), dtype=float
weighting matrix
rad : float, range=(0.0,0.5)
radial inclusion, seen from the center
Returns
-------
di,dj : float
sub-pixel displacement
See Also
--------
phase_tpss, phase_radon, phase_difference
References
----------
.. [1] <NAME>. "A subspace identification extension to the phase
correlation method", IEEE transactions on medical imaging, vol. 22(2)
pp.277-280, 2003.
Example
-------
>>> import numpy as np
>>> from ..generic.test_tools import create_sample_image_pair
>>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
>>> Q = phase_corr(im1, im2)
>>> di,dj,_,_ = phase_svd(Q)
>>> assert(np.isclose(ti, di, atol=.2))
>>> assert(np.isclose(tj, dj, atol=.2))
"""
assert type(Q)==np.ndarray, ("please provide an array")
assert type(W)==np.ndarray, ("please provide an array")
rad = np.minimum(rad, 0.5)
(m,n) = Q.shape
Q,W = np.fft.fftshift(Q), np.fft.fftshift(W)
# decompose axis
n_elements = 1
try:
u,s,v = np.linalg.svd(W*Q) # singular-value decomposition
except np.linalg.LinAlgError:  # SVD did not converge
return 0, 0
sig = np.zeros((m,n))
sig[:m,:m] = np.diag(s)
sig = sig[:,:n_elements] # select first element only
# v = v[:n_elements,:]
# reconstruct
# b = u.dot(sig.dot(v))
t_m = np.transpose(v).dot(sig)
t_n = u.dot(sig)# transform
d_n = phase_slope_1d(t_n, rad)
d_m = phase_slope_1d(t_m, rad)
di = -d_n[0][0]*n / (2*np.pi)
dj = -d_m[0][0]*m / (2*np.pi)
return di, dj
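# Illustrative note (not part of the original source): the conversion used just
# above. Under the Fourier convention assumed here, a pure shift d along an axis
# with n samples gives a phase that is (approximately) linear in the sample
# index with slope -2*pi*d/n, so the displacement follows from the fitted slope
# as
#     d = -slope * n / (2*pi)
# which is exactly the expressions returned for di and dj in phase_svd.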
def phase_difference_1d(Q, W=np.array([]), axis=0):
"""get displacement from phase plane along one axis through differencing
find slope of the phase plane through
local differences of the phase angles
Parameters
----------
Q : numpy.array, size=(m,n), dtype=complex
normalized cross spectrum
W : numpy.array, size=(m,n), dtype=boolean
weighting matrix
Returns
-------
dj : float
sub-pixel displacement
See Also
--------
phase_tpss, phase_svd, phase_difference
References
----------
.. [1] <NAME>. "A fast and accurate frequency estimator", | |
field is not
specified, it is assumed to be PREMIUM.
Possible values are `PREMIUM` and `STANDARD`.
:param pulumi.Input[str] port_range: This field is used along with the target field for TargetHttpProxy,
TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway,
TargetPool, TargetInstance.
Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets
addressed to ports in the specified range will be forwarded to target.
Forwarding rules with the same [IPAddress, IPProtocol] pair must have
disjoint port ranges.
Some types of forwarding target have constraints on the acceptable
ports:
* TargetHttpProxy: 80, 8080
* TargetHttpsProxy: 443
* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995,
1883, 5222
* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995,
1883, 5222
* TargetVpnGateway: 500, 4500
:param pulumi.Input[Sequence[pulumi.Input[str]]] ports: This field is used along with internal load balancing and network
load balancer when the forwarding rule references a backend service
and when protocol is not L3_DEFAULT.
A single port or a comma separated list of ports can be configured.
Only packets addressed to these ports will be forwarded to the backends
configured with this forwarding rule.
You can only use one of ports and portRange, or allPorts.
The three are mutually exclusive.
You may specify a maximum of up to 5 ports, which can be non-contiguous.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: A reference to the region where the regional forwarding rule resides.
This field is not applicable to global forwarding rules.
:param pulumi.Input[str] self_link: The URI of the created resource.
:param pulumi.Input[str] service_label: An optional prefix to the service name for this Forwarding Rule.
If specified, will be the first label of the fully qualified service
name.
The label must be 1-63 characters long, and comply with RFC1035.
Specifically, the label must be 1-63 characters long and match the
regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first
character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
This field is only used for INTERNAL load balancing.
:param pulumi.Input[str] service_name: [Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal
load balancing.
:param pulumi.Input[str] subnetwork: The subnetwork that the load balanced IP should belong to for this
Forwarding Rule. This field is only used for INTERNAL load balancing.
If the network specified is in auto subnet mode, this field is
optional. However, if the network is in custom subnet mode, a
subnetwork must be specified.
:param pulumi.Input[str] target: The URL of the target resource to receive the matched traffic.
The target must live in the same region as the forwarding rule.
The forwarded traffic must be of a type appropriate to the target
object.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ForwardingRuleState.__new__(_ForwardingRuleState)
__props__.__dict__["all_ports"] = all_ports
__props__.__dict__["allow_global_access"] = allow_global_access
__props__.__dict__["backend_service"] = backend_service
__props__.__dict__["creation_timestamp"] = creation_timestamp
__props__.__dict__["description"] = description
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["ip_protocol"] = ip_protocol
__props__.__dict__["is_mirroring_collector"] = is_mirroring_collector
__props__.__dict__["label_fingerprint"] = label_fingerprint
__props__.__dict__["labels"] = labels
__props__.__dict__["load_balancing_scheme"] = load_balancing_scheme
__props__.__dict__["name"] = name
__props__.__dict__["network"] = network
__props__.__dict__["network_tier"] = network_tier
__props__.__dict__["port_range"] = port_range
__props__.__dict__["ports"] = ports
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
__props__.__dict__["self_link"] = self_link
__props__.__dict__["service_label"] = service_label
__props__.__dict__["service_name"] = service_name
__props__.__dict__["subnetwork"] = subnetwork
__props__.__dict__["target"] = target
return ForwardingRule(resource_name, opts=opts, __props__=__props__)
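    # Illustrative usage sketch (not part of the generated source): adopting an
    # existing forwarding rule into a Pulumi program via the classmethod above.
    # The project, region and rule name in the id are hypothetical.
    #
    #   import pulumi_gcp as gcp
    #   existing = gcp.compute.ForwardingRule.get(
    #       "imported-rule",
    #       id="projects/my-project/regions/us-central1/forwardingRules/my-rule")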
@property
@pulumi.getter(name="allPorts")
def all_ports(self) -> pulumi.Output[Optional[bool]]:
"""
This field can be used with internal load balancer or network load balancer
when the forwarding rule references a backend service, or with the target
field when it references a TargetInstance. Set this to true to
allow packets addressed to any ports to be forwarded to the backends configured
with this forwarding rule. This can be used when the protocol is TCP/UDP, and it
must be set to true when the protocol is set to L3_DEFAULT.
Cannot be set if port or portRange are set.
"""
return pulumi.get(self, "all_ports")
@property
@pulumi.getter(name="allowGlobalAccess")
def allow_global_access(self) -> pulumi.Output[Optional[bool]]:
"""
If true, clients can access ILB from all regions.
Otherwise only allows from the local region the ILB is located at.
"""
return pulumi.get(self, "allow_global_access")
@property
@pulumi.getter(name="backendService")
def backend_service(self) -> pulumi.Output[Optional[str]]:
"""
A BackendService to receive the matched traffic. This is used only
for INTERNAL load balancing.
"""
return pulumi.get(self, "backend_service")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> pulumi.Output[str]:
"""
[Output Only] Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
An optional description of this resource. Provide this property when
you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[str]:
"""
The IP address that this forwarding rule serves. When a client sends
traffic to this IP address, the forwarding rule directs the traffic to
the target that you specify in the forwarding rule. The
loadBalancingScheme and the forwarding rule's target determine the
type of IP address that you can use. For detailed information, refer
to [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).
An address can be specified either by a literal IP address or a
reference to an existing Address resource. If you don't specify a
reserved IP address, an ephemeral IP address is assigned.
The value must be set to 0.0.0.0 when the target is a targetGrpcProxy
that has validateForProxyless field set to true.
For Private Service Connect forwarding rules that forward traffic to
Google APIs, IP address must be provided.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="ipProtocol")
def ip_protocol(self) -> pulumi.Output[str]:
"""
The IP protocol to which this rule applies.
When the load balancing scheme is INTERNAL, only TCP and UDP are
valid.
Possible values are `TCP`, `UDP`, `ESP`, `AH`, `SCTP`, `ICMP`, and `L3_DEFAULT`.
"""
return pulumi.get(self, "ip_protocol")
@property
@pulumi.getter(name="isMirroringCollector")
def is_mirroring_collector(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether or not this load balancer can be used
as a collector for packet mirroring. To prevent mirroring loops,
instances behind this load balancer will not have their traffic
mirrored even if a PacketMirroring rule applies to them. This
can only be set to true for load balancers that have their
loadBalancingScheme set to INTERNAL.
"""
return pulumi.get(self, "is_mirroring_collector")
@property
@pulumi.getter(name="labelFingerprint")
def label_fingerprint(self) -> pulumi.Output[str]:
"""
Used internally during label updates.
"""
return pulumi.get(self, "label_fingerprint")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Labels to apply to this forwarding rule. A list of key->value pairs.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter(name="loadBalancingScheme")
def load_balancing_scheme(self) -> pulumi.Output[Optional[str]]:
"""
This signifies what the ForwardingRule will be used for and can be
EXTERNAL, EXTERNAL_MANAGED, INTERNAL, or INTERNAL_MANAGED. EXTERNAL is used for Classic
Cloud VPN gateways, protocol forwarding to VMs from an external IP address,
and HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP load balancers.
INTERNAL is used for protocol forwarding to VMs from an internal IP address,
and internal TCP/UDP load balancers.
EXTERNAL_MANAGED is used for regional external HTTP(S) load balancers.
INTERNAL_MANAGED is used for internal HTTP(S) load balancers.
Default value is `EXTERNAL`.
Possible values are `EXTERNAL`, `EXTERNAL_MANAGED`, `INTERNAL`, and `INTERNAL_MANAGED`.
"""
return pulumi.get(self, "load_balancing_scheme")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource; provided by the client when the resource is
created. The name must be 1-63 characters long, and comply with
RFC1035. Specifically, the name must be 1-63 characters long and match
the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the
first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def network(self) -> pulumi.Output[str]:
"""
For internal load balancing, this field identifies the network that
the load balanced IP should belong to for this Forwarding Rule. If
this field is not specified, the default network will be used.
This field is only used for INTERNAL load balancing.
"""
return pulumi.get(self, "network")
!= set(self.get_permissions(user)):
self.set_permissions(user, permissions, save=False)
permissions_changed[user._id] = permissions
self.set_visible(user, user_dict['visible'], auth=auth)
users.append(user)
user_ids.append(user_dict['id'])
for user in self.contributors:
if user._id in user_ids:
to_retain.append(user)
else:
to_remove.append(user)
# TODO: Move to validator or helper @jmcarp
admins = [
user for user in users
if self.has_permission(user, 'admin')
and user.is_registered
]
if users is None or not admins:
raise ValueError(
'Must have at least one registered admin contributor'
)
if to_retain != users:
self.add_log(
action=NodeLog.CONTRIB_REORDERED,
params={
'project': self.parent_id,
'node': self._id,
'contributors': [
user._id
for user in users
],
},
auth=auth,
save=False,
)
if to_remove:
self.remove_contributors(to_remove, auth=auth, save=False)
self.contributors = users
if permissions_changed:
self.add_log(
action=NodeLog.PERMISSIONS_UPDATED,
params={
'project': self.parent_id,
'node': self._id,
'contributors': permissions_changed,
},
auth=auth,
save=False,
)
# Update list of visible IDs
self.update_visible_ids()
if save:
self.save()
with TokuTransaction():
if to_remove or permissions_changed and ['read'] in permissions_changed.values():
write_permissions_revoked.send(self)
def add_contributor(self, contributor, permissions=None, visible=True,
auth=None, log=True, save=False):
"""Add a contributor to the project.
:param User contributor: The contributor to be added
:param list permissions: Permissions to grant to the contributor
:param bool visible: Contributor is visible in project dashboard
:param Auth auth: All the auth information including user, API key
:param bool log: Add log to self
:param bool save: Save after adding contributor
:returns: Whether contributor was added
"""
MAX_RECENT_LENGTH = 15
# If user is merged into another account, use master account
contrib_to_add = contributor.merged_by if contributor.is_merged else contributor
if contrib_to_add not in self.contributors:
self.contributors.append(contrib_to_add)
if visible:
self.set_visible(contrib_to_add, visible=True, log=False)
# Add default contributor permissions
permissions = permissions or DEFAULT_CONTRIBUTOR_PERMISSIONS
for permission in permissions:
self.add_permission(contrib_to_add, permission, save=False)
# Add contributor to recently added list for user
if auth is not None:
user = auth.user
if contrib_to_add in user.recently_added:
user.recently_added.remove(contrib_to_add)
user.recently_added.insert(0, contrib_to_add)
while len(user.recently_added) > MAX_RECENT_LENGTH:
user.recently_added.pop()
if log:
self.add_log(
action=NodeLog.CONTRIB_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': [contrib_to_add._primary_key],
},
auth=auth,
save=False,
)
if save:
self.save()
contributor_added.send(self, contributor=contributor, auth=auth)
return True
else:
return False
def add_contributors(self, contributors, auth=None, log=True, save=False):
"""Add multiple contributors
:param contributors: A list of User objects to add as contributors.
:param auth: All the auth information including user, API key.
:param log: Add log to self
:param save: Save after adding contributor
"""
for contrib in contributors:
self.add_contributor(
contributor=contrib['user'], permissions=contrib['permissions'],
visible=contrib['visible'], auth=auth, log=False, save=False,
)
if log and contributors:
self.add_log(
action=NodeLog.CONTRIB_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': [
contrib['user']._id
for contrib in contributors
],
},
auth=auth,
save=False,
)
if save:
self.save()
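# Illustrative sketch (not part of the original source): the dictionary format
# expected by add_contributors above. The user objects and Auth instance are
# hypothetical placeholders from the surrounding codebase.
#
#   node.add_contributors(
#       contributors=[
#           {'user': some_user, 'permissions': ['read', 'write'], 'visible': True},
#           {'user': other_user, 'permissions': ['read'], 'visible': False},
#       ],
#       auth=Auth(user=creator),
#       save=True,
#   )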
def add_unregistered_contributor(self, fullname, email, auth,
permissions=None, save=False):
"""Add a non-registered contributor to the project.
:param str fullname: The full name of the person.
:param str email: The email address of the person.
:param Auth auth: Auth object for the user adding the contributor.
:returns: The added contributor
:raises: DuplicateEmailError if user with given email is already in the database.
"""
# Create a new user record
contributor = User.create_unregistered(fullname=fullname, email=email)
contributor.add_unclaimed_record(node=self, referrer=auth.user,
given_name=fullname, email=email)
try:
contributor.save()
except ValidationValueError: # User with same email already exists
contributor = get_user(username=email)
# Unregistered users may have multiple unclaimed records, so
# only raise error if user is registered.
if contributor.is_registered or self.is_contributor(contributor):
raise
contributor.add_unclaimed_record(node=self, referrer=auth.user,
given_name=fullname, email=email)
contributor.save()
self.add_contributor(
contributor, permissions=permissions, auth=auth,
log=True, save=False,
)
self.save()
return contributor
def set_privacy(self, permissions, auth=None):
"""Set the permissions for this node.
:param permissions: A string, either 'public' or 'private'
:param auth: All the auth information including user, API key.
"""
if permissions == 'public' and not self.is_public:
self.is_public = True
elif permissions == 'private' and self.is_public:
self.is_public = False
else:
return False
# After set permissions callback
for addon in self.get_addons():
message = addon.after_set_privacy(self, permissions)
if message:
status.push_status_message(message)
action = NodeLog.MADE_PUBLIC if permissions == 'public' else NodeLog.MADE_PRIVATE
self.add_log(
action=action,
params={
'project': self.parent_id,
'node': self._primary_key,
},
auth=auth,
save=False,
)
self.save()
return True
# TODO: Move to wiki add-on
def get_wiki_page(self, name=None, version=None, id=None):
from website.addons.wiki.model import NodeWikiPage
if name:
name = (name or '').strip()
key = to_mongo_key(name)
try:
if version and (isinstance(version, int) or version.isdigit()):
id = self.wiki_pages_versions[key][int(version) - 1]
elif version == 'previous':
id = self.wiki_pages_versions[key][-2]
elif version == 'current' or version is None:
id = self.wiki_pages_current[key]
else:
return None
except (KeyError, IndexError):
return None
return NodeWikiPage.load(id)
# TODO: Move to wiki add-on
def update_node_wiki(self, name, content, auth):
"""Update the node's wiki page with new content.
:param name: A string, the page's name, e.g. ``"home"``.
:param content: A string, the posted content.
:param auth: All the auth information including user, API key.
"""
from website.addons.wiki.model import NodeWikiPage
name = (name or '').strip()
key = to_mongo_key(name)
if key not in self.wiki_pages_current:
if key in self.wiki_pages_versions:
version = len(self.wiki_pages_versions[key]) + 1
else:
version = 1
else:
current = NodeWikiPage.load(self.wiki_pages_current[key])
current.is_current = False
version = current.version + 1
current.save()
new_page = NodeWikiPage(
page_name=name,
version=version,
user=auth.user,
is_current=True,
node=self,
content=content
)
new_page.save()
# check if the wiki page already exists in versions (existed once and is now deleted)
if key not in self.wiki_pages_versions:
self.wiki_pages_versions[key] = []
self.wiki_pages_versions[key].append(new_page._primary_key)
self.wiki_pages_current[key] = new_page._primary_key
self.add_log(
action=NodeLog.WIKI_UPDATED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': new_page.page_name,
'page_id': new_page._primary_key,
'version': new_page.version,
},
auth=auth,
log_date=new_page.date,
save=False,
)
self.save()
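# Illustrative note (not part of the original source): shape of the two wiki
# bookkeeping dictionaries maintained above, keyed by the mongo-safe page key.
# The primary keys shown are hypothetical.
#
#   self.wiki_pages_versions = {'home': ['abc12', 'def34'],   # all versions, oldest first
#                               'my_page': ['ghi56']}
#   self.wiki_pages_current  = {'home': 'def34',              # key of the current version
#                               'my_page': 'ghi56'}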
# TODO: Move to wiki add-on
def rename_node_wiki(self, name, new_name, auth):
"""Rename the node's wiki page with new name.
:param name: A string, the page's name, e.g. ``"My Page"``.
:param new_name: A string, the new page's name, e.g. ``"My Renamed Page"``.
:param auth: All the auth information including user, API key.
"""
# TODO: Fix circular imports
from website.addons.wiki.exceptions import (
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
)
name = (name or '').strip()
key = to_mongo_key(name)
new_name = (new_name or '').strip()
new_key = to_mongo_key(new_name)
page = self.get_wiki_page(name)
if key == 'home':
raise PageCannotRenameError('Cannot rename wiki home page')
if not page:
raise PageNotFoundError('Wiki page not found')
if (new_key in self.wiki_pages_current and key != new_key) or new_key == 'home':
raise PageConflictError(
'Page already exists with name {0}'.format(
new_name,
)
)
# rename the page first in case we hit a validation exception.
old_name = page.page_name
page.rename(new_name)
# TODO: merge historical records like update (prevents log breaks)
# transfer the old page versions/current keys to the new name.
if key != new_key:
self.wiki_pages_versions[new_key] = self.wiki_pages_versions[key]
del self.wiki_pages_versions[key]
self.wiki_pages_current[new_key] = self.wiki_pages_current[key]
del self.wiki_pages_current[key]
if key in self.wiki_private_uuids:
self.wiki_private_uuids[new_key] = self.wiki_private_uuids[key]
del self.wiki_private_uuids[key]
self.add_log(
action=NodeLog.WIKI_RENAMED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': page.page_name,
'page_id': page._primary_key,
'old_page': old_name,
'version': page.version,
},
auth=auth,
save=False,
)
self.save()
def delete_node_wiki(self, name, auth):
name = (name or '').strip()
key = to_mongo_key(name)
page = self.get_wiki_page(key)
del self.wiki_pages_current[key]
self.add_log(
action=NodeLog.WIKI_DELETED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': page.page_name,
'page_id': page._primary_key,
},
auth=auth,
save=False,
)
self.save()
def get_stats(self, detailed=False):
if detailed:
raise NotImplementedError(
'Detailed stats exist, but are not yet implemented.'
)
else:
return get_basic_counters('node:%s' % self._primary_key)
# TODO: Deprecate this; it duplicates much of what serialize_project already
# does
def serialize(self):
"""Dictionary representation of node that is nested within a NodeLog's
representation.
"""
# TODO: incomplete implementation
return {
'id': str(self._primary_key),
'category': self.category_display,
'node_type': self.project_or_component,
'url': self.url,
# TODO: Titles shouldn't contain escaped HTML in the first place
'title': html_parser.unescape(self.title),
'api_url': self.api_url,
'is_public': self.is_public,
'is_registration': self.is_registration
}
@Node.subscribe('before_save')
def validate_permissions(schema, instance):
"""Ensure that user IDs in `contributors` and `permissions` match.
"""
node = instance
contributor_ids = set([user._id for user in node.contributors])
permission_ids = set(node.permissions.keys())
mismatched_contributors = contributor_ids.difference(permission_ids)
if mismatched_contributors:
raise ValidationValueError(
'Contributors {0} missing from `permissions` on node {1}'.format(
', '.join(mismatched_contributors),
node._id,
)
)
mismatched_permissions = permission_ids.difference(contributor_ids)
if mismatched_permissions:
raise ValidationValueError(
'Permission keys {0} missing from `contributors` on node {1}'.format(
', '.join(mismatched_permissions),
node._id,
)
)
@Node.subscribe('before_save')
def validate_visible_contributors(schema, instance):
"""Ensure that user IDs in `contributors` and `visible_contributor_ids`
match.
"""
node = instance
for user_id in node.visible_contributor_ids:
if user_id not in node.contributors:
raise ValidationValueError(
('User {0} is in `visible_contributor_ids` but not in '
'`contributors` on node {1}').format(
user_id,
node._id,
)
)
class WatchConfig(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
node = fields.ForeignField('Node', backref='watched')
digest = fields.BooleanField(default=False)
immediate = fields.BooleanField(default=False)
def __repr__(self):
return '<WatchConfig(node="{self.node}")>'.format(self=self)
class PrivateLink(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
key = fields.StringField(required=True)
name = fields.StringField()
is_deleted = fields.BooleanField(default=False)
<reponame>tvorogme/dataops
#!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python imports
import imp
import os, re
import traceback
import inspect
from os.path import dirname
# Local imports
from resource_management.core.logger import Logger
from ambari_server.serverConfiguration import get_ambari_properties, get_ambari_version
from resource_management.libraries.functions.get_bare_principal import get_bare_principal
SCRIPT_DIR = dirname(os.path.abspath(__file__))
STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../../')
PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
try:
with open(PARENT_FILE, 'rb') as fp:
service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE,
('.py', 'rb', imp.PY_SOURCE))
except Exception as e:
traceback.print_exc()
print "Failed to load parent"
DB_TYPE_DEFAULT_PORT_MAP = {
"mysql": "3306",
"oracle": "1521",
"postgresql": "5432"
}
class STREAMLINE050ServiceAdvisor(service_advisor.ServiceAdvisor):
def __init__(self, *args, **kwargs):
self.as_super = super(STREAMLINE050ServiceAdvisor, self)
self.as_super.__init__(*args, **kwargs)
Logger.initialize_logger()
def getServiceConfigurationRecommenderDict(self):
"""
Recommend configurations to set. Streamline does not have any recommendations in this version.
"""
Logger.info(
"Class: %s, Method: %s. Recommending Service Configurations." %
(self.__class__.__name__, inspect.stack()[0][3]))
return self.as_super.getServiceConfigurationRecommenderDict()
def getServiceConfigurationValidators(self):
"""
Get a list of errors. Streamline does not have any validations in this version.
"""
Logger.info(
"Class: %s, Method: %s. Validating Service Component Layout." %
(self.__class__.__name__, inspect.stack()[0][3]))
return self.as_super.getServiceConfigurationValidators()
def recommendConfigurations(self, configurations, clusterData, services,
hosts):
"""
Recommend configurations for this service.
"""
Logger.info(
"Class: %s, Method: %s. Recommending Service Configurations." %
(self.__class__.__name__, inspect.stack()[0][3]))
pass
def getServiceConfigurationRecommendations(self, configurations,
clusterData, services, hosts):
Logger.info(
"Class: %s, Method: %s. Get Service Configuration Recommendations."
% (self.__class__.__name__, inspect.stack()[0][3]))
servicesList = [
service["StackServices"]["service_name"]
for service in services["services"]
]
security_enabled = self.isSecurityEnabled(services)
if 'AMBARI_METRICS' in servicesList:
putAmsSiteProperty = self.putProperty(configurations, "ams-site")
putAmsSiteProperty(
'timeline.metrics.downsampler.event.metric.patterns',
'topology\.%')
if 'STORM' in servicesList and security_enabled:
storm_site = self.getServicesSiteProperties(services, "storm-site")
streamline_env = self.getServicesSiteProperties(
services, "streamline-env")
if storm_site is not None and streamline_env is not None:
putStormSiteProperty = self.putProperty(
configurations, "storm-site", services)
putStormSiteAttributes = self.putPropertyAttribute(
configurations, "storm-site")
storm_env = self.getServicesSiteProperties(
services, "storm-env")
storm_nimbus_impersonation_acl = storm_site[
"nimbus.impersonation.acl"] if "nimbus.impersonation.acl" in storm_site else None
streamline_env = self.getServicesSiteProperties(
services, "streamline-env")
_streamline_principal_name = streamline_env[
'streamline_principal_name'] if 'streamline_principal_name' in streamline_env else None
if _streamline_principal_name is not None and storm_nimbus_impersonation_acl is not None:
streamline_bare_principal = get_bare_principal(
_streamline_principal_name)
storm_nimbus_impersonation_acl = "{ " + streamline_bare_principal + " : {hosts: ['*'], groups: ['*']}, {{storm_bare_jaas_principal}} : {hosts: ['*'], groups: ['*']}}"
putStormSiteProperty('nimbus.impersonation.acl',
storm_nimbus_impersonation_acl)
storm_nimbus_autocred_plugin_classes = storm_site[
"nimbus.autocredential.plugins.classes"] if "nimbus.autocredential.plugins.classes" in storm_site else None
# Here storm_nimbus_autocred_plugin_classes is resulting in none. There is no nimbus.autocredential.plugins.classes in storm-site.xml
if storm_nimbus_autocred_plugin_classes is not None:
new_storm_nimbus_autocred_plugin_classes = [
'org.apache.storm.hdfs.security.AutoHDFS',
'org.apache.storm.hbase.security.AutoHBase',
'org.apache.storm.hive.security.AutoHive'
]
new_conf = self.appendToYamlString(
storm_nimbus_autocred_plugin_classes,
new_storm_nimbus_autocred_plugin_classes)
putStormSiteProperty(
"nimbus.autocredential.plugins.classes", new_conf)
else:
putStormSiteProperty(
"nimbus.autocredential.plugins.classes",
"['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']"
)
storm_nimbus_credential_renewer_classes = storm_site[
"nimbus.credential.renewers.classes"] if "nimbus.credential.renewers.classes" in storm_site else None
if storm_nimbus_credential_renewer_classes is not None:
new_storm_nimbus_credential_renewer_classes_array = [
'org.apache.storm.hdfs.security.AutoHDFS',
'org.apache.storm.hbase.security.AutoHBase',
'org.apache.storm.hive.security.AutoHive'
]
new_conf = self.appendToYamlString(
storm_nimbus_credential_renewer_classes,
new_storm_nimbus_credential_renewer_classes_array)
putStormSiteProperty(
"nimbus.autocredential.plugins.classes", new_conf)
else:
putStormSiteProperty(
"nimbus.credential.renewers.classes",
"['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']"
)
putStormSiteProperty("nimbus.credential.renewers.freq.secs",
"82800")
properties = get_ambari_properties()
ambari_version = get_ambari_version(properties)
if not (ambari_version) or not (ambari_version.startswith('2.5')):
putStreamlineLogSearchConfAttribute = self.putPropertyAttribute(
configurations, "streamline-logsearch-conf")
putStreamlineLogSearchConfAttribute('service_name', 'visible',
'false')
putStreamlineLogSearchConfAttribute('component_mappings',
'visible', 'false')
putStreamlineLogSearchConfAttribute('content', 'visible', 'false')
pass
def validateSTREAMLINEConfigurations(self, properties, recommendedDefaults,
configurations, services, hosts):
streamline_common = properties
validationItems = []
warning_message = ""
url_error_message = ""
password_error_message = ""
# Find number of services installed, get them all and find streamline service json obj in them.
number_services = len(services['services'])
for each_service in range(0, number_services):
if services['services'][each_service]['components'][0][
'StackServiceComponents']['service_name'] == 'STREAMLINE':
# Warnings related to Streamline running in HA mode
num_streamline_nodes = len(
services['services'][each_service]['components'][0]
['StackServiceComponents']['hostnames'])
if int(num_streamline_nodes) > 1:
if streamline_common['jar.storage.type'] == "local":
warning_message += "You choose 'local' option. Please choose HDFS or Database option. " \
"If your jar.storage.type=Database and if you choose MYSQL as Database, " \
"please make sure to set value of MYSQL's property max_allowed_packet larger " \
"than size of your udf or custom jar as it will be stored as blob in MYSQL."
validationItems.append({
"config-name":
'jar.storage.type',
"item":
self.getWarnItem(warning_message)
})
if streamline_common['jar.storage.type'] == "database":
warning_message += "If choose 'Database' option. If you choose MYSQL as Database, " \
"please make sure to set value of MYSQL's property max_allowed_packet larger " \
"than size of your udf or custom jar as it will be stored as blob in MYSQL."
validationItems.append({
"config-name":
'jar.storage.type',
"item":
self.getWarnItem(warning_message)
})
# Errors related to httpProxyServer for streamline
http_proxy_server_url = streamline_common['httpProxyServer']
if http_proxy_server_url:
from urlparse import urlparse
url_list = urlparse(http_proxy_server_url)
# if missing protocol or hostname:port_number
if (not url_list[0] or not url_list[1]):
url_error_message += "Please enter httpProxyServer in following format : protocol_name://httpProxy_host_name:port_number"
validationItems.append({
"config-name":
'httpProxyServer',
"item":
self.getErrorItem(url_error_message)
})
else:
try:
httpProxy_hostname = url_list[1].split(":")[0]
httpProxy_port = url_list[1].split(":")[1]
# empty hostname or empty port_number
if len(httpProxy_hostname) < 1 or len(
httpProxy_port) < 1:
url_error_message += "Please enter httpProxyServer in following format : protocol_name://httpProxy_host_name:port_number"
validationItems.append({
"config-name":
'httpProxyServer',
"item":
self.getErrorItem(url_error_message)
})
# only hostname or only port_number
except:
url_error_message += "Please enter httpProxyServer in following format : protocol_name://httpProxy_host_name:port_number"
validationItems.append({
"config-name":
'httpProxyServer',
"item":
self.getErrorItem(url_error_message)
})
# Errors related to absence of httpProxyServer and httpProxyPassword for streamline.
http_proxy_server_password = streamline_common[
'httpProxyPassword']
http_proxy_server_username = streamline_common[
'httpProxyUsername']
if http_proxy_server_url and not (
(http_proxy_server_password and http_proxy_server_username)
or (not http_proxy_server_password
and not http_proxy_server_username)):
if not http_proxy_server_password:
password_error_message = "Please provide the httpProxyPassword"
validationItems.append({
"config-name":
'httpProxyPassword',
"item":
self.getErrorItem(password_error_message)
})
elif not http_proxy_server_username:
username_error_message = "Please provide the httpProxyUsername"
validationItems.append({
"config-name":
'httpProxyUsername',
"item":
self.getErrorItem(username_error_message)
})
elif not http_proxy_server_url and (
http_proxy_server_password
or http_proxy_server_username):
url_error_message += "Please enter httpProxyServer in following format : protocol_name://httpProxy_host_name:port_number"
validationItems.append({
"config-name":
'httpProxyServer',
"item":
self.getErrorItem(url_error_message)
})
return self.toConfigurationValidationProblems(validationItems,
"streamline-common")
def validateConfigurationsForSite(self, configurations,
recommendedDefaults, services, hosts,
siteName, method):
properties = self.getSiteProperties(configurations, siteName)
if properties:
if siteName == 'streamline-common':
return method(properties, None, configurations, services,
hosts)
else:
return super(STREAMLINE050ServiceAdvisor,
self).validateConfigurationsForSite(
configurations, recommendedDefaults, services,
hosts, siteName, method)
else:
return []
def getServiceConfigurationsValidationItems(
self, configurations, recommendedDefaults, services, hosts):
"""
Validate configurations for the service. Return a list of errors.
"""
Logger.info(
"Class: %s, Method: %s. Validating Service Configuration Items." %
(self.__class__.__name__, inspect.stack()[0][3]))
siteName = "streamline-common"
method = self.validateSTREAMLINEConfigurations
items = self.validateConfigurationsForSite(
configurations, recommendedDefaults, services, hosts, siteName,
method)
return items
def getCardinalitiesDict(self, hosts):
Logger.info("Called STREAMLINE getCardinalitiesDict")
return {'STREAMLINE_SERVER': {"min": 1}}
def putProperty(self, config, configType, services=None):
userConfigs = {}
changedConfigs = []
# if services parameter, prefer values, set by user
if services:
if 'configurations' in services.keys():
userConfigs = services['configurations']
if 'changed-configurations' in services.keys():
changedConfigs = services["changed-configurations"]
if configType not in config:
config[configType] = {}
if "properties" not in config[configType]:
config[configType]["properties"] = {}
def appendProperty(key, value):
# If property exists in changedConfigs, do not override, use user defined property
if self.__isPropertyInChangedConfigs(configType, key,
changedConfigs):
config[configType]["properties"][key] = userConfigs[
configType]['properties'][key]
else:
config[configType]["properties"][key] = str(value)
return appendProperty
def __isPropertyInChangedConfigs(self, configType, propertyName,
changedConfigs):
for changedConfig in changedConfigs:
if changedConfig['type'] == configType and changedConfig[
'name'] == propertyName:
return True
return False
def putPropertyAttribute(self, config, configType):
if configType not in config:
config[configType] = {}
def appendPropertyAttribute(key, attribute, attributeValue):
if "property_attributes" not in config[configType]:
if "property_attributes" not in config[configType]:
config[configType]["property_attributes"] = {}
if key not in config[configType]["property_attributes"]:
config[configType]["property_attributes"][key] = {}
config[configType]["property_attributes"][key][
attribute] = attributeValue if isinstance(
attributeValue, list) else str(attributeValue)
return appendPropertyAttribute
def getSiteProperties(self, configurations, siteName):
siteConfig = configurations.get(siteName)
if siteConfig is None:
return None
return siteConfig.get("properties")
def getServicesSiteProperties(self, services, siteName):
configurations = services.get("configurations")
if not configurations:
return None
siteConfig = configurations.get(siteName)
if siteConfig is None:
return None
return siteConfig.get("properties")
def getServiceComponentLayoutValidations(self, services, hosts):
items = super(STREAMLINE050ServiceAdvisor,
self).getServiceComponentLayoutValidations(
services, hosts)
return items
def appendToYamlString(self, yaml_string, list_classes):
updated_yaml_string = ""
try:
strip_yaml_str = re.sub('[\[\]\']', ' ', yaml_string)
klass_array = [x.strip() for x in strip_yaml_str.split(',')]
if yaml_string:
for klass in list_classes:
klass = klass.strip()
klass_array.append(klass)
klass_set = set(klass_array)
klass_list = [("'" + e + "'") for e in klass_set]
updated_yaml_string = "[" + ",".join(klass_list) + "]"
except Exception:
klass_list = [("'" + e + "'") for e in list_classes]
updated_yaml_string = "[" + ",".join(klass_list) + "]"
return updated_yaml_string
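    # Illustrative sketch (not part of the original source): what
    # appendToYamlString does to an existing YAML-style list string. The merged
    # classes pass through a set, so duplicates are dropped and the output
    # order is not guaranteed.
    #
    #   advisor.appendToYamlString(
    #       "['org.apache.storm.hdfs.security.AutoHDFS']",
    #       ['org.apache.storm.hbase.security.AutoHBase'])
    #   # -> "['org.apache.storm.hdfs.security.AutoHDFS','org.apache.storm.hbase.security.AutoHBase']"
    #   #    (element order may differ)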
<reponame>thirtywang/OpenPNM<filename>OpenPNM/Network/__DelaunayVoronoiDual__.py
"""
===============================================================================
DelaunayVoronoiDual: Generate a random network with complementary Delaunay and
Voronoi networks, including connectings between them
===============================================================================
"""
from OpenPNM.Network import tools
import scipy as sp
import scipy.spatial as sptl
from OpenPNM.Network import GenericNetwork
from OpenPNM.Base import logging
logger = logging.getLogger(__name__)
class DelaunayVoronoiDual(GenericNetwork):
r"""
A dual network based on complementary Voronoi and Delaunay networks. A
Delaunay tessellation or triangulation is performed on randomly distributed
base points, then the corresponding Voronoi diagram is generated. Finally,
each Delaunay node is connected to its neighboring Voronoi vertices to
create interaction between the two networks.
All pores and throats are labelled according to their network (i.e.
'pore.delaunay'), so they can each be assigned to a different Geometry.
The dual-nature of this network is meant for modeling transport in the void
and solid space simultaneously by treating one network (i.e. Delaunay) as
voids and the other (i.e. Voronoi) as solid. Interaction such as heat
transfer between the solid and void can be accomplished via the
interconnections between the Delaunay and Voronoi nodes.
Parameters
----------
num_points : integer
The number of random base points to distribute inside the domain.
These points will become connected by the Delaunay triangulation. The
points will be generated by calling ``generate_base_points`` in
Network.tools.
points : array_like (num_points x 3)
A list of coordinates for pre-generated points, typically produced
using ``generate_base_points`` in Network.tools. Note that base points
should extend beyond the ``domain_size`` so that degenerate Voronoi
points can be trimmed.
domain_size : array_like
The size and shape of the domain using for generating and trimming
excess points. The argument is treated as follows:
**sphere** : If a scalar or single element list is received, it's
treated as the radius [r] of a sphere centered on [0, 0, 0].
**cylinder** : If a two-element list is received it's treated as
the radius and height of a cylinder [r, z] whose central axis
starts at [0, 0, 0] and extends in the positive z-direction.
**rectangle** : If a three element list is received, it's treated
as the outer corner of rectangle [x, y, z] whose opposite corner
lies at [0, 0, 0].
By default, a domain size of [1, 1, 1] is used.
trim_domain : Boolean
If true (default) all nodes outside the given ``domain_size`` are
removed, along with all their throats. Setting this argument to False
will skip this removal if an alternative manual trimming is preferred.
Examples
--------
Points will be automatically generated if none are given:
>>> import OpenPNM as op
>>> net = op.Network.DelaunayVoronoiDual(num_points=50)
The resulting network can be quickly visualized with
``op.Network.tools.plot_connections(net)``. This plotting function also
supports showing limited sets of throats for clearer inspection, such as
``op.Network.tools.plot_connections(net, throats=net.throats('surface'))``.
See its documentation for details.
The default shape is a unit cube, but it's also possible to generate
cylinders and spheres by specifying the domain size as [r, z] or [r],
respectively:
>>> sph = op.Network.DelaunayVoronoiDual(num_points=50, domain_size=[1])
>>> cyl = op.Network.DelaunayVoronoiDual(num_points=50, domain_size=[1, 1])
More control over the distribution of base points can be achieved by
calling ``Network.tools.generate_base_points`` directly:
>>> pts = op.Network.tools.generate_base_points(num_points=50,
... domain_size=[1, 5])
>>> pts -= [0, 0, 1] # Shift points in the negative z-direction
>>> cyl = op.Network.DelaunayVoronoiDual(points=pts, domain_size=[1, 3])
All points lying below the z=0 plane and above the z=3 plane are trimmed,
which gives the network *rough* ends since the points near the plane of
reflection are all trimmed.
"""
def __init__(self, num_points=None, points=None, domain_size=[1, 1, 1],
trim_domain=True, **kwargs):
super().__init__(**kwargs)
if points is None:
if num_points is None:
raise Exception('Must specify either "points" or "num_points"')
points = tools.generate_base_points(num_points=num_points,
domain_size=domain_size)
# Perform tessellation
vor = sptl.Voronoi(points=points)
# Combine points
pts_vor = vor.vertices
pts_all = sp.vstack((points, pts_vor))
Npts = sp.size(points, 0)
Nvor = sp.size(pts_vor, 0)
Nall = Nvor + Npts
# Create adjacency matrix in lil format for quick matrix construction
am = sp.sparse.lil_matrix((Nall, Nall))
for ridge in vor.ridge_dict.keys():
# Make Delaunay-to-Delaunay connections
[am.rows[i].extend([ridge[0], ridge[1]]) for i in ridge]
row = vor.ridge_dict[ridge]
if -1 not in row:
# Index Voronoi vertex numbers by Npts
row = [i + Npts for i in row]
# Make Voronoi-to-Delaunay connections
[am.rows[i].extend(row) for i in ridge]
# Make Voronoi-to-Voronoi connections
row.append(row[0])
[am.rows[row[i]].append(row[i+1]) for i in range(len(row)-1)]
# Ensure connections are made symmetrically
[am.rows[row[i+1]].append(row[i]) for i in range(len(row)-1)]
# Finalize adjacency matrix by assigning data values to each location
am.data = am.rows # Values don't matter, only shape, so use 'rows'
# Retrieve upper triangle and convert to csr to remove duplicates
am = sp.sparse.triu(A=am, k=1, format='csr')
# Convert to COO format for OpenPNM compatibility
am = am.tocoo()
# Translate adjacency matrix and points to OpenPNM format
coords = pts_all
conns = sp.vstack((am.row, am.col)).T
Np = sp.size(coords, axis=0)
Nt = sp.size(conns, axis=0)
self.update({'pore.all': sp.ones((Np, ), dtype=bool)})
self.update({'throat.all': sp.ones((Nt, ), dtype=bool)})
self['throat.conns'] = conns
self['pore.coords'] = sp.around(coords, decimals=10)
# Label all pores and throats by type
self['pore.delaunay'] = False
self['pore.delaunay'][0:Npts] = True
self['pore.voronoi'] = False
self['pore.voronoi'][Npts:] = True
# Label throats between Delaunay pores
self['throat.delaunay'] = False
Ts = sp.all(self['throat.conns'] < Npts, axis=1)
self['throat.delaunay'][Ts] = True
# Label throats between Voronoi pores
self['throat.voronoi'] = False
Ts = sp.all(self['throat.conns'] >= Npts, axis=1)
self['throat.voronoi'][Ts] = True
# Label throats connecting a Delaunay and a Voronoi pore
self['throat.interconnect'] = False
Ts = self.throats(labels=['delaunay', 'voronoi'], mode='not')
self['throat.interconnect'][Ts] = True
# Trim all pores that lie outside of the specified domain
if trim_domain:
self._trim_domain(domain_size=domain_size)
def _trim_domain(self, domain_size=None):
r"""
Trims pores that lie outside the specified domain.
Parameters
----------
domain_size : array_like
The size and shape of the domain beyond which points should be
trimmed. The argument is treated as follows:
**sphere** : If a scalar or single-element list is received, it's
treated as the radius [r] of a sphere centered on [0, 0, 0].
**cylinder** : If a two-element list is received, it's treated as
the radius and height of a cylinder [r, z] whose central axis
starts at [0, 0, 0] and extends in the positive z-direction.
**rectangle** : If a three-element list is received, it's treated
as the outer corner of a rectangular box [x, y, z] whose opposite
corner lies at [0, 0, 0].
Notes
-----
This function assumes that some Delaunay nodes exist outside the
given ``domain_size``. These points can either be the result of
reflecting the base points or simply creating points beyond the
domain. Without these extra points the Voronoi network would contain
points at infinity.
"""
# Label external pores for trimming below
self['pore.external'] = False
if len(domain_size) == 1: # Spherical
# Trim external Delaunay pores
r = sp.sqrt(sp.sum(self['pore.coords']**2, axis=1))
Ps = (r > domain_size)*self['pore.delaunay']
self['pore.external'][Ps] = True
# Trim external Voronoi pores
Ps = ~self['pore.external']*self['pore.delaunay']
Ps = self.find_neighbor_pores(pores=Ps)
Ps = ~self.tomask(pores=Ps)*self['pore.voronoi']
self['pore.external'][Ps] = True
elif len(domain_size) == 2: # Cylindrical
# Trim external Delaunay pores outside radius
r = sp.sqrt(sp.sum(self['pore.coords'][:, [0, 1]]**2, axis=1))
Ps = (r > domain_size[0])*self['pore.delaunay']
self['pore.external'][Ps] = True
# Trim external Delaunay pores above and below cylinder
Ps1 = self['pore.coords'][:, 2] > domain_size[1]
Ps2 = self['pore.coords'][:, 2] < 0
Ps = self['pore.delaunay']*(Ps1 + Ps2)
self['pore.external'][Ps] = True
# Trim external Voronoi pores
Ps = ~self['pore.external']*self['pore.delaunay']
Ps = self.find_neighbor_pores(pores=Ps)
Ps = ~self.tomask(pores=Ps)*self['pore.voronoi']
self['pore.external'][Ps] = True
elif len(domain_size) == 3: # Rectilinear
# Trim external Delaunay pores
Ps1 = sp.any(self['pore.coords'] > domain_size, axis=1)
Ps2 = sp.any(self['pore.coords'] < [0, 0, 0], axis=1)
Ps = self['pore.delaunay']*(Ps1 + Ps2)
self['pore.external'][Ps] = True
# Trim external Voronoi pores
Ps = ~self['pore.external']*self['pore.delaunay']
Ps = self.find_neighbor_pores(pores=Ps)
Ps = ~self.tomask(pores=Ps)*self['pore.voronoi']
self['pore.external'][Ps] = True
# Begin process of removing, adjusting, and labeling pores
self['pore.surface'] = False
self['throat.surface'] = False
# Label Delaunay pores on the surface
Ps = self.pores('external', mode='not')
Ps = self.find_neighbor_pores(pores=Ps)
Ps = self.filter_by_label(pores=Ps, labels='delaunay')
self['pore.surface'][Ps] = True
self['pore.external'][Ps] = False # So they aren't deleted below
# Label Voronoi pores on surface
Ps = self.pores('external')
Ps = self.find_neighbor_pores(pores=Ps)
Ps = self.filter_by_label(pores=Ps, labels='voronoi')
self['pore.surface'][Ps] = True
# Label Voronoi and interconnect throats on | |
datetime.strptime(data['1_day_back'], pattern)
assert delta < timedelta(days=3)
delta = datetime.strptime(data['1_day_forward'], pattern) - utcnow
assert delta > timedelta(seconds=82800)
delta = utcnow - datetime.strptime(data['1_hour_back'], pattern)
assert delta < timedelta(seconds=3700)
delta = datetime.strptime(data['1_hour_forward'], pattern) - utcnow
assert delta > timedelta(seconds=3500)
delta = utcnow - datetime.strptime(data['1_minute_back'], pattern)
assert delta < timedelta(seconds=120)
@pytest.mark.parametrize(('config'), [
'configs/yaml/hbs/core/connection_reset.yaml'
])
def test_connection_reset(self, config):
self.mock_server_process = run_mock_server(get_config_path(config))
resp = httpx.get(SRV_8001 + '/normal')
assert 200 == resp.status_code
assert resp.text == 'Hello world'
try:
httpx.get(SRV_8001 + '/reset')
except httpx.ReadError as e:
assert str(e) == '[Errno 104] Connection reset by peer'
try:
httpx.get(SRV_8001 + '/close')
except httpx.RemoteProtocolError as e:
assert 'ConnectionClosed' in str(e)
try:
httpx.get(SRV_8001 + '/reset2')
except httpx.ReadError as e:
assert str(e) == '[Errno 104] Connection reset by peer'
try:
httpx.get(SRV_8001 + '/close2')
except httpx.RemoteProtocolError as e:
assert 'ConnectionClosed' in str(e)
@pytest.mark.parametrize(('config'), [
'configs/json/hbs/core/faker.json',
'configs/json/j2/core/faker.json'
])
def test_faker(self, config):
self.mock_server_process = run_mock_server(get_config_path(config))
resp = httpx.get(SRV_8001 + '/faker')
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'application/json; charset=UTF-8'
data = resp.json()
assert isinstance(data['bothify'], str) and len(data['bothify']) == 5
assert isinstance(data['bothify_args'], str) and len(data['bothify_args']) == 5
assert isinstance(data['hexify'], str) and len(data['hexify']) == 4
assert isinstance(data['hexify_args'], str) and len(data['hexify_args']) == 30
assert isinstance(data['language_code'], str) and 2 <= len(data['language_code']) <= 3
assert isinstance(data['lexify'], str) and len(data['lexify']) == 4
assert isinstance(data['lexify_args'], str) and len(data['lexify_args']) == 29
assert isinstance(data['locale'], str) and 5 <= len(data['locale']) <= 6
assert isinstance(data['numerify'], str) and 0 <= int(data['numerify']) <= 999
assert isinstance(data['random_choices'][0], str)
assert 0 <= data['random_digit'] <= 9
assert 1 <= data['random_digit_not_null'] <= 9
assert isinstance(data['random_element'], str)
assert isinstance(data['random_elements'][0], str)
assert 0 <= data['random_int'] <= 9999
assert 10000 <= data['random_int_args'] <= 50000
assert isinstance(data['random_letter'], str)
assert isinstance(data['random_letters'][0], str)
assert isinstance(data['random_letters_args'][0], str)
assert data['random_lowercase_letter'].lower() == data['random_lowercase_letter']
assert isinstance(data['random_sample'][0], str)
assert data['random_uppercase_letter'].upper() == data['random_uppercase_letter']
@pytest.mark.parametrize(('config'), [
'configs/yaml/hbs/core/escape_html.yaml',
'configs/yaml/j2/core/escape_html.yaml'
])
def test_escape_html(self, config):
self.mock_server_process = run_mock_server(get_config_path(config))
resp = httpx.get(SRV_8001 + '/endp1')
assert 200 == resp.status_code
assert 'Content-Type' not in resp.headers
assert resp.text == '& < " >'
@pytest.mark.parametrize(('config'), [
'configs/yaml/hbs/core/cors.yaml',
])
def test_cors(self, config):
self.mock_server_process = run_mock_server(get_config_path(config))
hdr = {
"origin": "http://someorigin",
"Access-Control-Request-Headers": "authorization, x-api-key"
}
resp = httpx.options(SRV_8001 + '/cors-request', headers=hdr)
assert 204 == resp.status_code
assert hdr['origin'] == resp.headers.get("access-control-allow-origin")
assert hdr['Access-Control-Request-Headers'] == resp.headers.get("Access-Control-Allow-Headers")
assert "DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT" == resp.headers.get("access-control-allow-methods")
resp = httpx.post(SRV_8001 + '/cors-request', json={}, headers=hdr)
assert hdr['origin'] == resp.headers.get("access-control-allow-origin")
assert hdr['Access-Control-Request-Headers'] == resp.headers.get("Access-Control-Allow-Headers")
assert "DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT" == resp.headers.get("access-control-allow-methods")
assert 201 == resp.status_code
resp = httpx.options(SRV_8001 + '/cors-request-overridden', headers=hdr)
assert 401 == resp.status_code
resp = httpx.options(SRV_8001 + '/nonexistent', headers=hdr)
assert 404 == resp.status_code
resp = httpx.options(SRV_8001 + '/cors-request')
assert 404 == resp.status_code
@pytest.mark.parametrize(('config'), [
'configs/yaml/hbs/core/random.yaml'
])
def test_404_image(self, config):
self.mock_server_process = run_mock_server(get_config_path(config))
resp = httpx.get(SRV_8001 + '/404-img.png')
assert 404 == resp.status_code
assert b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A' == resp.content[:8]
assert resp.headers['Content-Type'] == 'image/png'
@pytest.mark.parametrize(('config'), [
'configs/yaml/hbs/body/config.yaml'
])
def test_data_dir_override(self, config):
os.environ['%s_DATA_DIR' % PROGRAM.upper()] = 'tests/configs/yaml/hbs/data_dir_override'
self.mock_server_process = run_mock_server(get_config_path(config))
TestBody.test_body_json_schema(TestBody, config)
del os.environ['%s_DATA_DIR' % PROGRAM.upper()]
@pytest.mark.parametrize(('config'), [
'configs/json/hbs/status/status_code.json'
])
class TestStatus():
def setup_method(self):
config = self._item.callspec.getparam('config')
self.mock_server_process = run_mock_server(get_config_path(config))
def teardown_method(self):
self.mock_server_process.terminate()
def test_status_code(self, config):
resp = httpx.get(SRV_8001 + '/service1', headers={'Host': SRV_8001_HOST})
assert 202 == resp.status_code
assert 'Content-Type' not in resp.headers
assert resp.text == 'service1'
resp = httpx.get(SRV_8002 + '/service2', headers={'Host': SRV_8002_HOST})
assert 403 == resp.status_code
assert 'Content-Type' not in resp.headers
assert resp.text == 'service2'
def test_status_code_templated(self, config):
query = '?rc=303'
resp = httpx.get(SRV_8002 + '/service2-endpoint2' + query, headers={'Host': SRV_8002_HOST})
assert 303 == resp.status_code
query = '?rc=wrong'
resp = httpx.get(SRV_8002 + '/service2-endpoint2' + query, headers={'Host': SRV_8002_HOST})
assert 500 == resp.status_code
assert 'Status code is neither an integer nor in \'RST\', \'FIN\'!' == resp.text
@pytest.mark.parametrize(('config'), [
'configs/json/hbs/headers/config.json'
])
class TestHeaders():
def setup_method(self):
config = self._item.callspec.getparam('config')
self.mock_server_process = run_mock_server(get_config_path(config))
def teardown_method(self):
self.mock_server_process.terminate()
def test_parameter(self, config):
param = str(int(time.time()))
resp = httpx.get(SRV_8001 + '/parameter', headers={"hdr1": param})
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'text/html; charset=UTF-8'
assert resp.text == 'matched with parameter: %s' % param
resp = httpx.get(SRV_8001 + '/parameter/template-file', headers={"hdr1": param})
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'application/json; charset=UTF-8'
data = resp.json()
assert data['matched with parameter'] == param
def test_static_value(self, config):
static_val = 'myValue'
resp = httpx.get(SRV_8001 + '/static-value', headers={"hdr1": static_val})
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'text/html; charset=UTF-8'
assert resp.text == 'matched with static value: %s' % static_val
resp = httpx.get(SRV_8001 + '/static-value/template-file', headers={"hdr1": static_val})
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'application/json; charset=UTF-8'
data = resp.json()
assert data['matched with static value'] == static_val
def test_regex_capture_group(self, config):
param = str(int(time.time()))
resp = httpx.get(SRV_8001 + '/regex-capture-group', headers={"hdr1": 'prefix-%s-suffix' % param})
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'text/html; charset=UTF-8'
assert resp.text == 'matched with regex capture group: %s' % param
resp = httpx.get(SRV_8001 + '/regex-capture-group/template-file', headers={"hdr1": 'prefix-%s-suffix' % param})
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'application/json; charset=UTF-8'
data = resp.json()
assert data['matched with regex capture group'] == param
def test_missing_header_should_400(self, config):
static_val = 'myValue'
resp = httpx.get(SRV_8001 + '/static-value', headers={"hdrX": static_val})
assert 400 == resp.status_code
assert '\'Hdr1\' not in the request headers!' == resp.text
resp = httpx.get(SRV_8001 + '/static-value/template-file', headers={"hdrX": static_val})
assert 400 == resp.status_code
assert '\'Hdr1\' not in the request headers!' == resp.text
def test_wrong_static_value_should_400(self, config):
static_val = 'wrongValue'
resp = httpx.get(SRV_8001 + '/static-value', headers={"hdr1": static_val})
assert 400 == resp.status_code
assert ('Request header value %r on key \'Hdr1\' does not match to regex: ^myValue$') % static_val == resp.text
resp = httpx.get(SRV_8001 + '/static-value/template-file', headers={"hdr1": static_val})
assert 400 == resp.status_code
assert ('Request header value %r on key \'Hdr1\' does not match to regex: ^myValue$') % static_val == resp.text
def test_wrong_regex_pattern_should_400(self, config):
param = str(int(time.time()))
resp = httpx.get(SRV_8001 + '/regex-capture-group', headers={"hdr1": 'idefix-%s-suffix' % param})
assert 400 == resp.status_code
assert ('Request header value \'idefix-%s-suffix\' on key \'Hdr1\' does not match to regex: ^prefix-(.+)-suffix$' % param) == resp.text
resp = httpx.get(SRV_8001 + '/regex-capture-group/template-file', headers={"hdr1": 'idefix-%s-suffix' % param})
assert 400 == resp.status_code
assert ('Request header value \'idefix-%s-suffix\' on key \'Hdr1\' does not match to regex: ^prefix-(.+)-suffix$' % param) == resp.text
def test_first_alternative(self, config):
static_val = 'myValue'
param2 = str(int(time.time()))
param3 = str(int(time.time() / 2))
resp = httpx.get(SRV_8001 + '/alternative', headers={
"hdr1": static_val,
"hdr2": param2,
"hdr3": 'prefix-%s-suffix' % param3
})
assert 201 == resp.status_code
assert resp.headers['Content-Type'] == 'text/html; charset=UTF-8'
assert resp.text == 'headers match: %s %s %s' % (static_val, param2, param3)
resp = httpx.get(SRV_8001 + '/alternative/template-file', headers={
"hdr1": static_val,
"hdr2": param2,
"hdr3": 'prefix-%s-suffix' % param3
})
assert 201 == resp.status_code
assert resp.headers['Content-Type'] == 'application/json; charset=UTF-8'
data = resp.json()
assert data['request.headers.hdr1'] == static_val
assert data['anyValIntoVar'] == param2
assert data['capturedVar'] == param3
def test_second_alternative(self, config):
static_val = 'another header'
resp = httpx.get(SRV_8001 + '/alternative', headers={
"hdr4": static_val
})
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'text/html; charset=UTF-8'
assert resp.text == 'hdr4 request header: %s' % static_val
resp = httpx.get(SRV_8001 + '/alternative/template-file', headers={
"hdr4": static_val
})
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'application/json; charset=UTF-8'
data = resp.json()
assert data['hdr4 request header'] == static_val
def test_nonexisting_alternative_should_400(self, config):
static_val = 'another header'
resp = httpx.get(SRV_8001 + '/alternative', headers={
"hdr5": static_val
})
assert 400 == resp.status_code
assert '\'Hdr4\' not in the request headers!' == resp.text
resp = httpx.get(SRV_8001 + '/alternative/template-file', headers={
"hdr5": static_val
})
assert 400 == resp.status_code
assert '\'Hdr4\' not in the request headers!' == resp.text
def test_response_headers_in_first_alternative(self, config):
static_val = 'myValue'
param2 = str(int(time.time()))
param3 = str(int(time.time() / 2))
resp = httpx.get(SRV_8001 + '/alternative', headers={
"hdr1": static_val,
"hdr2": param2,
"hdr3": 'prefix-%s-suffix' % param3
})
assert 201 == resp.status_code
assert resp.headers['Content-Type'] == 'text/html; charset=UTF-8'
assert resp.cookies['name1'] == param2
assert resp.cookies['name2'] == 'prefix-%s-suffix' % param3
resp = httpx.get(SRV_8001 + '/alternative/template-file', headers={
"hdr1": static_val,
"hdr2": param2,
"hdr3": 'prefix-%s-suffix' % param3
})
assert 201 == resp.status_code
assert resp.headers['Content-Type'] == 'application/json; charset=UTF-8'
assert resp.cookies['name1'] == param2
assert resp.cookies['name2'] == 'prefix-%s-suffix' % param3
def test_response_headers_in_second_alternative(self, config):
static_val = 'another header'
resp = httpx.get(SRV_8001 + '/alternative', headers={
"hdr4": static_val
})
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'text/html; charset=UTF-8'
assert resp.headers['Hdr4'] == 'hdr4 request header: %s' % static_val
resp = httpx.get(SRV_8001 + '/alternative/template-file', headers={
"hdr4": static_val
})
assert 200 == resp.status_code
assert resp.headers['Content-Type'] == 'application/json; charset=UTF-8'
| |
# open_cp/gui/predictors/stscan.py
"""
stscan
~~~~~~
Uses the "space/time scan statistic" method of prediction.
Things we can vary:
- How far back in time to look
- The bandwidth/cutoff rules for clusters
- Quantise space and/or time?
- Expand clusters to their maximum size?
Pending further work, we shall not worry about:
- Pass via a "continuous" prediction first
"""
from . import predictor
import open_cp.stscan
import tkinter as tk
import tkinter.ttk as ttk
import open_cp.gui.tk.util as util
import open_cp.gui.tk.richtext as richtext
import open_cp.gui.tk.tooltips as tooltips
import numpy as np
import datetime
import enum
import logging
_text = {
"main" : ("Space/Time scan Predictor.\n\n"
+ "A statistical test, as implemented in the SaTScan software package, is used to identify possible "
+ "'clusters' of events in space and time. We use the algorithm in 'prediction' mode whereby only "
+ "clusters extending to the current time are considered. We then form a grid based prediction by "
+ "marking the first cluster as being at 'risk', then marking the second cluster as being of slightly "
+ "lower risk, and so on. Currently, we only assign 'risk' to a grid cell if the centre of the grid "
+ "is in the cluster disk. As such, very small disks may give rise to very small grid-based 'disks'. "
+ "There is the option to 'expand disks' to a larger radius but so that the contained set of events "
+ "does not change. This is likely only to make a difference if you have selected a 'grid' option "
+ "in the 'Quantise data' panel.\n"
+ "Each cluster is a 'space/time cylinder'. It encompases a spatial region which is a disk, and time region which is an interval.\n"
+ "Training Data usage: Ignored. For each prediction point, all data in a 'window' before the prediction point is used."
),
"no_data" : "No data points found in time range: have enough crime types been selected?",
"tw" : "Time Window",
"tw1" : "Use data from start of training range",
"tw1tt" : "For each prediction, all the data from the start of the 'training' time range up to the prediction date is used",
"tw2" : "Window from prediction date of",
"tw2tt" : "For each prediction, only data in this length of time 'window' before the prediction date is used",
"tw3" : "Days",
"qu" : "Quantise data",
"quu" : "Time bin length",
"quutt" : "The length of each 'time bin' to put timestamps into. For example, if '24 hours' then each timestamp is assigned to the day it falls in.",
"qu1" : "Use data as is",
"qu1tt" : "Do not do anything to the input data",
"qu2" : "Grid data",
"qu2tt" : "Move the location of each event to the centre of the grid cell it falls in",
"qu3" : "Bin time",
"qu3tt" : "Convert timestamp to be the same for each 'bin'",
"qu4" : "Both",
"qu4tt" : "Grid locations and bin time",
"qu5" : "Length",
"qu6" : "Hours",
"opt1" : "Maximum cluster size",
"opt2" : "Space population limit:",
"opt2tt" : "No cluster can contain more than this percentage of the total data (here just considering the spatial disk, ignoring all timestamps)",
"optper" : "%",
"opt3" : "Space radius limit:",
"opt3tt" : "No cluster can have a radius greater than this",
"optm" : "Meters",
"opt4" : "Time population limit:",
"opt4tt" : "No cluster can contain more than this percentage of the total data (here just considering the time intrval, ignoraing all event coordinates)",
"opt5" : "Time length limit:",
"opt5tt" : "No cluster can have a time length greater than this",
"optdays" : "Days",
"clop" : "Cluster options",
"clop1" : "Use as is",
"clop1tt" : "Use the clusters as found",
"clop2" : "Expand",
"clop2tt" : "Expand clusters to the maximum radii subject to them not containing further events",
}
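# Hedged sketch (not used by the GUI): how the ranked cluster disks described
# in _text["main"] could be turned into a grid "risk" surface.  A cell gets a
# value only if its centre lies inside a disk, and earlier (stronger) clusters
# keep their higher value.  The argument names and the weighting scheme below
# are assumptions for this illustration only.
def _ranked_disks_to_grid(disks, xmin, ymin, cell_size, nx, ny):
    """`disks` is an ordered list of ((cx, cy), radius) tuples, strongest first."""
    risk = np.zeros((ny, nx))
    xs = xmin + (np.arange(nx) + 0.5) * cell_size  # cell centre x coordinates
    ys = ymin + (np.arange(ny) + 0.5) * cell_size  # cell centre y coordinates
    gx, gy = np.meshgrid(xs, ys)
    for rank, ((cx, cy), radius) in enumerate(disks):
        inside = (gx - cx) ** 2 + (gy - cy) ** 2 <= radius ** 2
        weight = 1.0 - rank / (len(disks) + 1)  # any decreasing scheme would do
        risk[inside & (risk == 0)] = weight
    return risk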
class STScan(predictor.Predictor):
def __init__(self, model):
super().__init__(model)
self.time_window_choice = 1
self.time_window_length = datetime.timedelta(days=60)
self.quantisation_choice = 1
self.time_bin_length = datetime.timedelta(days=1)
self.geographic_population_limit = 50
self.time_population_limit = 50
self.geographic_radius_limit = 3000
self.time_max_interval = datetime.timedelta(days=60)
self.cluster_option = 1
@staticmethod
def describe():
return "Space Time Scan Predictor"
@staticmethod
def order():
return predictor._TYPE_GRID_PREDICTOR
def config(self):
return {}
def make_view(self, parent):
return STScanView(parent, self)
@property
def name(self):
return "Space Time Scan Predictor"
@property
def settings_string(self):
out = ""
if self._time_window_choice == self.TimeWindowChoice.window:
days = int(self.time_window_length / datetime.timedelta(days=1))
out += "<={}days ".format(days)
if (self.quantisation_choice == self.QuantiseDataChoice.space.value or
self.quantisation_choice == self.QuantiseDataChoice.both.value):
out += "grid "
if (self.quantisation_choice == self.QuantiseDataChoice.time.value or
self.quantisation_choice == self.QuantiseDataChoice.both.value):
hours = int(self.time_bin_length / datetime.timedelta(hours=1))
out += "bins({}hours) ".format(hours)
out += "geo({}%/{}m) ".format(int(self.geographic_population_limit),
self.geographic_radius_limit)
days = int(self.time_max_interval / datetime.timedelta(days=1))
out += "time({}%/{}days)".format(int(self.time_population_limit),days)
if self.cluster_option == self.ClusterOption.expand.value:
out += " max"
return out
def make_tasks(self):
return [self.Task(self)]
def to_dict(self):
return {
"time_window_choice" : self.time_window_choice,
"time_window_length" : self.time_window_length.total_seconds(),
"quantisation_choice" : self.quantisation_choice,
"time_bin_length" : self.time_bin_length.total_seconds(),
"geographic_population_limit" : self.geographic_population_limit,
"time_population_limit" : self.time_population_limit,
"geographic_radius_limit" : self.geographic_radius_limit,
"time_max_interval" : self.time_max_interval.total_seconds(),
"cluster_option" : self.cluster_option
}
def from_dict(self, data):
self.time_window_choice = int(data["time_window_choice"])
self.time_window_length = datetime.timedelta(seconds = int(data["time_window_length"]))
self.quantisation_choice = int(data["quantisation_choice"])
self.time_bin_length = datetime.timedelta(seconds = int(data["time_bin_length"]))
self.geographic_population_limit = int(data["geographic_population_limit"])
self.time_population_limit = int(data["time_population_limit"])
self.geographic_radius_limit = int(data["geographic_radius_limit"])
self.time_max_interval = datetime.timedelta(seconds = int(data["time_max_interval"]))
self.cluster_option = int(data["cluster_option"])
class TimeWindowChoice(enum.Enum):
from_training = 1
window = 2
@property
def time_window_choice(self):
return self._time_window_choice.value
@time_window_choice.setter
def time_window_choice(self, value):
self._time_window_choice = self.TimeWindowChoice(value)
@property
def time_window_length(self):
return self._time_window_length
@time_window_length.setter
def time_window_length(self, value):
self._time_window_length = value
class QuantiseDataChoice(enum.Enum):
none = 1
space = 2
time = 3
both = 4
@property
def quantisation_choice(self):
return self._quan_choice.value
@quantisation_choice.setter
def quantisation_choice(self, value):
self._quan_choice = self.QuantiseDataChoice(value)
class ClusterOption(enum.Enum):
none = 1
expand = 2
@property
def cluster_option(self):
return self._cluster_option.value
@cluster_option.setter
def cluster_option(self, value):
self._cluster_option = self.ClusterOption(value)
@property
def time_bin_length(self):
return self._time_bin
@time_bin_length.setter
def time_bin_length(self, value):
self._time_bin = value
@property
def geographic_population_limit(self):
return self._geo_pop_limit * 100
@geographic_population_limit.setter
def geographic_population_limit(self, value):
if value < 0 or value > 100:
raise ValueError()
self._geo_pop_limit = value / 100
@property
def geographic_radius_limit(self):
return self._geo_radius
@geographic_radius_limit.setter
def geographic_radius_limit(self, value):
self._geo_radius = value
@property
def time_population_limit(self):
return self._time_pop_limit * 100
@time_population_limit.setter
def time_population_limit(self, value):
if value < 0 or value > 100:
raise ValueError()
self._time_pop_limit = value / 100
@property
def time_max_interval(self):
return self._time_max_interval
@time_max_interval.setter
def time_max_interval(self, value):
self._time_max_interval = value
class Task(predictor.GridPredictorTask):
def __init__(self, model):
super().__init__()
self._geo_pop_limit_perc = model.geographic_population_limit
self._time_pop_limit_perc = model.time_population_limit
self._geo_radius = model.geographic_radius_limit
self._time_max = model.time_max_interval
self._start_time_option = model.time_window_choice
self._start_time_window = model.time_window_length
self._quant_choice = model.quantisation_choice
self._quant_bin_length = model.time_bin_length
self._max_clusters = model.cluster_option == STScan.ClusterOption.expand.value
def _points_to_centre_grid(self, timed_points, grid):
return open_cp.stscan.grid_timed_points(timed_points,
grid.region(), grid.xsize)
def __call__(self, analysis_model, grid_task, project_task):
timed_points = self.projected_data(analysis_model, project_task)
if timed_points.number_data_points == 0:
raise predictor.PredictionError(_text["no_data"])
grid = grid_task(timed_points)
if self._start_time_option == 1:
start = analysis_model.time_range[0]
timed_points = timed_points[timed_points.timestamps >= start]
time_window = None
elif self._start_time_option == 2:
time_window = self._start_time_window
else:
raise ValueError()
if timed_points.number_data_points == 0:
raise predictor.PredictionError(_text["no_data"])
if self._quant_choice == 2 or self._quant_choice == 4:
timed_points = self._points_to_centre_grid(timed_points, grid)
bin_length = None
if self._quant_choice == 3 or self._quant_choice == 4:
bin_length = self._quant_bin_length
return STScan.SubTask(timed_points, grid, self, time_window,
bin_length, self._max_clusters)
class SubTask(predictor.SingleGridPredictor):
def __init__(self, timed_points, grid, task, time_window,
time_bin_length, max_clusters):
# Is memory intensive, but should be okay except for huge datasets
# (which are prohibitively slow anyway....)
super().__init__(True)
self.grid_size = grid.xsize
self.predictor = open_cp.stscan.STSTrainer()
self.predictor.region = grid.region()
self.predictor.geographic_population_limit = task._geo_pop_limit_perc / 100
self.predictor.geographic_radius_limit = task._geo_radius
self.predictor.time_population_limit = task._time_pop_limit_perc / 100
self.predictor.time_max_interval = np.timedelta64(task._time_max)
self._time_window = time_window
self._timed_points = timed_points
self._bin_length = time_bin_length
self._max_clusters = max_clusters
def __call__(self, predict_time, length=None):
self.predictor.data = self._timed_points
if self._time_window is not None:
mask = self._timed_points.timestamps >= predict_time - self._time_window
self.predictor.data = self._timed_points[mask]
predict_time = np.datetime64(predict_time)
if self._bin_length is not None:
self.predictor.data = open_cp.stscan.bin_timestamps(self.predictor.data,
predict_time, self._bin_length)
result = self.predictor.predict(time = predict_time)
return result.grid_prediction(self.grid_size,
use_maximal_clusters=self._max_clusters)
class STScanView(tk.Frame):
def __init__(self, parent, model):
self._model = model
super().__init__(parent)
self._text = richtext.RichText(self, height=12, scroll="v")
self._text.grid(sticky=tk.NSEW, row=0, column=0)
self._text.add_text(_text["main"])
frame = tk.Frame(self)
frame.grid(sticky=tk.NSEW, row=1, column=0)
self.add_widgets(frame)
self.update()
def add_widgets(self, frame):
subframe = ttk.LabelFrame(frame, text=_text["tw"])
subframe.grid(row=0, column=0, padx=2, pady=2, sticky=tk.NSEW)
self._time_window_option = tk.IntVar()
rb = ttk.Radiobutton(subframe, text=_text["tw1"], variable=self._time_window_option,
value=STScan.TimeWindowChoice.from_training.value, command=self._time_window_option_changed)
rb.grid(row=0, column=0, padx=2, pady=2, sticky=tk.W)
tooltips.ToolTipYellow(rb, _text["tw1tt"])
subsubframe = ttk.Frame(subframe)
| |
import ctypes
import numpy
import sys
import os
import os.path
from numpy.compat import asbytes, asstr
def _generate_candidate_libs():
# look for likely library files in the following dirs:
lib_dirs = [os.path.dirname(__file__),
'/lib',
'/usr/lib',
'/usr/local/lib',
'/opt/local/lib',
os.path.join(sys.prefix, 'lib'),
os.path.join(sys.prefix, 'DLLs')
]
if 'HOME' in os.environ:
lib_dirs.append(os.path.join(os.environ['HOME'], 'lib'))
lib_dirs = [ld for ld in lib_dirs if os.path.exists(ld)]
lib_names = ['libfreeimage', 'freeimage'] # should be lower-case!
# Now attempt to find libraries of that name in the given directory
# (case-insensitive and without regard for extension)
lib_paths = []
for lib_dir in lib_dirs:
for lib_name in lib_names:
files = os.listdir(lib_dir)
lib_paths += [os.path.join(lib_dir, lib) for lib in files
if lib.lower().startswith(lib_name) and not
os.path.splitext(lib)[1] in ('.py', '.pyc', '.ini')]
lib_paths = [lp for lp in lib_paths if os.path.exists(lp)]
return lib_dirs, lib_paths
if sys.platform == 'win32':
LOADER = ctypes.windll
FUNCTYPE = ctypes.WINFUNCTYPE
else:
LOADER = ctypes.cdll
FUNCTYPE = ctypes.CFUNCTYPE
def handle_errors():
global FT_ERROR_STR
if FT_ERROR_STR:
tmp = FT_ERROR_STR
FT_ERROR_STR = None
raise RuntimeError(tmp)
FT_ERROR_STR = None
# This MUST happen in module scope, or the function pointer is garbage
# collected, leading to a segfault when error_handler is called.
@FUNCTYPE(None, ctypes.c_int, ctypes.c_char_p)
def c_error_handler(fif, message):
global FT_ERROR_STR
FT_ERROR_STR = 'FreeImage error: %s' % message
def load_freeimage():
freeimage = None
errors = []
# First try a few bare library names that ctypes might be able to find
# in the default locations for each platform. Win DLL names don't need the
# extension, but other platforms do.
bare_libs = ['FreeImage', 'libfreeimage.dylib', 'libfreeimage.so',
'libfreeimage.so.3']
lib_dirs, lib_paths = _generate_candidate_libs()
lib_paths = bare_libs + lib_paths
for lib in lib_paths:
try:
freeimage = LOADER.LoadLibrary(lib)
break
except Exception:
if lib not in bare_libs:
# Don't record errors when it couldn't load the library from
# a bare name -- this fails often, and doesn't provide any
# useful debugging information anyway, beyond "couldn't find
# library..."
# Get exception instance in Python 2.x/3.x compatible manner
e_type, e_value, e_tb = sys.exc_info()
del e_tb
errors.append((lib, e_value))
if freeimage is None:
if errors:
# No freeimage library loaded, and load-errors reported for some
# candidate libs
err_txt = ['%s:\n%s' % (l, str(e)) for l, e in errors]
raise RuntimeError('One or more FreeImage libraries were found, '
'but could not be loaded due to the following '
'errors:\n' +
'\n'.join(err_txt))
else:
# No errors, because no potential libraries found at all!
raise RuntimeError('Could not find a FreeImage library in any of:'
'\n' +
'\n'.join(lib_dirs))
# FreeImage found
freeimage.FreeImage_SetOutputMessage(c_error_handler)
return freeimage
_FI = load_freeimage()
API = {
# All we're doing here is telling ctypes that some of the FreeImage
# functions return pointers instead of integers. (On 64-bit systems,
# without this information the pointers get truncated and crashes result).
# There's no need to list functions that return ints, or the types of the
# parameters to these or other functions -- that's fine to do implicitly.
# Note that the ctypes immediately converts the returned void_p back to a
# python int again! This is really not helpful, because then passing it
# back to another library call will cause truncation-to-32-bits on 64-bit
# systems. Thanks, ctypes! So after these calls one must immediately
# re-wrap the int as a c_void_p if it is to be passed back into FreeImage.
'FreeImage_AllocateT': (ctypes.c_void_p, None),
'FreeImage_FindFirstMetadata': (ctypes.c_void_p, None),
'FreeImage_GetBits': (ctypes.c_void_p, None),
'FreeImage_GetPalette': (ctypes.c_void_p, None),
'FreeImage_GetTagKey': (ctypes.c_char_p, None),
'FreeImage_GetTagValue': (ctypes.c_void_p, None),
'FreeImage_Load': (ctypes.c_void_p, None),
'FreeImage_LockPage': (ctypes.c_void_p, None),
'FreeImage_OpenMultiBitmap': (ctypes.c_void_p, None)
}
# Albert's ctypes pattern
def register_api(lib, api):
for f, (restype, argtypes) in api.items():
func = getattr(lib, f)
func.restype = restype
func.argtypes = argtypes
register_api(_FI, API)
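# Hedged illustration of the re-wrapping pattern described in the comment at
# the top of the API dict: pointer-returning FreeImage calls come back from
# ctypes as plain Python ints, so wrap them in c_void_p before handing them
# back to the library.  `_as_ptr` is added here only as an example; the rest
# of this module performs the re-wrapping inline at each call site.
def _as_ptr(handle):
    # Preserves the full 64-bit pointer value when passed back into FreeImage.
    return ctypes.c_void_p(handle)
# e.g. (placeholder arguments): bitmap = _as_ptr(_FI.FreeImage_Load(fif, fname, 0))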
class FiTypes(object):
FIT_UNKNOWN = 0
FIT_BITMAP = 1
FIT_UINT16 = 2
FIT_INT16 = 3
FIT_UINT32 = 4
FIT_INT32 = 5
FIT_FLOAT = 6
FIT_DOUBLE = 7
FIT_COMPLEX = 8
FIT_RGB16 = 9
FIT_RGBA16 = 10
FIT_RGBF = 11
FIT_RGBAF = 12
dtypes = {FIT_BITMAP: numpy.uint8,
FIT_UINT16: numpy.uint16,
FIT_INT16: numpy.int16,
FIT_UINT32: numpy.uint32,
FIT_INT32: numpy.int32,
FIT_FLOAT: numpy.float32,
FIT_DOUBLE: numpy.float64,
FIT_COMPLEX: numpy.complex128,
FIT_RGB16: numpy.uint16,
FIT_RGBA16: numpy.uint16,
FIT_RGBF: numpy.float32,
FIT_RGBAF: numpy.float32,
}
fi_types = {(numpy.dtype('uint8'), 1): FIT_BITMAP,
(numpy.dtype('uint8'), 3): FIT_BITMAP,
(numpy.dtype('uint8'), 4): FIT_BITMAP,
(numpy.dtype('uint16'), 1): FIT_UINT16,
(numpy.dtype('int16'), 1): FIT_INT16,
(numpy.dtype('uint32'), 1): FIT_UINT32,
(numpy.dtype('int32'), 1): FIT_INT32,
(numpy.dtype('float32'), 1): FIT_FLOAT,
(numpy.dtype('float64'), 1): FIT_DOUBLE,
(numpy.dtype('complex128'), 1): FIT_COMPLEX,
(numpy.dtype('uint16'), 3): FIT_RGB16,
(numpy.dtype('uint16'), 4): FIT_RGBA16,
(numpy.dtype('float32'), 3): FIT_RGBF,
(numpy.dtype('float32'), 4): FIT_RGBAF,
}
extra_dims = {FIT_UINT16: [],
FIT_INT16: [],
FIT_UINT32: [],
FIT_INT32: [],
FIT_FLOAT: [],
FIT_DOUBLE: [],
FIT_COMPLEX: [],
FIT_RGB16: [3],
FIT_RGBA16: [4],
FIT_RGBF: [3],
FIT_RGBAF: [4],
}
@classmethod
def get_type_and_shape(cls, bitmap):
w = _FI.FreeImage_GetWidth(bitmap)
handle_errors()
h = _FI.FreeImage_GetHeight(bitmap)
handle_errors()
fi_type = _FI.FreeImage_GetImageType(bitmap)
handle_errors()
if not fi_type:
raise ValueError('Unknown image pixel type')
dtype = cls.dtypes[fi_type]
if fi_type == cls.FIT_BITMAP:
bpp = _FI.FreeImage_GetBPP(bitmap)
handle_errors()
if bpp == 8:
extra_dims = []
elif bpp == 24:
extra_dims = [3]
elif bpp == 32:
extra_dims = [4]
else:
raise ValueError('Cannot convert %d BPP bitmap' % bpp)
else:
extra_dims = cls.extra_dims[fi_type]
return numpy.dtype(dtype), extra_dims + [w, h]
class IoFlags(object):
# loading: load the image header only (not supported by all plugins)
FIF_LOAD_NOPIXELS = 0x8000
BMP_DEFAULT = 0
BMP_SAVE_RLE = 1
CUT_DEFAULT = 0
DDS_DEFAULT = 0
EXR_DEFAULT = 0 # save data as half with piz-based wavelet compression
EXR_FLOAT = 0x0001 # save data as float instead of half (not recommended)
EXR_NONE = 0x0002 # save with no compression
EXR_ZIP = 0x0004 # save with zlib compression, in blocks of 16 scan lines
EXR_PIZ = 0x0008 # save with piz-based wavelet compression
EXR_PXR24 = 0x0010 # save with lossy 24-bit float compression
# save with lossy 44% float compression (22% when combined with EXR_LC)
EXR_B44 = 0x0020
# one luminance and two chroma channels rather than as RGB (lossy)
EXR_LC = 0x0040
FAXG3_DEFAULT = 0
GIF_DEFAULT = 0
# Load as 256 color image with unused palette entries if 16 or 2 color
GIF_LOAD256 = 1
# 'Play' the GIF generating each frame (as 32bpp) instead of raw frame data
GIF_PLAYBACK = 2
HDR_DEFAULT = 0
ICO_DEFAULT = 0
# convert to 32bpp then add an alpha channel from the AND-mask when loading
ICO_MAKEALPHA = 1
IFF_DEFAULT = 0
J2K_DEFAULT = 0 # save with a 16:1 rate
JP2_DEFAULT = 0 # save with a 16:1 rate
# loading (see JPEG_FAST)
# saving (see JPEG_QUALITYGOOD|JPEG_SUBSAMPLING_420)
JPEG_DEFAULT = 0
# load the file as fast as possible, sacrificing some quality
JPEG_FAST = 0x0001
# load the file with the best quality, sacrificing some speed
JPEG_ACCURATE = 0x0002
# load separated CMYK "as is" (use | to combine with other load flags)
JPEG_CMYK = 0x0004
# load and rotate according to Exif 'Orientation' tag if available
JPEG_EXIFROTATE = 0x0008
JPEG_QUALITYSUPERB = 0x80 # save with superb quality (100:1)
JPEG_QUALITYGOOD = 0x0100 # save with good quality (75:1)
JPEG_QUALITYNORMAL = 0x0200 # save with normal quality (50:1)
JPEG_QUALITYAVERAGE = 0x0400 # save with average quality (25:1)
JPEG_QUALITYBAD = 0x0800 # save with bad quality (10:1)
# save as a progressive-JPEG (use | to combine with other save flags)
JPEG_PROGRESSIVE = 0x2000
# save with high 4x1 chroma subsampling (4:1:1)
JPEG_SUBSAMPLING_411 = 0x1000
# save with medium 2x2 medium chroma subsampling (4:2:0) - default value
JPEG_SUBSAMPLING_420 = 0x4000
# save with low 2x1 chroma subsampling (4:2:2)
JPEG_SUBSAMPLING_422 = 0x8000
JPEG_SUBSAMPLING_444 = 0x10000 # save with no chroma subsampling (4:4:4)
# on saving, compute optimal Huffman coding tables (can reduce file size a few %)
JPEG_OPTIMIZE = 0x20000
JPEG_BASELINE = 0x40000 # save basic JPEG, without metadata or any markers
KOALA_DEFAULT = 0
LBM_DEFAULT = 0
MNG_DEFAULT = 0
PCD_DEFAULT = 0
PCD_BASE = 1 # load the bitmap sized 768 x 512
PCD_BASEDIV4 = 2 # load the bitmap sized 384 x 256
PCD_BASEDIV16 = 3 # load the bitmap sized 192 x 128
PCX_DEFAULT = 0
PFM_DEFAULT = 0
PICT_DEFAULT = 0
PNG_DEFAULT = 0
PNG_IGNOREGAMMA = 1 # loading: avoid gamma correction
# save using ZLib level 1 compression flag (default value is 6)
PNG_Z_BEST_SPEED = 0x0001
# save | |
name="MLST",
dtype=ChannelType.LONG,
doc="Last Val Monitored",
read_only=True,
)
prev_simulation_mode = pvproperty(
name="OLDSIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuSimm.get_string_tuple(),
doc="Prev. Simulation Mode",
read_only=True,
)
output_mode_select = pvproperty(
name="OMSL",
dtype=ChannelType.ENUM,
enum_strings=menus.menuOmsl.get_string_tuple(),
doc="Output Mode Select",
)
output_specification = pvproperty(
name="OUT", dtype=ChannelType.STRING, doc="Output Specification"
)
sim_mode_async_delay = pvproperty(
name="SDLY",
dtype=ChannelType.DOUBLE,
doc="Sim. Mode Async Delay",
value=-1.0,
)
simulation_mode_link = pvproperty(
name="SIML", dtype=ChannelType.STRING, doc="Simulation Mode Link"
)
simulation_mode = pvproperty(
name="SIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuYesNo.get_string_tuple(),
doc="Simulation Mode",
)
simulation_mode_severity = pvproperty(
name="SIMS",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="Simulation Mode Severity",
)
simulation_output_link = pvproperty(
name="SIOL", dtype=ChannelType.STRING, doc="Simulation Output Link"
)
sim_mode_scan = pvproperty(
name="SSCN",
dtype=ChannelType.ENUM,
enum_strings=menus.menuScan.get_string_tuple(),
doc="Sim. Mode Scan",
value=0,
)
# desired_output = pvproperty(name='VAL',
# dtype=ChannelType.LONG,
# doc='Desired Output')
link_parent_attribute(archive_deadband, "log_atol", use_setattr=True)
link_parent_attribute(monitor_deadband, "value_atol", use_setattr=True)
class LonginFields(RecordFieldGroup, _LimitsLong):
_record_type = "longin"
_dtype = ChannelType.LONG # DTYP of .VAL
has_val_field = True
copy_pvproperties(locals(), RecordFieldGroup, _LimitsLong)
device_type = pvproperty(
name="DTYP",
dtype=ChannelType.ENUM,
enum_strings=menus.dtyp_longin.get_string_tuple(),
doc="Device Type",
)
archive_deadband = pvproperty(
name="ADEL", dtype=ChannelType.LONG, doc="Archive Deadband"
)
alarm_filter_time_constant = pvproperty(
name="AFTC", dtype=ChannelType.DOUBLE, doc="Alarm Filter Time Constant"
)
alarm_filter_value = pvproperty(
name="AFVL",
dtype=ChannelType.DOUBLE,
doc="Alarm Filter Value",
read_only=True,
)
last_value_archived = pvproperty(
name="ALST",
dtype=ChannelType.LONG,
doc="Last Value Archived",
read_only=True,
)
engineering_units = pvproperty(
name="EGU",
dtype=ChannelType.CHAR,
max_length=16,
report_as_string=True,
doc="Engineering Units",
)
alarm_deadband = pvproperty(
name="HYST", dtype=ChannelType.LONG, doc="Alarm Deadband"
)
input_specification = pvproperty(
name="INP", dtype=ChannelType.STRING, doc="Input Specification"
)
last_value_alarmed = pvproperty(
name="LALM",
dtype=ChannelType.LONG,
doc="Last Value Alarmed",
read_only=True,
)
monitor_deadband = pvproperty(
name="MDEL", dtype=ChannelType.LONG, doc="Monitor Deadband"
)
last_val_monitored = pvproperty(
name="MLST",
dtype=ChannelType.LONG,
doc="Last Val Monitored",
read_only=True,
)
prev_simulation_mode = pvproperty(
name="OLDSIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuSimm.get_string_tuple(),
doc="Prev. Simulation Mode",
read_only=True,
)
sim_mode_async_delay = pvproperty(
name="SDLY",
dtype=ChannelType.DOUBLE,
doc="Sim. Mode Async Delay",
value=-1.0,
)
sim_mode_location = pvproperty(
name="SIML", dtype=ChannelType.STRING, doc="Sim Mode Location"
)
simulation_mode = pvproperty(
name="SIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuYesNo.get_string_tuple(),
doc="Simulation Mode",
)
sim_mode_alarm_svrty = pvproperty(
name="SIMS",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="Sim mode Alarm Svrty",
)
sim_input_specifctn = pvproperty(
name="SIOL", dtype=ChannelType.STRING, doc="Sim Input Specifctn"
)
sim_mode_scan = pvproperty(
name="SSCN",
dtype=ChannelType.ENUM,
enum_strings=menus.menuScan.get_string_tuple(),
doc="Sim. Mode Scan",
value=0,
)
simulation_value = pvproperty(
name="SVAL", dtype=ChannelType.LONG, doc="Simulation Value"
)
# current_value = pvproperty(name='VAL',
# dtype=ChannelType.LONG,
# doc='Current value')
link_parent_attribute(archive_deadband, "log_atol", use_setattr=True)
link_parent_attribute(monitor_deadband, "value_atol", use_setattr=True)
class LongoutFields(RecordFieldGroup, _LimitsLong):
_record_type = "longout"
_dtype = ChannelType.LONG # DTYP of .VAL
has_val_field = True
copy_pvproperties(locals(), RecordFieldGroup, _LimitsLong)
device_type = pvproperty(
name="DTYP",
dtype=ChannelType.ENUM,
enum_strings=menus.dtyp_longout.get_string_tuple(),
doc="Device Type",
)
archive_deadband = pvproperty(
name="ADEL", dtype=ChannelType.LONG, doc="Archive Deadband"
)
last_value_archived = pvproperty(
name="ALST",
dtype=ChannelType.LONG,
doc="Last Value Archived",
read_only=True,
)
desired_output_loc = pvproperty(
name="DOL", dtype=ChannelType.STRING, doc="Desired Output Loc"
)
drive_high_limit = pvproperty(
name="DRVH", dtype=ChannelType.LONG, doc="Drive High Limit"
)
drive_low_limit = pvproperty(
name="DRVL", dtype=ChannelType.LONG, doc="Drive Low Limit"
)
engineering_units = pvproperty(
name="EGU",
dtype=ChannelType.CHAR,
max_length=16,
report_as_string=True,
doc="Engineering Units",
)
alarm_deadband = pvproperty(
name="HYST", dtype=ChannelType.LONG, doc="Alarm Deadband"
)
invalid_output_action = pvproperty(
name="IVOA",
dtype=ChannelType.ENUM,
enum_strings=menus.menuIvoa.get_string_tuple(),
doc="INVALID output action",
)
invalid_output_value = pvproperty(
name="IVOV", dtype=ChannelType.LONG, doc="INVALID output value"
)
last_value_alarmed = pvproperty(
name="LALM",
dtype=ChannelType.LONG,
doc="Last Value Alarmed",
read_only=True,
)
monitor_deadband = pvproperty(
name="MDEL", dtype=ChannelType.LONG, doc="Monitor Deadband"
)
last_val_monitored = pvproperty(
name="MLST",
dtype=ChannelType.LONG,
doc="Last Val Monitored",
read_only=True,
)
prev_simulation_mode = pvproperty(
name="OLDSIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuSimm.get_string_tuple(),
doc="Prev. Simulation Mode",
read_only=True,
)
output_mode_select = pvproperty(
name="OMSL",
dtype=ChannelType.ENUM,
enum_strings=menus.menuOmsl.get_string_tuple(),
doc="Output Mode Select",
)
output_specification = pvproperty(
name="OUT", dtype=ChannelType.STRING, doc="Output Specification"
)
sim_mode_async_delay = pvproperty(
name="SDLY",
dtype=ChannelType.DOUBLE,
doc="Sim. Mode Async Delay",
value=-1.0,
)
sim_mode_location = pvproperty(
name="SIML", dtype=ChannelType.STRING, doc="Sim Mode Location"
)
simulation_mode = pvproperty(
name="SIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuYesNo.get_string_tuple(),
doc="Simulation Mode",
)
sim_mode_alarm_svrty = pvproperty(
name="SIMS",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="Sim mode Alarm Svrty",
)
sim_output_specifctn = pvproperty(
name="SIOL", dtype=ChannelType.STRING, doc="Sim Output Specifctn"
)
sim_mode_scan = pvproperty(
name="SSCN",
dtype=ChannelType.ENUM,
enum_strings=menus.menuScan.get_string_tuple(),
doc="Sim. Mode Scan",
value=0,
)
# desired_output = pvproperty(name='VAL',
# dtype=ChannelType.LONG,
# doc='Desired Output')
link_parent_attribute(archive_deadband, "log_atol", use_setattr=True)
link_parent_attribute(monitor_deadband, "value_atol", use_setattr=True)
class LsiFields(RecordFieldGroup):
_record_type = "lsi"
_dtype = None # DTYP of .VAL
has_val_field = False
copy_pvproperties(locals(), RecordFieldGroup)
device_type = pvproperty(
name="DTYP",
dtype=ChannelType.ENUM,
enum_strings=menus.dtyp_lsi.get_string_tuple(),
doc="Device Type",
)
post_archive_monitors = pvproperty(
name="APST",
dtype=ChannelType.ENUM,
enum_strings=menus.menuPost.get_string_tuple(),
doc="Post Archive Monitors",
)
input_specification = pvproperty(
name="INP", dtype=ChannelType.STRING, doc="Input Specification"
)
length_of_val = pvproperty(
name="LEN", dtype=ChannelType.LONG, doc="Length of VAL", read_only=True
)
post_value_monitors = pvproperty(
name="MPST",
dtype=ChannelType.ENUM,
enum_strings=menus.menuPost.get_string_tuple(),
doc="Post Value Monitors",
)
prev_simulation_mode = pvproperty(
name="OLDSIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuSimm.get_string_tuple(),
doc="Prev. Simulation Mode",
read_only=True,
)
length_of_oval = pvproperty(
name="OLEN",
dtype=ChannelType.LONG,
doc="Length of OVAL",
read_only=True,
)
sim_mode_async_delay = pvproperty(
name="SDLY",
dtype=ChannelType.DOUBLE,
doc="Sim. Mode Async Delay",
value=-1.0,
)
simulation_mode_link = pvproperty(
name="SIML", dtype=ChannelType.STRING, doc="Simulation Mode Link"
)
simulation_mode = pvproperty(
name="SIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuYesNo.get_string_tuple(),
doc="Simulation Mode",
)
simulation_mode_severity = pvproperty(
name="SIMS",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="Simulation Mode Severity",
)
simulation_input_link = pvproperty(
name="SIOL", dtype=ChannelType.STRING, doc="Simulation Input Link"
)
size_of_buffers = pvproperty(
name="SIZV",
dtype=ChannelType.INT,
doc="Size of buffers",
read_only=True,
value=41,
)
sim_mode_scan = pvproperty(
name="SSCN",
dtype=ChannelType.ENUM,
enum_strings=menus.menuScan.get_string_tuple(),
doc="Sim. Mode Scan",
value=0,
)
class LsoFields(RecordFieldGroup):
_record_type = "lso"
_dtype = None # DTYP of .VAL
has_val_field = False
copy_pvproperties(locals(), RecordFieldGroup)
device_type = pvproperty(
name="DTYP",
dtype=ChannelType.ENUM,
enum_strings=menus.dtyp_lso.get_string_tuple(),
doc="Device Type",
)
post_archive_monitors = pvproperty(
name="APST",
dtype=ChannelType.ENUM,
enum_strings=menus.menuPost.get_string_tuple(),
doc="Post Archive Monitors",
)
desired_output_link = pvproperty(
name="DOL", dtype=ChannelType.STRING, doc="Desired Output Link"
)
invalid_output_action = pvproperty(
name="IVOA",
dtype=ChannelType.ENUM,
enum_strings=menus.menuIvoa.get_string_tuple(),
doc="INVALID Output Action",
)
invalid_output_value = pvproperty(
name="IVOV",
dtype=ChannelType.CHAR,
max_length=40,
report_as_string=True,
doc="INVALID Output Value",
)
length_of_val = pvproperty(
name="LEN", dtype=ChannelType.LONG, doc="Length of VAL", read_only=True
)
post_value_monitors = pvproperty(
name="MPST",
dtype=ChannelType.ENUM,
enum_strings=menus.menuPost.get_string_tuple(),
doc="Post Value Monitors",
)
prev_simulation_mode = pvproperty(
name="OLDSIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuSimm.get_string_tuple(),
doc="Prev. Simulation Mode",
read_only=True,
)
length_of_oval = pvproperty(
name="OLEN",
dtype=ChannelType.LONG,
doc="Length of OVAL",
read_only=True,
)
output_mode_select = pvproperty(
name="OMSL",
dtype=ChannelType.ENUM,
enum_strings=menus.menuOmsl.get_string_tuple(),
doc="Output Mode Select",
)
output_specification = pvproperty(
name="OUT", dtype=ChannelType.STRING, doc="Output Specification"
)
sim_mode_async_delay = pvproperty(
name="SDLY",
dtype=ChannelType.DOUBLE,
doc="Sim. Mode Async Delay",
value=-1.0,
)
simulation_mode_link = pvproperty(
name="SIML", dtype=ChannelType.STRING, doc="Simulation Mode link"
)
simulation_mode = pvproperty(
name="SIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuYesNo.get_string_tuple(),
doc="Simulation Mode",
)
simulation_mode_severity = pvproperty(
name="SIMS",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="Simulation Mode Severity",
)
simulation_output_link = pvproperty(
name="SIOL", dtype=ChannelType.STRING, doc="Simulation Output Link"
)
size_of_buffers = pvproperty(
name="SIZV",
dtype=ChannelType.INT,
doc="Size of buffers",
read_only=True,
value=41,
)
sim_mode_scan = pvproperty(
name="SSCN",
dtype=ChannelType.ENUM,
enum_strings=menus.menuScan.get_string_tuple(),
doc="Sim. Mode Scan",
value=0,
)
class MbbiFields(RecordFieldGroup):
_record_type = "mbbi"
_dtype = ChannelType.ENUM # DTYP of .VAL
has_val_field = True
copy_pvproperties(locals(), RecordFieldGroup)
device_type = pvproperty(
name="DTYP",
dtype=ChannelType.ENUM,
enum_strings=menus.dtyp_mbbi.get_string_tuple(),
doc="Device Type",
)
alarm_filter_time_constant = pvproperty(
name="AFTC", dtype=ChannelType.DOUBLE, doc="Alarm Filter Time Constant"
)
alarm_filter_value = pvproperty(
name="AFVL",
dtype=ChannelType.DOUBLE,
doc="Alarm Filter Value",
read_only=True,
)
change_of_state_svr = pvproperty(
name="COSV",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="Change of State Svr",
)
eight_string = pvproperty(
name="EIST",
dtype=ChannelType.CHAR,
max_length=26,
report_as_string=True,
doc="Eight String",
)
state_eight_severity = pvproperty(
name="EISV",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="State Eight Severity",
)
eight_value = pvproperty(
name="EIVL", dtype=ChannelType.LONG, doc="Eight Value"
)
eleven_string = pvproperty(
name="ELST",
dtype=ChannelType.CHAR,
max_length=26,
report_as_string=True,
doc="Eleven String",
)
state_eleven_severity = pvproperty(
name="ELSV",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="State Eleven Severity",
)
eleven_value = pvproperty(
name="ELVL", dtype=ChannelType.LONG, doc="Eleven Value"
)
fifteen_string = pvproperty(
name="FFST",
dtype=ChannelType.CHAR,
max_length=26,
report_as_string=True,
doc="Fifteen String",
)
state_fifteen_severity = pvproperty(
name="FFSV",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="State Fifteen Severity",
)
fifteen_value = pvproperty(
name="FFVL", dtype=ChannelType.LONG, doc="Fifteen Value"
)
four_string = pvproperty(
name="FRST",
dtype=ChannelType.CHAR,
max_length=26,
report_as_string=True,
doc="Four String",
)
state_four_severity = pvproperty(
name="FRSV",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="State Four Severity",
)
four_value = pvproperty(
name="FRVL", dtype=ChannelType.LONG, doc="Four Value"
)
fourteen_string = pvproperty(
name="FTST",
dtype=ChannelType.CHAR,
max_length=26,
report_as_string=True,
doc="Fourteen String",
)
state_fourteen_sevr = pvproperty(
name="FTSV",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="State Fourteen Sevr",
)
fourteen_value = pvproperty(
name="FTVL", dtype=ChannelType.LONG, doc="Fourteen Value"
)
five_string = pvproperty(
name="FVST",
dtype=ChannelType.CHAR,
max_length=26,
report_as_string=True,
doc="Five String",
)
state_five_severity = pvproperty(
name="FVSV",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="State Five Severity",
)
five_value = pvproperty(
name="FVVL", dtype=ChannelType.LONG, doc="Five Value"
)
input_specification = pvproperty(
name="INP", dtype=ChannelType.STRING, doc="Input Specification"
)
last_value_alarmed = pvproperty(
name="LALM",
dtype=ChannelType.INT,
doc="Last Value Alarmed",
read_only=True,
)
hardware_mask = pvproperty(
name="MASK", dtype=ChannelType.LONG, doc="Hardware Mask", read_only=True
)
last_value_monitored = pvproperty(
name="MLST",
dtype=ChannelType.INT,
doc="Last Value Monitored",
read_only=True,
)
nine_string = pvproperty(
name="NIST",
dtype=ChannelType.CHAR,
max_length=26,
report_as_string=True,
doc="Nine String",
)
state_nine_severity = pvproperty(
name="NISV",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="State Nine Severity",
)
nine_value = pvproperty(
name="NIVL", dtype=ChannelType.LONG, doc="Nine Value"
)
number_of_bits = pvproperty(
name="NOBT", dtype=ChannelType.INT, doc="Number of Bits", read_only=True
)
prev_simulation_mode = pvproperty(
name="OLDSIMM",
dtype=ChannelType.ENUM,
enum_strings=menus.menuSimm.get_string_tuple(),
doc="Prev. Simulation Mode",
read_only=True,
)
one_string = pvproperty(
name="ONST",
dtype=ChannelType.CHAR,
max_length=26,
report_as_string=True,
doc="One String",
)
state_one_severity = pvproperty(
name="ONSV",
dtype=ChannelType.ENUM,
enum_strings=menus.menuAlarmSevr.get_string_tuple(),
doc="State One Severity",
)
one_value = pvproperty(name="ONVL", dtype=ChannelType.LONG, doc="One Value")
prev_raw_value = pvproperty(
name="ORAW",
dtype=ChannelType.LONG,
doc="Prev Raw Value",
read_only=True,
)
raw_value = pvproperty(name="RVAL", dtype=ChannelType.LONG, doc="Raw Value")
states_defined = pvproperty(
name="SDEF", dtype=ChannelType.INT, doc="States Defined", read_only=True
)
sim_mode_async_delay = pvproperty(
name="SDLY",
dtype=ChannelType.DOUBLE,
doc="Sim. Mode | |
# Repository: brucepro/micropsi2
__author__ = 'rvuine'
import json
import os
import warnings
from micropsi_core.nodenet import monitor
from micropsi_core.nodenet.node import Nodetype
from micropsi_core.nodenet.nodenet import Nodenet, NODENET_VERSION, NodenetLockException
from .dict_stepoperators import DictPropagate, DictPORRETDecay, DictCalculate, DictDoernerianEmotionalModulators
from .dict_node import DictNode
from .dict_nodespace import DictNodespace
import copy
STANDARD_NODETYPES = {
"Nodespace": {
"name": "Nodespace"
},
"Comment": {
"name": "Comment",
"symbol": "#",
'parameters': ['comment'],
"shape": "Rectangle"
},
"Register": {
"name": "Register",
"slottypes": ["gen"],
"nodefunction_name": "register",
"gatetypes": ["gen"]
},
"Sensor": {
"name": "Sensor",
"parameters": ["datasource"],
"nodefunction_name": "sensor",
"gatetypes": ["gen"]
},
"Actor": {
"name": "Actor",
"parameters": ["datatarget"],
"nodefunction_name": "actor",
"slottypes": ["gen"],
"gatetypes": ["gen"]
},
"Concept": {
"name": "Concept",
"slottypes": ["gen"],
"nodefunction_name": "concept",
"gatetypes": ["gen", "por", "ret", "sub", "sur", "cat", "exp", "sym", "ref"]
},
"Script": {
"name": "Script",
"slottypes": ["gen", "por", "ret", "sub", "sur"],
"nodefunction_name": "script",
"gatetypes": ["gen", "por", "ret", "sub", "sur", "cat", "exp", "sym", "ref"],
"gate_defaults": {
"por": {
"threshold": -1
},
"ret": {
"threshold": -1
},
"sub": {
"threshold": -1
},
"sur": {
"threshold": -1
}
}
},
"Pipe": {
"name": "Pipe",
"slottypes": ["gen", "por", "ret", "sub", "sur", "cat", "exp"],
"nodefunction_name": "pipe",
"gatetypes": ["gen", "por", "ret", "sub", "sur", "cat", "exp"],
"gate_defaults": {
"gen": {
"minimum": -100,
"maximum": 100,
"threshold": -100,
"spreadsheaves": 0
},
"por": {
"minimum": -100,
"maximum": 100,
"threshold": -100,
"spreadsheaves": 0
},
"ret": {
"minimum": -100,
"maximum": 100,
"threshold": -100,
"spreadsheaves": 0
},
"sub": {
"minimum": -100,
"maximum": 100,
"threshold": -100,
"spreadsheaves": True
},
"sur": {
"minimum": -100,
"maximum": 100,
"threshold": -100,
"spreadsheaves": 0
},
"cat": {
"minimum": -100,
"maximum": 100,
"threshold": -100,
"spreadsheaves": 1
},
"exp": {
"minimum": -100,
"maximum": 100,
"threshold": -100,
"spreadsheaves": 0
}
},
'symbol': 'πp',
'shape': 'Rectangle'
},
"Trigger": {
"name": "Trigger",
"slottypes": ["gen", "sub", "sur"],
"nodefunction_name": "trigger",
"gatetypes": ["gen", "sub", "sur"],
"gate_defaults": {
"gen": {
"minimum": -100,
"maximum": 100,
"threshold": -100,
"spreadsheaves": 0
},
"sub": {
"minimum": -100,
"maximum": 100,
"threshold": -100,
"spreadsheaves": 0
},
"sur": {
"minimum": -100,
"maximum": 100,
"threshold": -100,
"spreadsheaves": 0
}
},
"parameters": ["timeout", "condition", "response"],
"parameter_values": {
"condition": ["=", ">"]
}
},
"Activator": {
"name": "Activator",
"slottypes": ["gen"],
"parameters": ["type"],
"parameter_values": {"type": ["gen", "por", "ret", "sub", "sur", "cat", "exp", "sym", "ref"]},
"nodefunction_name": "activator"
}
}
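# Hedged illustration (not part of the original module): a minimal helper showing how the
# STANDARD_NODETYPES table above can be queried for per-gate defaults. The helper name and the
# fallback value are assumptions for demonstration only.
def _example_gate_default(nodetype_name, gate_name, parameter, fallback=None):
    """Return a gate default from STANDARD_NODETYPES, or `fallback` if it is not defined."""
    definition = STANDARD_NODETYPES.get(nodetype_name, {})
    return definition.get("gate_defaults", {}).get(gate_name, {}).get(parameter, fallback)

# Example: Pipe nodes use a threshold of -100 on their 'sub' gate in the table above.
# assert _example_gate_default("Pipe", "sub", "threshold") == -100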
class DictNodenet(Nodenet):
"""Main data structure for MicroPsi agents,
Contains the net entities and runs the activation spreading. The nodenet stores persistent data.
Attributes:
state: a dict of persistent nodenet data; everything stored within the state can be stored and exported
uid: a unique identifier for the node net
name: an optional name for the node net
filename: the path and file name to the file storing the persisted net data
nodespaces: a dictionary of node space UIDs and respective node spaces
nodes: a dictionary of node UIDs and respective nodes
links: a dictionary of link UIDs and respective links
gate_types: a dictionary of gate type names and the individual types of gates
slot_types: a dictionary of slot type names and the individual types of slots
node_types: a dictionary of node type names and node type definitions
world: an environment for the node net
worldadapter: an actual world adapter object residing in a world implementation, provides interface
owner: an id of the user who created the node net
step: the current simulation step of the node net
"""
@property
def data(self):
data = super(DictNodenet, self).data
data['links'] = self.construct_links_dict()
data['nodes'] = self.construct_nodes_dict()
for uid in data['nodes']:
data['nodes'][uid]['gate_parameters'] = self.get_node(uid).clone_non_default_gate_parameters()
data['nodespaces'] = self.construct_nodespaces_dict("Root")
data['version'] = self.__version
data['modulators'] = self.construct_modulators_dict()
return data
@property
def engine(self):
return "dict_engine"
@property
def current_step(self):
return self.__step
def __init__(self, filename, name="", worldadapter="Default", world=None, owner="", uid=None, native_modules={}):
"""Create a new MicroPsi agent.
Arguments:
filename: the path and filename of the agent
agent_type (optional): the interface of this agent to its environment
name (optional): the name of the agent
owner (optional): the user that created this agent
uid (optional): unique handle of the agent; if none is given, it will be generated
"""
super(DictNodenet, self).__init__(name or os.path.basename(filename), worldadapter, world, owner, uid)
self.stepoperators = [DictPropagate(), DictCalculate(), DictPORRETDecay(), DictDoernerianEmotionalModulators()]
self.stepoperators.sort(key=lambda op: op.priority)
self.__version = NODENET_VERSION # used to check compatibility of the node net data
self.__step = 0
self.__modulators = {}
self.settings = {}
self.filename = filename
if world and worldadapter:
self.worldadapter = worldadapter
self.__nodes = {}
self.__nodetypes = STANDARD_NODETYPES
self.__native_modules = native_modules
self.__nodespaces = {}
self.__nodespaces["Root"] = DictNodespace(self, None, (0, 0), name="Root", uid="Root")
self.__locks = {}
self.__nodes_by_coords = {}
self.load()
def load(self, string=None):
"""Load the node net from a file"""
# try to access file
with self.netlock:
initfrom = {}
if string:
self.logger.info("Loading nodenet %s from string", self.name)
try:
initfrom.update(json.loads(string))
except ValueError:
warnings.warn("Could not read nodenet data from string")
return False
else:
try:
self.logger.info("Loading nodenet %s from file %s", self.name, self.filename)
with open(self.filename) as file:
initfrom.update(json.load(file))
except ValueError:
warnings.warn("Could not read nodenet data")
return False
except IOError:
warnings.warn("Could not open nodenet file")
if self.__version == NODENET_VERSION:
self.initialize_nodenet(initfrom)
return True
else:
raise NotImplementedError("Wrong version of nodenet data, cannot import.")
def reload_native_modules(self, native_modules):
""" reloads the native-module definition, and their nodefunctions
and afterwards reinstantiates the nodenet."""
self.__native_modules = {}
for key in native_modules:
self.__native_modules[key] = Nodetype(nodenet=self, **native_modules[key])
self.__native_modules[key].reload_nodefunction()
saved = self.data
self.clear()
self.merge_data(saved)
def initialize_nodespace(self, id, data):
if id not in self.__nodespaces:
# move up the nodespace tree until we find an existing parent or hit root
while id != 'Root' and data[id].get('parent_nodespace') not in self.__nodespaces:
self.initialize_nodespace(data[id]['parent_nodespace'], data)
self.__nodespaces[id] = DictNodespace(self,
data[id].get('parent_nodespace'),
data[id].get('position'),
name=data[id].get('name', 'Root'),
uid=id,
index=data[id].get('index'),
gatefunction_strings=data[id].get('gatefunctions'))
def initialize_nodenet(self, initfrom):
"""Called after reading new nodenet state.
Parses the nodenet state and sets up the non-persistent data structures necessary for efficient
computation of the node net
"""
nodetypes = {}
for type, data in self.__nodetypes.items():
nodetypes[type] = Nodetype(nodenet=self, **data)
self.__nodetypes = nodetypes
native_modules = {}
for type, data in self.__native_modules.items():
native_modules[type] = Nodetype(nodenet=self, **data)
self.__native_modules = native_modules
self.__modulators = initfrom.get("modulators", {})
# set up nodespaces; make sure that parent nodespaces exist before children are initialized
self.__nodespaces = {}
self.__nodespaces["Root"] = DictNodespace(self, None, (0, 0), name="Root", uid="Root")
# now merge in all init data (from the persisted file typically)
self.merge_data(initfrom)
def construct_links_dict(self):
data = {}
for node_uid in self.get_node_uids():
links = self.get_node(node_uid).get_associated_links()
for link in links:
data[link.uid] = link.data
return data
def construct_nodes_dict(self, max_nodes=-1):
data = {}
i = 0
for node_uid in self.get_node_uids():
i += 1
data[node_uid] = self.get_node(node_uid).data
if max_nodes > 0 and i > max_nodes:
break
return data
def construct_nodespaces_dict(self, nodespace_uid):
data = {}
for nodespace_candidate_uid in self.get_nodespace_uids():
if self.get_nodespace(nodespace_candidate_uid).parent_nodespace == nodespace_uid or nodespace_candidate_uid == nodespace_uid:
data[nodespace_candidate_uid] = self.get_nodespace(nodespace_candidate_uid).data
return data
def get_nodetype(self, type):
""" Returns the nodetpype instance for the given nodetype or native_module or None if not found"""
if type in self.__nodetypes:
return self.__nodetypes[type]
else:
return self.__native_modules.get(type)
def get_nodespace_area_data(self, nodespace, x1, x2, y1, y2):
x_range = (x1 - (x1 % 100), 100 + x2 - (x2 % 100), 100)
y_range = (y1 - (y1 % 100), 100 + y2 - (y2 % 100), 100)
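# The ranges step in 100-unit buckets so that the lookups below line up with the
# 100-unit grid keys built for __nodes_by_coords in update_node_positions().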
world_uid = self.world.uid if self.world is not None else None
data = {
'links': {},
'nodes': {},
'name': self.name,
'max_coords': {'x': 0, 'y': 0},
'is_active': self.is_active,
'current_step': self.current_step,
'nodespaces': self.construct_nodespaces_dict(nodespace),
'world': world_uid,
'worldadapter': self.worldadapter,
'modulators': self.construct_modulators_dict()
}
if self.user_prompt is not None:
data['user_prompt'] = self.user_prompt.copy()
self.user_prompt = None
links = []
followupnodes = []
for x in range(*x_range):
if x in self.__nodes_by_coords:
for y in range(*y_range):
if y in self.__nodes_by_coords[x]:
for uid in self.__nodes_by_coords[x][y]:
if self.get_node(uid).parent_nodespace == nodespace: # maybe sort directly by nodespace??
node = self.get_node(uid)
data['nodes'][uid] = node.data
if node.position[0] > data['max_coords']['x']:
data['max_coords']['x'] = node.position[0]
if node.position[1] > data['max_coords']['y']:
data['max_coords']['y'] = node.position[1]
links.extend(self.get_node(uid).get_associated_links())
followupnodes.extend(self.get_node(uid).get_associated_node_uids())
for link in links:
data['links'][link.uid] = link.data
for uid in followupnodes:
if uid not in data['nodes']:
data['nodes'][uid] = self.get_node(uid).data
return data
def update_node_positions(self):
""" recalculates the position hash """
self.__nodes_by_coords = {}
self.max_coords = {'x': 0, 'y': 0}
for uid in self.get_node_uids():
pos = self.get_node(uid).position
xpos = int(pos[0] - (pos[0] % 100))
ypos = int(pos[1] - (pos[1] % 100))
if xpos not in self.__nodes_by_coords:
self.__nodes_by_coords[xpos] = {}
if xpos > self.max_coords['x']:
self.max_coords['x'] = xpos
hbox_main.addLayout(vbox_apps)
# Vertical separation line
vertical_sep_01 = QtWidgets.QLabel('', window)
vertical_sep_01.setFrameStyle(QtWidgets.QFrame.VLine | QtWidgets.QFrame.Plain)
vertical_sep_01.setStyleSheet('QFrame {color: #444444}')
hbox_main.addWidget(vertical_sep_01)
paneTabs.setLayout(hbox_main)
paneTabs.move(10, 10)
# General
paneGeneral.setFixedSize(840, 264)
paneGeneral.move(172, 20)
paneGeneral.setVisible(False)
# General::BatchLink label
lbl_batchLink = QtWidgets.QLabel('Batch Link Autosave Path', paneGeneral)
lbl_batchLink.setStyleSheet('QFrame {color: #989898; background-color: #373737}')
lbl_batchLink.setMinimumSize(840, 28)
lbl_batchLink.setAlignment(QtCore.Qt.AlignCenter)
def update_batchLinkPathLabel():
batch_link_path = self.framework.prefs.get('flameBatchBlessing', {}).get('flame_batch_root')
flame_project_name = self.flame.project.current_project.name
if self.framework.prefs['flameBatchBlessing'].get('use_project', True):
lbl_batchLinkPath.setText(os.path.join(batch_link_path, flame_project_name))
else:
lbl_batchLinkPath.setText(batch_link_path)
# General::BatchLink Enable button
def enableBatchLink():
if self.framework.prefs['flameBatchBlessing'].get('enabled', True):
btn_batchLink.setStyleSheet('QPushButton {color: #989898; background-color: #373737; border-top: 1px inset #555555; border-bottom: 1px inset black}')
self.framework.prefs['flameBatchBlessing']['enabled'] = False
else:
btn_batchLink.setStyleSheet('QPushButton {font:italic; background-color: #4f4f4f; color: #d9d9d9; border-top: 1px inset black; border-bottom: 1px inset #555555}')
self.framework.prefs['flameBatchBlessing']['enabled'] = True
btn_batchLink = QtWidgets.QPushButton('Batch Link', paneGeneral)
btn_batchLink.setFocusPolicy(QtCore.Qt.NoFocus)
btn_batchLink.setMinimumSize(88, 28)
btn_batchLink.move(0, 34)
if self.framework.prefs['flameBatchBlessing'].get('enabled', True):
btn_batchLink.setStyleSheet('QPushButton {font:italic; background-color: #4f4f4f; color: #d9d9d9; border-top: 1px inset black; border-bottom: 1px inset #555555}')
else:
btn_batchLink.setStyleSheet('QPushButton {color: #989898; background-color: #373737; border-top: 1px inset #555555; border-bottom: 1px inset black}')
btn_batchLink.pressed.connect(enableBatchLink)
# General::BatchLink default path button
def batchLinkDefault():
self.framework.prefs['flameBatchBlessing']['flame_batch_root'] = '/var/tmp/flameMenuSG/flame_batch_setups'
update_batchLinkPathLabel()
self.framework.save_prefs()
btn_batchLinkDefault = QtWidgets.QPushButton('Default', paneGeneral)
btn_batchLinkDefault.setFocusPolicy(QtCore.Qt.NoFocus)
btn_batchLinkDefault.setMinimumSize(88, 28)
btn_batchLinkDefault.move(94, 34)
btn_batchLinkDefault.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}')
btn_batchLinkDefault.clicked.connect(batchLinkDefault)
# General::BatchLink path field
lbl_batchLinkPath = QtWidgets.QLabel(paneGeneral)
lbl_batchLinkPath.setFocusPolicy(QtCore.Qt.NoFocus)
lbl_batchLinkPath.setMinimumSize(464, 28)
lbl_batchLinkPath.move(188,34)
lbl_batchLinkPath.setStyleSheet('QFrame {color: #9a9a9a; background-color: #222222}')
lbl_batchLinkPath.setFrameStyle(QtWidgets.QFrame.Box | QtWidgets.QFrame.Plain)
update_batchLinkPathLabel()
# General::BatchLink Add Flame project name button
def batchLinkUseProject():
if self.framework.prefs['flameBatchBlessing'].get('use_project', True):
btn_batchLinkUseProject.setStyleSheet('QPushButton {color: #989898; background-color: #373737; border-top: 1px inset #555555; border-bottom: 1px inset black}')
self.framework.prefs['flameBatchBlessing']['use_project'] = False
else:
btn_batchLinkUseProject.setStyleSheet('QPushButton {font:italic; background-color: #4f4f4f; color: #d9d9d9; border-top: 1px inset black; border-bottom: 1px inset #555555}')
self.framework.prefs['flameBatchBlessing']['use_project'] = True
update_batchLinkPathLabel()
self.framework.save_prefs()
btn_batchLinkUseProject = QtWidgets.QPushButton('Use Project', paneGeneral)
btn_batchLinkUseProject.setFocusPolicy(QtCore.Qt.NoFocus)
btn_batchLinkUseProject.setMinimumSize(88, 28)
btn_batchLinkUseProject.move(658, 34)
if self.framework.prefs['flameBatchBlessing'].get('use_project', True):
btn_batchLinkUseProject.setStyleSheet('QPushButton {font:italic; background-color: #4f4f4f; color: #d9d9d9; border-top: 1px inset black; border-bottom: 1px inset #555555}')
else:
btn_batchLinkUseProject.setStyleSheet('QPushButton {color: #989898; background-color: #373737; border-top: 1px inset #555555; border-bottom: 1px inset black}')
btn_batchLinkUseProject.pressed.connect(batchLinkUseProject)
# General::BatchLink Browse button
def batchLinkBrowse():
batch_link_path = self.framework.prefs.get('flameBatchBlessing', {}).get('flame_batch_root')
dialog = QtWidgets.QFileDialog()
dialog.setWindowTitle('Select Batch Autosave Folder')
dialog.setDirectory(batch_link_path)
new_path = dialog.getExistingDirectory(directory=batch_link_path,
options=dialog.ShowDirsOnly)
if new_path:
self.framework.prefs['flameBatchBlessing']['flame_batch_root'] = new_path
update_batchLinkPathLabel()
self.framework.save_prefs()
btn_batchLinkBrowse = QtWidgets.QPushButton('Browse', paneGeneral)
btn_batchLinkBrowse.setFocusPolicy(QtCore.Qt.NoFocus)
btn_batchLinkBrowse.setMinimumSize(88, 28)
btn_batchLinkBrowse.move(752, 34)
btn_batchLinkBrowse.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}')
btn_batchLinkBrowse.clicked.connect(batchLinkBrowse)
# General::Loader PublishedFileTypes label
'''
lbl_PublishedFileTypes = QtWidgets.QLabel('Loader Published File Types', paneGeneral)
lbl_PublishedFileTypes.setStyleSheet('QFrame {color: #989898; background-color: #373737}')
lbl_PublishedFileTypes.setMinimumSize(536, 28)
lbl_PublishedFileTypes.move(304, 68)
lbl_PublishedFileTypes.setAlignment(QtCore.Qt.AlignCenter)
# General::Loader PublishedFileTypes Button 1
btn_PublishedFileType1 = QtWidgets.QPushButton(paneGeneral)
btn_PublishedFileType1.setFocusPolicy(QtCore.Qt.NoFocus)
btn_PublishedFileType1.setText('Not Implemented')
btn_PublishedFileType1.setMinimumSize(128, 28)
btn_PublishedFileType1.move(304, 102)
btn_PublishedFileType1.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #29323d; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}'
'QPushButton::menu-indicator {image: none;}')
btn_PublishedFileType1_menu = QtWidgets.QMenu()
btn_PublishedFileType1_menu.addAction('File Sequence', set_presetTypeImage)
btn_PublishedFileType1_menu.addAction('Movie', set_presetTypeMovie)
btn_PublishedFileType1.setMenu(btn_PublishedFileType1_menu)
# General::Loader PublishedFileTypes Button 2
btn_PublishedFileType2 = QtWidgets.QPushButton(paneGeneral)
btn_PublishedFileType2.setFocusPolicy(QtCore.Qt.NoFocus)
btn_PublishedFileType2.setText('Not Implemented')
btn_PublishedFileType2.setMinimumSize(128, 28)
btn_PublishedFileType2.move(440, 102)
btn_PublishedFileType2.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #29323d; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}'
'QPushButton::menu-indicator {image: none;}')
btn_PublishedFileType2_menu = QtWidgets.QMenu()
btn_PublishedFileType2_menu.addAction('File Sequence', set_presetTypeImage)
btn_PublishedFileType2_menu.addAction('Movie', set_presetTypeMovie)
btn_PublishedFileType2.setMenu(btn_PublishedFileType1_menu)
# General::Loader PublishedFileTypes Button 3
btn_PublishedFileType3 = QtWidgets.QPushButton(paneGeneral)
btn_PublishedFileType3.setFocusPolicy(QtCore.Qt.NoFocus)
btn_PublishedFileType3.setText('Not Implemented')
btn_PublishedFileType3.setMinimumSize(128, 28)
btn_PublishedFileType3.move(576, 102)
btn_PublishedFileType3.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #29323d; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}'
'QPushButton::menu-indicator {image: none;}')
btn_PublishedFileType3_menu = QtWidgets.QMenu()
btn_PublishedFileType3_menu.addAction('File Sequence', set_presetTypeImage)
btn_PublishedFileType3_menu.addAction('Movie', set_presetTypeMovie)
btn_PublishedFileType3.setMenu(btn_PublishedFileType1_menu)
# General::Loader PublishedFileTypes Button 4
btn_PublishedFileType4 = QtWidgets.QPushButton(paneGeneral)
btn_PublishedFileType4.setFocusPolicy(QtCore.Qt.NoFocus)
btn_PublishedFileType4.setText('Not Implemented')
btn_PublishedFileType4.setMinimumSize(128, 28)
btn_PublishedFileType4.move(712, 102)
btn_PublishedFileType4.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #29323d; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}'
'QPushButton::menu-indicator {image: none;}')
btn_PublishedFileType4_menu = QtWidgets.QMenu()
btn_PublishedFileType4_menu.addAction('File Sequence', set_presetTypeImage)
btn_PublishedFileType4_menu.addAction('Movie', set_presetTypeMovie)
btn_PublishedFileType4.setMenu(btn_PublishedFileType1_menu)
# General::Loader PublishedFileTypes Button 5
btn_PublishedFileType5 = QtWidgets.QPushButton(paneGeneral)
btn_PublishedFileType5.setFocusPolicy(QtCore.Qt.NoFocus)
btn_PublishedFileType5.setText('Flame Batch File')
btn_PublishedFileType5.setMinimumSize(128, 28)
btn_PublishedFileType5.move(304, 136)
btn_PublishedFileType5.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #29323d; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}'
'QPushButton::menu-indicator {image: none;}')
btn_PublishedFileType5_menu = QtWidgets.QMenu()
btn_PublishedFileType5_menu.addAction('File Sequence', set_presetTypeImage)
btn_PublishedFileType5_menu.addAction('Movie', set_presetTypeMovie)
btn_PublishedFileType5.setMenu(btn_PublishedFileType1_menu)
# General::Loader PublishedFileTypes Button 6
btn_PublishedFileType6 = QtWidgets.QPushButton(paneGeneral)
btn_PublishedFileType6.setFocusPolicy(QtCore.Qt.NoFocus)
btn_PublishedFileType6.setText('Flame Batch File')
btn_PublishedFileType6.setMinimumSize(128, 28)
btn_PublishedFileType6.move(440, 136)
btn_PublishedFileType6.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #29323d; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}'
'QPushButton::menu-indicator {image: none;}')
btn_PublishedFileType6_menu = QtWidgets.QMenu()
btn_PublishedFileType6_menu.addAction('File Sequence', set_presetTypeImage)
btn_PublishedFileType6_menu.addAction('Movie', set_presetTypeMovie)
btn_PublishedFileType6.setMenu(btn_PublishedFileType1_menu)
# General::Loader PublishedFileTypes Button 7
btn_PublishedFileType7 = QtWidgets.QPushButton(paneGeneral)
btn_PublishedFileType7.setFocusPolicy(QtCore.Qt.NoFocus)
btn_PublishedFileType7.setText('Flame Batch File')
btn_PublishedFileType7.setMinimumSize(128, 28)
btn_PublishedFileType7.move(576, 136)
btn_PublishedFileType7.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #29323d; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}'
'QPushButton::menu-indicator {image: none;}')
btn_PublishedFileType7_menu = QtWidgets.QMenu()
btn_PublishedFileType7_menu.addAction('File Sequence', set_presetTypeImage)
btn_PublishedFileType7_menu.addAction('Movie', set_presetTypeMovie)
btn_PublishedFileType7.setMenu(btn_PublishedFileType1_menu)
# General::Loader PublishedFileTypes Button 8
btn_PublishedFileType8 = QtWidgets.QPushButton(paneGeneral)
btn_PublishedFileType8.setFocusPolicy(QtCore.Qt.NoFocus)
btn_PublishedFileType8.setText('Flame Batch File')
btn_PublishedFileType8.setMinimumSize(128, 28)
btn_PublishedFileType8.move(712, 136)
btn_PublishedFileType8.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #29323d; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}'
'QPushButton::menu-indicator {image: none;}')
btn_PublishedFileType8_menu = QtWidgets.QMenu()
btn_PublishedFileType8_menu.addAction('File Sequence', set_presetTypeImage)
btn_PublishedFileType8_menu.addAction('Movie', set_presetTypeMovie)
btn_PublishedFileType8.setMenu(btn_PublishedFileType1_menu)
''' # end of loader PublishedFileType settings
# General::Create Default Task Template Label
lbl_DefTaskTemplate = QtWidgets.QLabel('Default Task Template', paneGeneral)
lbl_DefTaskTemplate.setStyleSheet('QFrame {color: #989898; background-color: #373737}')
lbl_DefTaskTemplate.setMinimumSize(298, 28)
lbl_DefTaskTemplate.move(0, 68)
lbl_DefTaskTemplate.setAlignment(QtCore.Qt.AlignCenter)
# General::Create Shot Task Template Label
lbl_ShotTaskTemplate = QtWidgets.QLabel('Shot', paneGeneral)
lbl_ShotTaskTemplate.setStyleSheet('QFrame {color: #989898;}')
lbl_ShotTaskTemplate.setMinimumSize(36, 28)
lbl_ShotTaskTemplate.move(0, 102)
# General::Loader Shot Task Template Menu
btn_ShotTaskTemplate = QtWidgets.QPushButton(paneGeneral)
flameMenuNewBatch_prefs = self.framework.prefs.get('flameMenuNewBatch', {})
shot_task_template = flameMenuNewBatch_prefs.get('shot_task_template', {})
code = shot_task_template.get('code', 'No code')
btn_ShotTaskTemplate.setText(code)
shot_task_templates = self.connector.sg.find('TaskTemplate', [['entity_type', 'is', 'Shot']], ['code'])
shot_task_templates_by_id = {x.get('id'):x for x in shot_task_templates}
shot_task_templates_by_code_id = {x.get('code') + '_' + str(x.get('id')):x for x in shot_task_templates}
def selectShotTaskTemplate(template_id):
template = shot_task_templates_by_id.get(template_id, {})
code = template.get('code', 'no_code')
btn_ShotTaskTemplate.setText(code)
self.framework.prefs['flameMenuNewBatch']['shot_task_template'] = template
btn_ShotTaskTemplate.setFocusPolicy(QtCore.Qt.NoFocus)
btn_ShotTaskTemplate.setMinimumSize(258, 28)
btn_ShotTaskTemplate.move(40, 102)
btn_ShotTaskTemplate.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #29323d; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}'
'QPushButton::menu-indicator {image: none;}')
btn_ShotTaskTemplate_menu = QtWidgets.QMenu()
for code_id in sorted(shot_task_templates_by_code_id.keys()):
template = shot_task_templates_by_code_id.get(code_id, {})
code = template.get('code', 'no_code')
template_id = template.get('id')
action = btn_ShotTaskTemplate_menu.addAction(code)
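# Bind the current template_id as a default argument: a plain closure would late-bind
# and every menu action would end up selecting the last template of the loop.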
x = lambda chk=False, template_id=template_id: selectShotTaskTemplate(template_id)
action.triggered[()].connect(x)
btn_ShotTaskTemplate.setMenu(btn_ShotTaskTemplate_menu)
# General::Create Asset Task Template Label
lbl_AssetTaskTemplate = QtWidgets.QLabel('Asset', paneGeneral)
lbl_AssetTaskTemplate.setStyleSheet('QFrame {color: #989898;}')
lbl_AssetTaskTemplate.setMinimumSize(36, 28)
lbl_AssetTaskTemplate.move(0, 136)
# General::Loader Asset Task Template Menu
btn_AssetTaskTemplate = QtWidgets.QPushButton(paneGeneral)
flameMenuNewBatch_prefs = self.framework.prefs.get('flameMenuNewBatch', {})
asset_task_template = flameMenuNewBatch_prefs.get('asset_task_template', {})
code = asset_task_template.get('code', 'No code')
btn_AssetTaskTemplate.setText(code)
asset_task_templates = self.connector.sg.find('TaskTemplate', [['entity_type', 'is', 'Asset']], ['code'])
asset_task_templates_by_id = {x.get('id'):x for x in asset_task_templates}
asset_task_templates_by_code_id = {x.get('code') + '_' + str(x.get('id')):x for x in asset_task_templates}
def selectAssetTaskTemplate(template_id):
template = asset_task_templates_by_id.get(template_id, {})
code = template.get('code', 'no_code')
btn_AssetTaskTemplate.setText(code)
self.framework.prefs['flameMenuNewBatch']['asset_task_template'] = template
btn_AssetTaskTemplate.setFocusPolicy(QtCore.Qt.NoFocus)
btn_AssetTaskTemplate.setMinimumSize(258, 28)
btn_AssetTaskTemplate.move(40, 136)
btn_AssetTaskTemplate.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #29323d; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}'
'QPushButton::menu-indicator {image: none;}')
btn_AssetTaskTemplate_menu = QtWidgets.QMenu()
for code_id in sorted(asset_task_templates_by_code_id.keys()):
template = asset_task_templates_by_code_id.get(code_id, {})
code = template.get('code', 'no_code')
template_id = template.get('id')
action = btn_AssetTaskTemplate_menu.addAction(code)
x = lambda chk=False, template_id=template_id: selectAssetTaskTemplate(template_id)
action.triggered[()].connect(x)
btn_AssetTaskTemplate.setMenu(btn_AssetTaskTemplate_menu)
# General::AutoRefresh button Label
lbl_AutoRefresh = QtWidgets.QLabel('Refresh Menu Automatically', paneGeneral)
lbl_AutoRefresh.setStyleSheet('QFrame {color: #989898; background-color: #373737}')
lbl_AutoRefresh.setMinimumSize(298, 28)
lbl_AutoRefresh.move(0, 170)
lbl_AutoRefresh.setAlignment(QtCore.Qt.AlignCenter)
lbl_AutoRefreshMsg = QtWidgets.QLabel('Use to debug right-click menu performance', paneGeneral)
lbl_AutoRefreshMsg.setStyleSheet('QFrame {color: #989898;}')
lbl_AutoRefreshMsg.setMinimumSize(36, 28)
lbl_AutoRefreshMsg.move(0, 204)
# General::AutoRefresh Main refresh button
def update_AutoRefreshMain():
menu_auto_refresh = self.framework.prefs_global.get('menu_auto_refresh', {})
main_menu = menu_auto_refresh.get('main_menu', False)
if main_menu:
btn_AutoRefreshMain.setStyleSheet('QPushButton {font:italic; background-color: #4f4f4f; color: #d9d9d9; border-top: 1px inset #555555; border-bottom: 1px inset black}')
else:
btn_AutoRefreshMain.setStyleSheet('QPushButton {color: #989898; background-color: #373737; border-top: 1px inset #555555; border-bottom: 1px inset black}')
def clicked_AutoRefreshMain():
menu_auto_refresh = self.framework.prefs_global.get('menu_auto_refresh', {})
menu_auto_refresh['main_menu'] = not menu_auto_refresh.get('main_menu', False)
self.framework.prefs_global['menu_auto_refresh'] = menu_auto_refresh
update_AutoRefreshMain()
btn_AutoRefreshMain = QtWidgets.QPushButton('Main Menu', paneGeneral)
btn_AutoRefreshMain.setFocusPolicy(QtCore.Qt.NoFocus)
btn_AutoRefreshMain.setMinimumSize(94, 28)
btn_AutoRefreshMain.move(0, 238)
btn_AutoRefreshMain.clicked.connect(clicked_AutoRefreshMain)
update_AutoRefreshMain()
# General::AutoRefresh Batch refresh button
def update_AutoRefreshBatch():
menu_auto_refresh = self.framework.prefs_global.get('menu_auto_refresh', {})
batch = menu_auto_refresh.get('batch', False)
if batch:
btn_AutoRefreshBatch.setStyleSheet('QPushButton {font:italic; background-color: #4f4f4f; color: #d9d9d9; border-top: 1px inset #555555; border-bottom: 1px inset black}')
else:
btn_AutoRefreshBatch.setStyleSheet('QPushButton {color: #989898; background-color: #373737; border-top: 1px inset #555555; border-bottom: 1px inset black}')
def clicked_AutoRefreshBatch():
menu_auto_refresh = self.framework.prefs_global.get('menu_auto_refresh', {})
menu_auto_refresh['batch'] = not menu_auto_refresh.get('batch', False)
self.framework.prefs_global['menu_auto_refresh'] = menu_auto_refresh
update_AutoRefreshBatch()
btn_AutoRefreshBatch = QtWidgets.QPushButton('Batch Menu', paneGeneral)
btn_AutoRefreshBatch.setFocusPolicy(QtCore.Qt.NoFocus)
btn_AutoRefreshBatch.setMinimumSize(94, 28)
btn_AutoRefreshBatch.move(100, 238)
btn_AutoRefreshBatch.clicked.connect(clicked_AutoRefreshBatch)
update_AutoRefreshBatch()
# General::AutoRefresh Media Panel refresh button
def update_AutoRefreshMediaPanel():
menu_auto_refresh = self.framework.prefs_global.get('menu_auto_refresh', {})
media_panel = menu_auto_refresh.get('media_panel', False)
if media_panel:
btn_AutoRefreshMediaPanel.setStyleSheet('QPushButton {font:italic; background-color: #4f4f4f; color: #d9d9d9; border-top: 1px inset #555555; border-bottom: 1px inset black}')
else:
btn_AutoRefreshMediaPanel.setStyleSheet('QPushButton {color: #989898; background-color: #373737; border-top: 1px inset #555555; border-bottom: 1px inset black}')
def clicked_AutoRefreshMediaPanel():
menu_auto_refresh = self.framework.prefs_global.get('menu_auto_refresh', {})
menu_auto_refresh['media_panel'] = not menu_auto_refresh.get('media_panel', False)
self.framework.prefs_global['menu_auto_refresh'] = menu_auto_refresh
update_AutoRefreshMediaPanel()
btn_AutoRefreshMediaPanel = QtWidgets.QPushButton('Media Panel', paneGeneral)
btn_AutoRefreshMediaPanel.setFocusPolicy(QtCore.Qt.NoFocus)
btn_AutoRefreshMediaPanel.setMinimumSize(94, 28)
btn_AutoRefreshMediaPanel.move(200, 238)
btn_AutoRefreshMediaPanel.clicked.connect(clicked_AutoRefreshMediaPanel)
| |
"shr": "Shi",
"shs": "Shuswap",
"sht": "Shasta",
"shu": "Chadian Arabic",
"shv": "Shehri",
"shw": "Shwai",
"shx": "She",
"shy": "Tachawit",
"shz": "<NAME>",
"si": "Sinhalese",
"si-med": "<NAME>",
"sia": "<NAME>",
"sib": "Sebop",
"sid": "Sidamo",
"sie": "Simaa",
"sif": "Siamou",
"sig": "Paasaal",
"sih": "Zire",
"sii": "<NAME>",
"sij": "Numbami",
"sik": "Sikiana",
"sil": "<NAME>",
"sim": "Seim",
"sio": "Siouan",
"sio-dhe": "Dhegihan",
"sio-dkt": "Dakotan",
"sio-mor": "Missouri River Siouan",
"sio-msv": "Mississippi Valley Siouan",
"sio-ohv": "Ohio Valley Siouan",
"sio-pro": "Proto-Siouan",
"sip": "Sikkimese",
"siq": "Sonia",
"sir": "Siri",
"sis": "Siuslaw",
"sit": "Sino-Tibetan",
"sit-aao": "Ao",
"sit-alm": "Almora",
"sit-bdi": "Bodish",
"sit-bok": "Bokar",
"sit-cha": "Chairel",
"sit-dhi": "Dhimalish",
"sit-ebo": "East Bodish",
"sit-gkh": "Gokhy",
"sit-gma": "Greater Magaric",
"sit-gsi": "Greater Siangic",
"sit-hrs": "Hrusish",
"sit-hrs-pro": "Proto-Hrusish",
"sit-jap": "Japhug",
"sit-jnp": "Jingphoic",
"sit-jpl": "Kachin-Luic",
"sit-kch": "Konyak-Chang",
"sit-kha": "Kham",
"sit-kha-pro": "Proto-Kham",
"sit-khb": "Kho-Bwa",
"sit-kic": "Central Kiranti",
"sit-kie": "Eastern Kiranti",
"sit-kin": "Kinnauri",
"sit-kir": "Kiranti",
"sit-kiw": "Western Kiranti",
"sit-kon": "Konyak",
"sit-kyk": "Kyirong-Kagate",
"sit-lab": "Ladakhi-Balti",
"sit-las": "Lahuli-Spiti",
"sit-liz": "Lizu",
"sit-luu": "Luish",
"sit-luu-pro": "Proto-Luish",
"sit-mar": "Maringic",
"sit-mdz": "Midzu",
"sit-mnz": "Mondzish",
"sit-mor": "Moran",
"sit-mru": "Mruic",
"sit-nax": "Naic",
"sit-new": "Newaric",
"sit-nng": "Nungish",
"sit-prn": "Puiron",
"sit-pro": "Proto-Sino-Tibetan",
"sit-qia": "Qiangic",
"sit-rgy": "Rgyalrongic",
"sit-sit": "Situ",
"sit-tam": "Tamangic",
"sit-tan": "Tani",
"sit-tan-pro": "Proto-Tani",
"sit-tgm": "Tangam",
"sit-tib": "Tibetic",
"sit-tja": "Tujia",
"sit-tma": "Tangkhul-Maring",
"sit-tng": "Tangkhulic",
"sit-tno": "Tangsa-Nocte",
"sit-tos": "Tosu",
"sit-tsh": "Tshobdun",
"sit-tsk": "Tshangla",
"sit-whm": "West Himalayish",
"sit-zbu": "Zbu",
"sit-zem": "Zeme",
"siu": "Sinagen",
"siv": "Sumariup",
"siw": "Siwai",
"six": "Sumau",
"siy": "Sivandi",
"siz": "Siwi",
"sja": "Epena",
"sjb": "<NAME>",
"sjd": "<NAME>",
"sje": "<NAME>",
"sjg": "Assangori",
"sjk": "<NAME>",
"sjl": "Miji",
"sjm": "Mapun",
"sjn": "Sindarin",
"sjo": "Xibe",
"sjp": "Surjapuri",
"sjr": "Siar-Lak",
"sjs": "<NAME>",
"sjt": "<NAME>",
"sju": "<NAME>",
"sjw": "Shawnee",
"sk": "Slovak",
"ska": "Skagit",
"skb": "Saek",
"skc": "Ma Manda",
"skd": "Southern Sierra Miwok",
"ske": "Ske",
"skf": "Sakirabiá",
"skh": "Sikule",
"ski": "Sika",
"skj": "Seke",
"skk": "Sok",
"skm": "Sakam",
"skn": "Kolibugan Subanon",
"sko": "Seko Tengah",
"skp": "Sekapan",
"skq": "Sininkere",
"skr": "Saraiki",
"skr-prk": "Sakari",
"sks": "Maia",
"skt": "Sakata",
"sku": "Sakao",
"skv": "Skou",
"skw": "Skepi Creole Dutch",
"skx": "Seko Padang",
"sky": "Sikaiana",
"skz": "Sekar",
"sl": "Slovene",
"sla": "Slavic",
"sla-pro": "Proto-Slavic",
"slc": "Saliba (Colombia)",
"sld": "Sissala",
"sle": "Sholaga",
"slf": "Swiss-Italian Sign Language",
"slg": "Selungai Murut",
"slh": "Southern Puget Sound Salish",
"sli": "Silesian German",
"slj": "Salumá",
"sll": "Salt-Yui",
"slm": "Pangutaran Sama",
"sln": "Salinan",
"slp": "Lamaholot",
"slq": "Salchuq",
"slr": "Salar",
"sls": "Singapore Sign Language",
"slt": "Sila",
"slu": "Selaru",
"slw": "Sialum",
"slx": "Salampasu",
"sly": "Selayar",
"slz": "Ma'ya",
"sm": "Samoan",
"sma": "Southern Sami",
"smb": "Simbari",
"smc": "Som",
"smd": "Sama",
"smf": "Auwe",
"smg": "Simbali",
"smh": "Samei",
"smi": "Sami",
"smi-pro": "Proto-Samic",
"smj": "<NAME>",
"smk": "Bolinao",
"sml": "Central Sama",
"smm": "Musasa",
"smn": "Inari Sami",
"smp": "<NAME>",
"smq": "Samo",
"smr": "Simeulue",
"sms": "<NAME>",
"smt": "Simte",
"smu": "Somray",
"smv": "Samvedi",
"smw": "Sumbawa",
"smx": "Samba",
"smy": "Semnani",
"smz": "Simeku",
"sn": "Shona",
"snb": "Sebuyau",
"snc": "Sinaugoro",
"sne": "B<NAME>",
"snf": "Noon",
"sng": "Sanga (Congo)",
"sni": "Sensi",
"snj": "Riverain Sango",
"snk": "Soninke",
"snl": "Sangil",
"snm": "Southern Ma'di",
"snn": "Siona",
"sno": "Snohomish",
"snp": "Siane",
"snq": "Sangu (Gabon)",
"snr": "Sihan",
"sns": "Nahavaq",
"snu": "Senggi",
"snv": "Sa'ban",
"snw": "Selee",
"snx": "Sam",
"sny": "Saniyo-Hiyewe",
"snz": "Kou",
"so": "Somali",
"soa": "<NAME>",
"sob": "Sobei",
"soc": "Soko",
"sod": "Songoora",
"soe": "Songomeno",
"sog": "Sogdian",
"sog-ear": "Early Sogdian",
"sog-lat": "Late Sogdian",
"soh": "Aka (Sudan)",
"soi": "Sonha",
"soj": "Soi",
"sok": "Sokoro",
"sol": "Solos",
"son": "Songhay",
"son-pro": "Proto-Songhay",
"soo": "Nsong",
"sop": "Songe",
"soq": "Kanasi",
"sor": "Somrai",
"sos": "Seenku",
"sou": "Southern Thai",
"sov": "Sonsorolese",
"sow": "Sowanda",
"sox": "Swo",
"soy": "Miyobe",
"soz": "Temi",
"spb": "Sepa (Indonesia)",
"spc": "Sapé",
"spd": "Saep",
"spe": "Sepa (New Guinea)",
"spg": "Sian",
"spi": "Saponi",
"spk": "Sengo",
"spl": "Selepet",
"spm": "Sepen",
"spn": "Sanapaná",
"spo": "Spokane",
"spp": "Supyire",
"spr": "Saparua",
"sps": "Saposa",
"spt": "Spiti Bhoti",
"spu": "Sapuan",
"spv": "Sambalpuri",
"spx": "South Picene",
"spy": "Sabaot",
"sq": "Albanian",
"sqa": "Shama-Sambuga",
"sqh": "Shau",
"sqj": "Albanian",
"sqj-pro": "Proto-Albanian",
"sqk": "Albanian Sign Language",
"sqm": "Suma",
"sqn": "Susquehannock",
"sqo": "Sorkhei",
"sqq": "Sou",
"sqr": "Siculo-Arabic",
"sqs": "Sri Lankan Sign Language",
"sqt": "Soqotri",
"squ": "Squamish",
"sra": "Saruga",
"srb": "Sora",
"sre": "Sara",
"srf": "Nafi",
"srg": "Sulod",
"srh": "Sarikoli",
"sri": "Siriano",
"srk": "Serudung Murut",
"srl": "Isirawa",
"srm": "Saramaccan",
"srn": "Sranan Tongo",
"srq": "Sirionó",
"srr": "Serer",
"srs": "Sarcee",
"srt": "Sauri",
"sru": "Suruí",
"srv": "Waray Sorsogon",
"srw": "Serua",
"srx": "Sirmauri",
"sry": "Sera",
"srz": "Shahmirzadi",
"ss": "Swazi",
"ssa": "Nilo-Saharan",
"ssa-fur": "Fur",
"ssa-klk": "Kuliak",
"ssa-klk-pro": "Proto-Kuliak",
"ssa-kom": "Koman",
"ssa-kom-pro": "Proto-Koman",
"ssa-pro": "Proto-Nilo-Saharan",
"ssa-sah": "Saharan",
"ssb": "Southern Sama",
"ssc": "Suba-Simbiti",
"ssd": "Siroi",
"sse": "Balangingi",
"ssf": "Thao",
"ssg": "Seimat",
"ssh": "Shihhi Arabic",
"ssi": "Sansi",
"ssj": "Sausi",
"ssk": "Sunam",
"ssl": "Western Sisaala",
"ssm": "Semnam",
"ssn": "Waata",
"sso": "Sissano",
"ssp": "Spanish Sign Language",
"ssq": "So'a",
"ssr": "Swiss-French Sign Language",
"sss": "Sô",
"sst": "Sinasina",
"ssu": "Susuami",
"ssv": "Shark Bay",
"ssx": "Samberigi",
"ssy": "Saho",
"ssz": "Sengseng",
"st": "Sotho",
"stb": "Northern Subanen",
"std": "Sentinelese",
"ste": "Liana-Seti",
"stf": "Seta",
"stg": "Trieng",
"sth": "Shelta",
"sti": "Bulo Stieng",
"stj": "<NAME>",
"stk": "Arammba",
"stm": "Setaman",
"stn": "Owa",
"sto": "Stoney",
"stp": "Southeastern Tepehuan",
"stq": "Saterland Frisian",
"str": "Saanich",
"sts": "Shumashti",
"stt": "Budeh Stieng",
"stu": "Samtao",
"stv": "Silt'e",
"stw": "Satawalese",
"sty": "Siberian Tatar",
"su": "Sundanese",
"sua": "Sulka",
"sub": "Suku",
"suc": "Western Subanon",
"sue": "Suena",
"sug": "Suganga",
"sui": "Suki",
"suk": "Sukuma",
"suq": "Suri",
"sur": "Mwaghavul",
"sus": "Susu",
"sut": "Subtiaba",
"suv": "Sulung",
"suw": "Sumbwa",
"sux": "Sumerian",
"suy": "Suyá",
"suz": "Sunwar",
"sv": "Swedish",
"sva": "Svan",
"svb": "Ulau-Suain",
"svc": "Vincentian Creole English",
"sve": "Serili",
"svk": "Slovakian Sign Language",
"svm": "Slavomolisano",
"svs": "Savosavo",
"svx": "Skalvian",
"sw": "Swahili",
"swb": "Maore Comorian",
"swf": "Sere",
"swg": "Swabian",
"swi": "Sui",
"swj": "Sira",
"swl": "Swedish Sign Language",
"swm": "Samosa",
"swn": "Sokna",
"swo": "Shanenawa",
"swp": "Suau",
"swq": "Sharwa",
"swr": "Saweru",
"sws": "Seluwasan",
"swt": "Sawila",
"swu": "Suwawa",
"sww": "Sowa",
"swx": "Suruahá",
"swy": "Sarua",
"sxb": "Suba",
"sxc": "Sicanian",
"sxe": "Sighu",
"sxg": "Shixing",
"sxk": "Southern Kalapuya",
"sxl": "Selonian",
"sxm": "Samre",
"sxn": "Sangir",
"sxo": "Sorothaptic",
"sxr": "Saaroa",
"sxs": "Sasaru",
"sxu": "Upper Saxon",
"sxw": "Saxwe Gbe",
"sya": "Siang",
"syb": "Central Subanen",
"syc": "Classical Syriac",
"syd": "Samoyedic",
"syd-fne": "Forest Nenets",
"syd-pro": "Proto-Samoyedic",
"syi": "Seki",
"syk": "Sukur",
"syl": "Sylheti",
"sym": "<NAME>",
"syn": "Senaya",
"syo": "Suoy",
"sys": "Sinyar",
"syw": "Kagate",
"syx": "Osamayi",
"syy": "Al-Sayyid Bedouin Sign Language",
"sza": "Semelai",
"szb": "Ngalum",
"szc": "Semaq Beri",
"szd": "Seru",
"sze": "Seze",
"szg": "Sengele",
"szl": "Silesian",
"szn": "Sula",
"szp": "Suabo",
"szv": "Isubu",
"szw": "Sawai",
"szy": "Sakizaya",
"ta": "Tamil",
"ta-mid": "Middle Tamil",
"taa": "Lower Tanana",
"tab": "Tabasaran",
"tac": "Lowland Tarahumara",
"tad": "Tause",
"tae": "Tariana",
"taf": "Tapirapé",
"tag": "Tagoi",
"tai": "Tai",
"tai-cen": "Central Tai",
"tai-cho": "Chongzuo Tai",
"tai-nor": "Northern Tai",
"tai-pro": "Proto-Tai",
"tai-sap": "Sapa-Southwestern Tai",
"tai-shz": "Shangsi Zhuang",
"tai-swe": "Southwestern Tai",
"tai-swe-pro": "Proto-Southwestern Tai",
"tai-tay": "Tày",
"tai-wen": "Wenma-Southwestern Tai",
"taj": "Eastern Tamang",
"tak": "Tala",
"tal": "Tal",
"tan": "Tangale",
"tao": "Yami",
"tap": "Taabwa",
"taq": "Tamasheq",
"tar": "Central Tarahumara",
"tas": "Tay Boi",
"tau": "Upper Tanana",
"tav": "Tatuyo",
"taw": "Tai",
"tax": "Tamki",
"tay": "Atayal",
"taz": "Tocho",
"tba": "Aikanã",
"tbb": "Tapeba",
"tbc": "Takia",
"tbd": "Kaki Ae",
"tbe": "Tanimbili",
"tbf": "Mandara",
"tbg": "North Tairora",
"tbh": "Thurawal",
"tbi": "Gaam",
"tbj": "Tiang",
"tbk": "Calamian Tagbanwa",
"tbl": "Tboli",
"tbm": "Tagbu",
"tbn": "Bar<NAME>bo",
"tbo": "Tawala",
"tbp": "Taworta",
"tbq": "Tibeto-Burman",
"tbq-anp": "Angami-Pochuri",
"tbq-bdg": "Bodo-Garo",
"tbq-bdg-pro": "Proto-Bodo-Garo",
"tbq-bkj": "Sal",
"tbq-brm": "Burmish",
"tbq-buq": "Burmo-Qiangic",
"tbq-kuk": "Kukish",
"tbq-kuk-pro": "Proto-Kuki-Chin",
"tbq-lal": "Lalo",
"tbq-lal-pro": "Proto-Lalo",
"tbq-laz": "Laze",
"tbq-lob": "Lolo-Burmese",
"tbq-lob-pro": "Proto-Lolo-Burmese",
"tbq-lol": "Loloish",
"tbq-lol-pro": "Proto-Loloish",
"tbq-ngo": "Ngochang",
"tbq-plg": "Pai-lang",
"tbq-pro": "Proto-Tibeto-Burman",
"tbr": "Tumtum",
"tbs": "Tanguat",
"tbt": "Kitembo",
"tbu": "Tubar",
"tbv": "Tobo",
"tbw": "Tagbanwa",
"tbx": "Kapin",
"tby": "Tabaru",
"tbz": "Ditammari",
"tca": "Ticuna",
"tcb": "Tanacross",
"tcc": "Datooga",
"tcd": "Tafi",
"tce": "Southern Tutchone",
"tcf": "Mal<NAME>",
"tcg": "Tamagario",
"tch": "Turks and Caicos Creole English",
"tci": "Wára",
"tck": "Tchitchege",
"tcl": "Taman (Burma)",
"tcm": "Tanahmerah",
"tco": "Taungyo",
"tcp": "Tawr Chin",
"tcq": "Kaiy",
"tcs": "Torres Strait Creole",
"tct": "T'en",
"tcu": "Southeastern Tarahumara",
"tcw": "Tec<NAME>",
"tcx": "Toda",
"tcy": "Tulu",
"tcz": "Thado Chin",
"tda": "Tagdal",
"tdb": "Panchpargania",
"tdc": "Emberá-Tadó",
"tdd": "Tai Nüa",
"tde": "Tiranige Diga Dogon",
"tdf": "Talieng",
"tdg": "Western Tamang",
"tdh": "Thulung",
"tdi": "Tomadino",
"tdj": "Tajio",
"tdk": "Tambas",
"tdl": "Sur",
"tdm": "Taruma",
"tdn": "Tondano",
"tdo": "Teme",
"tdq": "Tita",
"tdr": "Todrah",
"tds": "Doutai",
"tdt": "<NAME>",
"tdu": "<NAME>",
"tdv": "Toro",
"tdy": "Tadyawan",
"te": "Telugu",
"tea": "Temiar",
"teb": "Tetete",
"tec": "Terik",
"ted": "<NAME>",
"tee": "<NAME>",
"tef": "Teressa",
"teg": "Teke-Tege",
"teh": "Tehuelche",
"tei": "Torricelli",
"tek": "Ib<NAME>",
"tem": "Temne",
"ten": "Tama (Colombia)",
"teo": "Ateso",
"tep": "Tepecano",
"teq": "Temein",
"ter": "Tereno",
"tes": "Tengger",
"tet": | |
from pathlib import Path
from typing import IO, TYPE_CHECKING, Dict, Set, Tuple, Union
import ele
import numpy as np
from openff.units import unit
from openff.interchange.components.mdtraj import (
_iterate_angles,
_iterate_impropers,
_iterate_pairs,
_iterate_propers,
_store_bond_partners,
)
from openff.interchange.exceptions import UnsupportedExportError
from openff.interchange.models import TopologyKey
if TYPE_CHECKING:
from openff.interchange.components.interchange import Interchange
def to_gro(openff_sys: "Interchange", file_path: Union[Path, str], decimal=8):
"""
Write a .gro file. See
https://manual.gromacs.org/documentation/current/reference-manual/file-formats.html#gro
for more details, including the recommended C-style one-liners.
This code is partially copied from InterMol, see
https://github.com/shirtsgroup/InterMol/tree/v0.1/intermol/gromacs
"""
if isinstance(file_path, str):
path = Path(file_path)
if isinstance(file_path, Path):
path = file_path
# Explicitly round here to avoid ambiguous things in string formatting
rounded_positions = np.round(openff_sys.positions, decimal)
rounded_positions = rounded_positions.to(unit.nanometer).magnitude
n = decimal
with open(path, "w") as gro:
gro.write("Generated by OpenFF\n")
gro.write(f"{openff_sys.positions.shape[0]}\n")
typemap = _build_typemap(openff_sys)
for atom in openff_sys.topology.mdtop.atoms:
res = atom.residue
atom_name = typemap[atom.index]
residue_idx = (res.index + 1) % 100000
# TODO: After topology refactor, ensure this matches residue names
# in the topology file (unsure if this is necessary?)
residue_name = res.name[:5]
# TODO: Make sure these are in nanometers
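# .gro atom lines are fixed-width: residue number (5), residue name (5, left-aligned),
# atom name (5), atom number (5), then x/y/z in nm printed with `decimal` digits of precision.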
gro.write(
f"%5d%-5s%5s%5d%{n+5}.{n}f%{n+5}.{n}f%{n+5}.{n}f\n"
% (
residue_idx,
residue_name,
atom_name,
(atom.index + 1) % 100000,
rounded_positions[atom.index, 0],
rounded_positions[atom.index, 1],
rounded_positions[atom.index, 2],
)
)
if openff_sys.box is None:
box = 11 * np.eye(3)
else:
box = openff_sys.box.to(unit.nanometer).magnitude
# Check for rectangular
if (box == np.diag(np.diagonal(box))).all():
for i in range(3):
gro.write(f"{box[i, i]:11.7f}")
else:
for i in range(3):
gro.write(f"{box[i, i]:11.7f}")
for i in range(3):
for j in range(3):
if i != j:
gro.write(f"{box[i, j]:11.7f}")
gro.write("\n")
def to_top(openff_sys: "Interchange", file_path: Union[Path, str]):
"""
Write a .top file. See
https://manual.gromacs.org/documentation/current/reference-manual/file-formats.html#top
for more details.
This code is partially copied from InterMol, see
https://github.com/shirtsgroup/InterMol/tree/v0.1/intermol/gromacs
"""
if isinstance(file_path, str):
path = Path(file_path)
if isinstance(file_path, Path):
path = file_path
with open(path, "w") as top_file:
top_file.write("; Generated by OpenFF Interchange\n")
_write_top_defaults(openff_sys, top_file)
typemap = _build_typemap(openff_sys)
_write_atomtypes(openff_sys, top_file, typemap)
# TODO: Write [ nonbond_params ] section
# TODO: De-duplicate based on molecules
# TODO: Handle special case of water
_write_moleculetype(top_file)
_write_atoms(top_file, openff_sys, typemap)
_write_valence(top_file, openff_sys)
_write_system(top_file, openff_sys)
def _write_top_defaults(openff_sys: "Interchange", top_file: IO):
"""Write [ defaults ] section"""
top_file.write("[ defaults ]\n")
top_file.write("; nbfunc\tcomb-rule\tgen-pairs\tfudgeLJ\tfudgeQQ\n")
if "vdW" in openff_sys.handlers:
nbfunc = 1
scale_lj = openff_sys["vdW"].scale_14
gen_pairs = "yes"
handler_key = "vdW"
elif "Buckingham-6" in openff_sys.handlers:
nbfunc = 2
gen_pairs = "no"
scale_lj = openff_sys["Buckingham-6"].scale_14
handler_key = "Buckingham-6"
else:
raise UnsupportedExportError(
"Could not find a handler for short-ranged vdW interactions that is compatible "
"with GROMACS. Looked for handlers named `vdW` and `Buckingham-6`."
)
mixing_rule = openff_sys[handler_key].mixing_rule
if mixing_rule == "lorentz-berthelot":
comb_rule = 2
elif mixing_rule == "geometric":
comb_rule = 3
elif mixing_rule == "buckingham" and handler_key == "Buckingham-6":
# TODO: Not clear what the compatibility is here. `comb-rule` only applies to LJ terms.
# The documentation lists the combination rule for Buckingham potentials, but it does not
# seem like GROMACS will do this automatically, and needs to be implemented manually via
# [ nonbond_params ].
# https://manual.gromacs.org/current/reference-manual/topologies/parameter-files.html#non-bonded-parameters
# https://gromacs.bioexcel.eu/t/how-to-use-buckingham-function/1181/4
comb_rule = 2
else:
raise UnsupportedExportError(
f"Mixing rule `{mixing_rule} not compatible with GROMACS and/or not supported "
"by current exporter. Supported values are `lorentez-berthelot` and `geometric`."
)
top_file.write(
"{:6d}\t{:6d}\t{:6s} {:8.6f} {:8.6f}\n\n".format(
nbfunc,
comb_rule,
gen_pairs,
scale_lj,
openff_sys.handlers["Electrostatics"].scale_14,
)
)
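# Hedged illustration: for a typical SMIRNOFF-style system (12-6 LJ, Lorentz-Berthelot mixing,
# 1-4 scaling of 0.5 for LJ and ~0.833 for electrostatics) this writes roughly:
# [ defaults ]
# ; nbfunc	comb-rule	gen-pairs	fudgeLJ	fudgeQQ
#      1	     2	yes    0.500000 0.833333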
def _build_typemap(openff_sys: "Interchange") -> Dict:
typemap = dict()
elements: Dict[str, int] = dict()
for atom in openff_sys.topology.mdtop.atoms:
element_symbol = atom.element.symbol
# TODO: Use this key to condense, see parmed.openmm._process_nobonded
# parameters = _get_lj_parameters([*parameters.values()])
# key = tuple([*parameters.values()])
if element_symbol not in elements.keys():
elements[element_symbol] = 1
else:
elements[element_symbol] += 1
atom_type = f"{element_symbol}{elements[element_symbol]}"
typemap[atom.index] = atom_type
return typemap
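# Example result for a small molecule: {0: 'C1', 1: 'C2', 2: 'O1', 3: 'H1', 4: 'H2', ...},
# i.e. each atom is typed as "<element symbol><running count of that element>".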
def _write_atomtypes(openff_sys: "Interchange", top_file: IO, typemap: Dict):
"""Write [ atomtypes ] section"""
if "vdW" in openff_sys.handlers:
if "Buckingham-6" in openff_sys.handlers:
raise UnsupportedExportError(
"Cannot mix 12-6 and Buckingham potentials in GROMACS"
)
else:
_write_atomtypes_lj(openff_sys, top_file, typemap)
else:
if "Buckingham-6" in openff_sys.handlers:
_write_atomtypes_buck(openff_sys, top_file, typemap)
else:
raise UnsupportedExportError("No vdW interactions found")
def _write_atomtypes_lj(openff_sys: "Interchange", top_file: IO, typemap: Dict):
top_file.write("[ atomtypes ]\n")
top_file.write(
";type, bondingtype, atomic_number, mass, charge, ptype, sigma, epsilon\n"
)
for atom_idx, atom_type in typemap.items():
atom = openff_sys.topology.mdtop.atom(atom_idx)
mass = atom.element.mass
atomic_number = atom.element.atomic_number
parameters = _get_lj_parameters(openff_sys, atom_idx)
sigma = parameters["sigma"].to(unit.nanometer).magnitude
epsilon = parameters["epsilon"].to(unit.Unit("kilojoule / mole")).magnitude
top_file.write(
"{:<11s} {:6d} {:.16g} {:.16g} {:5s} {:.16g} {:.16g}".format(
atom_type, # atom type
# "XX", # atom "bonding type", i.e. bond class
atomic_number,
mass,
0.0, # charge, overridden later in [ atoms ]
"A", # ptype
sigma,
epsilon,
)
)
top_file.write("\n")
def _write_atomtypes_buck(openff_sys: "Interchange", top_file: IO, typemap: Dict):
top_file.write("[ atomtypes ]\n")
top_file.write(
";type, bondingtype, atomic_number, mass, charge, ptype, sigma, epsilon\n"
)
for atom_idx, atom_type in typemap.items():
atom = openff_sys.topology.atom(atom_idx)
element = ele.element_from_atomic_number(atom.atomic_number)
parameters = _get_buck_parameters(openff_sys, atom_idx)
a = parameters["A"].to(unit.Unit("kilojoule / mol")).magnitude
b = parameters["B"].to(1 / unit.nanometer).magnitude
c = parameters["C"].to(unit.Unit("kilojoule / mol * nanometer ** 6")).magnitude
top_file.write(
"{:<11s} {:6d} {:.16g} {:.16g} {:5s} {:.16g} {:.16g} {:.16g}".format(
atom_type, # atom type
# "XX", # atom "bonding type", i.e. bond class
atom.atomic_number,
element.mass,
0.0, # charge, overridden later in [ atoms ]
"A", # ptype
a,
b,
c,
)
)
top_file.write("\n")
def _write_moleculetype(top_file: IO):
"""Write the [ moleculetype ] section"""
top_file.write("[ moleculetype ]\n")
top_file.write("; Name\tnrexcl\n")
top_file.write("MOL\t3\n\n")
def _write_atoms(
top_file: IO,
openff_sys: "Interchange",
typemap: Dict,
):
"""Write the [ atoms ] and [ pairs ] sections for a molecule"""
top_file.write("[ atoms ]\n")
top_file.write(";num, type, resnum, resname, atomname, cgnr, q, m\n")
charges = openff_sys.handlers["Electrostatics"].charges
for atom in openff_sys.topology.mdtop.atoms:
atom_idx = atom.index
mass = atom.element.mass
atom_type = typemap[atom.index]
res_idx = atom.residue.index
res_name = str(atom.residue)
top_key = TopologyKey(atom_indices=(atom_idx,))
charge = charges[top_key].m_as(unit.e)
top_file.write(
"{:6d} {:18s} {:6d} {:8s} {:8s} {:6d} "
"{:18.8f} {:18.8f}\n".format(
atom_idx + 1,
atom_type,
res_idx + 1,
res_name,
atom_type,
atom_idx + 1,
charge,
mass,
)
)
top_file.write("[ pairs ]\n")
top_file.write("; ai\taj\tfunct\n")
_store_bond_partners(openff_sys.topology.mdtop)
# Use a set to de-duplicate
pairs: Set[Tuple] = {*_iterate_pairs(openff_sys.topology.mdtop)}
for pair in pairs:
indices = [a.index for a in pair]
indices = sorted(indices)
top_file.write(
"{:7d} {:7d} {:6d}\n".format(
indices[0] + 1,
indices[1] + 1,
1,
)
)
def _write_valence(
top_file: IO,
openff_sys: "Interchange",
):
"""Write the [ bonds ], [ angles ], and [ dihedrals ] sections"""
_write_bonds(top_file, openff_sys)
_write_angles(top_file, openff_sys)
_write_dihedrals(top_file, openff_sys)
def _write_bonds(top_file: IO, openff_sys: "Interchange"):
if "Bonds" not in openff_sys.handlers.keys():
return
top_file.write("[ bonds ]\n")
top_file.write("; ai\taj\tfunc\tr\tk\n")
bond_handler = openff_sys.handlers["Bonds"]
for bond in openff_sys.topology.mdtop.bonds:
indices = tuple(sorted((bond.atom1.index, bond.atom2.index)))
for top_key in bond_handler.slot_map:
if top_key.atom_indices == indices:
pot_key = bond_handler.slot_map[top_key]
elif top_key.atom_indices == indices[::-1]:
pot_key = bond_handler.slot_map[top_key]
params = bond_handler.potentials[pot_key].parameters
k = params["k"].m_as(unit.Unit("kilojoule / mole / nanometer ** 2"))
length = params["length"].to(unit.nanometer).magnitude
top_file.write(
"{:7d} {:7d} {:4s} {:.16g} {:.16g}\n".format(
indices[0] + 1, # atom i
indices[1] + 1, # atom j
str(1), # bond type (functional form)
length,
k,
)
)
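# pot_key would otherwise persist across loop iterations; deleting it makes a bond with no
# matching slot_map entry fail loudly instead of silently reusing the previous parameters.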
del pot_key
top_file.write("\n\n")
def _write_angles(top_file: IO, openff_sys: "Interchange"):
if "Angles" not in openff_sys.handlers.keys():
return
_store_bond_partners(openff_sys.topology.mdtop)
top_file.write("[ angles ]\n")
top_file.write("; ai\taj\tak\tfunc\tr\tk\n")
angle_handler = openff_sys.handlers["Angles"]
for angle in _iterate_angles(openff_sys.topology.mdtop):
indices = (
angle[0].index,
angle[1].index,
angle[2].index,
)
for top_key in angle_handler.slot_map:
if top_key.atom_indices == indices:
pot_key = angle_handler.slot_map[top_key]
params = angle_handler.potentials[pot_key].parameters
k = params["k"].m_as(unit.Unit("kilojoule / mole / radian ** 2"))
theta = params["angle"].to(unit.degree).magnitude
top_file.write(
"{:7d} {:7d} {:7d} {:4s} {:.16g} {:.16g}\n".format(
indices[0] + 1, # atom i
indices[1] + 1, # atom j
indices[2] + 1, # atom k
str(1), # angle type (functional form)
theta,
k,
)
)
top_file.write("\n\n")
def _write_dihedrals(top_file: IO, openff_sys: "Interchange"):
if "ProperTorsions" not in openff_sys.handlers:
if "RBTorsions" not in openff_sys.handlers:
if "ImproperTorsions" not in openff_sys.handlers:
return
_store_bond_partners(openff_sys.topology.mdtop)
top_file.write("[ dihedrals ]\n")
top_file.write("; i j k l func\n")
rb_torsion_handler = openff_sys.handlers.get("RBTorsions", [])
proper_torsion_handler = openff_sys.handlers.get("ProperTorsions", [])
improper_torsion_handler = openff_sys.handlers.get("ImproperTorsions", [])
# TODO: Ensure number of torsions written matches what is expected
for proper in _iterate_propers(openff_sys.topology.mdtop):
if proper_torsion_handler:
for top_key in proper_torsion_handler.slot_map:
indices = tuple(a.index for a in proper)
if top_key.atom_indices == indices:
pot_key = proper_torsion_handler.slot_map[top_key]
params = proper_torsion_handler.potentials[pot_key].parameters
k = params["k"].to(unit.Unit("kilojoule / mol")).magnitude
periodicity = int(params["periodicity"])
phase = params["phase"].to(unit.degree).magnitude
idivf = int(params["idivf"]) if "idivf" in params else 1
top_file.write(
"{:7d} {:7d} {:7d} {:7d} {:6d} {:16g} {:16g} {:7d}\n".format(
indices[0] + 1,
indices[1] + 1,
indices[2] + 1,
indices[3] + 1,
1,
phase,
k / idivf,
periodicity,
)
)
# This should be `if` if a single quartet can be subject to both proper and RB torsions
| |
from __future__ import print_function
import copy
from math import *
from instruction_list import *
from parse_code import *
from values import get_storage_value,get_params,set_params,print_params,is_params
from values import create_configuration,add_configuration,configuration_exist,seen_configuration,print_configuration
from values import MyGlobals
from hashlib import *
from sha3 import *
import random
import time
from datetime import datetime
from z3 import *
import re
from misc import *
import codecs
class EVMCoreHelper:
def is_fixed(self, s): return s['type'] == 'constant' and is_bv_value(simplify(s['z3']))
def is_undefined(self, s): return s['type'] == 'undefined'
def get_value(self, s): return simplify(s['z3']).as_long()
def power(self, y, x, n):
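# Modular exponentiation by repeated squaring: computes (y ** x) % n in O(log x)
# multiplications, keeping 256-bit EVM EXP operands tractable.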
if x == 0: #base case
return 1
elif (x%2==0): #x even
return self.power((y*y)%n,x//2,n)%n
else: #x odd
return (y*self.power((y*y)%n,x//2,n))%n
def store_in_memory(self, mmemory, addr, value ):
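# Models EVM MSTORE semantics: a 32-byte word written at `addr` overlaps any word previously
# stored within 31 bytes on either side, so neighbouring entries are masked/shifted to splice
# in the new bytes before the value itself is recorded at `addr`.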
for i in range(addr+1, addr+32):
if i in mmemory:
if not self.is_undefined(mmemory[i]):
if self.is_undefined( value ):
mmemory[i]['type'] = 'undefined'
continue
obytes = i - addr
old_value = mmemory[i]['z3']
new_value = ( old_value & (2**(8*obytes) - 1) ) ^ (value['z3'] << (8*obytes) )
if new_value == 0: del mmemory[i]
else: mmemory[i]['z3'] = new_value
for i in range(addr-31,addr):
if i in mmemory:
if not self.is_undefined(mmemory[i]):
if self.is_undefined( value ):
mmemory[i]['type'] = 'undefined'
continue
obytes = addr - i
old_value = mmemory[i]['z3']
new_value = ( old_value & ( (2**(8*obytes)-1) << (8*(32-obytes) ) ) ) ^ ( value ['z3'] >> (8*obytes ) )
if new_value == 0: del mmemory[i]
else: mmemory[i]['z3'] = new_value
mmemory[addr] = value
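# Hedged illustration (not part of the original module): a minimal sketch of the operand
# convention used below, where each stack entry is a dict holding 'type', 'step' and a z3
# expression under 'z3'. The symbolic name 'calldata_word' is an assumption for demonstration.
def _example_gt_constraint(step=0):
    o1 = {'type': 'constant', 'step': step, 'z3': BitVec('calldata_word', 256)}
    o2 = {'type': 'constant', 'step': step, 'z3': BitVecVal(100, 256)}
    # EVM GT is an unsigned comparison, hence UGT; the result is a 256-bit 1 or 0,
    # mirroring what EVMCore._binary(..., op='GT') produces below.
    return {'type': 'constant', 'step': step + 1,
            'z3': simplify(If(UGT(o1['z3'], o2['z3']), BitVecVal(1, 256), BitVecVal(0, 256)))}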
# EVMCore simulates execution of each instruction and generates symbolic constraints using the actual EVM semantics.
class EVMCore(EVMCoreHelper):
'''
* Implements logic to handle each instruction in different phases of static analysis:
1) Search enhance phase with global storage not symbolic.
2) Search enhance phase with global storage symbolic.
3) Actual event finding phase.
* Propagates rules differently for each instruction depending on the phase.
* Constructs a data structure of the R/W storage locations of each function for the search enhance phase.
'''
# Private function for processing instructions involving operation on a single input argument.
def _unary(self, o1, step, op='NONE' ):
if self.is_undefined(o1): return {'type':'undefined','step':step}
z1 = simplify(o1['z3'])
if op == 'NOT': z3 = ~z1
elif op == 'ISZERO': z3 = If(z1 == 0, BitVecVal(1, 256), BitVecVal(0, 256))
else:
print('did not process unary operation %s ' % op )
print(o1)
return {'type':'undefined','step':step}
return {'type':'constant','step':step, 'z3': z3}
# Private function for processing instructions involving operations on two input arguments.
def _binary(self, o1, o2 , step, op='NONE'):
# In some cases the result can be determined with the knowledge of only one operand
if self.is_fixed(o1):
val = simplify(o1['z3']).as_long()
if op in ['MUL','AND','DIV','SDIV'] and 0 == val: return {'type':'constant','step':step, 'z3':BitVecVal(0,256) }
if op in ['XOR','ADD'] and 0 == val: return o2
if self.is_fixed(o2):
val = simplify(o2['z3']).as_long()
if op in ['MUL','AND','DIV','SDIV'] and 0 == val: return {'type':'constant','step':step, 'z3':BitVecVal(0,256) }
if op in ['XOR','ADD'] and 0 == val: return o1
# If either operand is undefined then the result is undefined
if self.is_undefined(o1) or self.is_undefined(o2): return {'type':'undefined','step':step}
z1 = simplify(o1['z3'])
z2 = simplify(o2['z3'])
if op =='AND' : z3 = z1 & z2
elif op =='OR' : z3 = z1 | z2
elif op =='XOR' : z3 = z1 ^ z2
elif op =='ADD' : z3 = z1 + z2
elif op =='SUB' : z3 = z1 - z2
elif op =='EXP' :
if is_bv_value(z1) and is_bv_value(z2):
z3 = BitVecVal( self.power (z1.as_long(), z2.as_long(), 2**256), 256 )
else:
return {'type':'undefined','step':step}
elif op =='DIV' : z3 = UDiv(z1,z2)
elif op =='SDIV': z3 = z1/z2
elif op =='MOD' : z3 = URem(z1,z2)
elif op =='SMOD' : z3 = z1 % z2
elif op =='MUL' : z3 = z1 * z2
elif op =='GT' : z3 = If(UGT(z1, z2), BitVecVal(1, 256), BitVecVal(0, 256))
elif op =='SGT' : z3 = If(z1 > z2, BitVecVal(1, 256), BitVecVal(0, 256))
elif op =='LT' : z3 = If(ULT(z1, z2), BitVecVal(1, 256), BitVecVal(0, 256))
elif op =='SLT' : z3 = If(z1 < z2, BitVecVal(1, 256), BitVecVal(0, 256))
elif op =='EQ' :
global last_eq_step, last_eq_func
# May reveal function calls
# last_eq_step and _func are used only in the debugging mode
if is_bv_value(z1):
a = z1.as_long()
for function_pair in MyGlobals.functions:
if a==(int(function_pair[1], 16)):
MyGlobals.last_eq_func = z1.as_long()
MyGlobals.last_eq_step = step
z3 = If(z1 == z2, BitVecVal(1, 256), BitVecVal(0, 256))
else:
print('did not process binary operation %s ' % op)
print(o1)
print(o2)
return {'type':'undefined','step':step}
return {'type':'constant','step':step, 'z3': z3}
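# Worked example (sketch, not from the source): GT/LT use unsigned comparison
# while SGT/SLT are signed, so for z1 = 2**256 - 1 (i.e. -1 as a signed 256-bit
# value) and z2 = 1, 'GT' evaluates to 1 but 'SGT' evaluates to 0.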
# Private function for processing instructions involving operations on three input arguments.
def _ternary(self, o1, o2 , o3, step, op='NONE'):
if o3['type'] == 'constant' and is_bv_value(simplify(o3['z3'])) and 0 == simplify(o3['z3']).as_long(): return {'type':'constant','step':step, 'z3':BitVecVal(0,256) }
z1 = simplify(o1['z3'])
z2 = simplify(o2['z3'])
z3 = simplify(o3['z3'])
if op == 'ADDMOD': return {'type':'constant', 'step':step, 'z3': (z1+z2) % z3 }
elif op == 'MULMOD': return {'type':'constant', 'step':step, 'z3': (z1*z2) % z3 }
else:
print('did not process ternary operation %s ' % op)
print(o1)
print(o2)
print(o3)
return {'type':'undefined','step':step}
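# Note (assumption about intended semantics): EVM ADDMOD/MULMOD are defined with
# an arbitrary-precision intermediate result, whereas the 256-bit bit-vector
# arithmetic above wraps modulo 2**256, so this sketch is exact only when
# z1 + z2 (resp. z1 * z2) does not overflow 256 bits.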
# Public function which facilitates execution of each instruction and generation of symbolic constraints.
def execute(self, code, stack, pos, storage, mmemory, data, trace, calldepth, function_hash, actual_key, search_enhance, debug, read_from_blockchain ):
# Stop the search once it exceeds timeout
time_now = datetime.datetime.now()
if MyGlobals.ONE_HB_TIMEOUT < int((time_now - MyGlobals.Time_checkpoint).total_seconds()):
MyGlobals.stop_search = True
return pos , True
if debug: print_stack(stack)
op = code[pos]['o']
halt = False
executed = True
step = code[pos]['id']
# for statistics
if (not search_enhance) and op in ['CALL', 'CODESIZE', 'CODECOPY', 'EXTCODESIZE', 'EXTCODECOPY', 'RETURNDATASIZE', 'RETURNDATACOPY', 'DIFFICULTY', 'GAS', 'CREATE', 'CALLCODE', 'DELEGATECALL', 'STATICCALL', 'LOG0', 'LOG1', 'LOG2', 'LOG3', 'LOG4']:
if op in MyGlobals.notimplemented_ins:
MyGlobals.notimplemented_ins[op]+=1
else:
MyGlobals.notimplemented_ins[op]=1
if (not search_enhance):
if op in ['CALL', 'SSTORE', 'SLOAD', 'MSTORE8', 'MLOAD', 'JUMP', 'JUMPI']:
if op+'_all' in MyGlobals.notimplemented_ins:
MyGlobals.notimplemented_ins[op+'_all']+=1
else:
MyGlobals.notimplemented_ins[op+'_all']=1
if 'total' in MyGlobals.notimplemented_ins:
MyGlobals.notimplemented_ins['total']+=1
else:
MyGlobals.notimplemented_ins['total']=1
key = 0
if actual_key in [1, 4]: key = 1
if actual_key in [2, 3]: key = 2
if op not in allops:
print('Unknown operation %s at pos %x' % (code[pos]['op'],pos) )
return pos,True
# check if stack has enough elements
if allops[op][1] > len(stack):
if debug: print('Not enough entries in the stack to execute the operation %8s at step %x: required %d, provided %d' % (op,code[pos]['id'], allops[op][1], len(stack)) )
return pos, True
start_stack_size = len(stack)
final_stack_size = len(stack) - allops[op][1] + allops[op][2]
# get arguments from the stack
# the cases of DUP and SWAP are different, so avoid those
args = []
if op.find('SWAP') < 0 and op.find('DUP') < 0 and op not in ['JUMPI']:
for i in range( allops[op][1] ): args.append( stack.pop() )
# all unary
if op in ['ISZERO','NOT']:
stack.append( self._unary ( args[0] ,step, op ) )
# all binary except SIGNEXTEND
elif op in ['ADD','MUL','SUB','DIV','SDIV','MOD','SMOD','EXP','AND','OR','XOR', 'LT','GT','SLT','SGT','EQ']:
stack.append( self._binary ( args[0] , args[1] , step , op ) )
# all ternary
elif op in ['ADDMOD','MULMOD']:
stack.append( self._ternary( args[0], args[1], args[2], step, op ) )
elif op == 'SIGNEXTEND':
if not self.is_fixed(args[0]) or not self.is_fixed(args[1]):
stack.append( {'type':'undefined','step':step} )
else:
o = self.get_value(args[1])
t = 256 - 8*( self.get_value(args[0]) + 1 )
tbit = (o >> t ) & 1
n = 0
for i in range(256):
n ^= (tbit if i<= t else ((o>>i)&1)) << i
stack.append( {'type':'undefined','step':step, 'z3':BitVecVal( n, 256 ) } )
elif op == 'SHA3':
addr = simplify(args[0]['z3'])
offset= simplify(args[1]['z3'])
exact_address = addr.as_long() if is_bv_value(addr) else -1
exact_offset = offset.as_long() if is_bv_value(offset) else -1
# for statistics
if exact_address==-1:
if (not search_enhance):
if 'sha3_addr' in MyGlobals.notimplemented_ins:
MyGlobals.notimplemented_ins['sha3_addr']+=1
else:
MyGlobals.notimplemented_ins['sha3_addr']=1
if exact_offset==-1:
if 'sha3_offset' in MyGlobals.notimplemented_ins:
MyGlobals.notimplemented_ins['sha3_offset']+=1
else:
MyGlobals.notimplemented_ins['sha3_offset']=1
res = {'type':'undefined','step':step}
changed_offset = exact_address
if (exact_offset - exact_address)//32 >= 2 : changed_offset = exact_offset//2
if exact_address >= 0 and exact_offset >= 0:
if (exact_offset % 32) == 0 : # for now, can deal only with offsets divisible by 32
val = ''
all_good = True
sha3val = 0
for i in range(exact_offset//32):
if (exact_address + i*32) not in mmemory or not self.is_fixed(mmemory[exact_address+i*32]):
all_good = False
break
val += '%064x' % self.get_value(mmemory[exact_address + i*32])
if all_good:
k = keccak_256()
# print('hereee', val, '\n')
# k.update(val.decode('hex'))
k.update((codecs.decode(val, 'hex')))
digest = k.hexdigest()
res = {'type':'constant','step':step, 'z3':BitVecVal(int(digest,16), 256) }
sha3val = int(digest,16)
else:
# for statistics
if (not search_enhance):
if exact_address==-1:
if 'sha3_mem' in MyGlobals.notimplemented_ins:
MyGlobals.notimplemented_ins['sha3_mem']+=1
else:
MyGlobals.notimplemented_ins['sha3_mem']=1
if search_enhance and is_bv_value(simplify(mmemory[changed_offset]['z3'])):
temp_key = remove0x(hex(mmemory[changed_offset]['z3'].as_long()).rstrip('L'))
if not 'SHA3'+'-'+str(step)+'-'+function_hash in MyGlobals.sha3vardata:
MyGlobals.sha3vardata['SHA3'+'-'+str(step)+'-'+function_hash] = []
MyGlobals.sha3vardata['SHA3'+'-'+str(step)+'-'+function_hash].append(temp_key)
else:
if not temp_key in MyGlobals.sha3vardata['SHA3'+'-'+str(step)+'-'+function_hash]:
MyGlobals.sha3vardata['SHA3'+'-'+str(step)+'-'+function_hash].append(temp_key)
stack.append(args[1])
stack.append(args[0])
return pos, False
if search_enhance and is_bv_value(simplify(mmemory[changed_offset]['z3'])):
temp_key = remove0x(hex(mmemory[changed_offset]['z3'].as_long()).rstrip('L'))
if not sha3val in MyGlobals.sha3vardata:
MyGlobals.sha3vardata[sha3val] = []
MyGlobals.sha3vardata[sha3val].append(temp_key)
else:
if not temp_key in MyGlobals.sha3vardata[sha3val]:
MyGlobals.sha3vardata[sha3val].append(temp_key)
stack.append( res )
elif op.find('PUSH') >= 0: stack.append( {'type':'constant','step':step, 'z3':BitVecVal(int(code[pos]['input'],16), 256)} )
elif op.find('DUP' ) >= 0: stack.append( copy.deepcopy( stack[-int(op[3:]) ] ) )
elif op.find('SWAP') >= 0:
tmp1 = stack[-1]
tmp2 = stack[-int(op[4:])-1 ]
stack[-1] = tmp2
stack[-int(op[4:]) -1] = tmp1
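# e.g. 'SWAP3' exchanges the top of the stack with the fourth element
# (stack[-1] <-> stack[-4]), and 'DUP2' pushes a deep copy of stack[-2].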
# assign symbolic variable to some of the parameters (such as CALLVALUE, TIMESTAMP, etc)
# only if they are selected to get one
# otherwise, below, they will get fixed value (BitVecVal) as specified
elif op in MyGlobals.symbolic_vars:
stack.append( {'type':'constant','step':step, 'z3': BitVec(op+'-'+str(calldepth)+'-'+function_hash,256) } )
elif op == 'NUMBER': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('block_number',''),16), 256)} )
elif op == 'GASLIMIT': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('gas_limit',''),16), 256)} )
elif op == 'TIMESTAMP': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('time_stamp',''),16), 256)} )
elif op == 'CALLVALUE': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('call_value',''),16), 256)} )
elif op == 'ADDRESS': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('contract_address',''), 16), 256)} )
elif op == 'ORIGIN': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('contract_address',''), 16), 256)} )
elif op == 'GASPRICE': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('gas_price',''), 16), 256) } )
elif op == 'COINBASE': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(0,256)} )
elif op == 'DIFFICULTY': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(0,256)} )
elif op == 'CALLER': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('my_address',''), 16), 256) } )
elif op == 'GAS': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('gas',''),16), 256) } )
elif op == 'MSIZE': stack.append(
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> int:
return pulumi.get(self, "value")
@pulumi.output_type
class EndpointConfigAsyncInferenceClientConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maxConcurrentInvocationsPerInstance":
suggest = "max_concurrent_invocations_per_instance"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointConfigAsyncInferenceClientConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointConfigAsyncInferenceClientConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointConfigAsyncInferenceClientConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
max_concurrent_invocations_per_instance: Optional[int] = None):
if max_concurrent_invocations_per_instance is not None:
pulumi.set(__self__, "max_concurrent_invocations_per_instance", max_concurrent_invocations_per_instance)
@property
@pulumi.getter(name="maxConcurrentInvocationsPerInstance")
def max_concurrent_invocations_per_instance(self) -> Optional[int]:
return pulumi.get(self, "max_concurrent_invocations_per_instance")
@pulumi.output_type
class EndpointConfigAsyncInferenceConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "outputConfig":
suggest = "output_config"
elif key == "clientConfig":
suggest = "client_config"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointConfigAsyncInferenceConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointConfigAsyncInferenceConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointConfigAsyncInferenceConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
output_config: 'outputs.EndpointConfigAsyncInferenceOutputConfig',
client_config: Optional['outputs.EndpointConfigAsyncInferenceClientConfig'] = None):
pulumi.set(__self__, "output_config", output_config)
if client_config is not None:
pulumi.set(__self__, "client_config", client_config)
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> 'outputs.EndpointConfigAsyncInferenceOutputConfig':
return pulumi.get(self, "output_config")
@property
@pulumi.getter(name="clientConfig")
def client_config(self) -> Optional['outputs.EndpointConfigAsyncInferenceClientConfig']:
return pulumi.get(self, "client_config")
@pulumi.output_type
class EndpointConfigAsyncInferenceNotificationConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "errorTopic":
suggest = "error_topic"
elif key == "successTopic":
suggest = "success_topic"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointConfigAsyncInferenceNotificationConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointConfigAsyncInferenceNotificationConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointConfigAsyncInferenceNotificationConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
error_topic: Optional[str] = None,
success_topic: Optional[str] = None):
if error_topic is not None:
pulumi.set(__self__, "error_topic", error_topic)
if success_topic is not None:
pulumi.set(__self__, "success_topic", success_topic)
@property
@pulumi.getter(name="errorTopic")
def error_topic(self) -> Optional[str]:
return pulumi.get(self, "error_topic")
@property
@pulumi.getter(name="successTopic")
def success_topic(self) -> Optional[str]:
return pulumi.get(self, "success_topic")
@pulumi.output_type
class EndpointConfigAsyncInferenceOutputConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "s3OutputPath":
suggest = "s3_output_path"
elif key == "kmsKeyId":
suggest = "kms_key_id"
elif key == "notificationConfig":
suggest = "notification_config"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointConfigAsyncInferenceOutputConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointConfigAsyncInferenceOutputConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointConfigAsyncInferenceOutputConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
s3_output_path: str,
kms_key_id: Optional[str] = None,
notification_config: Optional['outputs.EndpointConfigAsyncInferenceNotificationConfig'] = None):
pulumi.set(__self__, "s3_output_path", s3_output_path)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if notification_config is not None:
pulumi.set(__self__, "notification_config", notification_config)
@property
@pulumi.getter(name="s3OutputPath")
def s3_output_path(self) -> str:
return pulumi.get(self, "s3_output_path")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[str]:
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="notificationConfig")
def notification_config(self) -> Optional['outputs.EndpointConfigAsyncInferenceNotificationConfig']:
return pulumi.get(self, "notification_config")
@pulumi.output_type
class EndpointConfigCaptureContentTypeHeader(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "csvContentTypes":
suggest = "csv_content_types"
elif key == "jsonContentTypes":
suggest = "json_content_types"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointConfigCaptureContentTypeHeader. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointConfigCaptureContentTypeHeader.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointConfigCaptureContentTypeHeader.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
csv_content_types: Optional[Sequence[str]] = None,
json_content_types: Optional[Sequence[str]] = None):
if csv_content_types is not None:
pulumi.set(__self__, "csv_content_types", csv_content_types)
if json_content_types is not None:
pulumi.set(__self__, "json_content_types", json_content_types)
@property
@pulumi.getter(name="csvContentTypes")
def csv_content_types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "csv_content_types")
@property
@pulumi.getter(name="jsonContentTypes")
def json_content_types(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "json_content_types")
@pulumi.output_type
class EndpointConfigCaptureOption(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "captureMode":
suggest = "capture_mode"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointConfigCaptureOption. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointConfigCaptureOption.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointConfigCaptureOption.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
capture_mode: str):
pulumi.set(__self__, "capture_mode", capture_mode)
@property
@pulumi.getter(name="captureMode")
def capture_mode(self) -> str:
return pulumi.get(self, "capture_mode")
@pulumi.output_type
class EndpointConfigDataCaptureConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "captureOptions":
suggest = "capture_options"
elif key == "destinationS3Uri":
suggest = "destination_s3_uri"
elif key == "initialSamplingPercentage":
suggest = "initial_sampling_percentage"
elif key == "captureContentTypeHeader":
suggest = "capture_content_type_header"
elif key == "enableCapture":
suggest = "enable_capture"
elif key == "kmsKeyId":
suggest = "kms_key_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointConfigDataCaptureConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointConfigDataCaptureConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointConfigDataCaptureConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
capture_options: Sequence['outputs.EndpointConfigCaptureOption'],
destination_s3_uri: str,
initial_sampling_percentage: int,
capture_content_type_header: Optional['outputs.EndpointConfigCaptureContentTypeHeader'] = None,
enable_capture: Optional[bool] = None,
kms_key_id: Optional[str] = None):
pulumi.set(__self__, "capture_options", capture_options)
pulumi.set(__self__, "destination_s3_uri", destination_s3_uri)
pulumi.set(__self__, "initial_sampling_percentage", initial_sampling_percentage)
if capture_content_type_header is not None:
pulumi.set(__self__, "capture_content_type_header", capture_content_type_header)
if enable_capture is not None:
pulumi.set(__self__, "enable_capture", enable_capture)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
@property
@pulumi.getter(name="captureOptions")
def capture_options(self) -> Sequence['outputs.EndpointConfigCaptureOption']:
return pulumi.get(self, "capture_options")
@property
@pulumi.getter(name="destinationS3Uri")
def destination_s3_uri(self) -> str:
return pulumi.get(self, "destination_s3_uri")
@property
@pulumi.getter(name="initialSamplingPercentage")
def initial_sampling_percentage(self) -> int:
return pulumi.get(self, "initial_sampling_percentage")
@property
@pulumi.getter(name="captureContentTypeHeader")
def capture_content_type_header(self) -> Optional['outputs.EndpointConfigCaptureContentTypeHeader']:
return pulumi.get(self, "capture_content_type_header")
@property
@pulumi.getter(name="enableCapture")
def enable_capture(self) -> Optional[bool]:
return pulumi.get(self, "enable_capture")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[str]:
return pulumi.get(self, "kms_key_id")
@pulumi.output_type
class EndpointConfigProductionVariant(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "initialVariantWeight":
suggest = "initial_variant_weight"
elif key == "modelName":
suggest = "model_name"
elif key == "variantName":
suggest = "variant_name"
elif key == "acceleratorType":
suggest = "accelerator_type"
elif key == "initialInstanceCount":
suggest = "initial_instance_count"
elif key == "instanceType":
suggest = "instance_type"
elif key == "serverlessConfig":
suggest = "serverless_config"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointConfigProductionVariant. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointConfigProductionVariant.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointConfigProductionVariant.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
initial_variant_weight: float,
model_name: str,
variant_name: str,
accelerator_type: Optional[str] = None,
initial_instance_count: Optional[int] = None,
instance_type: Optional[str] = None,
serverless_config: Optional['outputs.EndpointConfigServerlessConfig'] = None):
pulumi.set(__self__, "initial_variant_weight", initial_variant_weight)
pulumi.set(__self__, "model_name", model_name)
pulumi.set(__self__, "variant_name", variant_name)
if accelerator_type is not None:
pulumi.set(__self__, "accelerator_type", accelerator_type)
if initial_instance_count is not None:
pulumi.set(__self__, "initial_instance_count", initial_instance_count)
if instance_type is not None:
pulumi.set(__self__, "instance_type", instance_type)
if serverless_config is not None:
pulumi.set(__self__, "serverless_config", serverless_config)
@property
@pulumi.getter(name="initialVariantWeight")
def initial_variant_weight(self) -> float:
return pulumi.get(self, "initial_variant_weight")
@property
@pulumi.getter(name="modelName")
def model_name(self) -> str:
return pulumi.get(self, "model_name")
@property
@pulumi.getter(name="variantName")
def variant_name(self) -> str:
return pulumi.get(self, "variant_name")
@property
@pulumi.getter(name="acceleratorType")
def accelerator_type(self) -> Optional[str]:
return pulumi.get(self, "accelerator_type")
@property
@pulumi.getter(name="initialInstanceCount")
def initial_instance_count(self) -> Optional[int]:
return pulumi.get(self, "initial_instance_count")
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> Optional[str]:
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="serverlessConfig")
def serverless_config(self) -> Optional['outputs.EndpointConfigServerlessConfig']:
return pulumi.get(self, "serverless_config")
@pulumi.output_type
class EndpointConfigServerlessConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maxConcurrency":
suggest = "max_concurrency"
elif key == "memorySizeInMB":
suggest = "memory_size_in_mb"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointConfigServerlessConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointConfigServerlessConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointConfigServerlessConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
max_concurrency: int,
memory_size_in_mb: int):
pulumi.set(__self__, "max_concurrency", max_concurrency)
pulumi.set(__self__, "memory_size_in_mb", memory_size_in_mb)
@property
@pulumi.getter(name="maxConcurrency")
def max_concurrency(self) -> int:
return pulumi.get(self, "max_concurrency")
@property
@pulumi.getter(name="memorySizeInMB")
def memory_size_in_mb(self) -> int:
return pulumi.get(self, "memory_size_in_mb")
@pulumi.output_type
class EndpointConfigTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class EndpointDeploymentConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "blueGreenUpdatePolicy":
suggest = "blue_green_update_policy"
elif key == "autoRollbackConfiguration":
suggest = "auto_rollback_configuration"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointDeploymentConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointDeploymentConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointDeploymentConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
blue_green_update_policy: 'outputs.EndpointBlueGreenUpdatePolicy',
auto_rollback_configuration: Optional['outputs.EndpointAutoRollbackConfig'] = None):
pulumi.set(__self__, "blue_green_update_policy", blue_green_update_policy)
if auto_rollback_configuration is not None:
pulumi.set(__self__, "auto_rollback_configuration", auto_rollback_configuration)
@property
@pulumi.getter(name="blueGreenUpdatePolicy")
def blue_green_update_policy(self) -> 'outputs.EndpointBlueGreenUpdatePolicy':
return pulumi.get(self, "blue_green_update_policy")
@property
@pulumi.getter(name="autoRollbackConfiguration")
def auto_rollback_configuration(self) -> Optional['outputs.EndpointAutoRollbackConfig']:
return pulumi.get(self, "auto_rollback_configuration")
@pulumi.output_type
class EndpointTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class
# Document.name => Document
self._df = {} # Cache of document frequency per word.
self._similarity = {} # Cache of ((D1.id,D2.id), weight)-items (cosine similarity).
self._divergence = {} # Cache of Kullback-leibler divergence per (word1, word2).
self._ig = {} # Cache of (word, information gain)-items.
self._vector = None # Cache of corpus vector with all the words in the corpus.
self._lsa = None # LSA matrix with reduced dimensionality.
self._weight = weight # Weight used in Document.vector (TF-IDF or TF).
self._update()
self.extend(documents)
@property
def documents(self):
return self._documents
@property
def terms(self):
return self.vector.keys()
features = words = terms
@property
def classes(self):
return list(set(d.type for d in self.documents))
def _get_lsa(self):
return self._lsa
def _set_lsa(self, v=None):
self._lsa = v
self._update()
lsa = property(_get_lsa, _set_lsa)
def _get_weight(self):
return self._weight
def _set_weight(self, w):
self._weight = w
self._update() # Clear the cache.
weight = property(_get_weight, _set_weight)
@classmethod
def build(cls, path, *args, **kwargs):
""" Builds the corpus from a folder of text documents (e.g. path="folder/*.txt").
Each file is split into words and the words are counted.
"""
name = kwargs.pop("name", lambda path: None)
documents = []
for f in glob.glob(path):
documents.append(Document.open(f, *args, **kwargs))
documents[-1]._name = name(f)
return cls(documents)
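# Usage sketch (hypothetical path; assumes os is imported in this module): build
# a corpus from a folder of plain-text files, deriving each Document.name from
# its file name:
#   corpus = Corpus.build("reviews/*.txt", name=lambda path: os.path.basename(path))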
@classmethod
def load(cls, path):
""" Loads the corpus from a pickle file created with Corpus.save().
"""
return cPickle.load(open(path))
def save(self, path, update=False):
""" Saves the corpus as a pickle file at the given path.
It can be loaded with Corpus.load().
This is faster because the words in the documents do not need to be stemmed again,
and cached vectors and similarities are stored
"""
if update:
for d1 in self.documents:
for d2 in self.documents:
self.cosine_similarity(d1, d2) # Update the entire cache before saving.
m = dict.fromkeys((d.id for d in self.documents), True)
for id1, id2 in self._similarity.keys():
if id1 not in m \
or id2 not in m:
self._similarity.pop((id1, id2)) # Remove Corpus.search() query cache.
cPickle.dump(self, open(path, "w"), BINARY)
def export(self, path, format=ORANGE, **kwargs):
""" Exports the corpus as a file for other machine learning applications,
e.g., Orange or Weka which both have a GUI and are faster.
"""
# Note: the Document.vector space is exported without cache or LSA concept space.
keys = sorted(self.vector.keys())
s = []
# Orange tab format:
if format == ORANGE:
s.append("\t".join(keys + ["m#name", "c#type"]))
for document in self.documents:
v = document.vector
v = [v.get(k, 0) for k in keys]
v = "\t".join(x==0 and "0" or "%.4f" % x for x in v)
v = "%s\t%s\t%s" % (v, document.name or "", document.type or "")
s.append(v)
# Weka ARFF format:
if format == WEKA:
s.append("@RELATION %s" % kwargs.get("name", hash(self)))
s.append("\n".join("@ATTRIBUTE %s NUMERIC" % k for k in keys))
s.append("@ATTRIBUTE class {%s}" % ",".join(set(d.type or "" for d in self.documents)))
s.append("@DATA")
for document in self.documents:
v = document.vector
v = [v.get(k, 0) for k in keys]
v = ",".join(x==0 and "0" or "%.4f" % x for x in v)
v = "%s,%s" % (v, document.type or "")
s.append(v)
s = "\n".join(s)
f = open(path, "w", encoding="utf-8")
f.write(decode_utf8(s))
f.close()
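# Usage sketch (hypothetical paths): export the corpus vector space for Weka:
#   corpus.export("corpus.arff", format=WEKA, name="mycorpus")
# or for Orange (the default format):
#   corpus.export("corpus.tab")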
def _update(self):
# Ensures that all document relevancy vectors are recalculated
# when a document is added or deleted in the corpus (= new words or less words).
self._df = {}
self._similarity = {}
self._divergence = {}
self._ig = {}
self._vector = None
self._lsa = None
for document in self.documents:
document._vector = None
def __len__(self):
return len(self.documents)
def __iter__(self):
return iter(self.documents)
def __getitem__(self, i):
return self.documents.__getitem__(i)
def __delitem__(self, i):
d = list.pop(self.documents, i)
d._corpus = None
self._index.pop(d.name, None)
self._update()
def clear(self):
self._documents = readonlylist()
self._update()
def append(self, document):
""" Appends the given Document to the corpus, setting the corpus as its parent.
The corpus is updated, meaning that the cache of vectors and similarities is cleared
(relevancy and similarity weights will be different now that there is a new document).
"""
if not isinstance(document, Document):
raise TypeError, "Corpus.append() expects a Document."
document._corpus = self
if document.name is not None:
self._index[document.name] = document
list.append(self.documents, document)
self._update()
def extend(self, documents):
""" Extends the corpus with the given list of documents.
Clears the cache of vectors and similarities.
"""
for document in documents:
document._corpus = self
if document.name is not None:
self._index[document.name] = document
list.extend(self.documents, documents)
self._update()
def remove(self, document):
""" Removes the given Document from the corpus (sets Document.corpus=None).
"""
self.__delitem__(self.documents.index(document))
def document(self, name):
""" Returns the Document with the given name.
"""
# This assumes document names are unique.
if name in self._index:
return self._index[name]
def document_frequency(self, word):
""" Returns the document frequency of a word.
Returns 0 if there are no documents in the corpus (e.g. no word frequency).
df = number of documents containing the word / number of documents.
The more occurrences of the word across the corpus, the higher its df weight.
"""
if len(self.documents) == 0:
return 0
if len(self._df) == 0:
# Caching document frequency for each word gives a 300x performance boost
# (calculate all of them once). Drawback: if you need TF-IDF for just one document.
for d in self.documents:
for w in d.terms:
self._df[w] = (w in self._df) and self._df[w]+1 or 1
for w in self._df:
self._df[w] /= float(len(self.documents))
return self._df.get(word, 0.0)
df = document_frequency
def inverse_document_frequency(self, word):
""" Returns the inverse document frequency of a word.
Returns None if the word is not in the corpus, or if there are no documents in the corpus.
Using the natural logarithm:
idf = log(1/df)
The more occurrences of the word, the lower its idf weight (log() makes it grow slowly).
"""
df = self.df(word)
return df != 0 and log(1.0/df) or None
idf = inverse_document_frequency
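# Worked example (sketch): in a two-document corpus where only one document
# contains the word "cat", df("cat") = 1/2 = 0.5 and
# idf("cat") = log(1/0.5) = log(2) ~ 0.693; a word that occurs in every
# document has df = 1.0 and idf = log(1) = 0.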
@property
def vector(self):
""" Returns a dictionary of (word, 0)-items from the corpus.
It includes all words from all documents (i.e. it is the dimension of the vector space).
If a document is given, sets the document word relevancy values in the vector.
"""
# Note:
# - Corpus.vector is the dictionary of all (word, 0)-items.
# - Corpus.vector(document) returns a copy with the document's word relevancy values in it.
# - This is the full document vector, opposed to the sparse Document.vector.
# Words in a document that are not in the corpus vector are ignored
# (e.g. the document was not in the corpus, this can be the case in Corpus.search() for example).
# See Vector.__call__() why this is possible.
if not self._vector:
self._vector = Vector((w, 0) for w in chain(*(d.terms for d in self.documents)))
return self._vector
@property
def vectors(self):
""" Yields a list of all document vectors.
"""
return [d.vector for d in self.documents]
@property
def density(self):
""" Yields the overall word coverage as a number between 0.0-1.0.
"""
return float(sum(len(d.vector) for d in self.documents)) / len(self.vector)**2
# Following methods rely on Document.vector:
# frequent sets, cosine similarity, nearest neighbors, search, clustering,
# latent semantic analysis, divergence.
def frequent_concept_sets(self, threshold=0.5):
""" Returns a dictionary of (set(words), frequency)
of word combinations with a frequency above the given threshold.
"""
return apriori([d.terms for d in self.documents], support=threshold)
sets = frequent = frequent_concept_sets
def cosine_similarity(self, document1, document2):
""" Returns the similarity between two documents in the corpus as a number between 0.0-1.0.
The weight is based on the document relevancy vectors (i.e. tf-idf of words in the text).
cos = dot(v1,v2) / (norm(v1) * norm(v2))
"""
# If we already calculated the similarity between the given documents,
# it is available in cache for reuse.
id1 = document1.id
id2 = document2.id
if (id1, id2) in self._similarity: return self._similarity[(id1, id2)]
if (id2, id1) in self._similarity: return self._similarity[(id2, id1)]
# Calculate the matrix multiplication of the document vectors.
#v1 = self.vector(document1)
#v2 = self.vector(document2)
#s = cosine_similarity(v1.itervalues(), v2.itervalues()) / (v1.norm * v2.norm or 1)
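# Worked example (sketch): for relevancy vectors v1 = (1, 1, 0) and
# v2 = (1, 0, 0), dot(v1, v2) = 1, norm(v1) = sqrt(2), norm(v2) = 1,
# so the cosine similarity is 1 / sqrt(2) ~ 0.707.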
invalid character for the
host, i.e. Maya doesn't support object names starting with a number, and automatically renames the object.
In order to retrieve the object use this function. If an invalid
character is found, the character is removed.
This function may change in the future, as it currently only looks for numbers.
>>> name = "1sphere"
>>> sphere_obj,sphere_mesh = helper.Sphere(name)
>>> print (sphere_obj,sphere_mesh)#in maya
(u'sphere', u'makeNurbSphere1')
>>> corrected_name = helper.checkName(name)
>>> print (corrected_name)
sphere
>>> sphere = helper.getObject(name)
>>> print (sphere)
sphere
@type name: string
@param name: name of the molecule.
@rtype: string
@return: corrected name of the molecule.
"""
invalid=[]
for i in range(10):
invalid.append(str(i))
if name[0] in invalid:
name= name[1:]
return name
def getObject(self,name):
"""
Retrieve an object from its name.
* overridden by child classes for each host
>>> oname = "mysphere"
>>> object= helper.getObject(oname)
>>> print oname,object #the result depends on the host
mysphere <c4d.BaseObject object at 0x1e4fc4b0> # Cinema4D
mysphere # Maya
@type name: string
@param name: request name of an host object
@rtype: hostObject
@return: the object with the requested name or None
"""
return None
def getObjectName(self,o):
"""
Return the name of an host object.
* overridden by child classes for each host
>>> obj = helper.Sphere("mySphere")
>>> name = helper.getObjectName(obj)
>>> print (name)
mySphere
@type o: hostObject
@param o: an host object
@rtype: string
@return: the name of the host object
"""
pass
@classmethod
def getCurrentScene(self,):
"""
Return the current/active working document or scene.
* overridden by child classes for each host
>>> sc = helper.getCurrentScene()
>>> print (sc)
None #in maya there is no scene concept
<bpy_struct, Scene("Scene") #blender 2.6
[Scene "Scene"] #blender 2.49b
<c4d.documents.BaseDocument object at 0x246c01a0> #Cinema4D
@rtype: scene
@return: the active scene
"""
pass
@classmethod
def getCurrentSceneName(self):
"""
Return the current/active working document or scene name.
* overridden by child classes for each host
>>> scname = helper.getCurrentSceneName()
>>> print (scname)
None #maya
Scene #blender 2.6
Scene #blender 2.49b
Untitled #Cinema4D
@rtype: string
@return: the active scene name
"""
pass
def getCurrentSelection(self,):
"""
Return the current/active selected object in the document or scene.
* overridden by child classes for each host
>>> liste_objects = helper.getCurrentSelection()
>>> print (liste_objects)
[<c4d.BaseObject object at 0x1e4fd3a0>, <c4d.BaseObject object at 0x1e4fd3d0>] #cinema4D
@rtype: liste
@return: the list of selected object
"""
pass
def setCurrentSelection(self,obj):
"""
Set the current/active selection in the document or scene.
* overridden by child classes for each host
>>> liste_objects = [helper.getObject("obj1"),helper.getObject("obj2")]
>>> helper.setCurrentSelection(liste_objects)
@type obj: hostObject
@param obj: the object to be selected
"""
pass
def getPosUntilRoot(self,object):
"""
Go through the hierarchy of the object until reaching the top level,
increment the position to get the transformation due to parents.
DEPRECATED
@type object: hostObject
@param object: the object
@rtype: list
@return: the cumulative translation along the parenting hierarchy
"""
stop = False
#get the first parent
pos=[0,0,0]
while not stop :
#get the parent position, and add it to pos
#get the parent of the previous parent
parent=None
if parent is None :
stop = True
return pos
def addObjectToScene(self,doc,object,parent=None,centerRoot=True,rePos=None):
"""
Insert/add an object to the current document under the specified parent, and
at the specified location. This function is used by all the basic object creation functions.
* overridden by child classes for each host
@type doc: hostScene
@param doc: the scene where to insert the object
@type object: hostObject
@param object: the object to insert
@type parent: hostObject
@param parent: the parent of the object to insert under
@type centerRoot: boolean
@param centerRoot: whether the object has to be recentered according to the top level
@type rePos: list
@param rePos: the location of the object in the scene
"""
#get the object name
name=""
#if the object is not already in the scene
if self.getObject(name) == None:
if parent != None :
if type(parent) == str : parent = self.getObject(parent)
#if parent exist, insert the object under it
pass
if centerRoot :
#get the current position of the object
currentPos = []
if rePos != None :
parentPos = rePos
else :
parentPos = self.getPosUntilRoot(object) #parent.GetPos()
#set the new position of the object
pass
else :
#insert the object
pass
def AddObject(self,object,parent=None,centerRoot=True,rePos=None):
"""
Insert/add an object to the current document under the specified parent, and
at the specified location. This function is an alias for addObjectToScene to
permit some scripts to work both in DejaVu and in the host.
* overridden by child classes for each host
@type object: hostObject
@param object: the object to insert
@type parent: hostObject
@param parent: the parent of the object to insert under
@type centerRoot: boolean
@param centerRoot: whether the object has to be recentered according to the top level
@type rePos: list
@param rePos: the location of the object in the scene
"""
doc = self.getCurrentScene()
self.addObjectToScene(doc,object,parent=parent,centerRoot=centerRoot,
rePos=rePos)
def ObjectsSelection(self,listeObjects,typeSel="new"):
"""
Modify the current object selection. Redundant with setCurrentSelection.
This function makes the distinction between adding (typeSel="add") objects to the selection and creating
a new selection (typeSel="new")
* overridden by child classes for each host
@type listeObjects: list
@param listeObjects: list of objects to select
@type typeSel: string
@param typeSel: type of modification: new, add, ...
"""
# dic={"add":c4d.SELECTION_ADD,"new":c4d.SELECTION_NEW}
sc = self.getCurrentScene()
#Put here the code to add/set an object to the current slection
#[sc.SetSelection(x,dic[typeSel]) for x in listeObjects]
def JoinsObjects(self,listeObjects):
"""
Merge the given list of objects into one unique geometry.
* overridden by child classes for each host
@type listeObjects: list
@param listeObjects: list of objects to join
"""
sc = self.getCurrentScene()
def addCameraToScene(self,name,Type,focal,center,scene,**kw):
"""
Add a camera object to the scene
* overridden by child classes for each host
>>> sc = helper.getCurrentScene()
>>> center=[0.,-12.,40.]
>>> cam = helper.addCameraToScene("cam1","persp",30.,center,sc)
@type name: string
@param name: name of the camera
@type Type: cameraType
@param Type: perspective, orthogonal, etc.
@type focal: float
@param focal: the focal of the camera
@type center: list
@param center: the position of the camera
@type scene: host scene
@param scene: the scene
#we add a **kw for future arguments
"""
pass
# cam = None
# self.addObjectToScene(scene,cam)
def addLampToScene(self,name,Type='Area',rgb=[1.,1.,1.],dist=25.0,energy=1.0,
soft=1.0,shadow=False,center=[0.,0.,0.],sc=None,**kw):
"""
Add a light to the scene
* overridden by child classes for each host
>>> sc = helper.getCurrentScene()
>>> center=[0.,-12.,40.]
>>> color = [1.,1.,1.]
>>> light = helper.addLampToScene("light1","Sun",color,20.,1.0,1.0,True,center,sc)
@type name: string
@param name: name of the instance
@type Type: light hostType/int etc..
@param Type: the light type : spot,sun,omni,etc..
@type rgb: list of int 0-255
@param rgb: color of the light in rgb
@type dist: float
@param dist: light distance of attenuation
@type energy: float
@param energy: intensity of the light
@type soft: bool
@param soft: soft light
@type shadow: boolean
@param shadow: does the light produce shadow
@type scene: host scene
@param scene: the scene
#we add a **kw for future arguments
"""
dicType={'Area':0,'Sun':3}
lamp = None#c4d.BaseObject(LIGHT)
#lamp name (name)
#lamp position (center)
#lamp color (float(rgb[0]), float(rgb[1]), float(rgb[2]))#color
#lamp energy float(energy) #intensity
#lamp type dicType[Type] #type
if shadow :
#lampe shadow
pass
self.addObjectToScene(sc,lamp,centerRoot=False)
def newEmpty(self,name,location=None,parentCenter=None,**kw):
"""
Create a new Null/Empty Object
* overridden by child classes for each host
>>> empty = helper.newEmpty("null1",location=[10.0,0.0,0.0])
>>> empty_child = helper.newEmpty("null2",location=[15.0,0.0,0.0],parent = empty)
@type name: string
@param name: name of the empty
@type location: list
@param location: position of the null object
@type parentCenter: list
@param
del_items(0x80138AF4)
SetType(0x80138AF4, "void GameOnlyTestRoutine__Fv()")
del_items(0x80138AFC)
SetType(0x80138AFC, "int vecleny__Fii(int a, int b)")
del_items(0x80138B20)
SetType(0x80138B20, "int veclenx__Fii(int a, int b)")
del_items(0x80138B4C)
SetType(0x80138B4C, "void GetDamageAmt__FiPiT1(int i, int *mind, int *maxd)")
del_items(0x80139144)
SetType(0x80139144, "int CheckBlock__Fiiii(int fx, int fy, int tx, int ty)")
del_items(0x8013922C)
SetType(0x8013922C, "int FindClosest__Fiii(int sx, int sy, int rad)")
del_items(0x801393C8)
SetType(0x801393C8, "int GetSpellLevel__Fii(int id, int sn)")
del_items(0x8013943C)
SetType(0x8013943C, "int GetDirection8__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80139658)
SetType(0x80139658, "int GetDirection16__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80139874)
SetType(0x80139874, "void DeleteMissile__Fii(int mi, int i)")
del_items(0x801398CC)
SetType(0x801398CC, "void GetMissileVel__Fiiiiii(int i, int sx, int sy, int dx, int dy, int v)")
del_items(0x80139A80)
SetType(0x80139A80, "void PutMissile__Fi(int i)")
del_items(0x80139B84)
SetType(0x80139B84, "void GetMissilePos__Fi(int i)")
del_items(0x80139CAC)
SetType(0x80139CAC, "void MoveMissilePos__Fi(int i)")
del_items(0x80139E14)
SetType(0x80139E14, "unsigned char MonsterTrapHit__FiiiiiUc(int m, int mindam, int maxdam, int dist, int t, int shift)")
del_items(0x8013A188)
SetType(0x8013A188, "unsigned char MonsterMHit__FiiiiiiUc(int pnum, int m, int mindam, int maxdam, int dist, int t, int shift)")
del_items(0x8013A8E8)
SetType(0x8013A8E8, "unsigned char PlayerMHit__FiiiiiiUcUc(int pnum, int m, int dist, int mind, int maxd, int mtype, int shift, int earflag)")
del_items(0x8013B354)
SetType(0x8013B354, "unsigned char Plr2PlrMHit__FiiiiiiUc(int pnum, int p, int mindam, int maxdam, int dist, int mtype, int shift)")
del_items(0x8013BB30)
SetType(0x8013BB30, "void CheckMissileCol__FiiiUciiUc(int i, int mindam, int maxdam, unsigned char shift, int mx, int my, int nodel)")
del_items(0x8013BFAC)
SetType(0x8013BFAC, "unsigned char GetTableValue__FUci(unsigned char code, int dir)")
del_items(0x8013C040)
SetType(0x8013C040, "void SetMissAnim__Fii(int mi, int animtype)")
del_items(0x8013C110)
SetType(0x8013C110, "void SetMissDir__Fii(int mi, int dir)")
del_items(0x8013C154)
SetType(0x8013C154, "void AddLArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013C334)
SetType(0x8013C334, "void AddArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013C4F0)
SetType(0x8013C4F0, "void GetVileMissPos__Fiii(int mi, int dx, int dy)")
del_items(0x8013C614)
SetType(0x8013C614, "void AddRndTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013C984)
SetType(0x8013C984, "void AddFirebolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x8013CBF0)
SetType(0x8013CBF0, "void AddMagmaball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013CD04)
SetType(0x8013CD04, "void AddTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013CEFC)
SetType(0x8013CEFC, "void AddLightball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013D050)
SetType(0x8013D050, "void AddFirewall__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013D238)
SetType(0x8013D238, "void AddFireball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013D494)
SetType(0x8013D494, "void AddLightctrl__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013D57C)
SetType(0x8013D57C, "void AddLightning__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013D744)
SetType(0x8013D744, "void AddMisexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013D950)
SetType(0x8013D950, "void AddWeapexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013DA38)
SetType(0x8013DA38, "unsigned char CheckIfTrig__Fii(int x, int y)")
del_items(0x8013DB1C)
SetType(0x8013DB1C, "void AddTown__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013DF40)
SetType(0x8013DF40, "void AddFlash__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E150)
SetType(0x8013E150, "void AddFlash2__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E330)
SetType(0x8013E330, "void AddManashield__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E3F8)
SetType(0x8013E3F8, "void AddFiremove__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E554)
SetType(0x8013E554, "void AddGuardian__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E9C0)
SetType(0x8013E9C0, "void AddChain__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013EA1C)
SetType(0x8013EA1C, "void AddRhino__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013EBD8)
SetType(0x8013EBD8, "void AddFlare__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013EED0)
SetType(0x8013EED0, "void AddAcid__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013EFD4)
SetType(0x8013EFD4, "void AddAcidpud__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F0AC)
SetType(0x8013F0AC, "void AddStone__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F3A4)
SetType(0x8013F3A4, "void AddGolem__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F55C)
SetType(0x8013F55C, "void AddBoom__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F5F0)
SetType(0x8013F5F0, "void AddHeal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F818)
SetType(0x8013F818, "void AddHealOther__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F880)
SetType(0x8013F880, "void AddElement__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013FAAC)
SetType(0x8013FAAC, "void AddIdentify__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013FB5C)
SetType(0x8013FB5C, "void AddFirewallC__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013FE0C)
SetType(0x8013FE0C, "void AddInfra__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013FF08)
SetType(0x8013FF08, "void AddWave__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013FF8C)
SetType(0x8013FF8C, "void AddNova__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801401A4)
SetType(0x801401A4, "void AddRepair__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80140254)
SetType(0x80140254, "void AddRecharge__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80140304)
SetType(0x80140304, "void AddDisarm__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8014036C)
SetType(0x8014036C, "void AddApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801405A8)
SetType(0x801405A8, "void AddFlame__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int seqno)")
del_items(0x801407C4)
SetType(0x801407C4, "void AddFlamec__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801408B4)
SetType(0x801408B4, "void AddCbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x80140AA8)
SetType(0x80140AA8, "void AddHbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x80140C68)
SetType(0x80140C68, "void AddResurrect__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80140CDC)
SetType(0x80140CDC, "void AddResurrectBeam__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80140D64)
SetType(0x80140D64, "void AddTelekinesis__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80140DCC)
SetType(0x80140DCC, "void AddBoneSpirit__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80140FC8)
SetType(0x80140FC8, "void AddRportal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80141068)
SetType(0x80141068, "void AddDiabApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801411A4)
SetType(0x801411A4, "int AddMissile__Fiiiiiiciii(int sx, int sy, int v1, int v2, int midir, int mitype, int micaster, int id, int v3, int spllvl)")
del_items(0x801414F0)
SetType(0x801414F0, "int Sentfire__Fiii(int i, int sx, int sy)")
del_items(0x801416D4)
SetType(0x801416D4, "void MI_Dummy__Fi(int i)")
del_items(0x801416DC)
SetType(0x801416DC, "void MI_Golem__Fi(int i)")
del_items(0x80141938)
SetType(0x80141938, "void MI_SetManashield__Fi(int i)")
del_items(0x80141974)
SetType(0x80141974, "void MI_LArrow__Fi(int i)")
del_items(0x80142130)
SetType(0x80142130, "void MI_Arrow__Fi(int i)")
del_items(0x8014234C)
SetType(0x8014234C, "void MI_Firebolt__Fi(int i)")
del_items(0x80142A18)
SetType(0x80142A18, "void MI_Lightball__Fi(int i)")
del_items(0x80142CA0)
SetType(0x80142CA0, "void MI_Acidpud__Fi(int i)")
del_items(0x80142DB0)
SetType(0x80142DB0, "void MI_Firewall__Fi(int i)")
del_items(0x80143074)
SetType(0x80143074, "void MI_Fireball__Fi(int i)")
del_items(0x80143A38)
SetType(0x80143A38, "void MI_Lightctrl__Fi(int i)")
del_items(0x80143DB4)
SetType(0x80143DB4, "void MI_Lightning__Fi(int i)")
del_items(0x80143EA0)
SetType(0x80143EA0, "void MI_Town__Fi(int i)")
del_items(0x801440D8)
SetType(0x801440D8, "void MI_Flash__Fi(int i)")
del_items(0x8014442C)
SetType(0x8014442C, "void MI_Flash2__Fi(int i)")
del_items(0x801445F4)
SetType(0x801445F4, "void MI_Manashield__Fi(int i)")
del_items(0x80144918)
SetType(0x80144918, "void MI_Firemove__Fi(int i)")
del_items(0x80144BA4)
SetType(0x80144BA4, "void MI_Guardian__Fi(int i)")
del_items(0x80144E54)
SetType(0x80144E54, "void MI_Chain__Fi(int i)")
del_items(0x801450C0)
SetType(0x801450C0, "void MI_Weapexp__Fi(int i)")
del_items(0x80145378)
SetType(0x80145378, "void MI_Misexp__Fi(int i)")
del_items(0x80145678)
SetType(0x80145678, "void MI_Acidsplat__Fi(int i)")
del_items(0x80145814)
SetType(0x80145814, "void MI_Teleport__Fi(int i)")
del_items(0x80145BDC)
SetType(0x80145BDC, "void MI_Stone__Fi(int i)")
del_items(0x80145D88)
SetType(0x80145D88, "void MI_Boom__Fi(int i)")
del_items(0x80145E80)
SetType(0x80145E80, "void MI_Rhino__Fi(int i)")
del_items(0x8014622C)
SetType(0x8014622C, "void MI_FirewallC__Fi(int i)")
del_items(0x801464B4)
SetType(0x801464B4, "void MI_Infra__Fi(int i)")
del_items(0x8014656C)
SetType(0x8014656C, "void MI_Apoca__Fi(int i)")
del_items(0x80146800)
SetType(0x80146800, "void MI_Wave__Fi(int i)")
del_items(0x80146CFC)
SetType(0x80146CFC, "void MI_Nova__Fi(int i)")
del_items(0x80146FBC)
SetType(0x80146FBC, "void MI_Flame__Fi(int i)")
del_items(0x801471B4)
SetType(0x801471B4, "void MI_Flamec__Fi(int i)")
del_items(0x8014743C)
SetType(0x8014743C, "void MI_Cbolt__Fi(int i)")
del_items(0x80147740)
SetType(0x80147740, "void MI_Hbolt__Fi(int i)")
del_items(0x80147A4C)
SetType(0x80147A4C, "void MI_Element__Fi(int i)")
del_items(0x80148104)
SetType(0x80148104, "void MI_Bonespirit__Fi(int i)")
del_items(0x8014850C)
SetType(0x8014850C, "void MI_ResurrectBeam__Fi(int i)")
del_items(0x8014857C)
SetType(0x8014857C, "void MI_Rportal__Fi(int i)")
del_items(0x801487A0)
SetType(0x801487A0, "void ProcessMissiles__Fv()")
del_items(0x80148B94)
SetType(0x80148B94, "void ClearMissileSpot__Fi(int mi)")
del_items(0x80148C4C)
SetType(0x80148C4C, "void MoveToScrollTarget__7CBlocks(struct CBlocks
== None for a in newdata]):
print("No graphing data")
else:
if None in newdata:
print("Warning: Some data to be graphed was nil; ignoring")
newdata.remove(None)
graph = chooseGraph(graph)
graph.data.extend(regularizeData(newdata, color))
graph.data = FillInColors(graph.data)
computeLimitsFromData(graph)
graph.gDrawView()
def graphLess(colorkeyword=None,
graph=None): # <a name="graph"</a>[<a href="graph.html#graph">Doc</a>]
"Remove a line of data points from the graph. Defaults to last line"
if colorkeyword == None:
subtractFromGraph(None, graph)
else:
graph = chooseGraph(graph)
removecolor = ColorFromKeyword(colorkeyword)
linenum = 0
for color, dlist in graph.data:
if color == removecolor:
subtractFromGraph(linenum, graph)
break
else:
linenum += 1
else:
print(("No such color used in this graph", colorkeyword))
def subtractFromGraph(linenum=None,
graph=None): # <a name="subtractfromgraph"</a>[<a href="graph.html#subtractfromgraph">Doc</a>]
"Remove a line of data points from the graph. Linenum is from zero or defaults to last line"
originalfrontwindow = Win.FrontWindow()
graph = chooseGraph(graph)
if linenum == None:
linenum = len(graph.data) - 1
num = 0
newdata = []
graph.data[linenum:] = graph.data[linenum + 1:]
computeLimitsFromData(graph)
gDrawView(graph)
def xGraphLimits(xmin=None, xmax=None,
graph=None): # <a name="xgraphlimits"</a>[<a href="graph.html#xgraphlimits">Doc</a>]
graph = chooseGraph(graph)
if xmin != None or xmax != None:
graph.autolimitsx = False
if xmin != None:
graph.xmin = xmin
if xmax != None:
graph.xmax = xmax
gSetCoordinateSystem(graph.dataview, graph.xmin, graph.ymin, graph.xmax,
graph.ymax, 'lowerLeft')
else:
graph.autolimitsx = True
computeLimitsFromData(graph)
graph.gDrawView()
def yGraphLimits(ymin=None, ymax=None,
graph=None): # <a name="ygraphlimits"</a>[<a href="graph.html#ygraphlimits">Doc</a>]
graph = chooseGraph(graph)
if ymin != None or ymax != None:
graph.autolimitsy = False
if ymin != None:
graph.ymin = ymin
if ymax != None:
graph.ymax = ymax
gSetCoordinateSystem(graph.dataview, graph.xmin, graph.ymin, graph.xmax,
graph.ymax, 'lowerLeft')
else:
graph.autolimitsy = True
computeLimitsFromData(graph)
graph.gDrawView()
def xTickmarks(xticks=None,
graph=None): # <a name="xtickmarks"</a>[<a href="graph.html#xtickmarks">Doc</a>]
"Sets the ticks marks and possibly resets limits."
graph = chooseGraph(graph)
if xticks == None:
graph.xtickmarks = None
if graph.data != None:
computeLimitsFromData(graph)
elif isinstance(xticks, (tuple, list)):
graph.xtickmarks = regularizeTickmarks(xticks)
graph.xmin = min(graph.xmin, MinTickmark(graph.xtickmarks))
graph.xmax = max(graph.xmax, MaxTickmark(graph.xtickmarks))
gSetCoordinateSystem(graph.dataview, graph.xmin, graph.ymin, graph.xmax,
graph.ymax, 'lowerLeft')
else: # a number
if graph.data != None and graph.data != []:
ticks = []
i = graph.xmin
while i <= graph.xmax:
ticks.append(i)
i += xticks
graph.xtickmarks = regularizeTickmarks(ticks)
graph.gDrawView()
def yTickmarks(yticks=None,
graph=None): # <a name="ytickmarks"</a>[<a href="graph.html#ytickmarks">Doc</a>]
"Sets the ticks marks and possibly resets limits."
graph = chooseGraph(graph)
if yticks == None:
graph.ytickmarks = None
if graph.data != None:
computeLimitsFromData(graph)
elif isinstance(yticks, (tuple, list)):
graph.ytickmarks = regularizeTickmarks(yticks)
if graph.data != None:
computeLimitsFromData(graph)
else:
graph.ymin = min(graph.ymin, MinTickmark(graph.ytickmarks))
graph.ymax = max(graph.ymax, MaxTickmark(graph.ytickmarks))
else: # a number
if graph.data != None and graph.data != []:
ticks = []
i = graph.ymin
while i <= graph.ymax:
ticks.append(i)
i += yticks
graph.ytickmarks = regularizeTickmarks(ticks)
graph.gDrawView()
def regularizeTickmarks(ticks):
useticks = []
for tick in ticks:
if isinstance(tick, (tuple, list)):
useticks.append(tick)
elif isinstance(tick, float):
useticks.append([tick, str(round(tick, 3))])
else:
useticks.append([tick, str(tick)])
return useticks
def MinTickmark(ticks):
if ticks != None and ticks != []:
if isinstance(ticks[0], (tuple, list)):
return ticks[0][0]
else:
return ticks[0]
def MaxTickmark(ticks):
if ticks != None and ticks != []:
if isinstance(ticks[-1], (tuple, list)):
return ticks[-1][0]
else:
return ticks[-1]
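# Editor's note (illustrative, not part of the original module): the three tickmark
# helpers above normalize caller input into [value, label] pairs. A worked example of
# the behaviour implemented by their code:
#   regularizeTickmarks([1, 2.5, (3, 'three')])
#   # -> [[1, '1'], [2.5, '2.5'], (3, 'three')]
#   MinTickmark([[1, '1'], [2.5, '2.5']])  # -> 1
#   MaxTickmark([[1, '1'], [2.5, '2.5']])  # -> 2.5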
colors = [gColorRed(True), gColorGreen(True), gColorBlue(True),
          gColorBlack(True), gColorYellow(True), gColorPink(True),
          gColorCyan(True), gColorPurple(True), gColorMagenta(True),
          gColorOrange(True), gColorBrown(True), gColorLightBlue(True),
          gColorGray(True), gColorDarkGreen(True), gColorTan(True)]
def nthColor(
n): # <a name="nthColor"</a>[<a href="graph.html#nthColor">Doc</a>]
    return colors[n % len(colors)]  # wrap around the palette (original used undefined length() and floor division)
colorList = {'blue': gBlue, 'red': gRed, 'green': gGreen, 'black': gBlack,
             'yellow': gYellow, 'pink': gPink, 'cyan': gCyan, 'purple': gPurple,
             'magenta': gMagenta, 'orange': gOrange, 'brown': gBrown,
             'lightBlue': gLightBlue, 'gray': gGray, 'darkGreen': gDarkGreen,
             'tan': gTan, 'white': gWhite, 'lightGray': gLightGray,
             'darkGray': gDarkGray}
def ColorFromKeyword(colorkeyword):
color = colorList.get(colorkeyword, None)
if color == None:
print(("Unrecognized color keyword:", colorkeyword))
return color
def FirstUnusedColor(data):
"Returns first color in the list of colors that is least used in data"
for permittedTimesUsed in range(100):
for color in colors:
if TimesColorUsed(color, data) <= permittedTimesUsed:
return color
def TimesColorUsed(color, data):
count = 0
for c, dlist in data:
if c == color:
count += 1
return count
def chooseGraph(
graph=None): # <a name="choosegraph"</a>[<a href="graph.html#choosegraph">Doc</a>]
"Select a graph based on input 'graph'"
global GDEVICE
glist = []
for w in GDEVICE.childwindows:
if isinstance(w, Graph):
glist.append(w)
if graph == None: # None given - if first is a graph, use it, else make a new one
if glist == []:
usegraph = Graph("Graph")
else:
usegraph = glist[0]
elif isinstance(graph, Graph): # already have graph
usegraph = graph
elif graph == True:
usegraph = Graph("Graph")
elif isinstance(graph, str): # we have a graph name
for g in glist:
if g.title == graph:
usegraph = g
break
else:
usegraph = Graph(graph)
else:
print(("Error: can't choose graph", graph))
usegraph = None
return usegraph
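# Editor's note (hedged usage sketch, not original code): chooseGraph resolves its
# argument to a Graph window in the surrounding GUI framework (GDEVICE). Per the
# branches above:
#   chooseGraph()            # first existing Graph child window, else a new Graph("Graph")
#   chooseGraph(True)        # always a brand-new Graph("Graph")
#   chooseGraph("Results")   # the open graph titled "Results", else a new Graph("Results")
#   chooseGraph(existing_g)  # existing_g itself, if it is already a Graph instance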
def closeGraph(graph=None):
graph.gCloseview()
def drawSegment(graph, x1, y1, x2, y2, color):
l = []
if graph.boxy:
l.append(gDrawLine(graph.dataview, x1, y1, x2, y1, color))
l.append(gDrawLine(graph.dataview, x2, y1, x2, y2, color))
else:
l.append(gDrawLine(graph.dataview, x1, y1, x2, y2, color))
return l
def draw(graph, ylist, color):
l = []
for i in range(len(ylist) - 1): # start from 1
x1 = i + 1
x2 = i + 2
y1 = ylist[i]
y2 = ylist[i + 1]
l.extend(drawSegment(graph, x1, y1, x2, y2, color))
return l
def drawXY(graph, xylist, color):
l = []
for i in range(len(xylist) - 1):
x1, y1 = xylist[i]
x2, y2 = xylist[i + 1]
l.extend(drawSegment(graph, x1, y1, x2, y2, color))
return l
def calcRadius(graph):
x0, y0, xs, ys, corner = gGetCSScale(graph.dataview)
radius = xs / 5000.0
if radius < .001:
radius = .001
if radius > .02:
radius = .02
return radius
def drawPoints(graph, ylist, color, highlight):
radius = calcRadius(graph)
if highlight:
radius = 2 * radius
l = []
for i in range(len(ylist)):
l.append(gDrawDisk(graph.dataview, i + 1, ylist[i], radius, color))
return l
def drawPointsXY(graph, xylist, color, highlight):
radius = calcRadius(graph)
if highlight:
radius = 2 * radius
l = []
for i in range(len(xylist)):
x, y = xylist[i]
l.append(gDrawDisk(graph.dataview, x, y, radius, color))
return l
def computeLimitsFromData(graph):
if graph.autolimitsx:
graph.xmin = MinTickmark(graph.xtickmarks)
if graph.xmin == None:
graph.xmin = FirstX(graph.data)
graph.xmax = MaxTickmark(graph.xtickmarks)
if graph.xmax == None:
graph.xmax = FirstX(graph.data)
if graph.autolimitsy:
graph.ymin = MinTickmark(graph.ytickmarks)
if graph.ymin == None:
graph.ymin = FirstY(graph.data)
graph.ymax = MaxTickmark(graph.ytickmarks)
if graph.ymax == None:
graph.ymax = FirstY(graph.data)
if graph.autolimitsx or graph.autolimitsy:
for color, dlist in graph.data:
if not isinstance(dlist[0], (tuple, list)):
if graph.autolimitsy:
for y in dlist:
if y < graph.ymin:
graph.ymin = y
if y > graph.ymax:
graph.ymax = y
if graph.autolimitsx:
if 1 < graph.xmin:
graph.xmin = 1
if len(dlist) > graph.xmax:
graph.xmax = len(dlist)
else:
for x, y in dlist:
if graph.autolimitsy:
if y < graph.ymin:
graph.ymin = y
if y > graph.ymax:
graph.ymax = y
if graph.autolimitsx:
if x < graph.xmin:
graph.xmin = x
if x > graph.xmax:
graph.xmax = x
if graph.ymin == graph.ymax:
print(("Warning: all lines are flat at", graph.ymin))
if graph.ymax > 0:
graph.ymin = 0
else:
graph.ymin = graph.ymax - 1
gSetCoordinateSystem(graph.dataview, graph.xmin, graph.ymin, graph.xmax,
graph.ymax, 'lowerLeft')
def gridGraph(griddensit=None,
graph=None): # <a name="gridGraph"</a>[<a href="graph.html#gridGraph">Doc</a>]
graph = chooseGraph(graph)
if griddensit != None:
graph.griddensity = griddensit
if graph.griddensity == None:
graph.griddensity = 5
for x, label in graph.xtickmarks:
dx = gdCoordx(graph.dataview, x)
if x >= graph.xmin and x <= graph.xmax:
for dy in range(gdCoordy(graph.dataview, graph.ymax), \
gdCoordy(graph.dataview, graph.ymin), \
graph.griddensity):
gdDrawPoint(graph.dataview, dx, dy, graph.maincolor)
for y, label in graph.ytickmarks:
dy = gdCoordy(graph.dataview, y)
if y >= graph.ymin and y <= graph.ymax:
for dx in range(gdCoordx(graph.dataview, graph.xmin), \
gdCoordx(graph.dataview, graph.xmax), \
graph.griddensity):
gdDrawPoint(graph.dataview, dx, dy, graph.maincolor)
def drawHighlight(graph):
if graph.highlightp and graph.highlightline != None:
color, data = graph.data[graph.highlightline]
graph.lasthighlight = drawLine(graph, data,
gColorPen(graph, color, None, None, 2),
True)
def removeHighlight(graph):
if graph.lasthighlight != None:
gDelete(graph.dataview, graph.lasthighlight)
graph.lasthighlight = None
def drawLine(graph, line, color, highlight=False):
if isinstance(line[0], (tuple, list)):
if graph.pointsonly:
return drawPointsXY(graph, line, color, highlight)
else:
return drawXY(graph, line, color)
else:
if graph.pointsonly:
return drawPoints(graph, line, color, highlight)
else:
return draw(graph, line, color)
# A histogram is a graph, created in a particular way
# histogram(data, numbins, minex, maxex, graph)
def histogram(data, numbins=None, minex=None, maxex=None, color=None,
hist=None): # <a name="histogram"</a>[<a href="graph.html#histogram">Doc</a>]
"plots histogram of data, minex <= data < maxex, in a color on a graph named hist"
if data == None or data == []:
print("No graphing data")
elif len(data) == 1:
print("Cannot histogram a single datum")
else:
if minex == None:
| |
# repo: isaaclegred/universality
"""a module for custom (Gaussian) kernel density estimation (KDE)
"""
__author__ = "<NAME> (<EMAIL>)"
#-------------------------------------------------
import numpy as np
from scipy.special import erf
import multiprocessing as mp
from universality import stats
from universality.utils import DEFAULT_NUM_PROC
#-------------------------------------------------
DEFAULT_BANDWIDTH = 0.1
KNOWN_CUMULATIVE_INTEGRAL_DIRECTIONS = [
'increasing',
'decreasing',
]
DEFAULT_CUMULATIVE_INTEGRAL_DIRECTION = 'increasing'
#-------------------------------------------------
# 1D CDF estimation
#-------------------------------------------------
def logcdf(samples, data, prior_bounds, weights=None, direction=DEFAULT_CUMULATIVE_INTEGRAL_DIRECTION, num_proc=DEFAULT_NUM_PROC):
"""estimates the log(cdf) at all points in samples based on data and integration in "direction".
Does this directly by estimating the CDF from the weighted samples WITHOUT building a KDE"""
### this should be relatively quick (just an ordered summation), so we do it once
data, cweights = stats.samples2cdf(data, weights=weights)
if direction=='increasing':
pass ### we already integrate up from the lower values to higher values
elif direction=='decreasing':
cweights = 1. - cweights ### reverse the order of the integral
else:
raise ValueError('direction=%s not understood!'%direction)
logcdfs = np.empty(len(samples), dtype=float)
if num_proc==1: ### do everything on this one core
logcdfs[:] = _logcdf_worker(samples, data, cweights, prior_bounds)
else: ### parallelize
# partition work amongst the requested number of cores
Nsamp = len(samples)
sets = _define_sets(Nsamp, num_proc)
# set up and launch processes.
procs = []
for truth in sets:
conn1, conn2 = mp.Pipe()
proc = mp.Process(target=_logcdf_worker, args=(samples[truth], data, cweights, prior_bounds), kwargs={'conn':conn2})
proc.start()
procs.append((proc, conn1))
conn2.close()
# read in results from process
for truth, (proci, conni) in zip(sets, procs):
proci.join() ### should clean up child...
logcdfs[truth] = conni.recv()
return logcdfs
def _logcdf_worker(samples, data, cweights, bounds, direction=DEFAULT_CUMULATIVE_INTEGRAL_DIRECTION, conn=None):
### we have to account for prior volume differences between different models to properly do the model-selection integral
### this is handled by explicitly passing the bounds for the overall prior that may or may not be truncated by samples
### make a copy so I can modify it in-place
local_samples = np.empty(len(samples), dtype=float)
local_samples[:] = samples[:]
local_samples[local_samples<bounds[0]] = bounds[0]
local_samples[local_samples>bounds[1]] = bounds[1]
### approx to the cumulative integral within the prior bounds
logcdfs = np.interp(local_samples, data, cweights) - np.interp(bounds[0], data, cweights)
truth = logcdfs > 0
logcdfs[truth] = np.log(logcdfs[truth])
logcdfs[np.logical_not(truth)] = -np.infty
### add the prior volume correction
### NOTE: we assume flat priors implicitly! really, this should be an integral over the (non-trivial) prior distribution
if direction=='increasing':
truth = bounds[0] < local_samples
logcdfs[truth] -= np.log(local_samples[truth] - bounds[0])
logcdfs[np.logical_not(truth)] = -np.infty ### these sample shave zero support in the prior, so we assign them zero weight
elif direction=='decreasing':
truth = bounds[1] > local_samples
logcdfs[truth] -= np.log(bounds[1] - local_samples[truth])
logcdfs[np.logical_not(truth)] = -np.infty ### this is the same thing as above
else:
raise ValueError('direction=%s not understood!'%direction)
if conn is not None:
conn.send(logcdfs)
return logcdfs
def logcumkde(samples, data, variance, bounds=None, weights=None, direction=DEFAULT_CUMULATIVE_INTEGRAL_DIRECTION):
"""estimates the log(cdf) at all points in samples based on data and integration in "direction"
This is done with a 1D KDE-based CDF estimate between bounds
computes I = \sum_i w_i \int_0^samples dx K(x, data_i; variance) / \sum_i w_i
This corresponds to integrating up to the value passed as samples for a Gaussian kernel centered at data_i
if direction == 'increasing', we just return this. If direction == 'decreasing', we return 1 - I
"""
### sanity-check the input argumants
assert len(np.shape(samples))==1, 'samples must be a 1D array'
assert len(np.shape(data))==1, 'data must be a 1D array'
assert isinstance(variance, (int, float)), 'variance must be an int or a float'
### set up variables for computation
ans = np.empty(len(samples), dtype=float)
frac = variance**-0.5
if weights is None:
N = len(data)
weights = np.ones(N, dtype=float)/N
### set up bounds
if bounds is None:
lower = 0
norm = 1
else:
m, M = bounds ### assumes all samples are between these bounds, but data need not be...
lower = cumulative_gaussian_distribution((m - data)*frac)
norm = cumulative_gaussian_distribution((M - data)*frac) - lower
### iterate and compute the cumuative integrals
for i, sample in enumerate(samples):
### NOTE: it is important that we pass "sample - data" so that data is the mean
ans[i] = np.sum(weights * (cumulative_gaussian_distribution((sample - data)*frac) - lower))
ans /= np.sum(weights * norm)
### return based on the requested direction
if direction == 'increasing':
return np.log(ans)
elif direction == 'decreasing':
return np.log(1 - ans)
else:
raise RuntimeError('direction=%s not understood!'%direction)
def cumulative_gaussian_distribution(z):
"""standard cumulative Gaussian distribution"""
return 0.5*(1 + erf(z/2**0.5))
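# Editor's note: the helper below is an illustrative sketch added for clarity and is
# not part of the original module. It demonstrates the relationship stated in the
# logcumkde docstring: the estimate is a weighted sum of Gaussian CDFs centred on the
# data points, normalised by the total weight (prior bounds omitted here).
def _example_logcumkde():
    """tiny self-check of logcumkde against a direct computation on made-up data"""
    data = np.array([0.0, 1.0, 2.0])
    samples = np.array([0.5, 1.5])
    variance = 0.25
    weights = np.array([0.2, 0.5, 0.3])
    frac = variance**-0.5
    # direct evaluation of sum_i w_i * Phi((x - d_i)/sqrt(variance)) / sum_i w_i
    direct = np.array([
        np.sum(weights * cumulative_gaussian_distribution((s - data) * frac)) / np.sum(weights)
        for s in samples
    ])
    via_kde = np.exp(logcumkde(samples, data, variance, weights=weights))
    assert np.allclose(direct, via_kde)
    return via_kde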
#-------------------------------------------------
# KDE and cross-validation likelihood
#-------------------------------------------------
def vects2flatgrid(*vects):
return np.transpose([_.flatten() for _ in np.meshgrid(*vects, indexing='ij')])
def logkde(samples, data, variances, weights=None, num_proc=DEFAULT_NUM_PROC):
"""
a wrapper around actually computing the KDE estimate at a collection of samples
estimates kde as sum_i[weight_i * K(sample, data_i)]
returns log(kde(samples))
"""
shape = samples.shape
if len(shape) not in [1, 2]:
raise ValueError('bad shape for samples')
if len(shape)==1:
Nsamp = shape[0]
samples = samples.reshape((Nsamp,1))
data = data.reshape((len(data),1))
else:
Nsamp, Ndim = samples.shape
if weights is None:
Ndata = len(data)
weights = np.ones(Ndata, dtype='float')/Ndata
logkdes = np.empty(Nsamp, dtype=float)
if num_proc == 1: ### do everything on this one core
logkdes[:] = _logkde_worker(samples, data, variances, weights)
else: ### parallelize
# partition work amongst the requested number of cores
sets = _define_sets(Nsamp, num_proc)
# set up and launch processes.
procs = []
for truth in sets:
conn1, conn2 = mp.Pipe()
proc = mp.Process(target=_logkde_worker, args=(samples[truth], data, variances, weights), kwargs={'conn':conn2})
proc.start()
procs.append((proc, conn1))
conn2.close()
# read in results from process
for truth, (proci, conni) in zip(sets, procs):
proci.join() ### should clean up child...
logkdes[truth] = conni.recv()
return logkdes
def _define_sets(Nsamp, num_proc):
    sets = [np.zeros(Nsamp, dtype=bool) for i in range(num_proc)]
    for i in range(Nsamp):
sets[i%num_proc][i] = True
return [_ for _ in sets if np.any(_)]
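# Editor's note (illustrative, not original code): _define_sets partitions sample
# indices round-robin across workers as boolean masks. For example, _define_sets(5, 2)
# returns the masks
#   [True, False, True, False, True]  and  [False, True, False, True, False],
# so worker 0 handles samples 0, 2, 4 and worker 1 handles samples 1, 3; empty masks
# (more workers than samples) are dropped.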
def _logkde_worker(samples, data, variances, weights, conn=None):
Nsamp, Ndim = samples.shape
Ndata = len(data)
logkdes = np.empty(Nsamp, dtype='float')
twov = -0.5/variances
z = np.empty(Ndata, dtype=float)
    for i in range(Nsamp):
sample = samples[i]
z[:] = np.sum((data-sample)**2 * twov, axis=1) ### shape: (Ndata, Ndim) -> (Ndata)
### do this backflip to preserve accuracy
m = np.max(z)
logkdes[i] = np.log(np.sum(weights*np.exp(z-m))) + m
logkdes += -0.5*Ndim*np.log(2*np.pi) - 0.5*np.sum(np.log(variances))
if conn is not None:
conn.send(logkdes)
return logkdes
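# Editor's note: the helper below is an illustrative sketch added for clarity and is
# not part of the original module. It confirms the statement in the logkde docstring
# that the estimate is log( sum_i weight_i * K(sample, data_i) ) with Gaussian kernels.
def _example_logkde():
    """compare logkde against a direct 1D Gaussian-mixture evaluation on made-up data"""
    data = np.array([0.0, 2.0])
    samples = np.array([1.0])
    variances = np.array([0.5])
    # direct evaluation: equal-weight mixture of N(sample; data_i, variance)
    direct = np.mean(np.exp(-0.5 * (samples[0] - data)**2 / variances[0])
                     / np.sqrt(2 * np.pi * variances[0]))
    assert np.isclose(np.exp(logkde(samples, data, variances, num_proc=1)[0]), direct)
    return direct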
def grad_logkde(samples, data, variances, weights=None, num_proc=DEFAULT_NUM_PROC):
"""
Nsamp, Ndim = samples.shape
returns the gradient of the logLikelihood based on (data, variances, weights) at each sample (shape=Nsamp, Ndim)
"""
shape = samples.shape
if len(shape) not in [1, 2]:
raise ValueError('bad shape for samples')
if len(shape)==1:
Nsamp = shape[0]
Ndim = 1
samples = samples.reshape((Nsamp,1))
data = data.reshape((len(data),1))
else:
Nsamp, Ndim = samples.shape
grad_logkdes = np.empty(Nsamp, dtype=float)
Ndata = len(data)
if weights is None: ### needed because modern numpy performs element-wise comparison here
weights = np.ones(Ndata, dtype='float')/Ndata
if num_proc==1:
grad_logkdes[:] = _grad_logkde_worker(samples, data, variances, weights)
else:
# divide the work
sets = _define_sets(Nsamp, num_proc)
# set up and launch processes.
procs = []
for truth in sets:
conn1, conn2 = mp.Pipe()
proc = mp.Process(target=_grad_logkde_worker, args=(samples[truth], data, variances, weights), kwargs={'conn':conn2})
proc.start()
procs.append((proc, conn1))
conn2.close()
# read in results from process
for truth, (proci, conni) in zip(sets, procs):
proci.join() ### should clean up child...
grad_logkdes[truth] = conni.recv()
return grad_logkdes
def _grad_logkde_worker(samples, data, variances, weights, conn=None):
Nsamp, Ndim = samples.shape
Ndata = len(data)
grad_logkdes = np.empty(Nsamp, dtype='float')
v = variances[0]
assert np.all(variances==v), 'we only support a single variance at this time, even though it must be repeated Ndim times within "variances"'
twov = -0.5/v
z = np.empty(Ndata, dtype=float)
    for i in range(Nsamp):
sample = samples[i]
z[:] = np.sum((data-sample)**2 * twov, axis=1) ### shape: (Ndata, Ndim) -> (Ndata)
### do this backflip to preserve accuracy
m = np.max(z)
y = np.sum(weights*np.exp(z-m))
x = np.sum(weights*np.exp(z-m)*(-z/v))
if y==0:
if np.all(x==0):
grad_logkdes[i] = 0 ### this is the appropriate limit here
else:
raise Warning('something bad happened with your estimate of the gradient in logleave1outLikelihood')
else:
grad_logkdes[i] = Ndim*twov + x/y
if conn is not None:
conn.send(grad_logkdes)
return grad_logkdes
def logvarkde(samples, data, variances, weights=None, num_proc=DEFAULT_NUM_PROC):
"""
a wrapper around computing bootstrapped estimates of the variance of the kde
delegates to logcovkde
"""
return logcovkde((samples, samples), data, variances, weights=weights, num_proc=num_proc)
def logcovkde(samples1, samples2, data, variances, weights=None, num_proc=DEFAULT_NUM_PROC):
"""
a wrapper around computing bootstrapped estimates of the covariance of the kde (btwn points defined in samples1, samples2)
estimates covariance as sum_i[ weight_i**2 * K(sample1, data_i) * K(sample2, data_i) ] | |
# coding: utf-8
"""
NEF_Emulator
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from evolved5g.swagger_client.api_client import ApiClient
class SessionWithQoSAPIApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_post(self, body, scs_as_id, **kwargs): # noqa: E501
"""Create Subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_post(body, scs_as_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AsSessionWithQoSSubscriptionCreate body: (required)
:param str scs_as_id: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_post_with_http_info(body, scs_as_id, **kwargs) # noqa: E501
else:
(data) = self.create_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_post_with_http_info(body, scs_as_id, **kwargs) # noqa: E501
return data
def create_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_post_with_http_info(self, body, scs_as_id, **kwargs): # noqa: E501
"""Create Subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_post_with_http_info(body, scs_as_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AsSessionWithQoSSubscriptionCreate body: (required)
:param str scs_as_id: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'scs_as_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_post`") # noqa: E501
# verify the required parameter 'scs_as_id' is set
if ('scs_as_id' not in params or
params['scs_as_id'] is None):
raise ValueError("Missing the required parameter `scs_as_id` when calling `create_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scs_as_id' in params:
path_params['scsAsId'] = params['scs_as_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/3gpp-as-session-with-qos/v1/{scsAsId}/subscriptions', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AsSessionWithQoSSubscription', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
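    # Editor's note (hedged usage sketch, not generated code): a typical synchronous
    # call through this client might look like the following, assuming an already
    # configured ApiClient and an AsSessionWithQoSSubscriptionCreate body object (both
    # names come from the signatures above; the scs_as_id value is illustrative):
    #
    #   api = SessionWithQoSAPIApi(api_client=ApiClient())
    #   subscription = api.create_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_post(
    #       body, "myNetapp")
    #
    # Passing async_req=True instead returns a thread whose .get() yields the result.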
def delete_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_delete(self, scs_as_id, subscription_id, **kwargs): # noqa: E501
"""Delete Subscription # noqa: E501
Delete a subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_delete(scs_as_id, subscription_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scs_as_id: (required)
:param str subscription_id: (required)
:return: AsSessionWithQoSSubscription
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_delete_with_http_info(scs_as_id, subscription_id, **kwargs) # noqa: E501
else:
(data) = self.delete_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_delete_with_http_info(scs_as_id, subscription_id, **kwargs) # noqa: E501
return data
def delete_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_delete_with_http_info(self, scs_as_id, subscription_id, **kwargs): # noqa: E501
"""Delete Subscription # noqa: E501
Delete a subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_delete_with_http_info(scs_as_id, subscription_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scs_as_id: (required)
:param str subscription_id: (required)
:return: AsSessionWithQoSSubscription
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scs_as_id', 'subscription_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scs_as_id' is set
if ('scs_as_id' not in params or
params['scs_as_id'] is None):
raise ValueError("Missing the required parameter `scs_as_id` when calling `delete_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_delete`") # noqa: E501
# verify the required parameter 'subscription_id' is set
if ('subscription_id' not in params or
params['subscription_id'] is None):
raise ValueError("Missing the required parameter `subscription_id` when calling `delete_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scs_as_id' in params:
path_params['scsAsId'] = params['scs_as_id'] # noqa: E501
if 'subscription_id' in params:
path_params['subscriptionId'] = params['subscription_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/3gpp-as-session-with-qos/v1/{scsAsId}/subscriptions/{subscriptionId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AsSessionWithQoSSubscription', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_active_subscriptions_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_get(self, scs_as_id, **kwargs): # noqa: E501
"""Read Active Subscriptions # noqa: E501
Get subscription by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_active_subscriptions_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_get(scs_as_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scs_as_id: (required)
:return: list[AsSessionWithQoSSubscription]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_active_subscriptions_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_get_with_http_info(scs_as_id, **kwargs) # noqa: E501
else:
(data) = self.read_active_subscriptions_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_get_with_http_info(scs_as_id, **kwargs) # noqa: E501
return data
def read_active_subscriptions_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_get_with_http_info(self, scs_as_id, **kwargs): # noqa: E501
"""Read Active Subscriptions # noqa: E501
Get subscription by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_active_subscriptions_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_get_with_http_info(scs_as_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scs_as_id: (required)
:return: list[AsSessionWithQoSSubscription]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scs_as_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_active_subscriptions_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scs_as_id' is set
if ('scs_as_id' not in params or
params['scs_as_id'] is None):
raise ValueError("Missing the required parameter `scs_as_id` when calling `read_active_subscriptions_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scs_as_id' in params:
path_params['scsAsId'] = params['scs_as_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
        auth_settings = ['OAuth2PasswordBearer']  # noqa: E501
return self.api_client.call_api(
'/api/v1/3gpp-as-session-with-qos/v1/{scsAsId}/subscriptions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[AsSessionWithQoSSubscription]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_get(self, scs_as_id, subscription_id, **kwargs): # noqa: E501
"""Read Subscription # noqa: E501
Get subscription by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_get(scs_as_id, subscription_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scs_as_id: (required)
:param str subscription_id: (required)
:return: AsSessionWithQoSSubscription
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_get_with_http_info(scs_as_id, subscription_id, **kwargs) # noqa: E501
else:
(data) = self.read_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_get_with_http_info(scs_as_id, subscription_id, **kwargs) # noqa: E501
return data
def read_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_get_with_http_info(self, scs_as_id, subscription_id, **kwargs): # noqa: E501
"""Read Subscription # noqa: E501
Get subscription by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_get_with_http_info(scs_as_id, subscription_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scs_as_id: (required)
:param str subscription_id: (required)
:return: AsSessionWithQoSSubscription
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scs_as_id', 'subscription_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_subscription_api_v13gpp_as_session_with_qos_v1_scs_as_id_subscriptions_subscription_id_get" % key
)
params[key] = val
| |
object in the
workspace @id ws KBaseGenomeAnnotations.Assembly), parameter
"quality" of type "Genome_quality_measure" (@optional
frameshift_error_rate sequence_error_rate) -> structure: parameter
"frameshift_error_rate" of Double, parameter "sequence_error_rate"
of Double, parameter "close_genomes" of list of type
"Close_genome" (@optional genome closeness_measure) -> structure:
parameter "genome" of type "Genome_id" (KBase genome ID @id kb),
parameter "closeness_measure" of Double, parameter
"analysis_events" of list of type "Analysis_event" (@optional
tool_name execution_time parameters hostname) -> structure:
parameter "id" of type "Analysis_event_id", parameter "tool_name"
of String, parameter "execution_time" of Double, parameter
"parameters" of list of String, parameter "hostname" of String,
parameter "info" of type "object_info" (Information about an
object, including user provided metadata. obj_id objid - the
numerical id of the object. obj_name name - the name of the
object. type_string type - the type of the object. timestamp
save_date - the save date of the object. obj_ver ver - the version
of the object. username saved_by - the user that saved or copied
the object. ws_id wsid - the workspace containing the object.
ws_name workspace - the workspace containing the object. string
chsum - the md5 checksum of the object. int size - the size of the
object in bytes. usermeta meta - arbitrary user-supplied metadata
about the object.) -> tuple of size 11: parameter "objid" of type
"obj_id" (The unique, permanent numerical ID of an object.),
parameter "name" of type "obj_name" (A string used as a name for
an object. Any string consisting of alphanumeric characters and
the characters |._- that is not an integer is acceptable.),
parameter "type" of type "type_string" (A type string. Specifies
the type and its version in a single string in the format
[module].[typename]-[major].[minor]: module - a string. The module
name of the typespec containing the type. typename - a string. The
name of the type as assigned by the typedef statement. major - an
integer. The major version of the type. A change in the major
version implies the type has changed in a non-backwards compatible
way. minor - an integer. The minor version of the type. A change
in the minor version implies that the type has changed in a way
that is backwards compatible with previous type definitions. In
many cases, the major and minor versions are optional, and if not
provided the most recent version will be used. Example:
MyModule.MyType-3.1), parameter "save_date" of type "timestamp" (A
time in the format YYYY-MM-DDThh:mm:ssZ, where Z is either the
character Z (representing the UTC timezone) or the difference in
time to UTC in the format +/-HHMM, eg: 2012-12-17T23:24:06-0500
(EST time) 2013-04-03T08:56:32+0000 (UTC time)
2013-04-03T08:56:32Z (UTC time)), parameter "version" of Long,
parameter "saved_by" of type "username" (Login name of a KBase
user account.), parameter "wsid" of type "ws_id" (The unique,
permanent numerical ID of a workspace.), parameter "workspace" of
type "ws_name" (A string used as a name for a workspace. Any
string consisting of alphanumeric characters and "_", ".", or "-"
that is not an integer is acceptable. The name may optionally be
prefixed with the workspace owner's user name and a colon, e.g.
kbasetest:my_workspace.), parameter "chsum" of String, parameter
"size" of Long, parameter "meta" of type "usermeta" (User provided
metadata about an object. Arbitrary key-value pairs provided by
the user.) -> mapping from String to String, parameter
"provenance" of list of type "ProvenanceAction" (A provenance
action. A provenance action (PA) is an action taken while
transforming one data object to another. There may be several PAs
taken in series. A PA is typically running a script, running an
api command, etc. All of the following fields are optional, but
more information provided equates to better data provenance.
resolved_ws_objects should never be set by the user; it is set by
the workspace service when returning data. On input, only one of
the time or epoch may be supplied. Both are supplied on output.
The maximum size of the entire provenance object, including all
actions, is 1MB. timestamp time - the time the action was started
epoch epoch - the time the action was started. string caller - the
name or id of the invoker of this provenance action. In most
cases, this will be the same for all PAs. string service - the
name of the service that performed this action. string service_ver
- the version of the service that performed this action. string
method - the method of the service that performed this action.
list<UnspecifiedObject> method_params - the parameters of the
method that performed this action. If an object in the parameters
is a workspace object, also put the object reference in the
input_ws_object list. string script - the name of the script that
performed this action. string script_ver - the version of the
script that performed this action. string script_command_line -
the command line provided to the script that performed this
action. If workspace objects were provided in the command line,
also put the object reference in the input_ws_object list.
list<obj_ref> input_ws_objects - the workspace objects that were
used as input to this action; typically these will also be present
as parts of the method_params or the script_command_line
arguments. list<obj_ref> resolved_ws_objects - the workspace
objects ids from input_ws_objects resolved to permanent workspace
object references by the workspace service. list<string>
intermediate_incoming - if the previous action produced output
that 1) was not stored in a referrable way, and 2) is used as
input for this action, provide it with an arbitrary and unique ID
here, in the order of the input arguments to this action. These
IDs can be used in the method_params argument. list<string>
intermediate_outgoing - if this action produced output that 1) was
not stored in a referrable way, and 2) is used as input for the
next action, provide it with an arbitrary and unique ID here, in
the order of the output values from this action. These IDs can be
used in the intermediate_incoming argument in the next action.
list<ExternalDataUnit> external_data - data external to the
workspace that was either imported to the workspace or used to
create a workspace object. list<SubAction> subactions - the
subactions taken as a part of this action. mapping<string, string>
custom - user definable custom provenance fields and their values.
string description - a free text description of this action.) ->
structure: parameter "time" of type "timestamp" (A time in the
format YYYY-MM-DDThh:mm:ssZ, where Z is either the character Z
(representing the UTC timezone) or the difference in time to UTC
in the format +/-HHMM, eg: 2012-12-17T23:24:06-0500 (EST time)
2013-04-03T08:56:32+0000 (UTC time) 2013-04-03T08:56:32Z (UTC
time)), parameter "epoch" of type "epoch" (A Unix epoch (the time
since 00:00:00 1/1/1970 UTC) in milliseconds.), parameter "caller"
of String, parameter "service" of String, parameter "service_ver"
of String, parameter "method" of String, parameter "method_params"
of list of unspecified object, parameter "script" of String,
parameter "script_ver" of String, parameter "script_command_line"
of String, parameter "input_ws_objects" of list of type "obj_ref"
(A string that uniquely identifies an object in the workspace
service. There are two ways to uniquely identify an object in one
string: "[ws_name or id]/[obj_name or id]/[obj_ver]" - for
example, "MyFirstWorkspace/MyFirstObject/3" would identify the
third version of an object called MyFirstObject in the workspace
called MyFirstWorkspace. 42/Panic/1 would identify the first
version of the object name Panic in workspace with id 42.
Towel/1/6 would identify the 6th version of the object with id 1
in the Towel workspace. "kb|ws.[ws_id].obj.[obj_id].ver.[obj_ver]"
- for example, "kb|ws.23.obj.567.ver.2" would identify the second
version of an object with id 567 in a workspace with id 23. In all
cases, if the version number is omitted, the latest version of the
object is assumed.), parameter "resolved_ws_objects" of list of
type "obj_ref" (A string that uniquely identifies an object in the
workspace service. There are two ways to uniquely identify an
object in one string: "[ws_name or id]/[obj_name or | |
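# Editor's note (illustrative sketch, not part of the truncated docstring above): the
# "object_info" structure it describes is an 11-tuple in the order
#   (objid, name, type, save_date, version, saved_by, wsid, workspace, chsum, size, meta),
# so a concrete value would look roughly like
#   (12, "MyFirstObject", "MyModule.MyType-3.1", "2013-04-03T08:56:32Z", 3,
#    "kbasetest", 42, "kbasetest:my_workspace", "<md5 checksum>", 4096, {"key": "value"})
# where the literals are hypothetical; only the field order comes from the text above.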
import tensorflow as tf
import numpy as np
import time
from copy import deepcopy
from tensorflow.python.ops.parallel_for import gradients
from tensorflow.contrib import rnn
class ALPaCA:
def __init__(self, config, sess, graph=None, preprocess=None, f_nom=None):
self.config = deepcopy(config)
self.lr = config['lr']
self.formulation = config['formulation']
self.x_dim = config['x_dim']
self.y_dim = config['y_dim']
self.phi_dim = config['nn_layers'][-1] # Last layer
self.sigma_eps = self.config['sigma_eps']
self.updates_so_far = 0
self.sess = sess
self.graph = graph if graph is not None else tf.get_default_graph()
# y = K^T phi( preprocess(x) ) + f_nom(x)
self.preprocess = preprocess
self.f_nom = f_nom
arch_string = [str(config_layer) for config_layer in self.config['nn_layers']]
arch_string = '-'.join(arch_string)
self.model_name = self.formulation+'_'+str(time.time())+'_action='+self.config['action']+'_basis='+self.config['basis']+ \
'_nn-layers='+arch_string+'_activation='+self.config['activation']+'_lr='+str(self.lr)+ \
'_sigma-eps='+str(self.sigma_eps)+'_batch-size='+str(self.config['meta_batch_size'])+ \
'_num-input-points='+str(self.config['num_input_points'])+ \
'_data-horizon='+str(self.config['data_horizon'])+'_test-horizon='+str(self.config['test_horizon'])+'_row-length='+str(self.config['row_length'])
def construct_model(self):
with self.graph.as_default():
last_layer = self.config['nn_layers'][-1]
            if isinstance(self.sigma_eps, list):  # was 'is list', which never matches an instance
self.SigEps = tf.diag( np.array(self.sigma_eps) )
else:
self.SigEps = self.sigma_eps*tf.eye(self.y_dim)
self.SigEps = tf.reshape(self.SigEps, (1,1,self.y_dim,self.y_dim))
# try making it learnable
# self.SigEps = tf.get_variable('sigma_eps', initializer=self.SigEps )
# Prior Parameters of last layer
self.K = tf.get_variable('K_init',shape=[last_layer,self.y_dim]) #\bar{K}_0
self.L_asym = tf.get_variable('L_asym',shape=[last_layer,last_layer]) # cholesky decomp of \Lambda_0
self.L = self.L_asym @ tf.transpose(self.L_asym) # \Lambda_0
# x: query points (M, N_test, x_dim)
# y: target points (M, N_test, y_dim) ( what K^T phi(x) should approximate )
self.x = tf.placeholder(tf.float32, shape=[None,None,self.x_dim], name="x")
self.y = tf.placeholder(tf.float32, shape=[None,None,self.y_dim], name="y")
# Points used to compute posterior using BLR
# context_x: x points available for context (M, N_context, x_dim)
# context_y: y points available for context (M, N_context, y_dim)
self.context_x = tf.placeholder(tf.float32, shape=[None,None,self.x_dim], name="cx")
self.context_y = tf.placeholder(tf.float32, shape=[None,None,self.y_dim], name="cy")
# num_updates: number of context points from context_x,y to use when computing posterior. size (M,)
self.num_models = tf.shape(self.context_x)[0]
self.max_num_context = tf.shape(self.context_x)[1]*tf.ones((self.num_models,), dtype=tf.int32)
self.num_context = tf.placeholder_with_default(self.max_num_context, shape=(None,))
# Map input to feature space
with tf.variable_scope('phi',reuse=None):
# self.phi is (M, N_test, phi_dim)
                if self.config['basis'] == 'lstm':  # compare the configured basis name (self.basis is a bound method)
self.phi = tf.map_fn( lambda x: self.basis_lstm(x),
elems=self.x,
dtype=tf.float32)
else:
self.phi = tf.map_fn( lambda x: self.basis(x),
elems=self.x,
dtype=tf.float32)
# Map context input to feature space
with tf.variable_scope('phi', reuse=True):
# self.context_phi is (M, N_context, phi_dim)
                if self.config['basis'] == 'lstm':  # compare the configured basis name (self.basis is a bound method)
self.context_phi = tf.map_fn( lambda x: self.basis_lstm(x),
elems=self.context_x,
dtype=tf.float32)
else:
self.context_phi = tf.map_fn( lambda x: self.basis(x),
elems=self.context_x,
dtype=tf.float32)
# Evaluate f_nom if given, else use 0
self.f_nom_cx = tf.zeros_like(self.context_y)
self.f_nom_x = 0 #tf.zeros_like(self.y)
if self.f_nom is not None:
self.f_nom_cx = self.f_nom(self.context_x)
self.f_nom_x = self.f_nom(self.x)
# Subtract f_nom from context points before BLR
self.context_y_blr = self.context_y - self.f_nom_cx
# Compute posterior weights from context data
with tf.variable_scope('blr', reuse=None):
# posterior_K is (M, phi_dim, y_dim), posterior_L_inv is (M, phi_dim, phi_dim)
self.posterior_K, self.posterior_L_inv = tf.map_fn( lambda x: self.batch_blr(*x),
elems=(self.context_phi, self.context_y_blr, self.num_context),
dtype=(tf.float32, tf.float32) )
# Compute posterior predictive distribution, and evaluate targets self.y under this distribution
self.mu_pred, self.Sig_pred, self.predictive_nll = self.compute_pred_and_nll()
# The loss function is expectation of this predictive nll.
self.total_loss = tf.reduce_mean(self.predictive_nll)
tf.summary.scalar('total_loss', self.total_loss)
self.optimizer = tf.train.AdamOptimizer(self.lr)
global_step = tf.Variable(0, trainable=False, name='global_step')
self.train_op = self.optimizer.minimize(self.total_loss,global_step=global_step)
self.train_writer = tf.summary.FileWriter('summaries/train_'+self.model_name, self.sess.graph, flush_secs=10)
self.val_writer = tf.summary.FileWriter('summaries/val_'+self.model_name, self.sess.graph, flush_secs=10)
self.merged = tf.summary.merge_all()
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
# ---- TF operations ---- #
def basis(self,x,name='basis'):
layer_sizes = self.config['nn_layers']
activations = {
'relu': tf.nn.relu,
'tanh': tf.nn.tanh,
'sigmoid': tf.nn.sigmoid
}
activation = activations[ self.config['activation'] ]
if self.preprocess is None:
inp = x
else:
inp = self.preprocess(x)
with tf.variable_scope(name,reuse=tf.AUTO_REUSE):
for units in layer_sizes:
inp = tf.layers.dense(inputs=inp, units=units,activation=activation)
return inp
def basis_lstm(self, x, name='basis_lstm'):
layer_sizes = self.config['nn_layers']
activations = {
'relu': tf.nn.relu,
'tanh': tf.nn.tanh,
'sigmoid': tf.nn.sigmoid
}
activation = activations[ self.config['activation'] ]
x = tf.expand_dims(x, axis=0)
with tf.variable_scope(name,reuse=tf.AUTO_REUSE):
cells = [rnn.LSTMCell(num_units=layer, activation=activation) for layer in layer_sizes]
stacked_cell = rnn.MultiRNNCell(cells)
outputs, state = tf.nn.dynamic_rnn(stacked_cell, x, dtype=tf.float32)
# outputs, state = tf.nn.dynamic_rnn(cell, x, sequence_length=tf.expand_dims(seq_len, axis=0), dtype=tf.float32)
return outputs[0,:,:]
def batch_blr(self,X,Y,num):
X = X[:num,:]
Y = Y[:num,:]
Ln_inv = tf.matrix_inverse(tf.transpose(X) @ X + self.L)
Kn = Ln_inv @ (tf.transpose(X) @ Y + self.L @ self.K)
return tf.cond( num > 0, lambda : (Kn,Ln_inv), lambda : (self.K, tf.linalg.inv(self.L)) )
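    # Editor's note (clarifying sketch, not original code): batch_blr above is the
    # standard Bayesian linear regression posterior update. With the first `num`
    # context features Phi (num x phi_dim) and targets Y (num x y_dim),
    #   Lambda_n = Phi^T Phi + Lambda_0                       (Lambda_0 = self.L)
    #   K_n      = Lambda_n^{-1} (Phi^T Y + Lambda_0 K_0)     (K_0 = self.K)
    # and it returns (K_n, Lambda_n^{-1}); with num == 0 it falls back to the prior
    # (K_0, Lambda_0^{-1}).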
def compute_pred_and_nll(self):
"""
Uses self.posterior_K and self.posterior_L_inv and self.f_nom_x to generate the posterior predictive.
Returns:
mu_pred = posterior predictive mean at query points self.x
shape (M, T, y_dim)
Sig_pred = posterior predictive variance at query points self.x
shape (M, T, y_dim, y_dim)
predictive_nll = negative log likelihood of self.y under the posterior predictive density
shape (M, T)
"""
mu_pred = batch_matmul(tf.matrix_transpose(self.posterior_K), self.phi) + self.f_nom_x
spread_fac = 1 + batch_quadform(self.posterior_L_inv, self.phi)
Sig_pred = tf.expand_dims(spread_fac, axis=-1)*tf.reshape(self.SigEps, (1,1,self.y_dim,self.y_dim))
# Score self.y under predictive distribution to obtain loss
with tf.variable_scope('loss', reuse=None):
logdet = self.y_dim*tf.log(spread_fac) + tf.linalg.logdet(self.SigEps)
Sig_pred_inv = tf.linalg.inv(Sig_pred)
quadf = batch_quadform(Sig_pred_inv, (self.y - mu_pred))
predictive_nll = tf.squeeze(logdet + quadf, axis=-1)
# log stuff for summaries
self.rmse_1 = tf.reduce_mean( tf.sqrt( tf.reduce_sum( tf.square(mu_pred - self.y)[:,0,:], axis=-1 ) ) )
self.mpv_1 = tf.reduce_mean( tf.matrix_determinant( Sig_pred[:,0,:,:]) )
tf.summary.scalar('RMSE_1step', self.rmse_1)
tf.summary.scalar('MPV_1step', self.mpv_1)
return mu_pred, Sig_pred, predictive_nll
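    # Editor's note (clarifying sketch, not original code): the method above evaluates
    # the posterior predictive implied by the BLR posterior, namely
    #   mu(x)    = K_n^T phi(x) + f_nom(x)
    #   Sigma(x) = (1 + phi(x)^T Lambda_n^{-1} phi(x)) * Sigma_eps
    # and the returned predictive_nll is
    #   log det Sigma(x) + (y - mu(x))^T Sigma(x)^{-1} (y - mu(x)),
    # i.e. twice the Gaussian negative log-likelihood up to an additive constant.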
# ---- Train and Test functions ------ #
def train(self, dataset, dataset_val, num_train_updates):
batch_size = self.config['meta_batch_size']
horizon = self.config['data_horizon']
test_horizon = self.config['test_horizon']
# minimize loss
for i in range(num_train_updates):
x, y = dataset.sample(n_funcs=batch_size, n_samples=horizon+test_horizon)
feed_dict = {
self.context_y: y[:,:horizon,:],
self.context_x: x[:,:horizon,:],
self.y: y[:,horizon:,:],
self.x: x[:,horizon:,:],
self.num_context: np.random.randint(horizon+1,size=batch_size)
}
summary, loss, _ = self.sess.run([self.merged,self.total_loss,self.train_op],feed_dict)
x_val, y_val = dataset_val.sample(n_funcs=batch_size, n_samples=horizon+test_horizon)
feed_dict_val = {
self.context_y: y_val[:,:horizon,:],
self.context_x: x_val[:,:horizon,:],
self.y: y_val[:,horizon:,:],
self.x: x_val[:,horizon:,:],
self.num_context: np.random.randint(horizon+1,size=batch_size)
}
val_summary, val_loss = self.sess.run([self.merged,self.total_loss],feed_dict_val)
# Check val stats
# feed_dict_val = self.gen_variable_horizon_data_ordered(x_val, y_val, num_samples, horizon, test_horizon)
# print('feed_dict_val ux shape', feed_dict_val[self.update_x].shape)
# print('feed_dict_val uy shape', feed_dict_val[self.update_y].shape)
# print('feed_dict_val x shape', feed_dict_val[self.x].shape)
# print('feed_dict_val y shape', feed_dict_val[self.y].shape)
# print('feed_dict_val num_updates shape', feed_dict_val[self.num_updates].shape)
if i % 50 == 0:
print('loss:',loss)
print('val_loss: ', val_loss)
if i % 1000 == 0:
self.save('checkpoints/'+self.model_name, global_step=i)
self.train_writer.add_summary(summary, self.updates_so_far)
self.val_writer.add_summary(val_summary, self.updates_so_far)
self.updates_so_far += 1
self.save('checkpoints/'+self.model_name, global_step=i)
# x_c, y_c, x are all [N, n]
# returns mu_pred, Sig_pred
def test(self, x_c, y_c, x):
feed_dict = {
self.context_y: y_c,
self.context_x: x_c,
self.x: x
}
mu_pred, Sig_pred = self.sess.run([self.mu_pred, self.Sig_pred], feed_dict)
return mu_pred, Sig_pred
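    # Editor's note (hedged usage sketch, not original code): after training, posterior
    # conditioning and prediction follow the placeholder shapes in construct_model
    # (M tasks, N_context context points, N_test query points); the numbers below are
    # illustrative:
    #
    #   x_c = np.zeros((1, 5, config['x_dim']))    # 5 context inputs for one task
    #   y_c = np.zeros((1, 5, config['y_dim']))    # matching context targets
    #   x   = np.zeros((1, 20, config['x_dim']))   # 20 query points
    #   mu, Sig = model.test(x_c, y_c, x)          # shapes (1, 20, y_dim), (1, 20, y_dim, y_dim)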
# def generate_train_dict(self, x_val, y_val, horizon, batch_size):
# context_x = np.zeros([x_val.shape[0], horizon, x_val.shape[2]])
# context_y = np.zeros([y_val.shape[0], horizon, y_val.shape[2]])
# num_context_points = np.random.randint(horizon+1, size=batch_size)
# context_x[:,:num_context_points,:] = x_val[:,:num_context_points,:]
# context_y[:,:num_context_points,:] = y_val[:,:num_context_points,:]
# x = x_val[:,num_context_points:num_context_points+1,:]
# y = y_val[:,num_context_points:num_context_points+1,:]
# feed_dict = {
# self.context_y: context_y,
# self.context_x: context_x,
# self.y: y,
# self.x: x,
# self.num_context: num_context_points,
# }
# return feed_dict
# def gen_variable_horizon_data_ordered(self,x,y,num_samples,horizon,test_horizon):
# num_updates = np.random.randint(horizon+1, size=num_samples)
# M,N = x.shape[0:2]
# M_ind = np.random.choice(M, num_samples)
# x = x[M_ind,:,:]
# y = y[M_ind,:,:]
# uy = np.zeros([num_samples, horizon, self.y_dim])
# ux = np.zeros([num_samples, horizon, self.x_dim])
# temp_y = np.zeros([num_samples, 1, self.y_dim])
# temp_x = np.zeros([num_samples, 1, self.x_dim])
# for i in range(num_samples):
# current_num_update = num_updates[i]
# uy[i,:current_num_update,:] = y[i,:current_num_update,:]
# ux[i,:current_num_update,:] = x[i,:current_num_update,:]
# temp_y[i,:,:] = y[i,current_num_update:current_num_update+1,:]
# temp_x[i,:,:] = x[i,current_num_update:current_num_update+1,:]
# feed_dict = {
# self.update_y: np.float32(uy),
# self.update_x: np.float32(ux),
# self.y: np.float32(temp_y),
# self.x: np.float32(temp_x),
# self.num_updates: num_updates,
# }
# return feed_dict
# convenience function to use just the encoder on numpy input
def encode(self, x):
feed_dict = {
self.x: x
}
return self.sess.run(self.phi, feed_dict)
# ---- Save and Restore ------
def save(self, model_path, global_step=None):
save_path = self.saver.save(self.sess, model_path, global_step=global_step)
print('Saved to:', save_path)
def restore(self, model_path):
self.saver.restore(self.sess, model_path)
print('Restored model from:', model_path)
# given mat [a,b,c,...,N,N] and batch_v [a,b,c,...,M,N], returns [a,b,c,...,M,N]
def batch_matmul(mat, batch_v, name='batch_matmul'):
with tf.name_scope(name):
return tf.matrix_transpose(tf.matmul(mat,tf.matrix_transpose(batch_v)))
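# Example (hypothetical shapes): with mat of shape [B, N, N] and batch_v of
# shape [B, M, N], the call computes (mat @ batch_v^T)^T, i.e. it applies the
# same matrix to each of the M row-vectors and returns a [B, M, N] tensor.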
# works for A = [...,n,n] or [...,N,n,n]
# (uses the same matrix A for all N b vectors in the first case)
# assumes b = [...,N,n]
# returns [...,N,1]
def batch_quadform(A, b):
A_dims = A.get_shape().ndims
b_dims = b.get_shape().ndims
b_vec = tf.expand_dims(b, axis=-1)
if A_dims == b_dims + 1:
return tf.squeeze( tf.matrix_transpose(b_vec) @ A @ b_vec, axis=-1)
elif A_dims == b_dims:
Ab = tf.expand_dims( tf.matrix_transpose( A @ tf.matrix_transpose(b) ), axis=-1) # ... x N x n x 1
return tf.squeeze( tf.matrix_transpose(b_vec) @ Ab, axis = -1) # ... x N x 1
else:
        raise ValueError('Matrix rank (ndims) of %d is not supported.' % A_dims)
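# Example (hypothetical shapes): with A of shape [B, n, n] and b of shape
# [B, N, n], batch_quadform(A, b) returns [B, N, 1] where entry [j, i, 0]
# equals b_{j,i}^T A_j b_{j,i}; when A has shape [B, N, n, n] a separate
# A_{j,i} is applied to each b_{j,i} instead.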
# takes in y = (..., y_dim)
# x = (..., x_dim)
# returns dydx = (..., y_dim, x_dim), the jacobian
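# A minimal sketch of such a batched-jacobian helper (an assumption -- the
# original implementation is not shown here). It stacks the gradient of each
# output dimension, which is valid per example because cross-example
# gradients are zero:
#   def batch_jacobian(y, x, y_dim):
#       grads = [tf.gradients(y[..., i], x)[0] for i in range(y_dim)]
#       return tf.stack(grads, axis=-2)  # (..., y_dim, x_dim)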
CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:3680
PFNGLBUFFERDATAPROC = CFUNCTYPE(None, GLenum, GLsizeiptr, POINTER(GLvoid), GLenum) # GL/glext.h:3681
PFNGLBUFFERSUBDATAPROC = CFUNCTYPE(None, GLenum, GLintptr, GLsizeiptr, POINTER(GLvoid)) # GL/glext.h:3682
PFNGLGETBUFFERSUBDATAPROC = CFUNCTYPE(None, GLenum, GLintptr, GLsizeiptr, POINTER(GLvoid)) # GL/glext.h:3683
PFNGLMAPBUFFERPROC = CFUNCTYPE(POINTER(GLvoid), GLenum, GLenum) # GL/glext.h:3684
PFNGLUNMAPBUFFERPROC = CFUNCTYPE(GLboolean, GLenum) # GL/glext.h:3685
PFNGLGETBUFFERPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3686
PFNGLGETBUFFERPOINTERVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:3687
# VERSION_2_0 (GL/glext.h:3690)
GL_VERSION_2_0 = 1 # GL/glext.h:3691
# GL/glext.h:3693
glBlendEquationSeparate = _link_function('glBlendEquationSeparate', None, [GLenum, GLenum], 'VERSION_2_0')
# GL/glext.h:3694
glDrawBuffers = _link_function('glDrawBuffers', None, [GLsizei, POINTER(GLenum)], 'VERSION_2_0')
# GL/glext.h:3695
glStencilOpSeparate = _link_function('glStencilOpSeparate', None, [GLenum, GLenum, GLenum, GLenum], 'VERSION_2_0')
# GL/glext.h:3696
glStencilFuncSeparate = _link_function('glStencilFuncSeparate', None, [GLenum, GLenum, GLint, GLuint], 'VERSION_2_0')
# GL/glext.h:3697
glStencilMaskSeparate = _link_function('glStencilMaskSeparate', None, [GLenum, GLuint], 'VERSION_2_0')
# GL/glext.h:3698
glAttachShader = _link_function('glAttachShader', None, [GLuint, GLuint], 'VERSION_2_0')
# GL/glext.h:3699
glBindAttribLocation = _link_function('glBindAttribLocation', None, [GLuint, GLuint, POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3700
glCompileShader = _link_function('glCompileShader', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3701
glCreateProgram = _link_function('glCreateProgram', GLuint, [], 'VERSION_2_0')
# GL/glext.h:3702
glCreateShader = _link_function('glCreateShader', GLuint, [GLenum], 'VERSION_2_0')
# GL/glext.h:3703
glDeleteProgram = _link_function('glDeleteProgram', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3704
glDeleteShader = _link_function('glDeleteShader', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3705
glDetachShader = _link_function('glDetachShader', None, [GLuint, GLuint], 'VERSION_2_0')
# GL/glext.h:3706
glDisableVertexAttribArray = _link_function('glDisableVertexAttribArray', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3707
glEnableVertexAttribArray = _link_function('glEnableVertexAttribArray', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3708
glGetActiveAttrib = _link_function('glGetActiveAttrib', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3709
glGetActiveUniform = _link_function('glGetActiveUniform', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3710
glGetAttachedShaders = _link_function('glGetAttachedShaders', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLuint)], 'VERSION_2_0')
# GL/glext.h:3711
glGetAttribLocation = _link_function('glGetAttribLocation', GLint, [GLuint, POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3712
glGetProgramiv = _link_function('glGetProgramiv', None, [GLuint, GLenum, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3713
glGetProgramInfoLog = _link_function('glGetProgramInfoLog', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3714
glGetShaderiv = _link_function('glGetShaderiv', None, [GLuint, GLenum, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3715
glGetShaderInfoLog = _link_function('glGetShaderInfoLog', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3716
glGetShaderSource = _link_function('glGetShaderSource', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3717
glGetUniformLocation = _link_function('glGetUniformLocation', GLint, [GLuint, POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3718
glGetUniformfv = _link_function('glGetUniformfv', None, [GLuint, GLint, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3719
glGetUniformiv = _link_function('glGetUniformiv', None, [GLuint, GLint, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3720
glGetVertexAttribdv = _link_function('glGetVertexAttribdv', None, [GLuint, GLenum, POINTER(GLdouble)], 'VERSION_2_0')
# GL/glext.h:3721
glGetVertexAttribfv = _link_function('glGetVertexAttribfv', None, [GLuint, GLenum, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3722
glGetVertexAttribiv = _link_function('glGetVertexAttribiv', None, [GLuint, GLenum, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3723
glGetVertexAttribPointerv = _link_function('glGetVertexAttribPointerv', None, [GLuint, GLenum, POINTER(POINTER(GLvoid))], 'VERSION_2_0')
# GL/glext.h:3724
glIsProgram = _link_function('glIsProgram', GLboolean, [GLuint], 'VERSION_2_0')
# GL/glext.h:3725
glIsShader = _link_function('glIsShader', GLboolean, [GLuint], 'VERSION_2_0')
# GL/glext.h:3726
glLinkProgram = _link_function('glLinkProgram', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3727
glShaderSource = _link_function('glShaderSource', None, [GLuint, GLsizei, POINTER(POINTER(GLchar)), POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3728
glUseProgram = _link_function('glUseProgram', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3729
glUniform1f = _link_function('glUniform1f', None, [GLint, GLfloat], 'VERSION_2_0')
# GL/glext.h:3730
glUniform2f = _link_function('glUniform2f', None, [GLint, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3731
glUniform3f = _link_function('glUniform3f', None, [GLint, GLfloat, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3732
glUniform4f = _link_function('glUniform4f', None, [GLint, GLfloat, GLfloat, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3733
glUniform1i = _link_function('glUniform1i', None, [GLint, GLint], 'VERSION_2_0')
# GL/glext.h:3734
glUniform2i = _link_function('glUniform2i', None, [GLint, GLint, GLint], 'VERSION_2_0')
# GL/glext.h:3735
glUniform3i = _link_function('glUniform3i', None, [GLint, GLint, GLint, GLint], 'VERSION_2_0')
# GL/glext.h:3736
glUniform4i = _link_function('glUniform4i', None, [GLint, GLint, GLint, GLint, GLint], 'VERSION_2_0')
# GL/glext.h:3737
glUniform1fv = _link_function('glUniform1fv', None, [GLint, GLsizei, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3738
glUniform2fv = _link_function('glUniform2fv', None, [GLint, GLsizei, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3739
glUniform3fv = _link_function('glUniform3fv', None, [GLint, GLsizei, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3740
glUniform4fv = _link_function('glUniform4fv', None, [GLint, GLsizei, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3741
glUniform1iv = _link_function('glUniform1iv', None, [GLint, GLsizei, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3742
glUniform2iv = _link_function('glUniform2iv', None, [GLint, GLsizei, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3743
glUniform3iv = _link_function('glUniform3iv', None, [GLint, GLsizei, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3744
glUniform4iv = _link_function('glUniform4iv', None, [GLint, GLsizei, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3745
glUniformMatrix2fv = _link_function('glUniformMatrix2fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3746
glUniformMatrix3fv = _link_function('glUniformMatrix3fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3747
glUniformMatrix4fv = _link_function('glUniformMatrix4fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3748
glValidateProgram = _link_function('glValidateProgram', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3749
glVertexAttrib1d = _link_function('glVertexAttrib1d', None, [GLuint, GLdouble], 'VERSION_2_0')
# GL/glext.h:3750
glVertexAttrib1dv = _link_function('glVertexAttrib1dv', None, [GLuint, POINTER(GLdouble)], 'VERSION_2_0')
# GL/glext.h:3751
glVertexAttrib1f = _link_function('glVertexAttrib1f', None, [GLuint, GLfloat], 'VERSION_2_0')
# GL/glext.h:3752
glVertexAttrib1fv = _link_function('glVertexAttrib1fv', None, [GLuint, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3753
glVertexAttrib1s = _link_function('glVertexAttrib1s', None, [GLuint, GLshort], 'VERSION_2_0')
# GL/glext.h:3754
glVertexAttrib1sv = _link_function('glVertexAttrib1sv', None, [GLuint, POINTER(GLshort)], 'VERSION_2_0')
# GL/glext.h:3755
glVertexAttrib2d = _link_function('glVertexAttrib2d', None, [GLuint, GLdouble, GLdouble], 'VERSION_2_0')
# GL/glext.h:3756
glVertexAttrib2dv = _link_function('glVertexAttrib2dv', None, [GLuint, POINTER(GLdouble)], 'VERSION_2_0')
# GL/glext.h:3757
glVertexAttrib2f = _link_function('glVertexAttrib2f', None, [GLuint, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3758
glVertexAttrib2fv = _link_function('glVertexAttrib2fv', None, [GLuint, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3759
glVertexAttrib2s = _link_function('glVertexAttrib2s', None, [GLuint, GLshort, GLshort], 'VERSION_2_0')
# GL/glext.h:3760
glVertexAttrib2sv = _link_function('glVertexAttrib2sv', None, [GLuint, POINTER(GLshort)], 'VERSION_2_0')
# GL/glext.h:3761
glVertexAttrib3d = _link_function('glVertexAttrib3d', None, [GLuint, GLdouble, GLdouble, GLdouble], 'VERSION_2_0')
# GL/glext.h:3762
glVertexAttrib3dv = _link_function('glVertexAttrib3dv', None, [GLuint, POINTER(GLdouble)], 'VERSION_2_0')
# GL/glext.h:3763
glVertexAttrib3f = _link_function('glVertexAttrib3f', None, [GLuint, GLfloat, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3764
glVertexAttrib3fv = _link_function('glVertexAttrib3fv', None, [GLuint, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3765
glVertexAttrib3s = _link_function('glVertexAttrib3s', None, [GLuint, GLshort, GLshort, GLshort], 'VERSION_2_0')
# GL/glext.h:3766
glVertexAttrib3sv = _link_function('glVertexAttrib3sv', None, [GLuint, POINTER(GLshort)], 'VERSION_2_0')
# GL/glext.h:3767
glVertexAttrib4Nbv = _link_function('glVertexAttrib4Nbv', None, [GLuint, POINTER(GLbyte)], 'VERSION_2_0')
# GL/glext.h:3768
glVertexAttrib4Niv = _link_function('glVertexAttrib4Niv', None, [GLuint, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3769
glVertexAttrib4Nsv = _link_function('glVertexAttrib4Nsv', None, [GLuint, POINTER(GLshort)], 'VERSION_2_0')
# GL/glext.h:3770
glVertexAttrib4Nub = _link_function('glVertexAttrib4Nub', None, [GLuint, GLubyte, GLubyte, GLubyte, GLubyte], 'VERSION_2_0')
# GL/glext.h:3771
glVertexAttrib4Nubv = _link_function('glVertexAttrib4Nubv', None, [GLuint, POINTER(GLubyte)], 'VERSION_2_0')
# GL/glext.h:3772
glVertexAttrib4Nuiv = _link_function('glVertexAttrib4Nuiv', None, [GLuint, POINTER(GLuint)], 'VERSION_2_0')
# GL/glext.h:3773
glVertexAttrib4Nusv = _link_function('glVertexAttrib4Nusv', None, [GLuint, POINTER(GLushort)], 'VERSION_2_0')
# GL/glext.h:3774
glVertexAttrib4bv = _link_function('glVertexAttrib4bv', None, [GLuint, POINTER(GLbyte)], 'VERSION_2_0')
# GL/glext.h:3775
glVertexAttrib4d = _link_function('glVertexAttrib4d', None, [GLuint, GLdouble, GLdouble, GLdouble, GLdouble], 'VERSION_2_0')
# GL/glext.h:3776
glVertexAttrib4dv = _link_function('glVertexAttrib4dv', None, [GLuint, POINTER(GLdouble)], 'VERSION_2_0')
# GL/glext.h:3777
glVertexAttrib4f = _link_function('glVertexAttrib4f', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3778
glVertexAttrib4fv = _link_function('glVertexAttrib4fv', None, [GLuint, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3779
glVertexAttrib4iv = _link_function('glVertexAttrib4iv', None, [GLuint, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3780
glVertexAttrib4s = _link_function('glVertexAttrib4s', None, [GLuint, GLshort, GLshort, GLshort, GLshort], 'VERSION_2_0')
# GL/glext.h:3781
glVertexAttrib4sv = _link_function('glVertexAttrib4sv', None, [GLuint, POINTER(GLshort)], 'VERSION_2_0')
# GL/glext.h:3782
glVertexAttrib4ubv = _link_function('glVertexAttrib4ubv', None, [GLuint, POINTER(GLubyte)], 'VERSION_2_0')
# GL/glext.h:3783
glVertexAttrib4uiv = _link_function('glVertexAttrib4uiv', None, [GLuint, POINTER(GLuint)], 'VERSION_2_0')
# GL/glext.h:3784
glVertexAttrib4usv = _link_function('glVertexAttrib4usv', None, [GLuint, POINTER(GLushort)], 'VERSION_2_0')
# GL/glext.h:3785
glVertexAttribPointer = _link_function('glVertexAttribPointer', None, [GLuint, GLint, GLenum, GLboolean, GLsizei, POINTER(GLvoid)], 'VERSION_2_0')
PFNGLBLENDEQUATIONSEPARATEPROC = CFUNCTYPE(None, GLenum, GLenum) # GL/glext.h:3787
PFNGLDRAWBUFFERSPROC = CFUNCTYPE(None, GLsizei, POINTER(GLenum)) # GL/glext.h:3788
PFNGLSTENCILOPSEPARATEPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum) # GL/glext.h:3789
PFNGLSTENCILFUNCSEPARATEPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLuint) # GL/glext.h:3790
PFNGLSTENCILMASKSEPARATEPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:3791
PFNGLATTACHSHADERPROC = CFUNCTYPE(None, GLuint, GLuint) # GL/glext.h:3792
PFNGLBINDATTRIBLOCATIONPROC = CFUNCTYPE(None, GLuint, GLuint, POINTER(GLchar)) # GL/glext.h:3793
PFNGLCOMPILESHADERPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3794
PFNGLCREATEPROGRAMPROC = CFUNCTYPE(GLuint) # GL/glext.h:3795
PFNGLCREATESHADERPROC = CFUNCTYPE(GLuint, GLenum) # GL/glext.h:3796
PFNGLDELETEPROGRAMPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3797
PFNGLDELETESHADERPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3798
PFNGLDETACHSHADERPROC = CFUNCTYPE(None, GLuint, GLuint) # GL/glext.h:3799
PFNGLDISABLEVERTEXATTRIBARRAYPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3800
PFNGLENABLEVERTEXATTRIBARRAYPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3801
PFNGLGETACTIVEATTRIBPROC = CFUNCTYPE(None, GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)) # GL/glext.h:3802
PFNGLGETACTIVEUNIFORMPROC = CFUNCTYPE(None, GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)) # GL/glext.h:3803
PFNGLGETATTACHEDSHADERSPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLuint)) # GL/glext.h:3804
PFNGLGETATTRIBLOCATIONPROC = CFUNCTYPE(GLint, GLuint, POINTER(GLchar)) # GL/glext.h:3805
PFNGLGETPROGRAMIVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:3806
PFNGLGETPROGRAMINFOLOGPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)) # GL/glext.h:3807
PFNGLGETSHADERIVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:3808
PFNGLGETSHADERINFOLOGPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)) # GL/glext.h:3809
PFNGLGETSHADERSOURCEPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)) # GL/glext.h:3810
PFNGLGETUNIFORMLOCATIONPROC = CFUNCTYPE(GLint, GLuint, POINTER(GLchar)) # GL/glext.h:3811
PFNGLGETUNIFORMFVPROC = CFUNCTYPE(None, GLuint, GLint, POINTER(GLfloat)) # GL/glext.h:3812
PFNGLGETUNIFORMIVPROC = CFUNCTYPE(None, GLuint, GLint, POINTER(GLint)) # GL/glext.h:3813
PFNGLGETVERTEXATTRIBDVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLdouble)) # GL/glext.h:3814
PFNGLGETVERTEXATTRIBFVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:3815
PFNGLGETVERTEXATTRIBIVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:3816
PFNGLGETVERTEXATTRIBPOINTERVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:3817
PFNGLISPROGRAMPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:3818
PFNGLISSHADERPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:3819
PFNGLLINKPROGRAMPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3820
PFNGLSHADERSOURCEPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(POINTER(GLchar)), POINTER(GLint)) # GL/glext.h:3821
PFNGLUSEPROGRAMPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3822
PFNGLUNIFORM1FPROC = CFUNCTYPE(None, GLint, GLfloat) # GL/glext.h:3823
PFNGLUNIFORM2FPROC = CFUNCTYPE(None, GLint, GLfloat, GLfloat) # GL/glext.h:3824
PFNGLUNIFORM3FPROC = CFUNCTYPE(None, GLint, GLfloat, GLfloat, GLfloat) # GL/glext.h:3825
PFNGLUNIFORM4FPROC = CFUNCTYPE(None, GLint, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:3826
PFNGLUNIFORM1IPROC = CFUNCTYPE(None, GLint, GLint) # GL/glext.h:3827
PFNGLUNIFORM2IPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:3828
PFNGLUNIFORM3IPROC = CFUNCTYPE(None, GLint, GLint, GLint, GLint) # GL/glext.h:3829
PFNGLUNIFORM4IPROC = CFUNCTYPE(None, GLint, GLint, GLint, GLint, GLint) # GL/glext.h:3830
PFNGLUNIFORM1FVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:3831
PFNGLUNIFORM2FVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:3832
PFNGLUNIFORM3FVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:3833
PFNGLUNIFORM4FVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:3834
PFNGLUNIFORM1IVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:3835
PFNGLUNIFORM2IVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:3836
PFNGLUNIFORM3IVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:3837
PFNGLUNIFORM4IVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:3838
PFNGLUNIFORMMATRIX2FVPROC = CFUNCTYPE(None, GLint, GLsizei, GLboolean, POINTER(GLfloat)) # GL/glext.h:3839
PFNGLUNIFORMMATRIX3FVPROC = CFUNCTYPE(None, GLint, GLsizei, GLboolean, POINTER(GLfloat)) # GL/glext.h:3840
PFNGLUNIFORMMATRIX4FVPROC = CFUNCTYPE(None, GLint, GLsizei, GLboolean, POINTER(GLfloat)) # GL/glext.h:3841
PFNGLVALIDATEPROGRAMPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3842
PFNGLVERTEXATTRIB1DPROC = CFUNCTYPE(None, GLuint, GLdouble) # GL/glext.h:3843
PFNGLVERTEXATTRIB1DVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3844
PFNGLVERTEXATTRIB1FPROC = CFUNCTYPE(None, GLuint, GLfloat) # GL/glext.h:3845
PFNGLVERTEXATTRIB1FVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3846
PFNGLVERTEXATTRIB1SPROC = CFUNCTYPE(None, GLuint, GLshort) # GL/glext.h:3847
PFNGLVERTEXATTRIB1SVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3848
PFNGLVERTEXATTRIB2DPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble) # GL/glext.h:3849
PFNGLVERTEXATTRIB2DVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3850
PFNGLVERTEXATTRIB2FPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat) # GL/glext.h:3851
PFNGLVERTEXATTRIB2FVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3852
PFNGLVERTEXATTRIB2SPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort) # GL/glext.h:3853
PFNGLVERTEXATTRIB2SVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3854
PFNGLVERTEXATTRIB3DPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble, GLdouble) # GL/glext.h:3855
PFNGLVERTEXATTRIB3DVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3856
PFNGLVERTEXATTRIB3FPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat) # GL/glext.h:3857
PFNGLVERTEXATTRIB3FVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3858
PFNGLVERTEXATTRIB3SPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort, GLshort) # GL/glext.h:3859
PFNGLVERTEXATTRIB3SVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort))
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils as nn_utils
import torch.backends.cudnn as cudnn
from torch.nn import SyncBatchNorm
import torch.optim.lr_scheduler as lr_scheduler
from torch.nn.parallel import DistributedDataParallel
import utils
from utils import CONFIG
import networks
class Trainer(object):
def __init__(self,
train_dataloader,
test_dataloader,
logger,
tb_logger):
# Save GPU memory.
cudnn.benchmark = False
self.train_dataloader = train_dataloader
self.test_dataloader = test_dataloader
self.logger = logger
self.tb_logger = tb_logger
self.model_config = CONFIG.model
self.train_config = CONFIG.train
self.log_config = CONFIG.log
self.loss_dict = {'rec': None,
'comp': None,
'smooth_l1':None,
'grad':None,
'gabor':None}
self.test_loss_dict = {'rec': None,
'smooth_l1':None,
'mse':None,
'sad':None,
'grad':None,
'gabor':None}
self.grad_filter = torch.tensor(utils.get_gradfilter()).cuda()
self.gabor_filter = torch.tensor(utils.get_gaborfilter(16)).cuda()
self.build_model()
self.resume_step = None
self.best_loss = 1e+8
utils.print_network(self.G, CONFIG.version)
if self.train_config.resume_checkpoint:
self.logger.info('Resume checkpoint: {}'.format(self.train_config.resume_checkpoint))
self.restore_model(self.train_config.resume_checkpoint)
if self.model_config.imagenet_pretrain and self.train_config.resume_checkpoint is None:
self.logger.info('Load Imagenet Pretrained: {}'.format(self.model_config.imagenet_pretrain_path))
if self.model_config.arch.encoder == "vgg_encoder":
utils.load_VGG_pretrain(self.G, self.model_config.imagenet_pretrain_path)
else:
utils.load_imagenet_pretrain(self.G, self.model_config.imagenet_pretrain_path)
def build_model(self):
self.G = networks.get_generator(encoder=self.model_config.arch.encoder, decoder=self.model_config.arch.decoder)
self.G.cuda()
if CONFIG.dist:
self.logger.info("Using pytorch synced BN")
self.G = SyncBatchNorm.convert_sync_batchnorm(self.G)
self.G_optimizer = torch.optim.Adam(self.G.parameters(),
lr = self.train_config.G_lr,
betas = [self.train_config.beta1, self.train_config.beta2])
if CONFIG.dist:
# SyncBatchNorm only supports DistributedDataParallel with single GPU per process
self.G = DistributedDataParallel(self.G, device_ids=[CONFIG.local_rank], output_device=CONFIG.local_rank)
else:
self.G = nn.DataParallel(self.G)
self.build_lr_scheduler()
def build_lr_scheduler(self):
"""Build cosine learning rate scheduler."""
self.G_scheduler = lr_scheduler.CosineAnnealingLR(self.G_optimizer,
T_max=self.train_config.total_step
- self.train_config.warmup_step)
def reset_grad(self):
"""Reset the gradient buffers."""
self.G_optimizer.zero_grad()
def restore_model(self, resume_checkpoint):
"""
Restore the trained generator and discriminator.
:param resume_checkpoint: File name of checkpoint
:return:
"""
pth_path = os.path.join(self.log_config.checkpoint_path, '{}.pth'.format(resume_checkpoint))
checkpoint = torch.load(pth_path, map_location = lambda storage, loc: storage.cuda(CONFIG.gpu))
self.resume_step = checkpoint['iter']
self.logger.info('Loading the trained models from step {}...'.format(self.resume_step))
self.G.load_state_dict(checkpoint['state_dict'], strict=True)
if not self.train_config.reset_lr:
if 'opt_state_dict' in checkpoint.keys():
try:
self.G_optimizer.load_state_dict(checkpoint['opt_state_dict'])
except ValueError as ve:
self.logger.error("{}".format(ve))
else:
self.logger.info('No Optimizer State Loaded!!')
if 'lr_state_dict' in checkpoint.keys():
try:
self.G_scheduler.load_state_dict(checkpoint['lr_state_dict'])
except ValueError as ve:
self.logger.error("{}".format(ve))
else:
self.G_scheduler = lr_scheduler.CosineAnnealingLR(self.G_optimizer,
T_max=self.train_config.total_step - self.resume_step - 1)
if 'loss' in checkpoint.keys():
self.best_loss = checkpoint['loss']
def train(self):
data_iter = iter(self.train_dataloader)
if self.train_config.resume_checkpoint:
start = self.resume_step + 1
else:
start = 0
moving_max_grad = 0
moving_grad_moment = 0.999
max_grad = 0
for step in range(start, self.train_config.total_step + 1):
try:
image_dict = next(data_iter)
            except StopIteration:
data_iter = iter(self.train_dataloader)
image_dict = next(data_iter)
image, alpha, trimap = image_dict['image'], image_dict['alpha'], image_dict['trimap']
image = image.cuda()
alpha = alpha.cuda()
trimap = trimap.cuda()
# train() of DistributedDataParallel has no return
self.G.train()
log_info = ""
loss = 0
"""===== Update Learning Rate ====="""
if step < self.train_config.warmup_step and self.train_config.resume_checkpoint is None:
cur_G_lr = utils.warmup_lr(self.train_config.G_lr, step + 1, self.train_config.warmup_step)
utils.update_lr(cur_G_lr, self.G_optimizer)
else:
self.G_scheduler.step()
cur_G_lr = self.G_scheduler.get_lr()[0]
"""===== Forward G ====="""
alpha_pred, info_dict = self.G(image, trimap) # info_dict: intermediate feature of networks like attention
weight = utils.get_unknown_tensor(trimap)
"""===== Calculate Loss ====="""
if self.train_config.rec_weight > 0:
self.loss_dict['rec'] = self.regression_loss(alpha_pred, alpha, loss_type='l1', weight=weight) \
* self.train_config.rec_weight
if self.train_config.smooth_l1_weight > 0:
self.loss_dict['smooth_l1'] = self.smooth_l1(alpha_pred, alpha, weight=weight) \
* self.train_config.smooth_l1_weight
if self.train_config.comp_weight > 0:
self.loss_dict['comp'] = self.composition_loss(alpha_pred, image_dict['fg'].cuda(),
image_dict['bg'].cuda(), image, weight=weight) \
* self.train_config.comp_weight
if self.train_config.grad_weight > 0:
self.loss_dict['grad'] = self.grad_loss(alpha_pred, alpha, weight=weight, grad_filter=self.grad_filter) \
* self.train_config.grad_weight
if self.train_config.gabor_weight > 0:
self.loss_dict['gabor'] = self.gabor_loss(alpha_pred, alpha, weight=weight, gabor_filter=self.gabor_filter) \
* self.train_config.gabor_weight
for loss_key in self.loss_dict.keys():
if self.loss_dict[loss_key] is not None and loss_key in ['rec', 'comp', 'smooth_l1', 'grad', 'gabor']:
loss += self.loss_dict[loss_key]
"""===== Back Propagate ====="""
self.reset_grad()
loss.backward()
"""===== Clip Large Gradient ====="""
if self.train_config.clip_grad:
if moving_max_grad == 0:
moving_max_grad = nn_utils.clip_grad_norm_(self.G.parameters(), 1e+6)
max_grad = moving_max_grad
else:
max_grad = nn_utils.clip_grad_norm_(self.G.parameters(), 2 * moving_max_grad)
moving_max_grad = moving_max_grad * moving_grad_moment + max_grad * (
1 - moving_grad_moment)
"""===== Update Parameters ====="""
self.G_optimizer.step()
"""===== Write Log and Tensorboard ====="""
# stdout log
if step % self.log_config.logging_step == 0:
# reduce losses from GPUs
if CONFIG.dist:
self.loss_dict = utils.reduce_tensor_dict(self.loss_dict, mode='mean')
loss = utils.reduce_tensor(loss)
# create logging information
for loss_key in self.loss_dict.keys():
if self.loss_dict[loss_key] is not None:
log_info += loss_key.upper() + ": {:.4f}, ".format(self.loss_dict[loss_key])
self.logger.debug("Image tensor shape: {}. Trimap tensor shape: {}".format(image.shape, trimap.shape))
log_info = "[{}/{}], ".format(step, self.train_config.total_step) + log_info
log_info += "lr: {:6f}".format(cur_G_lr)
self.logger.info(log_info)
# tensorboard
if step % self.log_config.tensorboard_step == 0 or step == start: # and step > start:
self.tb_logger.scalar_summary('Loss', loss, step)
# detailed losses
for loss_key in self.loss_dict.keys():
if self.loss_dict[loss_key] is not None:
self.tb_logger.scalar_summary('Loss_' + loss_key.upper(),
self.loss_dict[loss_key], step)
self.tb_logger.scalar_summary('LearnRate', cur_G_lr, step)
if self.train_config.clip_grad:
self.tb_logger.scalar_summary('Moving_Max_Grad', moving_max_grad, step)
self.tb_logger.scalar_summary('Max_Grad', max_grad, step)
# write images to tensorboard
if step % self.log_config.tensorboard_image_step == 0 or step == start:
if self.model_config.trimap_channel == 3:
trimap = trimap.argmax(dim=1, keepdim=True)
alpha_pred[trimap==2] = 1
alpha_pred[trimap==0] = 0
image_set = {'image': (utils.normalize_image(image[-1, ...]).data.cpu().numpy()
* 255).astype(np.uint8),
'trimap': (trimap[-1, ...].data.cpu().numpy() * 127).astype(np.uint8),
'alpha': (alpha[-1, ...].data.cpu().numpy() * 255).astype(np.uint8),
'alpha_pred': (alpha_pred[-1, ...].data.cpu().numpy() * 255).astype(np.uint8)}
if info_dict is not None:
for key in info_dict.keys():
if key.startswith('offset'):
image_set[key] = utils.flow_to_image(info_dict[key][0][-1,...].data.cpu()
.numpy()).transpose([2, 0, 1]).astype(np.uint8)
# write softmax_scale to offset image
scale = info_dict[key][1].cpu()
image_set[key] = utils.put_text(image_set[key], 'unknown: {:.2f}, known: {:.2f}'
.format(scale[-1,0].item(), scale[-1,1].item()))
else:
image_set[key] = (utils.normalize_image(info_dict[key][-1,...]).data.cpu().numpy()
* 255).astype(np.uint8)
self.tb_logger.image_summary(image_set, step)
"""===== TEST ====="""
if ((step % self.train_config.val_step) == 0 or step == self.train_config.total_step):# and step > start:
self.G.eval()
test_loss = 0
log_info = ""
self.test_loss_dict['mse'] = 0
self.test_loss_dict['sad'] = 0
for loss_key in self.loss_dict.keys():
if loss_key in self.test_loss_dict and self.loss_dict[loss_key] is not None:
self.test_loss_dict[loss_key] = 0
with torch.no_grad():
for image_dict in self.test_dataloader:
image, alpha, trimap = image_dict['image'], image_dict['alpha'], image_dict['trimap']
alpha_shape = image_dict['alpha_shape']
image = image.cuda()
alpha = alpha.cuda()
trimap = trimap.cuda()
alpha_pred, info_dict = self.G(image, trimap)
h, w = alpha_shape
alpha_pred = alpha_pred[..., :h, :w]
trimap = trimap[..., :h, :w]
weight = utils.get_unknown_tensor(trimap)
# value of MSE/SAD here is different from test.py and matlab version
self.test_loss_dict['mse'] += self.mse(alpha_pred, alpha, weight)
self.test_loss_dict['sad'] += self.sad(alpha_pred, alpha, weight)
if self.train_config.rec_weight > 0:
self.test_loss_dict['rec'] += self.regression_loss(alpha_pred, alpha, weight=weight) \
* self.train_config.rec_weight
if self.train_config.smooth_l1_weight > 0:
self.test_loss_dict['smooth_l1'] += self.smooth_l1(alpha_pred, alpha, weight=weight) \
* self.train_config.smooth_l1_weight
if self.train_config.grad_weight > 0:
self.test_loss_dict['grad'] = self.grad_loss(alpha_pred, alpha, weight=weight,
grad_filter=self.grad_filter) \
* self.train_config.grad_weight
if self.train_config.gabor_weight > 0:
self.test_loss_dict['gabor'] = self.gabor_loss(alpha_pred, alpha, weight=weight,
gabor_filter=self.gabor_filter) \
* self.train_config.gabor_weight
# reduce losses from GPUs
if CONFIG.dist:
self.test_loss_dict = utils.reduce_tensor_dict(self.test_loss_dict, mode='mean')
"""===== Write Log and Tensorboard ====="""
# stdout log
for loss_key in self.test_loss_dict.keys():
if self.test_loss_dict[loss_key] is not None:
self.test_loss_dict[loss_key] /= len(self.test_dataloader)
# logging
log_info += loss_key.upper()+": {:.4f} ".format(self.test_loss_dict[loss_key])
self.tb_logger.scalar_summary('Loss_'+loss_key.upper(),
self.test_loss_dict[loss_key], step, phase='test')
if loss_key in ['rec', 'smooth_l1', 'grad', 'gabor']:
test_loss += self.test_loss_dict[loss_key]
self.logger.info("TEST: LOSS: {:.4f} ".format(test_loss)+log_info)
self.tb_logger.scalar_summary('Loss', test_loss, step, phase='test')
if self.model_config.trimap_channel == 3:
trimap = trimap.argmax(dim=1, keepdim=True)
alpha_pred[trimap==2] = 1
alpha_pred[trimap==0] = 0
image_set = {'image': (utils.normalize_image(image[-1, ...]).data.cpu().numpy()
* 255).astype(np.uint8),
'trimap': (trimap[-1, ...].data.cpu().numpy() * 127).astype(np.uint8),
'alpha': (alpha[-1, ...].data.cpu().numpy() * 255).astype(np.uint8),
'alpha_pred': (alpha_pred[-1, ...].data.cpu().numpy() * 255).astype(np.uint8)}
if info_dict is not None:
for key in info_dict.keys():
if key.startswith('offset'):
image_set[key] = utils.flow_to_image(info_dict[key][0][-1,...].data.cpu()
.numpy()).transpose([2, 0, 1]).astype(np.uint8)
# write softmax_scale to offset image
scale = info_dict[key][1].cpu()
image_set[key] = utils.put_text(image_set[key], 'unknown: {:.2f}, known: {:.2f}'
.format(scale[-1,0].item(), scale[-1,1].item()))
else:
image_set[key] = (utils.normalize_image(info_dict[key][-1,...]).data.cpu().numpy()
* 255).astype(np.uint8)
self.tb_logger.image_summary(image_set, step, phase='test')
"""===== Save Model ====="""
if (step % self.log_config.checkpoint_step == 0 or step == self.train_config.total_step) \
and CONFIG.local_rank == 0 and (step > start):
                self.logger.info('Saving the trained models from step {}...'.format(step))
self.save_model("latest_model", step, loss)
if self.test_loss_dict['mse'] < self.best_loss:
self.best_loss = self.test_loss_dict['mse']
self.save_model("best_model", step, loss)
def save_model(self, checkpoint_name, iter, loss):
"""Restore the trained generator and discriminator."""
torch.save({
'iter': iter,
'loss': loss,
'state_dict': self.G.state_dict(),
'opt_state_dict': self.G_optimizer.state_dict(),
'lr_state_dict': self.G_scheduler.state_dict()
}, os.path.join(self.log_config.checkpoint_path, '{}.pth'.format(checkpoint_name)))
@staticmethod
def regression_loss(logit, target, loss_type='l1', weight=None):
"""
Alpha reconstruction loss
:param logit:
:param target:
:param loss_type: "l1" or "l2"
:param weight: tensor with shape [N,1,H,W] weights for each pixel
:return:
"""
if weight is None:
if loss_type == 'l1':
return F.l1_loss(logit, target)
elif loss_type == 'l2':
return F.mse_loss(logit, target)
else:
raise NotImplementedError("NotImplemented loss type {}".format(loss_type))
else:
if loss_type == 'l1':
return F.l1_loss(logit * weight, target * weight, reduction='sum') / (torch.sum(weight) + 1e-8)
elif loss_type == 'l2':
return F.mse_loss(logit * weight, target * weight, reduction='sum') / (torch.sum(weight) + 1e-8)
else:
raise NotImplementedError("NotImplemented loss type {}".format(loss_type))
@staticmethod
def smooth_l1(logit, target, weight):
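        # This is the Charbonnier approximation sqrt(d^2 + eps) of the L1 loss,
        # normalised by the unknown-region weight (not torch.nn.SmoothL1Loss).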
loss = torch.sqrt((logit * weight - target * weight)**2 + 1e-6)
loss = torch.sum(loss) / (torch.sum(weight) + 1e-8)
return loss
@staticmethod
def mse(logit, target, weight):
# return F.mse_loss(logit * weight, target * weight, reduction='sum') / (torch.sum(weight) + 1e-8)
return Trainer.regression_loss(logit, target, loss_type='l2', weight=weight)
@staticmethod
def sad(logit, target, weight):
        return F.l1_loss(logit * weight, target * weight, reduction='sum')
import inspect
import collections
from django.utils.html import html_safe,conditional_escape,mark_safe
from django.core.exceptions import ValidationError
from django.utils import six
from django import forms
from django.db import models
from django.utils import safestring
from . import widgets
from . import fields
iterator_map = {}
def get_boundfielditerator(form,fields=None):
fields = fields or form._meta.ordered_fields
try:
return iterator_map[id(fields)](form,fields)
    except KeyError:
if not fields:
cls = BoundFieldIterator
elif not isinstance(fields[0],(list,tuple)):
cls = BoundFieldIterator
elif not isinstance(fields[0][-1],(list,tuple)):
cls = MultiRowBoundFieldIterator
else:
cls = MultiTableBoundFieldIterator
iterator_map[id(fields)] = cls
return cls(form,fields)
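# Illustrative layouts this dispatcher distinguishes (hypothetical field names):
#   ["name", "email"]                        -> BoundFieldIterator
#   [["name", "email"], ["phone"]]           -> MultiRowBoundFieldIterator
#   [("Contact", True, False, [["name"]])]   -> MultiTableBoundFieldIterator
# The cache is keyed on id(fields) and never evicted, so it assumes the field
# layouts are long-lived objects (typically form class metadata).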
class BoundFieldIterator(collections.Iterable):
#fields is a field list
#iterator member is a boundfield iterator
def __init__(self,form,fields=None):
self.form = form
self._index = None
self._fields = fields or self.form._meta.ordered_fields
self._length = len(self._fields)
def __iter__(self):
self._index = -1
return self
def __next__(self):
self._index += 1
if self._index >= self._length:
raise StopIteration()
else:
return self.form[self._fields[self._index]]
class MultiRowBoundFieldIterator(BoundFieldIterator):
#fields is a row list, each row is a field list
def __next__(self):
self._index += 1
if self._index >= self._length:
raise StopIteration()
else:
return [self.form[f] for f in self._fields[self._index]]
class MultiTableBoundFieldIterator(BoundFieldIterator):
#fields is a table list
    #each table is a tuple (table title, collapsible (True or False), default collapse status (collapsed if True), fields (a field list or a row list))
    #iterator member is a tuple (table title, collapsible (True or False), default collapse status (collapsed if True), is multi row (True if multi row), boundfield iterator)
def __next__(self):
self._index += 1
if self._index >= self._length:
raise StopIteration()
else:
t = self._fields[self._index]
field_iterator = get_boundfielditerator(self.form,t[3])
if isinstance(field_iterator,MultiRowBoundFieldIterator):
return [t[0],t[1],t[2],True,get_boundfielditerator(self.form,t[3])]
else:
return [t[0],t[1],t[2],False,get_boundfielditerator(self.form,t[3])]
class HtmlStringBoundField(forms.boundfield.BoundField):
def __init__(self, form, field, name):
self.form_field_name = name
self.form = form
self.name = name
self.field = field
@property
def is_display(self):
return True
@property
def is_hidden(self):
return False
@property
def initial(self):
return self.field.html
@property
def auto_id(self):
return ""
def html(self):
return mark_safe(self.as_widget())
@property
def hascleanvalue(self):
return False
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
return self.field.html
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
return self.field.widget.render(self.name,self.value())
class BoundField(forms.boundfield.BoundField):
defult_display_widget = widgets.TextDisplay()
def __init__(self, form, field, name):
self.form_field_name = name
if isinstance(field,fields.AliasFieldMixin) and name != field.field_name:
super(BoundField,self).__init__(form,field,field.field_name)
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
else:
super(BoundField,self).__init__(form,field,name)
    # Extend django's BoundField to support the following feature:
    # 1. Get extra css_classes from the field's attribute 'css_classes'
    def css_classes(self, extra_classes=None):
if hasattr(self.field,"css_classes"):
if extra_classes:
if hasattr(extra_classes, 'split'):
extra_classes = extra_classes.split()
extra_classes += getattr(self.field,"css_classes")
return super(BoundField,self).css_classes(extra_classes)
else:
return super(BoundField,self).css_classes(getattr(self.field,"css_classes"))
else:
return super(BoundField,self).css_classes(extra_classes)
@property
def is_display(self):
return isinstance(self.field.widget,widgets.DisplayMixin)
@property
def is_hidden(self):
return isinstance(self.field.widget,widgets.HiddenInput) and not self.field.widget.display_widget
@property
def initial(self):
data = super(BoundField,self).initial
#print("{}: {} = {}".format("view" if self.is_display else "edit",self.name ,data))
if not self.is_display and isinstance(data,models.Model):
return data.pk
else:
return data
@property
def auto_id(self):
if self.is_display:
return ""
else:
html_id = super(BoundField,self).auto_id
if "." in html_id:
return html_id.replace(".","_")
else:
return html_id
def html(self,template=None,method="as_widget"):
if hasattr(self.field,"css_classes"):
attrs = " class=\"{}\"".format(" ".join(self.field.css_classes))
else:
attrs = ""
if template:
return mark_safe(template.format(attrs=attrs,widget=getattr(self,method)()))
else:
return mark_safe(getattr(self,method)())
@property
def cleanvalue(self):
return self.form.cleaned_data.get(self.name)
@property
def hascleanvalue(self):
return self.name in self.form.cleaned_data
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
if not self.form.is_bound or isinstance(self.field.widget,widgets.DisplayWidget) or self.field.widget.attrs.get("disabled"):
data = self.initial
else:
data = self.field.bound_data(
self.data, self.form.initial.get(self.name, self.field.initial)
)
if self.is_display and (isinstance(data,models.Model) or isinstance(data,models.query.QuerySet) or (isinstance(data,(list,tuple)) and data and isinstance(data[0],models.Model))):
return data
else:
return self.field.prepare_value(data)
@property
def display(self):
if self.is_display:
return self.html()
elif hasattr(self.field,"display_widget"):
return super(BoundField,self).as_widget(self.field.display_widget)
else:
return super(BoundField,self).as_widget(self.default_display_widget)
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
if self.is_hidden:
attrs = {'style':'display:none'}
html = super(BoundField,self).as_widget(widget,attrs,only_initial)
if not self.is_display and self.name in self.form.errors:
html = "<div style=\"display:inline\"><table class=\"error\" style=\"width:100%;\"><tr><td class=\"error\">{}<div class=\"text-error\" style=\"margin:0px\"><i class=\"icon-warning-sign\"></i> {}</div></td></tr></table></div>".format(html,"<br>".join(self.form.errors[self.name]))
return html
class LoginUserBoundField(BoundField):
@property
def initial(self):
if self.form.request:
return self.form.request.user
else:
return None
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
return self.initial
class AggregateBoundField(BoundField):
def value(self):
return self.field.value(self.form)
def as_widget(self, widget=None, attrs=None, only_initial=False):
return self.field.widget.render(self.name,self.value())
@html_safe
class CompoundBoundFieldMixin(object):
"""
    a mixin implementing a compound bound field: a primary field rendered
    together with its related fields using the layout provided by the field
"""
def __init__(self, form, field, name):
super(CompoundBoundFieldMixin,self).__init__(form,field,name)
if self.field.field_prefix:
self.related_fields = [self.form["{}{}".format(self.field.field_prefix,name)] for name in field.related_field_names]
else:
self.related_fields = [self.form[name] for name in field.related_field_names]
def __str__(self):
"""Renders this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
def __iter__(self):
"""
Yields rendered strings that comprise all widgets in this BoundField.
This really is only useful for RadioSelect widgets, so that you can
iterate over individual radio buttons in a template.
"""
id_ = self.field.widget.attrs.get('id') or self.auto_id
attrs = {'id': id_} if id_ else {}
attrs = self.build_widget_attrs(attrs)
for subwidget in self.field.widget.subwidgets(self.html_name, self.value(), attrs):
yield subwidget
def __len__(self):
return len(list(self.__iter__()))
def __getitem__(self, idx):
# Prevent unnecessary reevaluation when accessing BoundField's attrs
# from templates.
if not isinstance(idx, six.integer_types + (slice,)):
raise TypeError
return list(self.__iter__())[idx]
def get_field(self,field_name):
if self.field.field_prefix:
return self.form["{}{}".format(self.field.field_prefix,field_name)]
else:
return self.form[field_name]
def get_fieldvalue(self,field_name):
if self.field.field_prefix:
return self.form["{}{}".format(self.field.field_prefix,field_name)].value()
else:
return self.form[field_name].value()
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
#print("============{} {}".format(self.name,self.field.field_name))
#if self.field.field_name == "prescription__loc_locality":
# import ipdb;ipdb.set_trace()
html_layout,field_names,include_primary_field = self.field.get_layout(self)
def get_args():
index0 = 0
index1 = 0
args = []
while index1 < len(field_names):
if isinstance(field_names[index1],(tuple,list)):
if field_names[index1][0] != self.field.related_field_names[index0]:
index0 += 1
else:
args.append(self.related_fields[index0].as_widget(only_initial=only_initial,attrs=field_names[index1][1]))
index0 += 1
index1 += 1
elif field_names[index1] != self.field.related_field_names[index0]:
index0 += 1
else:
args.append(self.related_fields[index0].as_widget(only_initial=only_initial))
index0 += 1
index1 += 1
return args
if include_primary_field:
if isinstance(html_layout,(tuple,list)):
html = super(CompoundBoundFieldMixin,self).as_widget(attrs=html_layout[1],only_initial=only_initial)
html_layout = html_layout[0]
else:
html = super(CompoundBoundFieldMixin,self).as_widget(only_initial=only_initial)
if field_names:
args = get_args()
args.append(self.auto_id)
return safestring.SafeText(html_layout.format(html,*args))
elif html_layout:
return safestring.SafeText(html_layout.format(html,self.auto_id))
else:
return html
elif field_names:
args = get_args()
return safestring.SafeText(html_layout.format(*args))
elif html_layout:
return safestring.SafeText(html_layout)
else:
return ""
def as_text(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="text">.
"""
raise Exception("Not supported")
def as_textarea(self, attrs=None, **kwargs):
"Returns a string of HTML for representing this as a <textarea>."
raise Exception("Not supported")
def as_hidden(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="hidden">.
"""
html = super(CompoundBoundFieldMixin,self).as_widget(self.field.hidden_widget(), attrs, **kwargs)
return self.field.hidden_layout.format(html,*[f.as_widget(f.field.hidden_widget(),None,**kwargs) for f in self.related_fields])
class FormBoundField(BoundField):
def __init__(self,*args,**kwargs):
super(FormBoundField,self).__init__(*args,**kwargs)
self._bound_fields_cache = {}
if self.form.is_bound and not self.field.is_display:
raise NotImplementedError
else:
self.innerform = self.field.form_class(instance=self.value(),prefix=self.name,check=self.form.check)
@property
def initial(self):
return self.form.initial.get(self.name, self.field.get_initial())
def html(self,template=None,method="as_widget"):
raise NotImplementedError
@property
def is_bound(self):
return self.form.is_bound and not self.field.is_display
@property
def is_changed(self):
return self.innerform.is_changed
def set_data(self):
obj = self.initial
self.form.set_data(obj)
def full_clean(self):
if self.innerform.is_valid():
return self.innerform.cleaned_data
else:
raise ValidationError("") #error placeholder, but not display in page
def full_check(self):
return self.innerform.full_check()
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
if self.form.is_bound and not self.field.is_display:
raise NotImplementedError
else:
return self.form.initial.get(self.name, self.field.get_initial())
def as_widget(self, widget=None, attrs=None, only_initial=False):
raise NotImplementedError
def __getitem__(self, name):
"""Return a BoundField with the given name."""
return self.innerform[name]
def save(self):
        return self.innerform.save(savemessage=False)
class FormSetBoundField(BoundField):
_is_changed = None
def __init__(self,*args,**kwargs):
super(FormSetBoundField,self).__init__(*args,**kwargs)
self.formset = self.field.formset_class(
data=self.form.data if self.form.is_bound else None,
instance_list=self.initial,
prefix=self.name,
parent_instance=self.form.instance,
check=self.form.check,
request=self.form.request,
requesturl=self.form.requesturl
)
@property
def initial(self):
return self.form.initial.get(self.name, self.field.get_initial())
def html(self,template=None,method="as_widget"):
raise NotImplementedError
@property
def is_bound(self):
return self.form.is_bound and not self.field.is_display
def full_clean(self):
if self.formset.is_valid():
return [form.cleaned_data for form in self.formset]
else:
raise ValidationError("") #error placeholder, but not display in page
def full_check(self):
return self.formset.full_check()
def set_data(self):
objs = self.initial
if isinstance(objs,models.manager.Manager):
objs = objs.all()
self.formset.set_data(objs)
@property
def is_changed(self):
if self._is_changed is None:
try:
changed = False
for form in self.formset:
if form.can_delete:
if form.instance.pk:
changed = True
break
elif form.is_changed:
changed = True
break
= \
scatter.get_xs(nuclides=nuclide, xs_type=xs_type,
moment=moment, subdomains=subdomain)
else:
self._scatter_matrix[i] = \
scatter.get_xs(nuclides=nuclide, xs_type=xs_type,
subdomains=subdomain)
def set_multiplicity_matrix_mgxs(self, nuscatter, scatter=None,
temperature=294., nuclide='total',
xs_type='macro', subdomain=None):
"""This method allows for either the direct use of only an
openmc.mgxs.MultiplicityMatrixXS or an openmc.mgxs.ScatterMatrixXS and
openmc.mgxs.ScatterMatrixXS to be used to set the scattering
multiplicity for this XSdata object. Multiplicity, in OpenMC parlance,
is a factor used to account for the production of neutrons introduced by
scattering multiplication reactions, i.e., (n,xn) events. In this sense,
the multiplication matrix is simply defined as the ratio of the
nu-scatter and scatter matrices.
Parameters
----------
nuscatter: openmc.mgxs.ScatterMatrixXS or openmc.mgxs.MultiplicityMatrixXS
MGXS Object containing the matrix cross section for the domain
of interest.
scatter: openmc.mgxs.ScatterMatrixXS
MGXS Object containing the scattering matrix cross section
for the domain of interest.
temperature : float
Temperature (in Kelvin) of the data. Defaults to room temperature
(294K).
nuclide : str
Individual nuclide (or 'total' if obtaining material-wise data)
to gather data for. Defaults to 'total'.
xs_type: {'macro', 'micro'}
Provide the macro or micro cross section in units of cm^-1 or
barns. Defaults to 'macro'.
subdomain : iterable of int
If the MGXS contains a mesh domain type, the subdomain parameter
specifies which mesh cell (i.e., [i, j, k] index) to use.
See also
--------
openmc.mgxs.Library.create_mg_library()
openmc.mgxs.Library.get_xsdata()
"""
check_type('nuscatter', nuscatter, (openmc.mgxs.ScatterMatrixXS,
openmc.mgxs.MultiplicityMatrixXS))
check_value('energy_groups', nuscatter.energy_groups,
[self.energy_groups])
check_value('domain_type', nuscatter.domain_type,
openmc.mgxs.DOMAIN_TYPES)
check_type('temperature', temperature, Real)
check_value('temperature', temperature, self.temperatures)
if scatter is not None:
check_type('scatter', scatter, openmc.mgxs.ScatterMatrixXS)
if isinstance(nuscatter, openmc.mgxs.MultiplicityMatrixXS):
                msg = 'If a MultiplicityMatrixXS object is passed for ' \
                      '"nuscatter", the "scatter" argument must not be ' \
                      'provided as well.'
raise ValueError(msg)
check_value('energy_groups', scatter.energy_groups,
[self.energy_groups])
check_value('domain_type', scatter.domain_type,
openmc.mgxs.DOMAIN_TYPES)
i = np.where(self.temperatures == temperature)[0][0]
nuscatt = nuscatter.get_xs(nuclides=nuclide,
xs_type=xs_type, moment=0,
subdomains=subdomain)
if isinstance(nuscatter, openmc.mgxs.MultiplicityMatrixXS):
self._multiplicity_matrix[i] = nuscatt
else:
scatt = scatter.get_xs(nuclides=nuclide,
xs_type=xs_type, moment=0,
subdomains=subdomain)
if scatter.scatter_format == 'histogram':
scatt = np.sum(scatt, axis=0)
if nuscatter.scatter_format == 'histogram':
nuscatt = np.sum(nuscatt, axis=0)
self._multiplicity_matrix[i] = np.divide(nuscatt, scatt)
self._multiplicity_matrix[i] = \
np.nan_to_num(self._multiplicity_matrix[i])
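    # Usage sketch (hypothetical MGXS tallies): either pass a
    # MultiplicityMatrixXS directly, or a (nu-scatter, scatter) pair from which
    # the group-to-group ratio above is formed, e.g.
    #   xsdata.set_multiplicity_matrix_mgxs(nu_scatter_mgxs, scatter_mgxs)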
def set_inverse_velocity_mgxs(self, inverse_velocity, temperature=294.,
nuclide='total', xs_type='macro',
subdomain=None):
"""This method allows for an openmc.mgxs.InverseVelocity
to be used to set the inverse velocity for this XSdata object.
Parameters
----------
inverse_velocity : openmc.mgxs.InverseVelocity
MGXS object containing the inverse velocity for the domain of
interest.
temperature : float
Temperature (in Kelvin) of the data. Defaults to room temperature
(294K).
nuclide : str
Individual nuclide (or 'total' if obtaining material-wise data)
to gather data for. Defaults to 'total'.
xs_type: {'macro', 'micro'}
Provide the macro or micro cross section in units of cm^-1 or
barns. Defaults to 'macro'.
subdomain : iterable of int
If the MGXS contains a mesh domain type, the subdomain parameter
specifies which mesh cell (i.e., [i, j, k] index) to use.
See also
--------
openmc.mgxs.Library.create_mg_library()
openmc.mgxs.Library.get_xsdata()
"""
check_type('inverse_velocity', inverse_velocity,
openmc.mgxs.InverseVelocity)
check_value('energy_groups', inverse_velocity.energy_groups,
[self.energy_groups])
check_value('domain_type', inverse_velocity.domain_type,
openmc.mgxs.DOMAIN_TYPES)
check_type('temperature', temperature, Real)
check_value('temperature', temperature, self.temperatures)
i = np.where(self.temperatures == temperature)[0][0]
self._inverse_velocity[i] = inverse_velocity.get_xs(
nuclides=nuclide, xs_type=xs_type, subdomains=subdomain)
def convert_representation(self, target_representation, num_polar=None,
num_azimuthal=None):
"""Produce a new XSdata object with the same data, but converted to the
new representation (isotropic or angle-dependent).
This method cannot be used to change the number of polar or
azimuthal bins of an XSdata object that already uses an angular
representation. Finally, this method simply uses an arithmetic mean to
convert from an angular to isotropic representation; no flux-weighting
is applied and therefore reaction rates will not be preserved.
Parameters
----------
target_representation : {'isotropic', 'angle'}
Representation of the MGXS (isotropic or angle-dependent flux
weighting).
num_polar : int, optional
Number of equal width angular bins that the polar angular domain is
subdivided into. This is required when `target_representation` is
"angle".
num_azimuthal : int, optional
Number of equal width angular bins that the azimuthal angular domain
is subdivided into. This is required when `target_representation` is
"angle".
Returns
-------
openmc.XSdata
Multi-group cross section data with the same data as self, but
represented as specified in `target_representation`.
"""
check_value('target_representation', target_representation,
_REPRESENTATIONS)
if target_representation == 'angle':
check_type('num_polar', num_polar, Integral)
check_type('num_azimuthal', num_azimuthal, Integral)
check_greater_than('num_polar', num_polar, 0)
check_greater_than('num_azimuthal', num_azimuthal, 0)
xsdata = copy.deepcopy(self)
# First handle the case where the current and requested
# representations are the same
if target_representation == self.representation:
# Check to make sure the num_polar and num_azimuthal values match
if target_representation == 'angle':
if num_polar != self.num_polar or num_azimuthal != self.num_azimuthal:
raise ValueError("Cannot translate between `angle`"
" representations with different angle"
" bin structures")
# Nothing to do as the same structure was requested
return xsdata
xsdata.representation = target_representation
# We have different actions depending on the representation conversion
if target_representation == 'isotropic':
# This is not needed for the correct functionality, but these
# values are changed back to None for clarity
xsdata._num_polar = None
xsdata._num_azimuthal = None
elif target_representation == 'angle':
xsdata.num_polar = num_polar
xsdata.num_azimuthal = num_azimuthal
# Reset xs_shapes so it is recalculated the next time it is needed
xsdata._xs_shapes = None
for i, temp in enumerate(xsdata.temperatures):
for xs in ['total', 'absorption', 'fission', 'nu_fission',
'scatter_matrix', 'multiplicity_matrix',
'prompt_nu_fission', 'delayed_nu_fission',
'kappa_fission', 'chi', 'chi_prompt', 'chi_delayed',
'beta', 'decay_rate', 'inverse_velocity']:
# Get the original data
orig_data = getattr(self, '_' + xs)[i]
if orig_data is not None:
if target_representation == 'isotropic':
# Since we are going from angle to isotropic, the
# current data is just the average over the angle bins
new_data = orig_data.mean(axis=(0, 1))
elif target_representation == 'angle':
# Since we are going from isotropic to angle, the
# current data is just copied for every angle bin
new_shape = (num_polar, num_azimuthal) + \
orig_data.shape
new_data = np.resize(orig_data, new_shape)
setter = getattr(xsdata, 'set_' + xs)
setter(new_data, temp)
return xsdata
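    # Usage sketch (hypothetical library entry): expand onto a 2x2 angular grid
    # and collapse back; the collapse is an unweighted average, so reaction
    # rates are not preserved:
    #   angle_xs = xsdata.convert_representation('angle', num_polar=2, num_azimuthal=2)
    #   iso_xs = angle_xs.convert_representation('isotropic')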
def convert_scatter_format(self, target_format, target_order=None):
"""Produce a new MGXSLibrary object with the same data, but converted
to the new scatter format and order
Parameters
----------
target_format : {'tabular', 'legendre', 'histogram'}
Representation of the scattering angle distribution
target_order : int
Either the Legendre expansion order, the number of histogram bins, or
the number of tabular points used to describe the angular distribution
associated with each group-to-group transfer probability.
Returns
-------
openmc.XSdata
Multi-group cross section data with the same data as in self, but
represented as specified in `target_format`.
"""
check_value('target_format', target_format, _SCATTER_TYPES)
check_type('target_order', target_order, Integral)
if target_format == 'legendre':
check_greater_than('target_order', target_order, 0, equality=True)
else:
check_greater_than('target_order', target_order, 0)
xsdata = copy.deepcopy(self)
xsdata.scatter_format = target_format
xsdata.order = target_order
# Reset and re-generate XSdata.xs_shapes with the new scattering format
xsdata._xs_shapes = None
for i, temp in enumerate(xsdata.temperatures):
orig_data = self._scatter_matrix[i]
new_shape = orig_data.shape[:-1] + (xsdata.num_orders,)
new_data = np.zeros(new_shape)
if self.scatter_format == 'legendre':
if target_format == 'legendre':
# Then we are changing orders and only need to change
# dimensionality of the mu data and pad/truncate as needed
order = min(xsdata.num_orders, self.num_orders)
new_data[..., :order] = orig_data[..., :order]
elif target_format == 'tabular':
mu = np.linspace(-1, 1, xsdata.num_orders)
# Evaluate the legendre on the mu grid
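# Truncated Legendre series reconstruction:
#     f(mu_i) = sum_l (l + 1/2) * P_l(mu_i) * a_l
# where a_l are the stored Legendre moments in orig_data[..., l].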
for imu in range(len(mu)):
for l in range(self.num_orders):
new_data[..., imu] += (
(l + 0.5) * eval_legendre(l, mu[imu]) *
orig_data[..., l])
elif target_format == 'histogram':
# This code uses the vectorized integration capabilities
# instead of having an isotropic and angle representation
# path.
# Set the histogram mu grid
mu = np.linspace(-1, 1, xsdata.num_orders + 1)
# For every bin perform simpson integration of a finely
# sampled orig_data
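# Each histogram bin value is the integral of the reconstructed angular
# distribution over that bin,
#     sigma_h = integral from mu[h] to mu[h+1] of f(mu) dmu,
# approximated with Simpson's rule on _NMU finely spaced sample points.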
for h_bin in range(xsdata.num_orders):
mu_fine = np.linspace(mu[h_bin], mu[h_bin + 1], _NMU)
table_fine = np.zeros(new_data.shape[:-1] + (_NMU,))
for imu in range(len(mu_fine)):
for l in range(self.num_orders):
table_fine[..., imu] += ((l + 0.5)
* eval_legendre(l, mu_fine[imu]) *
orig_data[..., l])
new_data[..., h_bin] = simps(table_fine, mu_fine)
elif self.scatter_format == 'tabular':
# Calculate the mu points of the current data
mu_self = np.linspace(-1, 1, self.num_orders)
if target_format == 'legendre':
# Find the Legendre coefficients via integration. To best
# use the vectorized integration capabilities of scipy,
# this is done with fixed sample integration routines.
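# Legendre moments from tabular data:
#     a_l = integral from -1 to +1 of f(mu) * P_l(mu) dmu,
# with f(mu) obtained by linear interpolation of the tabular points and
# the integral evaluated by Simpson's rule on the fixed mu_fine grid.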
mu_fine = np.linspace(-1, 1, _NMU)
y = [interp1d(mu_self, orig_data)(mu_fine) *
eval_legendre(l, mu_fine)
for l in range(xsdata.num_orders)]
for l in range(xsdata.num_orders):
new_data[..., l] = simps(y[l], mu_fine)
elif target_format == 'tabular':
# Simply use an interpolating function to get the new data
""" Class description goes here. """
import logging
import time
import uuid
import datetime
from dataclay.communication.grpc.messages.common.common_messages_pb2 import LANG_PYTHON
from dataclay.exceptions.exceptions import DataClayException
from dataclay.commonruntime.DataClayRuntime import DataClayRuntime
from dataclay.commonruntime.RuntimeType import RuntimeType
from dataclay.commonruntime.Settings import settings
from dataclay.serialization.lib.SerializationLibUtils import SerializationLibUtilsSingleton
from dataclay.heap.ExecutionEnvironmentHeapManager import ExecutionEnvironmentHeapManager
from dataclay.loader.ExecutionObjectLoader import ExecutionObjectLoader
from dataclay.commonruntime.Runtime import threadLocal
from dataclay.util import Configuration
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = '2015 Barcelona Supercomputing Center (BSC-CNS)'
from dataclay.util.management.metadataservice.RegistrationInfo import RegistrationInfo
logger = logging.getLogger(__name__)
current_milli_time = lambda: int(round(time.time() * 1000))
class ExecutionEnvironmentRuntime(DataClayRuntime):
def __init__(self, theexec_env):
DataClayRuntime.__init__(self)
self.current_type = RuntimeType.exe_env
""" Execution Environment using this runtime. """
self.execution_environment = theexec_env
"""
References held by sessions. Resource note: the maximum size of this map is the maximum number of objects allowed in the EE times the number of sessions.
Also worth considering: what happens if a single session is associated with two client threads? Is that a real use case, and should we allow it?
Must be thread-safe.
"""
self.references_hold_by_sessions = dict()
"""
Sessions in quarantine. Note: the maximum size of this map is the maximum number of sessions per EE. This map is needed to solve a race
condition in global garbage collection (@see getReferenceCounting).
"""
self.quarantine_sessions = set()
"""
For each session, its expiration date. This is used to control 'retained' objects from sessions during garbage collection.
Must be thread-safe.
"""
self.session_expires_dates = dict()
def initialize_runtime_aux(self):
self.dataclay_heap_manager = ExecutionEnvironmentHeapManager(self)
self.dataclay_object_loader = ExecutionObjectLoader(self)
def is_exec_env(self):
return True
def get_or_new_instance_from_db(self, object_id, retry):
"""
@postcondition: Get object from memory or database and WAIT in case we are still waiting for it to be persisted.
@param object_id: ID of object to get or create
@param retry: indicates if we should retry and wait
"""
return self.dataclay_object_loader.get_or_new_instance_from_db(object_id, retry)
def load_object_from_db(self, instance, retry):
"""
@postcondition: Load DataClayObject from Database
@param instance: DataClayObject instance to fill
@param retry: Indicates retry loading in case it is not in db.
"""
return self.dataclay_object_loader.load_object_from_db(instance, retry)
def get_hint(self):
"""
@postcondition: Get hint of the current EE
@return Hint of current EE
"""
return settings.environment_id
def flush_all(self):
"""
@postcondition: Flush all objects in memory to disk.
"""
self.dataclay_heap_manager.flush_all()
def get_session_id(self):
"""
@postcondition: Get Session ID associated to current thread
@return: Session ID associated to current thread
"""
if hasattr(threadLocal, "session_id"):
return threadLocal.session_id
else:
return None
def get_execution_environment(self):
"""
@return: Return execution environment using this runtime
"""
return self.execution_environment
def store_object(self, instance):
if not instance.is_persistent():
raise RuntimeError("StoreObject should only be called on Persistent Objects. "
"Ensure to call make_persistent first")
self.internal_store(instance, make_persistent=False)
def make_persistent(self, instance, alias, backend_id, recursive):
""" This method creates a new Persistent Object using the provided stub
instance and, if indicated, all its associated objects also Logic module API used for communication
This function is called from a stub/execution class
:param instance: Instance to make persistent
:param backend_id: Indicates which is the destination backend
:param recursive: Indicates if make persistent is recursive
:param alias: Alias for the object
:returns: ID of the backend in which the object was persisted.
:type instance: DataClayExecutionObject
:type backend_id: DataClayID
:type recursive: boolean
:type alias: string
:rtype: DataClayID
:raises RuntimeError: if backend id is UNDEFINED_LOCAL.
"""
self.logger.debug("Starting make persistent object for instance %s with id %s", instance,
instance.get_object_id())
location = instance.get_hint()
if location is None:
location = backend_id
# Choose location if needed
# If object is already persistent -> it must have a Hint (location = hint here)
# If object is not persistent -> location is chosen (provided backend id or random, hash...).
if location is None:
location = self.choose_location(instance, alias)
if alias is not None:
# Add a new alias to an object.
# We call 'addAlias' with registration information in case we need to register it.
# Use cases:
# 1 - object was persisted without alias and not yet registered -> we need to register it with new alias.
# 2 - object was persisted and it is already registered -> we only add a new alias
# 3 - object was persisted with an alias and it must be already registered -> we add a new alias.
if instance.is_pending_to_register():
# Use case 1
reg_infos = list()
reg_info = RegistrationInfo(instance.get_object_id(), instance.get_class_extradata().class_id,
self.get_session_id(), instance.get_dataset_id(), alias)
reg_infos.append(reg_info)
# TODO: Review if we use hint of the object or the hint of the runtime.
new_object_ids = self.ready_clients["@LM"].register_objects(reg_infos, instance.get_hint(), LANG_PYTHON)
new_object_id = next(iter(new_object_ids))
self.update_object_id(instance, new_object_id)
else:
# Use case 2 and 3 - add new alias
instance.set_alias(alias)
return instance.get_location()
def execute_implementation_aux(self, operation_name, instance, parameters, exeenv_id=None):
object_id = instance.get_object_id()
logger.debug("Calling execute_implementation inside EE for operation %s and object id %s", operation_name, object_id)
# # ============================== PARAMS/RETURNS ========================== //
# # Check if object is being deserialized (params/returns)
under_deserialization = self.check_and_fill_volatile_under_deserialization(instance, None)
if under_deserialization:
logger.debug("Object %s is a volatile under deserialization, executing", object_id)
return self.execution_environment.internal_exec_impl(operation_name, instance, parameters)
# // === HINT === //
thisExecEnv = settings.environment_id
using_hint = False
if exeenv_id is None:
if instance.get_hint() is not None:
exeenv_id = instance.get_hint()
using_hint = True
logger.debug("Using hint %s for object id %s", exeenv_id, object_id)
else:
logger.debug("Asking for EE of object with id %s", object_id)
exeenv_id = next(iter(self.get_metadata(object_id).locations))
if exeenv_id == thisExecEnv:
logger.debug("Object execution is local")
# Note that fat_instance tends to be the same as instance...
# *except* if it is a proxy
try:
fat_instance = self.execution_environment.get_local_instance(object_id)
# get_local_instance should indeed modify the same instance,
# so @abarcelo is leaving the assert just in case
assert instance is fat_instance, \
"A tiny mess with get_local_instance and heap management, check that"
return self.execution_environment.internal_exec_impl(operation_name, fat_instance, parameters)
except Exception as e:
return self.execution_environment.internal_exec_impl(operation_name, instance, parameters)
else:
logger.debug("Object execution is not local")
object_id = instance.get_object_id()
return self.call_execute_to_ds(instance, parameters, operation_name, exeenv_id, using_hint)
#########################################
# Helper functions, not commonruntime methods #
#########################################
def internal_store(self, instance, make_persistent=True):
"""Perform the storage (StoreObject call) for an instance.
:param instance: The DataClayObject willing to be stored.
:param make_persistent: Flag, True when DS_STORE_OBJECT should be called
and False when DS_UPSERT_OBJECT is the method to be called.
:return: A dictionary containing the classes for all stored objects.
This function works for two main scenarios: the makePersistent one (in
which the instance is not yet persistent) and the update (in which the
instance is persistent).
The return dictionary is the same in both cases, but note that the update
should not use the provided instance for updating metadata to the LM.
"""
client = self.ready_clients["@STORAGE"]
pending_objs = [instance]
stored_objects_classes = dict()
serialized_objs = list()
reg_infos = list()
dataset_id = self.execution_environment.thread_local_info.dataset_id
while pending_objs:
current_obj = pending_objs.pop()
# Lock and make sure it is loaded
current_obj_id = current_obj.get_object_id()
self.lock(current_obj_id) # Avoid GC clean object while storing it
try:
if not current_obj.is_loaded():
current_obj = self.get_or_new_instance_from_db(current_obj_id, False)
dcc_extradata = current_obj.get_class_extradata()
object_id = current_obj.get_object_id()
if make_persistent:
# Ignore already persistent objects
if current_obj.is_persistent():
continue
dcc_extradata = current_obj.get_class_extradata()
infos = RegistrationInfo(object_id, dcc_extradata.class_id,
self.execution_environment.thread_local_info.session_id, dataset_id, None)
reg_infos.append(infos)
# This object will soon be persistent
current_obj.set_persistent(True)
current_obj.set_hint(settings.environment_id)
# Just in case (should have been loaded already)
logger.debug("Setting loaded to true from internal store for object %s" % str(object_id))
current_obj.set_loaded(True)
# First store since others OIDs are recursively created while creating MetaData
if not object_id:
if not make_persistent:
raise DataClayException("Objects should never be uuid-less for non-make_persistent use cases")
object_id = uuid.uuid4()
current_obj.set_object_id(object_id)
current_obj.set_dataset_id(self.execution_environment.thread_local_info.dataset_id)
logger.debug("Ready to make persistent object {%s} of class %s {%s}" %
(object_id, dcc_extradata.classname, dcc_extradata.class_id))
stored_objects_classes[object_id] = dcc_extradata.class_id
# If we are not in a make_persistent, the dataset_id hint is null (?)
serialized_objs.append(SerializationLibUtilsSingleton.serialize_dcobj_with_data(
current_obj, pending_objs, False, None, self, False))
finally:
self.unlock(current_obj_id)
if make_persistent:
lm_client = self.ready_clients["@LM"]
lm_client.register_objects(reg_infos, settings.environment_id, LANG_PYTHON)
client.ds_store_objects(self.execution_environment.thread_local_info.session_id, serialized_objs, False, None)
else:
client.ds_upsert_objects(self.execution_environment.thread_local_info.session_id, serialized_objs)
def get_operation_info(self, object_id, operation_name):
dcc_extradata = self.get_object_by_id(object_id).get_class_extradata()
metaclass_container = dcc_extradata.metaclass_container
operation = metaclass_container.get_operation_from_name(operation_name)
return operation
def get_implementation_id(self, object_id, operation_name, implementation_idx=0):
operation = self.get_operation_info(object_id, operation_name)
return operation.implementations[0].dataClayID
def check_and_fill_volatile_under_deserialization(self, volatile_obj, ifacebitmaps):
""" | |
resX = list(np.copy(x1))
resY = list(np.copy(y1))
# print('resX = {}, resY = {}'.format(resX, resY))
# print('x2 = {}, y2 = {}'.format(x2, y2))
for i in range(len(x2)):
flag = False
for j in range(len(resX)):
if (floatEqual(x2[i], resX[j])):
resY[j] += y2[i]
flag = True
if (not flag):
resX.append(x2[i])
resY.append(y2[i])
resX, resY = sortListBy(resX, resY)
return np.array([resX, resY])
def combine2DHistogram(a1, a2):
if (a1.shape == a2.shape):
return a1 + a2
h1, w1 = a1.shape
h2, w2 = a2.shape
maxH = np.max([h1, h2])
maxW = np.max([w1, w2])
res = np.zeros((maxH, maxW))
res[:h1, :w1] += a1
res[:h2, :w2] += a2
return res
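# combine2DHistogram zero-pads both arrays to the element-wise maximum
# shape before summing, so 2D histograms accumulated with different bin
# counts can still be merged, e.g. (hypothetical data)
#     combine2DHistogram(np.ones((2, 2)), np.ones((3, 1))).shape == (3, 2)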
def log2DHistogram(a, lims = None):
if (lims is None):
aa = np.zeros(a.shape)
else:
aShape = a.shape
limX = max([lims, aShape[0]])
limY = max([lims, aShape[1]])
aa = np.zeros((limX, limY))
for i in range(len(a)):
for j in range(len(a[0])):
if (a[i][j] > 0):
aa[i][j] = np.log(a[i][j])
return aa
def combine2DHistogramList(hList):
# for h in hList:
# print(h.shape)
res = hList[0]
for i in range(1, len(hList)):
res = combine2DHistogram(res, hList[i])
return res
def combineHistogramList(hList):
res = hList[0]
for i in range(1, len(hList)):
res = combineHistogram(res, hList[i])
histList = []
for i in range(len(res[0])):
histList.append([])
for hL in hList:
for j in range(len(res[0])):
flag = False
for i in range(len(hL[0])):
if (floatEqual(hL[0][i], res[0][j])):
histList[j].append(hL[1][i])
flag = True
if (not flag):
histList[j].append(0)
for i in range(len(histList)):
histList[i] = np.array(histList[i])
# print(histList)
# print(hList)
return res, histList
def setErrorBar(x):
return np.std(x) / np.sqrt(len(x) - 1)
def getkbr(x, y):
axx = np.average(x ** 2)
ayy = np.average(y ** 2)
axy = np.average(x * y)
ax = np.average(x)
ay = np.average(y)
k = (axy - ax * ay) / (axx - ax ** 2)
b = ay - ax * k
r = (axy - ax * ay) / np.sqrt((axx - ax ** 2) * (ayy - ay * ay))
return k, b, r
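# getkbr performs an ordinary least-squares fit y ~ k*x + b:
#     k = cov(x, y) / var(x),   b = mean(y) - k * mean(x),
# and r is the Pearson correlation coefficient. Quick check on
# hypothetical data:
#     getkbr(np.array([0., 1., 2., 3.]), np.array([1., 3., 5., 7.]))
#     # -> (2.0, 1.0, 1.0)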
def analyzeHistogram(x, y, label):
n = len(x) // 2
xFrag = np.array(x[n:])
yFrag = np.log(np.array(y[n:]))
# xFrag = np.log(np.array(x[1:n]))
# # yFrag = np.log(np.array(y[1:n]))
# yFrag = np.array(y[1:n])
k, b, r = getkbr(xFrag, yFrag)
print('{}: k = {}, b = {}, r = {}'.format(label, k, b, r))
return k
def maximumGroup(groups, excep = None):
maxLength = 0
res = []
maxKey = None
for key in groups:
if (key == excep):
continue
if (len(groups[key]) > maxLength):
maxLength = len(groups[key])
res = groups[key]
maxKey = key
return maxKey, res
def dealCheckerBoard(x):
resX = []
resY = []
for i in range(len(x)):
if (i % 2 == 1):
resX.append(i)
resY.append(x[i])
return {'x': resX, 'y': resY}
def getFitting(data):
x = np.array(data['x'])
y = np.array(data['y'])
if (len(x) < 8):
print('not enough data for fitting, stopping.')
return None
print('data length = {}'.format(len(x)))
# startIdx = int(len(x) * 0.25)
# endIdx = int(len(x) * 0.75)
startIdx = 2
endIdx = 8
xFit = np.log(x[startIdx : endIdx])
yFit = y[startIdx : endIdx]
k, b, r = getkbr(xFit, yFit)
return {'k': k, 'b': b, 'r': r}
# return
def isTimeObsName(dataName):
return (dataName.find('time') != -1)
def getList(x):
if (isinstance(x, int) or isinstance(x, float)):
return [x]
else:
return x
def getSingle(x):
if (isinstance(x, int) or isinstance(x, float)):
return x
else:
return x[0]
def makeFitting(x, y):
# print('x = {}, y = {}'.format(x, y))
x = np.array(x)
y = np.array(y)
axx = np.average(x * x)
axy = np.average(x * y)
ayy = np.average(y * y)
ax = np.average(x)
ay = np.average(y)
k = (axy - ax * ay) / (axx - ax * ax)
b = ay - k * ax
r = (axy - ax * ay) / np.sqrt((axx - ax * ax) * (ayy - ay * ay))
return k, b, r
def keyCombine(key1, val1, key2, val2, funcs):
key1, val1 = sortListBy(key1, val1)
key2, val2 = sortListBy(key2, val2)
cur1 = 0
cur2 = 0
resKey = []
resVal = []
while (cur1 < len(key1)) and (cur2 < len(key2)):
if (floatEqual(key1[cur1], key2[cur2])):
resKey.append(key1[cur1])
resVal.append(funcs(val1[cur1], val2[cur2]))
cur1 += 1
cur2 += 1
continue
if (key1[cur1] < key2[cur2]):
cur1 += 1
else:
cur2 += 1
return resKey, resVal
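# keyCombine merges two (key, value) lists on (approximately) equal keys,
# applying `funcs` to the paired values and dropping unmatched keys, e.g.
# (hypothetical data)
#     keyCombine([1, 2, 3], [10, 20, 30], [2, 3, 4], [5, 5, 5],
#                lambda a, b: a + b)
#     # -> ([2, 3], [25, 35])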
def getTotalSize(sizeTuple):
res = 1
for x in sizeTuple:
res *= x
return res
def decodeSnapshot(snapshot, totalBondN, snapshotBit):
res = []
bondCount = 0
while (bondCount < totalBondN):
for x in snapshot:
for bit in range(snapshotBit):
resBit = (x >> bit) & 1
if (resBit == 1):
res.append(True)
else:
res.append(False)
bondCount += 1
if (bondCount >= totalBondN):
return res
return res
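# decodeSnapshot unpacks bond occupation flags stored bitwise: each
# integer in `snapshot` carries `snapshotBit` flags in its low bits, and
# decoding stops after `totalBondN` flags, e.g. (hypothetical data)
#     decodeSnapshot([0b101], totalBondN=3, snapshotBit=3)
#     # -> [True, False, True]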
def decode2DDimer(idx, h, w):
direct = idx // (h * w)
x = (idx - direct * h * w) // w
y = idx % w
return (direct, x, y)
def decode2DCorr(x, L):
# xx = (x - (-0.0625)) / 0.25
xx = x
return np.reshape(xx[:(L * L)], (L, L)), np.reshape(xx[(L * L):], (L, L))
def zipLists(*args):
assert (len(args) > 0), "Error: length of args must be positive for zipLists(*args)."
if (len(args) == 1):
return [(x, ) for x in args[0]]
else:
partRes = zipLists(*args[1:])
res = []
for x in args[0]:
for y in partRes:
res.append((x, *y))
return res
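# zipLists builds the Cartesian product of its argument lists as tuples,
# e.g. zipLists([1, 2], [3, 4]) -> [(1, 3), (1, 4), (2, 3), (2, 4)].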
def generateList(x, type):
if (isinstance(x, type)):
return [x]
else:
return x
def adjustAxixRange(ax, xRange = 1.0, yRange = 1.0):
pos = ax.get_position()
x0, x1, y0, y1 = pos.x0, pos.x1, pos.y0, pos.y1
newX1 = x0 + (x1 - x0) * xRange
newY1 = y0 + (y1 - y0) * yRange
ax.set_position(mplTransforms.Bbox([[x0, y0], [newX1, newY1]]))
def setNewAxisAtRight(fig, ax, xRange = 0.90, yScale = 0.5):
pos = ax.get_position()
x0, x1, y0, y1 = pos.x0, pos.x1, pos.y0, pos.y1
# print(x0, x1, y0, y1)
newX1 = x0 + (x1 - x0) * xRange
# return fig.add_axes([newX1, y0, x1 - newX1, y1 - y0])
# return fig.add_axes([newX1, x1 - newX1, y0, y1 - y0])
newAx = fig.add_axes([newX1, y0, x1 - newX1, y1 - y0])
# newAx.set_position(mplTransforms.Bbox([[x0, y0], [newX1, y1]]))
return newAx
def addColorBar(ax, fig, im, adjustRange = 0.85, colorBarRange = 0.90):
cax = setNewAxisAtRight(fig, ax, xRange = colorBarRange)
adjustAxixRange(ax, xRange = adjustRange)
fig.colorbar(im, cax = cax)
def normalizeArray(x, errorBar = None):
xArray = np.array(x)
lowV = np.min(xArray)
highV = np.max(xArray)
resX = ((xArray - lowV) / (highV - lowV)) * 2.0 - 1.0
if (errorBar is not None):
errorBar = np.array(errorBar) * 2.0 / (highV - lowV)
return resX, errorBar
else:
return resX
def floorInt(x, eps = 1e-8):
return math.floor(x + eps)
def flipAppend(x):
return np.array(list(x) + list(-x))
def resortMVariables(x):
return np.array([x[1], -x[0], x[3], -x[2]])
def getM0011(x):
return x[0] - x[1] - x[2] + x[3], x[0] + x[1] - x[2] - x[3]
def makeHist(lim, bins, x):
data, _, _ = np.histogram2d(x = x[:, 0], y = x[:, 1], bins = bins, range = lim)
return {'lim0': np.array([lim[0][0], lim[1][0]]), 'lim1': np.array([lim[0][1], lim[1][1]]), 'steps': np.array(bins), 'data': data.flatten()}
# return {'lim0': self.data[dataName + ' bins begin'], 'lim1': self.data[dataName + ' bins end'], 'steps': self.data[dataName + ' bins'], 'data': self.data[dataName + ' bins data']}
def weightedBinder(x, weights, dim = 2):
xArray = np.array(x)
weightArray = np.array(weights)
x2 = np.sum((xArray ** 2) * weightArray)
x4 = np.sum((xArray ** 4) * weightArray)
if (dim == 0):
return x4 / (x2 ** 2)
else:
return 1.0 - (x4 / (x2 * x2 * dim))
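# weightedBinder evaluates a Binder-cumulant-style ratio from weighted
# moments <x^n> = sum_i w_i * x_i^n:
#     U = 1 - <x^4> / (dim * <x^2>^2)   for dim > 0,
# while dim=0 returns the bare ratio <x^4> / <x^2>^2. The weights are
# assumed to be normalized so that they sum to 1.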
def binderPreparation(psiX, psiY):
# first consider theta series
# theta = np.arctan2(psiX, psiY)
# cos2Theta = np.cos(2 * theta)
# cos(2theta) = cos^2(theta) - sin^2(theta) = (psiX^2 - psiY^2) / |psi|^2
cos2Theta = ((psiX ** 2) - (psiY ** 2)) / (psiX ** 2 + psiY ** 2)
atanhCos2Theta = np.arctanh(cos2Theta)
atanAtanhCos2Theta = (2.0 / np.pi) * np.arctan((2.0 / np.pi) * atanhCos2Theta)
# print(psiX, psiY, atanhCos2Theta, atanAtanhCos2Theta)
return atanhCos2Theta, atanAtanhCos2Theta
def floatAllTheSame(l):
if len(l) == 0:
return True
v = l[0]
for vv in l:
if not floatEqual(v, vv):
return False
return True
def anyFilter(filters):
def filter(x):
for f in filters:
if f(x):
return True
return False
return filter
def allFilter(filters):
def filter(x):
for f in filters:
if not f(x):
return False
return True
return filter