interval_time = 1
start_found = False
for line_num, line in enumerate(csv_reader):
line.remove("")
# Check if data points are continuous and sequential
if not line_num == 0:
try:
if not int(line[self.data_point_col]) == (int(prev_line[self.data_point_col]) + 1):
raise ValueError
except:
print("line = ", line)
messagebox.showerror("Data Point Error",
"".join((print_file_name + "Error near data point " + str(prev_line[self.data_point_col]) +
". Data point number is not sequential with regrad to previous data point.")))
return False
prev_datetime = niq_misc.get_datetime(self, prev_line)
cur_datetime = niq_misc.get_datetime(self, line)
datetime_diff = (cur_datetime - prev_datetime).seconds / 60
if datetime_diff == 0 or datetime_diff == self.time_interval:
start_found = True
if datetime_valid and start_found:
if cur_datetime == False:
return False
if datetime_diff != self.time_interval:
if not interval_clock > 0:
datetime_valid = False
else:
if datetime_diff == 0:
interval_time += 1
elif datetime_diff != 1:
datetime_valid = False
else:
if interval_time == interval_clock:
interval_time = 1
else:
datetime_valid = False
if not datetime_valid:
if self.show_warns_BV.get():
messagebox.showwarning("Date/time Warning",
"".join((print_file_name + "Discontinuous date/time found for data point " + line[self.data_point_col] +
". The program will continue, but this could cause inacurate statistical output.")))
if line[self.egg_temper_col] == "":
if self.show_warns_BV.get():
messagebox.showwarning("Egg Temperature Warning",
"".join((print_file_name + "No egg temperature detected for data point " + line[self.data_point_col] +
". If left, the program will populate this cell with the temperature above it.")))
else:
try: float(line[self.egg_temper_col])
except:
messagebox.showerror("Temperature Error",
"".join((print_file_name + "Invalid temperature given for data point " + line[self.data_point_col] + ".")))
return False
if self.air_valid:
try:
if line[self.air_temper_col] == "":
self.air_valid = False
if self.show_warns_BV.get():
messagebox.showwarning("Air Temperature Warning",
"".join((print_file_name + "No air temperature detected for data point " + line[self.data_point_col] +
". Air temperatures will not be plotted or included in statistical output.")))
else:
try: float(line[self.air_temper_col])
except:
self.air_valid = False
if self.show_warns_BV.get():
messagebox.showwarning("Air Temperature Warning",
"".join((print_file_name + "Invalid air temperature detected for data point " + line[self.data_point_col] +
". Air temperatures will not be plotted or included in statistical output.")))
except IndexError:
self.air_valid = False
prev_line = line
return True
# flag
except Exception as e:
print(e)
print_file_name = ("File: " + os.path.basename(os.path.normpath(in_file)) + " \n\n")
messagebox.showerror("Unknown Error",
"".join((print_file_name + "There was an unidentifiable error with the provided input file. " +
"This is sometimes the result of \"extra\" cells in the input file.\n\n" +
"Please reference the NestIQ manual for details regarding proper input file format." +
" This can be accessed by clicking \"Help\" in the top right.")))
return False
def check_input_file(self):
"""
Checks several aspects of the input file to ensure it is compatible with all downstream processing.
Also displays warnings for less severe format violations.
"""
in_file = self.input_file_E.get().lstrip("{").rstrip("}")
datetime_valid = True
print_file_name = ("For file: " + os.path.normpath(in_file) + "\n\n")
if in_file == "":
messagebox.showerror("Input error (Main tab)", "No input file provided.")
return False
try:
if not os.path.exists(in_file):
# Check for curly brace addition (sometimes done for paths with spaces)
if in_file[0] == "{":
if os.path.exists(in_file[1:(len(in_file) - 1)]):
replace_entry(self.input_file_E, (in_file[1:(len(in_file) - 1)]))
in_file = self.input_file_E.get()
else: raise ValueError
else: raise ValueError
except ValueError:
messagebox.showerror("Input File Error",
"".join((print_file_name, "File with provided path could not be found.")))
return False
if in_file[-4:] != ".csv":
messagebox.showerror("Input File Error",
"".join((print_file_name, "Input file must end in \".csv\" (comma separated value file format).")))
return False
# Flag
try:
with open(in_file, "r") as csv_file:
csv_lines = csv_file.readlines()
master_list = [line.strip().rstrip(",").split(",") for line in csv_lines]
pop_indices = []
# Remove lines not conforming to expected format (such as headers)
for i, cur_line in enumerate(master_list[:-1]):
# print("line = ", master_list[i])
if any((
re.search("\D", master_list[i][self.data_point_col]),
not re.search("\d", master_list[i][self.data_point_col])
)):
pop_indices.append(i)
for pop_count, index in enumerate(pop_indices):
master_list.pop(index - pop_count)
master_list.pop(len(master_list) - 1)
prev_line = master_list[0]
if not (self.get_data_time_interval(niq_misc.list_to_gen(master_list[1:]), prev_line)):
return False
if len(prev_line) < 3:
self.air_valid = False
if not niq_misc.get_datetime(self, prev_line):
return False
interval_clock = 0 if self.time_interval >= 1 else round(1 / self.time_interval)
interval_time = 1
start_found = False
for line_num, line in enumerate(master_list[1:]):
line = line[:4] if self.air_valid else line[:3]
# Check if data points are continuous and sequential
try:
if not int(line[self.data_point_col]) == (int(prev_line[self.data_point_col]) + 1):
raise ValueError
except:
messagebox.showerror("Data Point Error",
"".join((print_file_name + "Error after data point " + str(prev_line[self.data_point_col]) +
". Data point number is not sequential with regard to previous data point.")))
return False
prev_datetime = niq_misc.get_datetime(self, prev_line)
cur_datetime = niq_misc.get_datetime(self, line)
datetime_diff = (cur_datetime - prev_datetime).seconds / 60
if datetime_diff == 0 or datetime_diff == self.time_interval:
start_found = True
if datetime_valid and start_found:
if cur_datetime == False:
return False
if datetime_diff != self.time_interval:
if not interval_clock > 0:
datetime_valid = False
else:
if datetime_diff == 0:
interval_time += 1
elif datetime_diff != 1:
datetime_valid = False
else:
if interval_time == interval_clock:
interval_time = 1
else:
datetime_valid = False
if not datetime_valid:
if self.show_warns_BV.get():
messagebox.showwarning("Date/time Warning",
"".join((print_file_name + "Discontinuous date/time found for data point " + line[self.data_point_col] +
". The program will continue, but this could cause inaccurate statistical output.")))
try:
float(line[self.egg_temper_col])
if line[self.egg_temper_col] == "":
raise ValueError
except:
messagebox.showerror("Temperature Error",
"".join((print_file_name + "Invalid temperature given for data point " + line[self.data_point_col] + ".")))
return False
if self.air_valid:
try:
if line[self.air_temper_col] == "":
self.air_valid = False
if self.show_warns_BV.get():
messagebox.showwarning("Air Temperature Warning",
"".join((print_file_name + "No air temperature detected for data point " + line[self.data_point_col] +
". Air temperatures will not be plotted or included in statistical output.")))
else:
try: float(line[self.air_temper_col])
except:
self.air_valid = False
if self.show_warns_BV.get():
messagebox.showwarning("Air Temperature Warning",
"".join((print_file_name + "Invalid air temperature detected for data point " + line[self.data_point_col] +
". Air temperatures will not be plotted or included in statistical output.")))
except IndexError:
self.air_valid = False
prev_line = line
return True
# flag
except Exception as e:
print(e)
print_file_name = ("File: " + os.path.basename(os.path.normpath(in_file)) + " \n\n")
messagebox.showerror("Unknown Error",
"".join((print_file_name + "There was an unidentifiable error with the provided input file. " +
"This is sometimes the result of \"extra\" cells in the input file.\n\n" +
"Please reference the NestIQ manual for details regarding proper input file format." +
" This can be accessed by clicking \"Help\" in the top right.")))
return False
def check_out_file(gui, entry, title):
"""
Checks if the name provided for a given output file is valid. This includes asking the user if
they want to override if a file with the same name already exists.
Args:
entry (tk.Entry): entry box being examined
title (string): how to reference the current entry box if an error message is triggered
"""
if entry.get().strip() == "":
messagebox.showerror((title + " Error"), "File name is empty.")
return False
if entry.get()[-1] == "/" or entry.get()[-1] == "\\":
messagebox.showerror((title + " Error"), "Directory provided but no file name.")
return False
# Check if plot file already exists and if so, ask to override
if entry == gui.plot_file_E:
if not re.search("(.html$)", gui.plot_file_E.get()):
new_name = (gui.plot_file_E.get() + ".html")
replace_entry(gui.plot_file_E, new_name)
if os.path.exists(entry.get()):
if messagebox.askyesno("Override?", ("The file \"" + entry.get() + "\" already exists. Do you want to override?")):
try:
os.remove(entry.get())
except:
os.remove(entry.get() + ".html")
else:
return False
# Check if output or compile statistics file already exists and if so, ask to override
if entry == gui.stats_file_E or entry == gui.multi_in_stats_file_E:
if not re.search("(.csv$)", gui.stats_file_E.get()):
new_name = (gui.stats_file_E.get() + ".csv")
replace_entry(gui.stats_file_E, new_name)
if not re.search("(.csv$)", gui.multi_in_stats_file_E.get()):
new_name = (gui.multi_in_stats_file_E.get() + ".csv")
replace_entry(gui.multi_in_stats_file_E, new_name)
if os.path.exists(entry.get()) or os.path.exists(entry.get() + ".csv"):
if not messagebox.askyesno("Override?", ("The file \"" + entry.get() + "\" already exists. Do you want to override?")):
return False
try:
os.remove(entry.get())
except:
try:
os.remove(entry.get() + ".csv")
except:
messagebox.showerror(("Override Error"), ("Could not override file. Please close \"" + entry.get() + "\" if open and try again."))
return False
if not os.path.exists(entry.get()):
try:
with open(entry.get(), "a+") as _:
pass
except:
messagebox.showerror((title + " Error"), "Invalid directory/file name.")
return False
os.remove(entry.get())
return True
def check_time(time, DN):
"""
Checks if times provided for daytime start and nighttime start are valid.
Args:
time (string): string provided in the entry box
DN (string): "day" or "night" depending on entry box being analyzed
"""
time_re = re.search("(\d+)(:)(\d+)", time)
show_default_error = False
# If time found, store hour and minute values
if time_re:
hour = int(time_re.group(1))
minute = int(time_re.group(3))
else: show_default_error = True
# Detects non-numerical characters (possibly due to use of 12hr, am/pm format)
if re.search("([^0-9:])", time):
messagebox.showerror("Start Time Error", (DN + " start time must be entered in 24 hr format."))
return False
# guard with time_re so hour/minute are only checked when an HH:MM pattern was actually found
elif time_re and (hour < 0 or hour > 23):
show_default_error = True
elif time_re and (minute < 0 or minute > 59):
show_default_error = True
if show_default_error:
messagebox.showerror("Start Time Error", ("Invalid " + DN + " start time."))
return False
return True
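# A minimal standalone sketch of the 24 hr validation logic used by check_time above
# (assumption: same regex and range checks, but without the tkinter message boxes), handy for quick testing:
def _is_valid_24h_sketch(time_str):
    """Hypothetical helper for illustration only; mirrors check_time's checks."""
    if re.search(r"[^0-9:]", time_str):
        return False
    time_re = re.search(r"(\d+):(\d+)", time_str)
    if time_re is None:
        return False
    hour, minute = int(time_re.group(1)), int(time_re.group(2))
    return 0 <= hour <= 23 and 0 <= minute <= 59
# e.g. _is_valid_24h_sketch("13:45") -> True, _is_valid_24h_sketch("1:05 pm") -> False, _is_valid_24h_sketch("25:00") -> False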
# Check output
try:
os.chdir(self.out_path_E.get())
except:
messagebox.showerror("Output Path Error",
"Provided output path could not be found. Ensure the path is to a directory not a file (path should end with a slash).")
return False
if not check_time(self.day_start_E.get(), "day") or not check_time(self.night_start_E.get(), "night"):
return False
try:
if not float(self.smoothing_radius_E.get()).is_integer():
raise ValueError
if not int(self.smoothing_radius_E.get()) >= 0:
messagebox.showerror("Data Smoothing Radius Error", "Data smoothing radius must be greater than or equal to zero.")
return False
except ValueError:
messagebox.showerror("Data Smoothing Radius Error", "Data smoothing radius must be an integer.")
return False
try:
if int(float(self.dur_thresh_E.get())) < 0:
messagebox.showerror("Duration Threshold Error", "Duration threshold cannot be less than zero.")
return False
except ValueError:
messagebox.showerror("Duration Threshold Error", "Invalid duration threshold (could not convert to integer).")
return False
if not check_input_file(self):
return False
if check_output:
if self.make_plot_BV.get():
if not check_out_file(self, self.plot_file_E, "Plot File"):
return False
if self.get_stats_BV.get():
if not check_out_file(self, self.stats_file_E, "Stats Output File"):
return False
if self.multi_in_stats_BV.get() and first_in:
if not check_out_file(self, self.multi_in_stats_file_E, "Compile Summary"):
return False
return True
def check_valid_plot_ops(self):
"""
Checks for valid configuration of all parameters housed on the Plot Options tab.
"""
# Check plot dimensions
if self.manual_plot_dims.get():
valid = True
try:
if int(self.plot_dim_x_E.get()) < 1 or int(self.plot_dim_y_E.get()) < 1:
valid = False
except:
valid = False
if not valid:
messagebox.showwarning("Plot Dimensions Warning",
("Provided plot | |
import numpy as np
from random import choice
## Note: Non-smooth surfaces or bad triangulations may lead to non-spiral orderings of the vertices.
## A common issue in badly triangulated surfaces is that some edges belong to more than two triangles; in that
## case the mathematical definition of the spiral is insufficient, and this version of the code randomly
## chooses two of the triangles in order to continue the inductive assignment of the order to the rest of the vertices.
def get_adj_trigs(A, F, reference_mesh, meshpackage = 'mpi-mesh'):
Adj = []
for x in A:
adj_x = []
dx = x.todense()
for i in range(x.shape[0]):
adj_x.append(dx[i].nonzero()[1])
Adj.append(adj_x)
if meshpackage =='trimesh':
mesh_faces = reference_mesh.faces
elif meshpackage =='mpi-mesh':
mesh_faces = reference_mesh.f
# Create Triangles List
trigs_full = [[] for i in range(len(Adj[0]))]
for t in mesh_faces:
u, v, w = t
trigs_full[u].append((u,v,w))
trigs_full[v].append((u,v,w))
trigs_full[w].append((u,v,w))
Trigs = [trigs_full]
for i,T in enumerate(F):
trigs_down = [[] for i in range(len(Adj[i+1]))]
for u,v,w in T:
trigs_down[u].append((u,v,w))
trigs_down[v].append((u,v,w))
trigs_down[w].append((u,v,w))
Trigs.append(trigs_down)
return Adj, Trigs
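# Minimal usage sketch for get_adj_trigs on a toy two-triangle mesh (hypothetical data;
# assumes scipy is available and meshpackage='trimesh', so only a .faces attribute is needed):
# import scipy.sparse as sp
# from types import SimpleNamespace
# faces = np.array([[0, 1, 2], [1, 3, 2]])
# adj = np.zeros((4, 4))
# for u, v, w in faces:
#     for a, b in ((u, v), (v, w), (w, u)):
#         adj[a, b] = adj[b, a] = 1
# A = [sp.csr_matrix(adj)]
# F = []  # no downsampled levels in this toy example
# Adj, Trigs = get_adj_trigs(A, F, SimpleNamespace(faces=faces), meshpackage='trimesh')
# # Adj[0][1] -> array of neighbours of vertex 1; Trigs[0][1] -> both triangles containing vertex 1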
def generate_spirals(step_sizes, M, Adj, Trigs, reference_points, dilation=None, random=False, meshpackage = 'mpi-mesh', counter_clockwise = True, nb_stds = 2):
Adj_spirals = []
for i in range(len(Adj)):
if meshpackage =='trimesh':
mesh_vertices = M[i].vertices
elif meshpackage =='mpi-mesh':
mesh_vertices = M[i].v
sp = get_spirals(mesh_vertices, Adj[i],Trigs[i],reference_points[i], n_steps=step_sizes[i],\
padding='zero', counter_clockwise = counter_clockwise, random = random)
Adj_spirals.append(sp)
print('spiral generation for hierarchy %d (%d vertices) finished' %(i,len(Adj_spirals[-1])))
## Dilated convolution
if dilation:
for i in range(len(dilation)):
dil = dilation[i]
dil_spirals = []
for j in range(len(Adj_spirals[i])):
s = Adj_spirals[i][j][:1] + Adj_spirals[i][j][1::dil]
dil_spirals.append(s)
Adj_spirals[i] = dil_spirals
# Calculate the lengths of spirals
# Use mean + nb_stds * std_dev (default 2) to capture ~97% of the data
L = []
for i in range(len(Adj_spirals)):
L.append([])
for j in range(len(Adj_spirals[i])):
L[i].append(len(Adj_spirals[i][j]))
L[i] = np.array(L[i])
spiral_sizes = []
for i in range(len(L)):
sz = L[i].mean() + nb_stds*L[i].std()
spiral_sizes.append(int(sz))
print('spiral sizes for hierarchy %d: %d' %(i,spiral_sizes[-1]))
# 1) fill with -1 (index to the dummy vertex, i.e the zero padding) the spirals with length smaller than the chosen one
# 2) Truncate larger spirals
spirals_np = []
for i in range(len(spiral_sizes)): #len(Adj_spirals)):
S = np.zeros((1,len(Adj_spirals[i])+1,spiral_sizes[i])) - 1
for j in range(len(Adj_spirals[i])):
S[0,j,:len(Adj_spirals[i][j])] = Adj_spirals[i][j][:spiral_sizes[i]]
#spirals_np.append(np.repeat(S,args['batch_size'],axis=0))
spirals_np.append(S)
return spirals_np, spiral_sizes, Adj_spirals
def distance(v,w):
return np.sqrt(np.sum(np.square(v-w)))
def single_source_shortest_path(V,E,source,dist=None,prev=None):
import heapq
if dist == None:
dist = [None for i in range(len(V))]
prev = [None for i in range(len(V))]
q = []
seen = set()
heapq.heappush(q,(0,source,None))
while len(q) > 0 and len(seen) < len(V):
d_,v,p = heapq.heappop(q)
if v in seen:
continue
seen.add(v)
prev[v] = p
dist[v] = d_
for w in E[v]:
if w in seen:
continue
dw = d_ + distance(V[v],V[w])
heapq.heappush(q,(dw,w,v))
return prev, dist
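# Usage sketch (hypothetical 4-vertex path graph; V holds 3D positions, E holds adjacency lists):
# V = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.], [3., 0., 0.]])
# E = [[1], [0, 2], [1, 3], [2]]
# prev, dist = single_source_shortest_path(V, E, 0)
# # dist -> [0.0, 1.0, 2.0, 3.0]; prev -> [None, 0, 1, 2]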
def get_spirals(mesh, adj, trig, reference_points, n_steps=1, padding='zero', counter_clockwise = True, random = False):
spirals = []
if not random:
heat_path = None
dist = None
for reference_point in reference_points:
heat_path,dist = single_source_shortest_path(mesh,adj,reference_point, dist, heat_path)
heat_source = reference_points
for i in range(mesh.shape[0]):
seen = set(); seen.add(i)
trig_central = list(trig[i]); A = adj[i]; spiral = [i]
# 1) First degree of freedom - choose starting point:
if not random:
if i in heat_source: # choose closest neighbor
shortest_dist = np.inf
init_vert = None
for neighbor in A:
d = np.sum(np.square(mesh[i] - mesh[neighbor]))
if d < shortest_dist:
shortest_dist = d
init_vert = neighbor
else: # on the shortest path to the reference point
init_vert = heat_path[i]
else:
# choose starting point:
# random for first ring
init_vert = choice(A)
# first ring
if init_vert is not None:
ring = [init_vert]; seen.add(init_vert)
else:
ring = []
while len(trig_central) > 0 and init_vert is not None:
cur_v = ring[-1]
cur_t = [t for t in trig_central if t in trig[cur_v]]
if len(ring) == 1:
orientation_0 = (cur_t[0][0]==i and cur_t[0][1]==cur_v) \
or (cur_t[0][1]==i and cur_t[0][2]==cur_v) \
or (cur_t[0][2]==i and cur_t[0][0]==cur_v)
if not counter_clockwise:
orientation_0 = not orientation_0
# 2) Second degree of freedom - 2nd point/orientation ambiguity
if len(cur_t) >=2:
# Choose the triangle that will direct the spiral counter-clockwise
if orientation_0:
# Third point in the triangle - next vertex in the spiral
third = [p for p in cur_t[0] if p!=i and p!=cur_v][0]
trig_central.remove(cur_t[0])
else:
third = [p for p in cur_t[1] if p!=i and p!=cur_v][0]
trig_central.remove(cur_t[1])
ring.append(third)
seen.add(third)
# 3) Stop if the spiral hits the boundary in the first point
elif len(cur_t) == 1:
break
else:
# 4) Unique ordering for the rest of the points (3rd onwards)
if len(cur_t) >= 1:
# Third point in the triangle - next vertex in the spiral
third = [p for p in cur_t[0] if p!= cur_v and p!=i][0]
# Don't append the spiral if the vertex has been visited already
# (happens when the first ring is completed and the spiral returns to the central vertex)
if third not in seen:
ring.append(third)
seen.add(third)
trig_central.remove(cur_t[0])
# 4) Stop when the spiral hits the boundary (the already visited triangle is no longer in the list): First half of the spiral
elif len(cur_t) == 0:
break
rev_i = len(ring)
if init_vert is not None:
v = init_vert
if orientation_0 and len(ring)==1:
reverse_order = False
else:
reverse_order = True
need_padding = False
# 5) If on the boundary: restart from the initial vertex towards the other direction,
# but put the vertices in reverse order: Second half of the spiral
# One exception: when the starting point is on the boundary and the 2nd point already goes in the desired direction
while len(trig_central) > 0 and init_vert is not None:
cur_t = [t for t in trig_central if t in trig[v]]
if len(cur_t) != 1:
break
else:
need_padding = True
third = [p for p in cur_t[0] if p!=v and p!=i][0]
trig_central.remove(cur_t[0])
if third not in seen:
ring.insert(rev_i,third)
seen.add(third)
if not reverse_order:
rev_i = len(ring)
v = third
# Add a dummy vertex between the first half of the spiral and the second half - similar to zero padding in a 2d grid
if need_padding:
ring.insert(rev_i,-1)
"""
ring_copy = list(ring[1:])
rev_i = rev_i - 1
for z in range(len(ring_copy)-2):
if padding == 'zero':
ring.insert(rev_i,-1) # -1 is our sink node
elif padding == 'mirror':
ring.insert(rev_i,ring_copy[rev_i-z-1])
"""
spiral += ring
# Next rings:
for step in range(n_steps-1):
next_ring = set([]); next_trigs = set([]);
if len(ring) == 0:
break
base_triangle = None
init_vert = None
# Find next hop neighbors
for w in ring:
if w!=-1:
for u in adj[w]:
if u not in seen:
next_ring.add(u)
# Find triangles that contain two outer ring nodes. That way one can follow the spiral ordering in the same way
# as done in the first ring: by simply discarding the already visited triangles+nodes.
for u in next_ring:
for tr in trig[u]:
if len([x for x in tr if x in seen]) == 1:
next_trigs.add(tr)
elif ring[0] in tr and ring[-1] in tr:
base_triangle = tr
# Normal case: starting point in the second ring ->
# the 3rd point in the triangle that connects the 1st and the last point in the 1st ring with the 2nd ring
if base_triangle is not None:
init_vert = [x for x in base_triangle if x != ring[0] and x != ring[-1]]
# Make sure that the initial point is appropriate for starting the spiral,
# i.e. it is connected to at least one of the next candidate vertices
if len(list(next_trigs.intersection(set(trig[init_vert[0]]))))==0:
init_vert = None
# If no such triangle exists (one of the vertices is dummy,
# or both the first and the last vertex take part in a specific type of boundary)
# or the init vertex is not connected with the rest of the ring -->
# Find
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_data_asset(self, catalog_id, data_asset_key, **kwargs):
"""
Deletes a specific data asset identified by its key.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_data_asset got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
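# Usage sketch (hypothetical identifiers; assumes an authenticated data catalog client instance,
# e.g. oci.data_catalog.DataCatalogClient, and that the target data asset exists):
# response = client.delete_data_asset(
#     catalog_id="ocid1.datacatalog.oc1..exampleuniqueID",
#     data_asset_key="example-data-asset-key",
#     if_match='"etag-from-a-previous-get"')
# # response.data is None for this operation; a 2xx response.status indicates success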
def delete_data_asset_tag(self, catalog_id, data_asset_key, tag_key, **kwargs):
"""
Deletes a specific data asset tag.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str tag_key: (required)
Unique tag key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/tags/{tagKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_data_asset_tag got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key,
"tagKey": tag_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_entity(self, catalog_id, data_asset_key, entity_key, **kwargs):
"""
Deletes a specific data entity.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str entity_key: (required)
Unique entity key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/entities/{entityKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_entity got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key,
"entityKey": entity_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_entity_tag(self, catalog_id, data_asset_key, entity_key, tag_key, **kwargs):
"""
Deletes a specific entity tag.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str entity_key: (required)
Unique entity key.
:param str tag_key: (required)
Unique tag key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/entities/{entityKey}/tags/{tagKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_entity_tag got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key,
"entityKey": entity_key,
"tagKey": tag_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "dev.txt"))
examples = self._create_examples(lines, "dev")
return examples
def get_test_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "test.txt"))
examples = self._create_examples(lines, "test")
return examples
@staticmethod
def get_labels():
"""See base class."""
labels = range(130)
labels = [str(label) for label in labels]
return labels
class ATISIntentProcessor(DataProcessor):
"""Processor for the ATIS intent data set."""
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if len(line) != 2:
print("data format error: %s" % "\t".join(line))
print("data row contains two parts: label \t conversation_content")
continue
guid = "%s-%d" % (set_type, i)
text_a = line[1]
text_a = tokenization.convert_to_unicode(text_a)
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(
guid=guid, text_a=text_a, label=label))
return examples
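# Example input row for _create_examples above (hypothetical): "2\ti want to fly from boston to denver"
# i.e. an integer intent label and the utterance text separated by a tab, as enforced by the length check above.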
def get_train_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "train.txt"))
examples = self._create_examples(lines, "train")
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "dev.txt"))
examples = self._create_examples(lines, "dev")
return examples
def get_test_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "test.txt"))
examples = self._create_examples(lines, "test")
return examples
@staticmethod
def get_labels():
"""See base class."""
labels = range(26)
labels = [str(label) for label in labels]
return labels
class DSTC2Processor(DataProcessor):
"""Processor for the DSTC2 data set."""
def _create_turns(self, conv_example):
"""create multi turn dataset"""
samples = []
max_turns = 20
for i in range(len(conv_example)):
conv_turns = conv_example[max(i - max_turns, 0): i + 1]
conv_info = "\1".join([sample[0] for sample in conv_turns])
samples.append((conv_info.split('\1'), conv_example[i][1]))
return samples
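# Windowing sketch (hypothetical turns): for
# conv_example = [("hi", ["s1"]), ("ok", ["s2"])]
# _create_turns returns [(["hi"], ["s1"]), (["hi", "ok"], ["s2"])],
# i.e. each sample pairs the dialogue history (up to max_turns turns) with the current turn's state labels.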
def _create_examples(self, lines, set_type):
"""Creates examples for multi-turn dialogue sets."""
examples = []
conv_id = -1
index = 0
conv_example = []
for (i, line) in enumerate(lines):
if len(line) != 3:
print("data format error: %s" % "\t".join(line))
print("data row contains three parts: conversation_content \t question \1 answer \t state1 state2 state3......")
continue
conv_no = line[0]
text_a = line[1]
label_list = line[2].split()
if conv_no != conv_id and i != 0:
samples = self._create_turns(conv_example)
for sample in samples:
guid = "%s-%s" % (set_type, index)
index += 1
history = sample[0]
dst_label = sample[1]
examples.append(InputExample(guid=guid, text_a=history, label=dst_label))
conv_example = []
conv_id = conv_no
if i == 0:
conv_id = conv_no
conv_example.append((text_a, label_list))
if conv_example:
samples = self._create_turns(conv_example)
for sample in samples:
guid = "%s-%s" % (set_type, index)
index += 1
history = sample[0]
dst_label = sample[1]
examples.append(InputExample(guid=guid, text_a=history, label=dst_label))
return examples
def get_train_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "train.txt"))
examples = self._create_examples(lines, "train")
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "dev.txt"))
examples = self._create_examples(lines, "dev")
return examples
def get_test_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "test.txt"))
examples = self._create_examples(lines, "test")
return examples
@staticmethod
def get_labels():
"""See base class."""
labels = range(217)
labels = [str(label) for label in labels]
return labels
class MULTIWOZProcessor(DataProcessor):
"""Processor for the MULTIWOZ data set."""
def _create_turns(self, conv_example):
"""create multi turn dataset"""
samples = []
max_turns = 2
for i in range(len(conv_example)):
prefix_turns = conv_example[max(i - max_turns, 0): i]
conv_info = "\1".join([turn[0] for turn in prefix_turns])
current_turns = conv_example[i][0]
samples.append((conv_info.split('\1'), current_turns.split('\1'), conv_example[i][1]))
return samples
def _create_examples(self, lines, set_type):
"""Creates examples for multi-turn dialogue sets."""
examples = []
conv_id = -1
index = 0
conv_example = []
for (i, line) in enumerate(lines):
conv_no = line[0]
text_a = line[2]
label_list = line[1].split()
if conv_no != conv_id and i != 0:
samples = self._create_turns(conv_example)
for sample in samples:
guid = "%s-%s" % (set_type, index)
index += 1
history = sample[0]
current = sample[1]
dst_label = sample[2]
examples.append(InputExample(guid=guid, text_a=history, text_b=current, label=dst_label))
conv_example = []
conv_id = conv_no
if i == 0:
conv_id = conv_no
conv_example.append((text_a, label_list))
if conv_example:
samples = self._create_turns(conv_example)
for sample in samples:
guid = "%s-%s" % (set_type, index)
index += 1
history = sample[0]
current = sample[1]
dst_label = sample[2]
examples.append(InputExample(guid=guid, text_a=history, text_b=current, label=dst_label))
return examples
def get_train_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "train.txt"))
examples = self._create_examples(lines, "train")
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "dev.txt"))
examples = self._create_examples(lines, "dev")
return examples
def get_test_examples(self, data_dir):
"""See base class."""
examples = []
lines = self._read_tsv(os.path.join(data_dir, "test.txt"))
examples = self._create_examples(lines, "test")
return examples
@staticmethod
def get_labels():
"""See base class."""
labels = range(722)
labels = [str(label) for label in labels]
return labels
def create_dialogue_examples(conv):
"""Creates dialogue sample"""
samples = []
for i in range(len(conv)):
cur_txt = "%s : %s" % (conv[i][2], conv[i][3])
pre_txt = ["%s : %s" % (c[2], c[3]) for c in conv[max(0, i - 5): i]]
suf_txt = ["%s : %s" % (c[2], c[3]) for c in conv[i + 1: min(len(conv), i + 3)]]
sample = [conv[i][1], pre_txt, cur_txt, suf_txt]
samples.append(sample)
return samples
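# Usage sketch (hypothetical rows of [conversation_id, label, caller, utterance]):
# conv = [["c1", "greet", "A", "hello"],
#         ["c1", "ask", "B", "how can I help"],
#         ["c1", "book", "A", "I need a table"]]
# create_dialogue_examples(conv)[1]
# # -> ["ask", ["A : hello"], "B : how can I help", ["A : I need a table"]]
# # i.e. [label, previous turns (up to 5), current turn, following turns (up to 2)]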
def create_multi_turn_examples(lines, set_type):
"""Creates examples for multi-turn dialogue sets."""
conv_id = -1
examples = []
conv_example = []
index = 0
for (i, line) in enumerate(lines):
if len(line) != 4:
print("data format error: %s" % "\t".join(line))
print("data row contains four parts: conversation_id \t label \t caller \t conversation_content")
continue
tokens = line
conv_no = tokens[0]
if conv_no != conv_id and i != 0:
samples = create_dialogue_examples(conv_example)
for sample in samples:
guid = "%s-%s" % (set_type, index)
index += 1
label = sample[0]
text_a = sample[1]
text_b = sample[2]
text_c = sample[3]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c, label=label))
conv_example = []
conv_id = conv_no
if i == 0:
conv_id = conv_no
conv_example.append(tokens)
if conv_example:
samples = create_dialogue_examples(conv_example)
for sample in samples:
guid = "%s-%s" % (set_type, index)
index += 1
label = sample[0]
text_a = sample[1]
text_b = sample[2]
text_c = sample[3]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c, label=label))
return examples
def convert_tokens(tokens, sep_id, tokenizer):
"""Converts tokens to ids"""
tokens_ids = []
if not tokens:
return tokens_ids
if isinstance(tokens, list):
for text in tokens:
tok_text = tokenizer.tokenize(text)
ids = tokenizer.convert_tokens_to_ids(tok_text)
tokens_ids.extend(ids)
tokens_ids.append(sep_id)
tokens_ids = tokens_ids[: -1]
else:
tok_text = tokenizer.tokenize(tokens)
tokens_ids = tokenizer.convert_tokens_to_ids(tok_text)
return tokens_ids
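# Sketch (hypothetical whitespace tokenizer mapping "hello" -> 7 and "world" -> 8, with sep_id=1):
# convert_tokens(["hello world", "hello"], 1, toy_tokenizer) -> [7, 8, 1, 7]
# i.e. each utterance in a list is tokenized separately and the utterances are joined by the
# separator id, with the trailing separator dropped; a plain string is tokenized as-is.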
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer, task_name):
"""Converts a single DA `InputExample` into a single `InputFeatures`."""
label_map = {}
SEP = 102
CLS = 101
if task_name == 'udc':
INNER_SEP = 1
limit_length = 60
elif task_name == 'swda':
INNER_SEP = 1
limit_length = 50
elif task_name == 'mrda':
INNER_SEP = 1
limit_length = 50
elif task_name == 'atis_intent':
INNER_SEP = -1
limit_length = -1
elif task_name == 'atis_slot':
INNER_SEP = -1
limit_length = -1
elif task_name == 'dstc2':
INNER_SEP = 1
limit_length = -1
elif task_name == 'dstc2_asr':
INNER_SEP = 1
limit_length = -1
elif task_name == 'multi-woz':
INNER_SEP = 1
limit_length = 200
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = example.text_a
tokens_b = example.text_b
tokens_c = example.text_c
tokens_a_ids = convert_tokens(tokens_a, INNER_SEP, tokenizer)
tokens_b_ids = convert_tokens(tokens_b, INNER_SEP, tokenizer)
tokens_c_ids = convert_tokens(tokens_c, INNER_SEP, tokenizer)
if tokens_b_ids:
tokens_b_ids = tokens_b_ids[:min(limit_length, len(tokens_b_ids))]
else:
if len(tokens_a_ids) > max_seq_length - 2:
tokens_a_ids = tokens_a_ids[len(tokens_a_ids) - max_seq_length + 2:]
if not tokens_c_ids:
if len(tokens_a_ids) > max_seq_length - len(tokens_b_ids) - 3:
tokens_a_ids = tokens_a_ids[len(tokens_a_ids) - max_seq_length + len(tokens_b_ids) + 3:]
else:
if len(tokens_a_ids) + len(tokens_b_ids) + len(tokens_c_ids) > max_seq_length - 4:
left_num = max_seq_length - len(tokens_b_ids) - 4
if len(tokens_a_ids) > len(tokens_c_ids):
suffix_num = int(left_num / 2)
tokens_c_ids = tokens_c_ids[: min(len(tokens_c_ids), suffix_num)]
prefix_num = left_num - len(tokens_c_ids)
tokens_a_ids = tokens_a_ids[max(0, len(tokens_a_ids) - prefix_num):]
else:
if not tokens_a_ids:
tokens_c_ids = tokens_c_ids[max(0, len(tokens_c_ids) - left_num):]
else:
prefix_num = int(left_num / 2)
tokens_a_ids = tokens_a_ids[max(0, len(tokens_a_ids) - prefix_num):]
suffix_num = left_num - len(tokens_a_ids)
tokens_c_ids = tokens_c_ids[: min(len(tokens_c_ids), suffix_num)]
input_ids = []
segment_ids = []
input_ids.append(CLS)
segment_ids.append(0)
input_ids.extend(tokens_a_ids)
segment_ids.extend([0] * len(tokens_a_ids))
input_ids.append(SEP)
segment_ids.append(0)
if tokens_b_ids:
]
way_part2 = way.way[idx:]
break
if idx > 0 and way.way[idx-1].dist > p.dist:
# ref_way.way[0] point doesn't exist, insert it
way_part1 = way.way[:idx] + [ ref_way.way[-1] ]
way_part2 = [ ref_way.way[0] ] + way.way[idx:]
break
way2 = way.make_copy_new_way(way_part2)
way.way = way_part1
r1, w1 = self._adapt_way_into_reference_geometry(way, data_src_name)
r2, w2 = self._adapt_way_into_reference_geometry(way2, data_src_name)
assert r1 == ref_way and r2 == ref_way and len(w1) == 1 and len(w2) == 1
return ref_way, [ w1[0], w2[0] ]
# way could have different starting point in the closed loop, make sure they are the same
min_idx = 0
for idx, p in enumerate(way.way):
if p.dist < way.way[min_idx].dist:
min_idx = idx
if min_idx != 0:
way.way = way.way[min_idx:] + way.way[:min_idx]
if closed_loop:
if way.way[0] != ref_way.way[0]:
assert len(way.way) < len(ref_way.way)
way.way.insert(0, ref_way.way[0])
way.way.append(ref_way.way[-1])
assert way.way[0] == way.way[-1]
elif len(way.way) > 2 and way.way[-1] == ref_way.way[-1] and way.way[-1].dist < way.way[-2].dist:
# P-shaped loop, ie endpoint attaches to midpoint on the own way.
# Very rare (seen in Stockholm dataset), and sort of illegal
_log.warning(f"endpoint attaches to own midpoint for RLID {way.rlid}")
way.way[-1].dist = ref_way.way[-1].dist
if self._perform_self_testing:
self._test_way_dist(way)
# if this way has fewer points than the reference geometry it snaps to (happens in
# some cases), we need to insert the missing points. We can assume that:
# - matching ways are oriented in the same direction
# - matching ways have the same RLID
# - reference geometry has each way for RLID in its full length, ie it should cover
# the full length of the inserted way
ref_it = iter(ref_way.way)
ref_p = next(ref_it)
while ref_p != way.way[0]:
try:
ref_p = next(ref_it)
except StopIteration as stop_iteration:
raise RuntimeError("Could not find start %s of way %s in reference geometry (does it extend reference geometry?)" % (latlon_str(way.way[0]), way.rlid)) from stop_iteration
assert ref_p == way.way[0]
new_way = []
#_log.info(way.rlid)
#_log.info("ref_way", ref_way.way)
#_log.info("way.way", way.way)
for p in way.way:
while ref_p != p:
assert p.dist >= 0
new_way.append(ref_p)
ref_p = next(ref_it)
new_way.append(p)
try:
ref_p = next(ref_it)
except StopIteration:
assert ref_p == p
if len(new_way) > len(way.way):
#_log.info("Added points to way of RLID %s (%s => %s)" % (way.rlid, len(way.way), len(new_way)))
way.way = new_way
return ref_way, [ way ]
def remove_short_sub_segments(self):
_log.info("Removing short sub-segments...")
# "Remove" in this context means merging with neighbor segment
remove_count = 0
for segs in list(self.way_db.values()):
new_segs = []
for idx, seg in enumerate(segs):
length, _ = calc_way_length(seg.way)
if length > 8.0:
new_segs.append(seg)
continue
prev_length = 0
next_length = 0
if len(new_segs) > 0 and new_segs[-1].way[-1] == seg.way[0]:
prev_length, _ = calc_way_length(new_segs[-1].way)
next_idx = (idx+1) % len(segs)
if segs[next_idx].way[0] == seg.way[-1]:
next_length, _ = calc_way_length(segs[next_idx].way)
if prev_length == 0 and next_length == 0:
# unconnected short segment (hopefully rare)
if len(segs) == 1:
_log.debug(f"RLID {seg.rlid} is an alone short segment ({length:g}), must be kept")
else:
_log.debug(f"RLID {seg.rlid} has a short unconnected segment ({length:g}), must be kept")
new_segs.append(seg)
continue
if length > 2.0:
# for longer stubs, we only remove them if they are on the start/end and
# only if only two points. This metric is based on what is seen in NVDB
# data.
if (prev_length != 0 and next_length != 0) or len(seg.way) > 2 or keep_end_stub(seg):
new_segs.append(seg)
continue
# we can mess up dist value of points here for closed loops, but since
# this is run at the end we don't care
if prev_length > next_length:
new_segs[-1].way += seg.way[1:]
else:
segs[next_idx].way = seg.way[:-1] + segs[next_idx].way
remove_count += 1
if len(new_segs) < len(segs):
if len(new_segs) == 0:
del self.way_db[segs[0].rlid]
else:
self.way_db[segs[0].rlid] = new_segs
_log.info(f"done ({remove_count} short sub-segments were removed)")
self.join_segments_with_same_tags()
def _get_way(self, rlid, point):
segs = self.way_db.get(rlid, [])
for seg in segs:
for p in seg.way:
if p == point:
return seg
return None
def test_segments(self):
for segs in self.way_db.values():
self._test_segment(segs)
def setup_geometry_search(self):
_log.info("Setting up search data structure for all geometry...")
self.gs = GeometrySearch(GEO_FILL_LENGTH, use_dist=False, perform_self_testing=self._perform_self_testing)
self.gs.insert_waydb(self.way_db)
_log.info("done")
def get_endpoint_map(self):
endpoints = {}
for segs in self.way_db.values():
for seg in segs:
for p in [ seg.way[0], seg.way[-1] ]:
if p in endpoints:
endpoints[p].append(seg)
else:
endpoints[p] = [ seg ]
return endpoints
@staticmethod
def _join_rlid_pick_best_matching_ways(ep, endpoints):
ways = endpoints[ep]
if len(ways) == 1:
# no connections
return None, None
max_angle = -1
best_way = None
for w1 in ways:
w1_closed = w1.way[0] == w1.way[-1]
if w1.way[0] == ep:
p1 = w1.way[1]
w1_start = True
else:
p1 = w1.way[-2]
w1_start = False
for w2 in ways:
if w1 == w2 or w1.tags != w2.tags:
continue
w2_closed = w2.way[0] == w2.way[-1]
if w2.way[0] == ep:
p3 = w2.way[1]
w2_start = True
else:
p3 = w2.way[-2]
w2_start = False
if w1_start == w2_start and not way_may_be_reversed(w1) and not way_may_be_reversed(w2):
# one way must be reversed, but none can be reversed
# if one way is closed, we can swap start/end to recover, otherwise skip this pair
if w1_closed:
if w1_start:
p1 = w1.way[-2]
else:
p1 = w1.way[1]
elif w2_closed:
if w2_start:
p3 = w2.way[-2]
else:
p3 = w2.way[1]
else:
continue
# calculate the angle at ep between the vectors ep->p1 and ep->p3
xa = p1.x - ep.x
ya = p1.y - ep.y
xb = p3.x - ep.x
yb = p3.y - ep.y
denom = math.sqrt(xa*xa + ya*ya) * math.sqrt(xb*xb + yb*yb)
if denom != 0:
q = (xa * xb + ya * yb) / denom
if q < -1:
# this can happen due to precision limitation, -1.0000000000000002 seen in tests
angle = 180
elif q > 1:
angle = 0
else:
angle = math.acos((xa * xb + ya * yb) / denom) * 180 / math.pi
else:
angle = 0
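# A straight-through continuation yields an angle near 180 degrees, so the pair forming the
# widest (most collinear) junction is preferred; sharp junctions (30 degrees or less) are
# rejected below as implausible joins.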
if angle > max_angle:
max_angle = angle
if max_angle > 30:
best_way = (w1, w2)
else:
_log.debug(f"Skipping extreme angle {angle} between {w1.rlid} {w2.rlid}")
if best_way is None:
return None, None
_log.debug(f"Max angle {max_angle} for {best_way[0].rlid} to {best_way[1].rlid}")
return best_way[0], best_way[1]
def _remove_seg_before_join(self, seg, endpoints):
segs = self.way_db[seg.rlid]
segs.remove(seg)
if len(segs) == 0:
_log.debug(f"RLID {seg.rlid} completely removed when joining with other segment")
del self.way_db[seg.rlid]
endpoints.remove(seg.way[0], seg)
if seg.way[-1] != seg.way[0]: # closed loop special case
endpoints.remove(seg.way[-1], seg)
def _join_rlid(self, seg, endpoints, directional_nodes):
rlid_join_count = 0
ep_idx = -1
consecutive_fails = 0
while consecutive_fails < 2:
if ep_idx == -1:
ep_idx = 0
connecting_ep_idx = -1
else:
ep_idx = -1
connecting_ep_idx = 0
ep = seg.way[ep_idx]
w1, w2 = self._join_rlid_pick_best_matching_ways(ep, endpoints)
if seg not in (w1, w2):
consecutive_fails += 1
continue
consecutive_fails = 0
if w1 == seg:
join_way = w2
else:
join_way = w1
self._remove_seg_before_join(join_way, endpoints)
self._remove_seg_before_join(seg, endpoints)
if join_way.way[connecting_ep_idx] != ep and seg.way[0] != seg.way[-1]:
# reversing required
if way_may_be_reversed(seg) and way_may_be_reversed(join_way):
l1, _ = calc_way_length(seg.way)
l2, _ = calc_way_length(join_way.way)
reverse_join_way = l1 >= l2
elif way_may_be_reversed(join_way):
reverse_join_way = True
else:
reverse_join_way = False
assert way_may_be_reversed(seg)
if reverse_join_way:
_log.debug(f"Reversing joining RLID {join_way.rlid}")
reverse_way(join_way, directional_nodes)
else:
_log.debug(f"Reversing base RLID {seg.rlid}")
reverse_way(seg, directional_nodes)
if ep_idx == 0:
ep_idx = -1
else:
ep_idx = 0
# create new RLID by joining the current, sort them to get repeatable result
new_rlid = seg.rlid.split(';')
new_rlid.append(join_way.rlid)
new_rlid.sort()
new_rlid = ";".join(new_rlid)
rlid_join_count += 1
if seg.way[0] == join_way.way[-1]:
seg.way.pop(0)
seg.way = join_way.way + seg.way
elif seg.way[-1] == join_way.way[0]:
join_way.way.pop(0)
seg.way += join_way.way
else:
_log.error(f"{seg.rlid}, {seg.way}")
_log.error(f"{join_way.rlid}, {join_way.way}")
raise RuntimeError("Disconnected segments cannot be joined")
join_way.way = None
seg.rlid = new_rlid
if new_rlid in self.way_db:
self.way_db[new_rlid].append(seg)
else:
_log.debug(f"Inserted joined RLID {new_rlid}")
self.way_db[new_rlid] = [ seg ]
endpoints.insert(seg.way[0], seg)
endpoints.insert(seg.way[-1], seg)
return rlid_join_count
def join_segments_with_same_tags(self, join_rlid=False):
if join_rlid:
_log.info("Joining segments with same tags even if different RLID...")
else:
_log.info("Joining RLID segments with same tags...")
join_count = 0
for segs in self.way_db.values():
it = iter(segs)
prev = next(it)
nsegs = [prev]
for seg in it:
lastseg = nsegs[-1]
if len(lastseg.way) < 2:
raise RuntimeError("Short way")
if len(seg.way) < 2:
raise RuntimeError("Short way %s %s" % (seg, segs))
if lastseg.way[-1] == seg.way[0] and lastseg.tags == seg.tags:
join_count += 1
seg.way.pop(0)
lastseg.way += seg.way
for cell in neuronDict[neuron]["class.ids"].split(","):
specificTissue, generalTissue, classTissue, matchTissue = annotationDict[cell]["specific.tissue"], annotationDict[cell]["general.tissue"], annotationDict[cell]["class.tissue"], annotationDict[cell]["match.tissue"]
if not cell in cellMatrix[neuron]:
cellMatrix[neuron][cell] = 0
if not specificTissue in specificMatrix[neuron]:
specificMatrix[neuron][specificTissue] = 0
if not generalTissue in generalMatrix[neuron]:
generalMatrix[neuron][generalTissue] = 0
if not classTissue in classMatrix[neuron]:
classMatrix[neuron][classTissue] = 0
if not matchTissue in matchMatrix[neuron]:
matchMatrix[neuron][matchTissue] = 0
cellList.append(cell)
cellMatrix[neuron][cell] += binaryMatrix[neuron][cell]
specificMatrix[neuron][specificTissue] += binaryMatrix[neuron][cell]
generalMatrix[neuron][generalTissue] += binaryMatrix[neuron][cell]
classMatrix[neuron][classTissue] += binaryMatrix[neuron][cell]
matchMatrix[neuron][matchTissue] += binaryMatrix[neuron][cell]
cellList = sorted(list(set(cellList)))
# Note: The above dictionaries record how many of the cell ids
# in a given neuron correspond to a given tissue.
# prepare class tallies for normalization:
specificTallies, generalTallies, classTallies, matchTallies = dict(), dict(), dict(), dict()
for cell in cellList:
if not annotationDict[cell]["specific.tissue"] in specificTallies:
specificTallies[annotationDict[cell]["specific.tissue"]] = 0
if not annotationDict[cell]["general.tissue"] in generalTallies:
generalTallies[annotationDict[cell]["general.tissue"]] = 0
if not annotationDict[cell]["class.tissue"] in classTallies:
classTallies[annotationDict[cell]["class.tissue"]] = 0
if not annotationDict[cell]["match.tissue"] in matchTallies:
matchTallies[annotationDict[cell]["match.tissue"]] = 0
specificTallies[annotationDict[cell]["specific.tissue"]] += 1
generalTallies[annotationDict[cell]["general.tissue"]] += 1
classTallies[annotationDict[cell]["class.tissue"]] += 1
matchTallies[annotationDict[cell]["match.tissue"]] += 1
# Note: The above tallies record the number of cells (observed,
# in neurons) that correspond to each tissue.***
# prepare output files:
f_output = open(compositionpath + "mapcells_composition_codes.txt", "w")
c_output = open(compositionpath + "mapcells_composition_cellular.txt", "w")
s_output = open(compositionpath + "mapcells_composition_specific.txt", "w")
g_output = open(compositionpath + "mapcells_composition_general.txt", "w")
l_output = open(compositionpath + "mapcells_composition_class.txt", "w")
m_output = open(compositionpath + "mapcells_composition_match.txt", "w")
# print out headers:
print >>f_output, "\t".join(["neuron", "id", "fraction.ids"])
print >>c_output, "\t".join(["neuron", "id", "id.found", "id.cells", "fraction.ids", "fraction.sum", "fraction.max", "fraction.nrm", "pvalue", "pvalue.adj", "score"])
print >>s_output, "\t".join(["neuron", "id", "id.found", "id.cells", "fraction.ids", "fraction.sum", "fraction.max", "fraction.nrm", "pvalue", "pvalue.adj", "score"])
print >>g_output, "\t".join(["neuron", "id", "id.found", "id.cells", "fraction.ids", "fraction.sum", "fraction.max", "fraction.nrm", "pvalue", "pvalue.adj", "score"])
print >>l_output, "\t".join(["neuron", "id", "id.found", "id.cells", "fraction.ids", "fraction.sum", "fraction.max", "fraction.nrm", "pvalue", "pvalue.adj", "score"])
print >>m_output, "\t".join(["neuron", "id", "id.found", "id.cells", "fraction.ids", "fraction.sum", "fraction.max", "fraction.nrm", "pvalue", "pvalue.adj", "score"])
# Note: We will now output the following information:
# id.found : is ID found in neuron?
# id.cells : number of cells (diversity) that match ID.
# fraction.ids: fraction of ID diversity in neuron.
# fraction.sum: fraction of cellular diversity in neuron that matches ID.
# fraction.rat: fraction of cellular diversity in neuron that matches ID, normalized by the representation of the ID.
# fraction.max: fraction of cellular diversity in neuron as normalized by the ID with the highest cellular diversity in neuron.
# fraction.nrm: fraction of cellular diversity in neuron as normalized by the total number of cells with said ID.
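# As computed in the export loops below: fraction.sum = count/total,
# fraction.max = count/maxxx and fraction.nrm = count/tally, where count is
# the number of matching cells in the neuron, total/maxxx summarize the
# neuron's matrix row, and tally is the background count for the ID.
# Note that fraction.rat is described above but does not appear among the
# column headers actually written to the output files.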
# determine missed tissues:
print
specificMissed = set(specificTissues).difference(set(specificTallies.keys()))
generalMissed = set(generalTissues).difference(set(generalTallies.keys()))
classMissed = set(classTissues).difference(set(classTallies.keys()))
matchMissed = set(matchTissues).difference(set(matchTallies.keys()))
print "Specific tissues not found:", str(len(specificMissed)) + " (" + str(len(specificTissues)) + ") ; " + ",".join(sorted(specificMissed))
print "General tissues not found:", str(len(generalMissed)) + " (" + str(len(generalTissues)) + ") ; " + ",".join(sorted(generalMissed))
print "Class tissues not found:", str(len(classMissed)) + " (" + str(len(classTissues)) + ") ; " + ",".join(sorted(classMissed))
print "Match tissues not found:", str(len(matchMissed)) + " (" + str(len(matchTissues)) + ") ; " + ",".join(sorted(matchMissed))
print
# export the fractions:
print "Exporting representation per neuron..."
for neuron in sorted(neuronDict.keys()):
if neuron in codeDict:
# export factor signals:
index = 0
for code in codes:
print >>f_output, "\t".join(map(str, [neuron, code, codeDict[neuron][index]]))
index += 1
# export cell counts:
for cell in cellList:
adjust = len(neuronDict.keys())*len(cellList)
types = len(cellMatrix[neuron].keys())
total = sum(cellMatrix[neuron].values())
maxxx = max(cellMatrix[neuron].values())
if cell in cellMatrix[neuron]:
count = float(cellMatrix[neuron][cell])
index = 1
else:
count = 0
index = 0
print >>c_output, "\t".join(map(str, [neuron, cell, index, count, float(index)/types, float(count)/total, float(count)/maxxx, 1, 1, 1, 0]))
# export specific tissue enrichment:
for specificTissue in sorted(specificTallies.keys()):
types = len(specificMatrix[neuron].keys())
total = sum(specificMatrix[neuron].values())
maxxx = max(specificMatrix[neuron].values())
tally = specificTallies[specificTissue]
if specificTissue in specificMatrix[neuron]:
count = float(specificMatrix[neuron][specificTissue])
index = 1
else:
count = 0
index = 0
adjust = len(neuronDict.keys())*len(specificTallies.keys())
universe = sum(specificTallies.values())
pvalue = hyper.fishers(count, universe, total, tally, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
score = hyper.directional(count, universe, total, tally, adjust=adjust)
print >>s_output, "\t".join(map(str, [neuron, specificTissue, index, count, float(index)/types, float(count)/total, float(count)/maxxx, float(count)/tally, pvalue, adjPvalue, score]))
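# The statistics above come from the external "hyper" module; a minimal sketch
# of what hyper.fishers(count, universe, total, tally, method="right")
# presumably computes (an assumption, since that module is not shown here) is
# a right-tailed Fisher's exact test on the 2x2 table of neuron membership
# versus tissue identity, e.g. with scipy:
#
#     from scipy.stats import fisher_exact
#     table = [[count, tally - count],
#              [total - count, universe - tally - total + count]]
#     _, pvalue = fisher_exact(table, alternative="greater")
#
# hyper.limit(pvalue*adjust) then appears to cap the Bonferroni-style adjusted
# p-value at 1, and hyper.directional presumably returns a signed
# enrichment/depletion score.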
# export general tissue enrichment:
for generalTissue in sorted(generalTallies.keys()):
types = len(generalMatrix[neuron].keys())
total = sum(generalMatrix[neuron].values())
maxxx = max(generalMatrix[neuron].values())
tally = generalTallies[generalTissue]
if generalTissue in generalMatrix[neuron]:
count = float(generalMatrix[neuron][generalTissue])
index = 1
else:
count = 0
index = 0
adjust = len(neuronDict.keys())*len(generalTallies.keys())
universe = sum(generalTallies.values())
pvalue = hyper.fishers(count, universe, total, tally, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
score = hyper.directional(count, universe, total, tally, adjust=adjust)
print >>g_output, "\t".join(map(str, [neuron, generalTissue, index, count, float(index)/types, float(count)/total, float(count)/maxxx, float(count)/tally, pvalue, adjPvalue, score]))
# export class tissue enrichment:
for classTissue in sorted(classTallies.keys()):
types = len(classMatrix[neuron].keys())
total = sum(classMatrix[neuron].values())
maxxx = max(classMatrix[neuron].values())
tally = classTallies[classTissue]
if classTissue in classMatrix[neuron]:
count = float(classMatrix[neuron][classTissue])
index = 1
else:
count = 0
index = 0
adjust = len(neuronDict.keys())*len(classTallies.keys())
universe = sum(classTallies.values())
pvalue = hyper.fishers(count, universe, total, tally, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
score = hyper.directional(count, universe, total, tally, adjust=adjust)
print >>l_output, "\t".join(map(str, [neuron, classTissue, index, count, float(index)/types, float(count)/total, float(count)/maxxx, float(count)/tally, pvalue, adjPvalue, score]))
# export match tissue enrichment:
for matchTissue in sorted(matchTallies.keys()):
types = len(matchMatrix[neuron].keys())
total = sum(matchMatrix[neuron].values())
maxxx = max(matchMatrix[neuron].values())
tally = matchTallies[matchTissue]
if matchTissue in matchMatrix[neuron]:
count = float(matchMatrix[neuron][matchTissue])
index = 1
else:
count = 0
index = 0
adjust = len(neuronDict.keys())*len(matchTallies.keys())
universe = sum(matchTallies.values())
pvalue = hyper.fishers(count, universe, total, tally, adjust=1, method="right")
adjPvalue = hyper.limit(pvalue*adjust)
score = hyper.directional(count, universe, total, tally, adjust=adjust)
print >>m_output, "\t".join(map(str, [neuron, matchTissue, index, count, float(index)/types, float(count)/total, float(count)/maxxx, float(count)/tally, pvalue, adjPvalue, score]))
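# Design note: the four enrichment blocks above (specific, general, class,
# match) are identical apart from the matrix/tally pair and the output handle
# used; a small helper taking (matrix, tallies, handle) as arguments could
# remove the duplication without changing the output.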
# close outputs:
f_output.close()
c_output.close()
s_output.close()
g_output.close()
l_output.close()
m_output.close()
print
print "Combining cell and factor (mix) information.."
# load input factor information:
factorDict = general.build2(compositionpath + "mapcells_composition_codes.txt", i="neuron", j="id", x="fraction.ids", mode="matrix")
# define input cell/tissue files:
infiles = ["mapcells_composition_cellular.txt", "mapcells_composition_specific.txt", "mapcells_composition_general.txt", "mapcells_composition_class.txt", "mapcells_composition_match.txt"]
for infile in infiles:
print "Processing:", infile
# initiate neuron data extraction:
f_output = open(compositionpath + infile.replace(".txt", ".mix"), "w")
inheader = open(compositionpath + infile).readline().strip().split("\t")
inlines = open(compositionpath + infile).readlines()
print >>f_output, inlines.pop(0).strip()  # strip the trailing newline so the header is not followed by a blank line
# append factor information to neuron data:
processed = list()
for inline in inlines:
neuron, label = inline.strip().split("\t")[:2]
if not neuron in processed:
processed.append(neuron)
for factor in factorDict[neuron]:
output = list()
for column in inheader:
if column == "neuron":
output.append(neuron)
elif column == "id":
output.append(factor)
elif column in ["pvalue", "pvalue.adj"]:
output.append("1")
else:
output.append(factorDict[neuron][factor])
print >>f_output, "\t".join(output)
print >>f_output, inline.strip()
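# Note: the first time a neuron is seen, the factor fractions loaded from
# mapcells_composition_codes.txt are emitted as extra pseudo-rows (with
# p-values fixed to 1) before the original enrichment row is echoed, so every
# neuron in the .mix file carries both factor and cell/tissue information.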
# close outputs:
f_output.close()
print
# examine co-association correspondence between genes:
elif option.mode == "test.similarity":
# update path to neurons:
neuronspath = neuronspath + option.peaks + "/"
# define input/output paths:
bedpath = neuronspath + option.technique + "/results/" + option.neurons + "/regions/bed/"
querypath = cellsetpath + option.query + "/"
targetpath = cellsetpath + option.target + "/"
hyperpath = comparepath + option.name + "/hyper/"
logpath = comparepath + option.name + "/log/"
general.pathGenerator(hyperpath)
general.pathGenerator(logpath)
# load query cells:
print
print "Loading query cells..."
query_matrix = dict()
for query in os.listdir(querypath):
queryCells = general.clean(open(querypath + query).read().split("\n"), "")
query_matrix[query] = queryCells
#print query, queryCells
print "Generating merged region file..."
queryfile = hyperpath + "query.bed"
regionsfile = hyperpath + "regions.bed"
overlapfile = hyperpath + "overlap.bed"
joint = " " + bedpath
command = "cat " + bedpath + joint.join(os.listdir(bedpath)) + " > " + regionsfile
os.system(command)
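# Note: the "joint" trick above expands to
# "cat <bedpath><file1> <bedpath><file2> ... > regions.bed", i.e. all
# per-neuron BED files are concatenated into a single regions file. A
# subprocess call with an explicit argument list would avoid shell quoting
# issues if file names ever contained spaces.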
# load gene coordinates:
print "Loading gene/feature coordinates..."
coord_dict = dict()
ad = general.build_header_dict(annotationspath + option.reference)
inlines = open(annotationspath + option.reference).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.strip("\n").split("\t")
chrm, start, stop, feature, strand, name = initems[ad["#chrm"]], initems[ad["start"]], initems[ad["end"]], initems[ad["feature"]], initems[ad["strand"]], initems[ad["name"]]
if strand == "+":
start, end = int(start)-option.up, int(start)+option.dn
elif strand == "-":
start, end = int(stop)-option.dn, int(stop)+option.up
for query in query_matrix:
if query == feature or query == name:
f_output = open(queryfile, "w")
print >>f_output, "\t".join(map(str, [chrm, start, end, feature, 0, strand]))
f_output.close()
overlaps = list()
command = "intersectBed -u -a " + regionsfile + " -b " + queryfile + " > " + overlapfile
os.system(command)
for inline in open(overlapfile).readlines():
overlaps.append(inline.strip())
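# Note: "intersectBed -u" reports each entry of -a (the merged neuron
# regions) at most once if it overlaps the single-interval query BED, so
# "overlaps" holds the candidate regions falling within the promoter window
# defined by option.up/option.dn around the query gene.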
print query, len(overlaps)
if len(overlaps) > 0:
pdb.set_trace()
break
# tree building mode:
elif option.mode == "tree.build":
# build cell-expression matrix:
print
print "Loading cellular expression..."
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
# store cell-parent relationships:
print "Loading cell-parent relationships..."
cell_dict, parent_dict, pedigreeCells = relationshipBuilder(pedigreefile=option.pedigree, path=extraspath, mechanism="simple")
print "Pedigree cells:", len(pedigreeCells)
print "Tracked cells:", len(trackedCells)
# trim tree:
cell_tree, parent_tree = dict(), dict()
for parent in parent_dict:
for cell in parent_dict[parent]:
ascendants = ascendantsCollector(cell, parent_dict, cell_dict, ascendants=list())
process = False
if option.lineages == "complete":
process = True
elif parent in trackedCells and cell in trackedCells:
process = True
elif option.ascendants != "OFF" and len(ascendants) < int(option.ascendants):
process = True
if process:
if not parent in parent_tree:
parent_tree[parent] = list()
parent_tree[parent].append(cell)
cell_tree[cell] = parent
tree = treeBuilder(parent_tree, cell_tree)
#print sorted(tree.keys())
#print tree["P0"]
#pdb.set_trace()
f_output = open(cellspath + "mapcells_tree_" + option.name + ".json", "w")
json.dump(tree["P0"], f_output)
f_output.close()
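# Usage sketch (assumption: treeBuilder returns a nested dict rooted at "P0"):
# the exported file can be reloaded for inspection with
#     import json
#     tree_root = json.load(open(cellspath + "mapcells_tree_" + option.name + ".json"))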
# tree coloring mode:
elif option.mode == "tree.color":
# build cell-expression matrix:
print
print "Loading cellular expression..."
quantitation_matrix, expression_matrix, tracking_matrix, trackedCells = expressionBuilder(expressionfile=option.expression, path=expressionpath, cutoff=option.fraction, minimum=option.minimum, metric="fraction.expression")
# store cell-parent relationships:
print "Loading cell-parent relationships..."
cell_dict, parent_dict, pedigreeCells = relationshipBuilder(pedigreefile=option.pedigree, path=extraspath, mechanism="simple")
print "Pedigree cells:", len(pedigreeCells)
print "Tracked cells:", len(trackedCells)
# trim tree:
cell_tree, parent_tree = dict(), dict()
for parent in parent_dict:
for cell in parent_dict[parent]:
ascendants = ascendantsCollector(cell, parent_dict, cell_dict, ascendants=list())
process = False
if option.lineages == "complete":
process = True
elif parent in trackedCells and cell in trackedCells:
process = True
elif option.ascendants != "OFF" and len(ascendants) < int(option.ascendants):
process = True
if process:
if not parent in parent_tree:
parent_tree[parent] = list()
parent_tree[parent].append(cell)
cell_tree[cell] = parent
# build header dict:
hd = general.build_header_dict(option.infile)
# load input lines:
pvalue_matrix, cells_matrix = dict(), dict()
inlines = open(option.infile).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.strip("\n").split("\t")
query, target, pvalue, cells = initems[hd["query"]], initems[hd["target"]], initems[hd["pvalue"]], initems[hd["cells"]]
if not query in pvalue_matrix:
pvalue_matrix[query] = dict()
cells_matrix[query] = dict()
pvalue_matrix[query][target] = float(pvalue)
cells_matrix[query][target] = cells.split(",")
# scan inputs, selecting the targets of highest enrichment and generating color tree for each:
k = 0
print
print "Scanning queries..."
for query in cells_matrix:
target = general.valuesort(pvalue_matrix[query])[0]
cells = cells_matrix[query][target]
print query, target, pvalue_matrix[query][target], len(cells)
tree = treeBuilder(parent_tree, cell_tree, highlights=cells)
#print
# MINLP written by GAMS Convert at 01/15/21 11:37:33
#
# Equation counts
# Total E G L N X C B
# 1486 571 111 804 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 865 685 180 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 3373 3193 180 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
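# Aside (not part of the generated model): GAMS Convert emits one scalar Var
# per variable. A hand-written Pyomo model would more typically use an indexed
# variable with a bounds rule, e.g.
#     m.x = Var(range(2, 596), within=NonNegativeReals, initialize=0,
#               bounds=lambda m, i: x_bounds.get(i, (0, None)))
# where x_bounds would be a (hypothetical) dict holding the finite upper
# bounds (40, 30 or 20) for the handful of bounded variables. The generated
# scalar declarations below are kept as-is.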
m.x2 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x86 = Var(within=Reals,bounds=(0,20),initialize=0)
m.x87 = Var(within=Reals,bounds=(0,20),initialize=0)
m.x88 = Var(within=Reals,bounds=(0,20),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,20),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,20),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,20),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x102 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x103 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x104 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x105 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x121 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x122 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x123 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x124 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x125 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x126 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x127 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x136 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x151 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x152 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x153 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x154 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x155 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x156 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x157 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x158 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x159 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x160 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x161 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x162 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x163 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x164 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x165 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x166 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x167 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x168 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x169 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x170 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x171 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x172 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x173 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x174 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x175 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x176 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x177 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x178 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x179 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x180 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x181 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x182 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x183 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x184 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x185 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x186 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x187 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x188 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x189 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x190 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x191 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x192 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x193 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x194 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x195 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x196 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x197 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x198 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x199 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x200 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x201 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x202 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x203 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x204 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x205 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x206 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x207 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x208 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x209 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x210 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x211 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x212 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x213 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x214 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x215 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x216 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x217 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x218 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x219 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x220 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x221 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x222 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x223 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x224 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x225 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x226 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x227 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x228 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x229 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x230 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x231 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x232 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x233 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x234 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x235 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x236 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x237 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x238 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x239 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x240 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x241 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x242 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x243 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x244 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x245 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x246 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x247 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x248 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x249 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x250 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x251 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x252 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x253 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x254 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x255 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x256 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x257 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x258 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x259 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x260 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x261 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x262 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x263 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x264 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x265 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x266 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x267 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x268 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x269 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x270 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x271 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x272 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x273 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x274 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x275 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x276 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x277 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x278 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x279 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x280 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x281 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x282 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x283 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x284 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x285 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x286 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x287 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x288 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x289 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x290 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x291 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x292 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x293 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x294 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x295 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x296 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x297 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x298 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x299 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x300 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x301 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x302 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x303 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x304 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x305 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x306 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x307 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x308 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x309 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x310 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x311 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x312 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x313 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x314 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x315 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x316 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x317 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x318 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x319 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x320 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x321 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x322 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x323 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x324 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x325 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x326 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x327 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x328 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x329 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x330 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x331 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x332 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x333 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x334 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x335 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x336 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x337 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x338 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x339 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x340 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x341 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x342 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x343 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x344 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x345 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x346 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x347 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x348 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x349 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x350 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x351 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x352 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x353 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x354 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x355 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x356 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x357 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x358 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x359 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x360 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x361 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x362 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x363 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x364 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x365 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x366 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x367 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x368 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x369 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x370 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x371 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x372 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x373 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x374 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x375 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x376 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x377 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x378 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x379 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x380 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x381 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x382 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x383 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x384 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x385 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x386 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x387 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x388 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x389 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x390 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x391 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x392 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x393 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x394 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x395 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x396 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x397 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x398 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x399 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x400 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x401 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x402 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x403 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x404 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x405 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x406 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x407 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x408 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x409 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x410 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x411 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x412 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x413 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x414 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x415 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x416 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x417 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x418 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x419 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x420 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x421 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x422 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x423 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x424 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x425 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x426 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x427 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x428 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x429 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x430 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x431 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x432 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x433 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x434 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x435 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x436 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x437 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x438 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x439 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x440 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x441 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x442 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x443 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x444 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x445 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x446 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x447 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x448 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x449 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x450 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x451 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x452 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x453 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x454 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x455 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x456 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x457 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x458 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x459 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x460 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x461 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x462 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x463 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x464 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x465 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x466 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x467 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x468 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x469 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x470 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x471 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x472 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x473 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x474 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x475 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x476 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x477 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x478 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x479 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x480 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x481 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x482 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x483 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x484 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x485 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x486 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x487 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x488 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x489 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x490 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x491 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x492 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x493 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x494 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x495 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x496 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x497 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x498 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x499 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x500 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x501 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x502 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x503 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x504 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x505 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x506 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x507 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x508 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x509 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x510 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x511 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x512 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x513 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x514 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x515 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x516 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x517 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x518 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x519 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x520 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x521 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x522 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x523 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x524 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x525 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x526 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x527 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x528 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x529 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x530 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x531 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x532 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x533 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x534 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x535 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x536 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x537 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x538 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x539 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x540 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x541 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x542 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x543 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x544 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x545 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x546 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x547 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x548 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x549 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x550 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x551 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x552 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x553 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x554 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x555 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x556 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x557 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x558 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x559 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x560 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x561 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x562 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x563 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x564 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x565 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x566 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x567 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x568 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x569 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x570 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x571 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x572 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x573 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x574 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x575 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x576 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x577 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x578 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x579 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x580 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x581 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x582 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x583 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x584 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x585 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x586 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x587 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x588 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x589 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x590 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x591 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x592 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x593 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x594 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x595 = Var(within=Reals,bounds=(0,None),initialize=0)
m.b596 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b597 = Var(within=Binary,bounds=(0,1),initialize=0)
"""Test selecting shapes."""
data = 20 * np.random.random((10, 4, 2))
np.random.seed(0)
layer = Shapes(data)
layer.selected_data = {0, 1}
assert layer.selected_data == {0, 1}
layer.selected_data = {9}
assert layer.selected_data == {9}
layer.selected_data = set()
assert layer.selected_data == set()
def test_removing_all_shapes_empty_list():
"""Test removing all shapes with an empty list."""
data = 20 * np.random.random((10, 4, 2))
np.random.seed(0)
layer = Shapes(data)
assert layer.nshapes == 10
layer.data = []
assert layer.nshapes == 0
def test_removing_all_shapes_empty_array():
"""Test removing all shapes with an empty list."""
data = 20 * np.random.random((10, 4, 2))
np.random.seed(0)
layer = Shapes(data)
assert layer.nshapes == 10
layer.data = np.empty((0, 2))
assert layer.nshapes == 0
def test_removing_selected_shapes():
"""Test removing selected shapes."""
np.random.seed(0)
data = [
20 * np.random.random((np.random.randint(2, 12), 2)) for i in range(5)
] + list(np.random.random((5, 4, 2)))
shape_type = ['polygon'] * 5 + ['rectangle'] * 3 + ['ellipse'] * 2
layer = Shapes(data, shape_type=shape_type)
# With nothing selected no points should be removed
layer.remove_selected()
assert len(layer.data) == len(data)
# Select three shapes and remove them
layer.selected_data = {1, 7, 8}
layer.remove_selected()
keep = [0] + list(range(2, 7)) + [9]
data_keep = [data[i] for i in keep]
shape_type_keep = [shape_type[i] for i in keep]
assert len(layer.data) == len(data_keep)
assert len(layer.selected_data) == 0
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data_keep)])
assert layer.ndim == 2
assert np.all(
[s == so for s, so in zip(layer.shape_type, shape_type_keep)]
)
def test_changing_modes():
"""Test changing modes."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.mode == 'pan_zoom'
assert layer.interactive is True
layer.mode = 'select'
assert layer.mode == 'select'
assert layer.interactive is False
layer.mode = 'direct'
assert layer.mode == 'direct'
assert layer.interactive is False
layer.mode = 'vertex_insert'
assert layer.mode == 'vertex_insert'
assert layer.interactive is False
layer.mode = 'vertex_remove'
assert layer.mode == 'vertex_remove'
assert layer.interactive is False
layer.mode = 'add_rectangle'
assert layer.mode == 'add_rectangle'
assert layer.interactive is False
layer.mode = 'add_ellipse'
assert layer.mode == 'add_ellipse'
assert layer.interactive is False
layer.mode = 'add_line'
assert layer.mode == 'add_line'
assert layer.interactive is False
layer.mode = 'add_path'
assert layer.mode == 'add_path'
assert layer.interactive is False
layer.mode = 'add_polygon'
assert layer.mode == 'add_polygon'
assert layer.interactive is False
layer.mode = 'pan_zoom'
assert layer.mode == 'pan_zoom'
assert layer.interactive is True
def test_name():
"""Test setting layer name."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.name == 'Shapes'
layer = Shapes(data, name='random')
assert layer.name == 'random'
layer.name = 'shps'
assert layer.name == 'shps'
def test_visibility():
"""Test setting layer visibility."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.visible is True
layer.visible = False
assert layer.visible is False
layer = Shapes(data, visible=False)
assert layer.visible is False
layer.visible = True
assert layer.visible is True
def test_opacity():
"""Test setting opacity."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
# Check default opacity value of 0.7
assert layer.opacity == 0.7
# Select data and change opacity of selection
layer.selected_data = {0, 1}
assert layer.opacity == 0.7
layer.opacity = 0.5
assert layer.opacity == 0.5
# Add new shape and check the layer opacity is preserved
new_shape = np.random.random((1, 4, 2))
layer.selected_data = set()
layer.add(new_shape)
assert layer.opacity == 0.5
# Instantiate with custom opacity
layer2 = Shapes(data, opacity=0.2)
assert layer2.opacity == 0.2
# Check that removing data doesn't change the opacity
layer2.selected_data = {0, 2}
layer2.remove_selected()
assert len(layer2.data) == shape[0] - 2
assert layer2.opacity == 0.2
def test_blending():
"""Test setting layer blending."""
np.random.seed(0)
data = 20 * np.random.random((10, 4, 2))
layer = Shapes(data)
assert layer.blending == 'translucent'
layer.blending = 'additive'
assert layer.blending == 'additive'
layer = Shapes(data, blending='additive')
assert layer.blending == 'additive'
layer.blending = 'opaque'
assert layer.blending == 'opaque'
@pytest.mark.filterwarnings("ignore:elementwise comparison fail:FutureWarning")
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_switch_color_mode(attribute):
"""Test switching between color modes"""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
# create a continuous property with a known value in the last element
continuous_prop = np.random.random((shape[0],))
continuous_prop[-1] = 1
properties = {
'shape_truthiness': continuous_prop,
'shape_type': _make_cycled_properties(['A', 'B'], shape[0]),
}
initial_color = [1, 0, 0, 1]
color_cycle = ['red', 'blue']
color_kwarg = f'{attribute}_color'
colormap_kwarg = f'{attribute}_colormap'
color_cycle_kwarg = f'{attribute}_color_cycle'
args = {
color_kwarg: initial_color,
colormap_kwarg: 'gray',
color_cycle_kwarg: color_cycle,
}
layer = Shapes(data, properties=properties, **args)
layer_color_mode = getattr(layer, f'{attribute}_color_mode')
layer_color = getattr(layer, f'{attribute}_color')
assert layer_color_mode == 'direct'
np.testing.assert_allclose(
layer_color, np.repeat([initial_color], shape[0], axis=0)
)
# there should not be a color property set for the attribute yet
color_property = getattr(layer, f'_{attribute}_color_property')
assert color_property == ''
# transitioning to colormap should raise a warning
# because there isn't an edge color property yet and
# the first property in shapes.properties is being automatically selected
with pytest.warns(UserWarning):
setattr(layer, f'{attribute}_color_mode', 'colormap')
color_property = getattr(layer, f'_{attribute}_color_property')
assert color_property == next(iter(properties))
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(layer_color[-1], [1, 1, 1, 1])
# switch to color cycle
setattr(layer, f'{attribute}_color_mode', 'cycle')
setattr(layer, f'{attribute}_color', 'shape_type')
color = getattr(layer, f'{attribute}_color')
layer_color = transform_color(color_cycle * int(shape[0] / 2))
np.testing.assert_allclose(color, layer_color)
# switch back to direct, edge_colors shouldn't change
setattr(layer, f'{attribute}_color_mode', 'direct')
new_edge_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(new_edge_color, color)
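# Summary of the modes exercised above: 'direct' keeps explicitly assigned
# colors, 'colormap' maps a continuous property through the configured
# colormap (hence the white color for the element whose property value is 1),
# and 'cycle' maps a categorical property through the color cycle; switching
# back to 'direct' leaves the already-computed colors untouched.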
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_color_direct(attribute: str):
"""Test setting face/edge color directly."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer_kwargs = {f'{attribute}_color': 'black'}
layer = Shapes(data, **layer_kwargs)
color_array = transform_color(['black'] * shape[0])
current_color = getattr(layer, f'current_{attribute}_color')
layer_color = getattr(layer, f'{attribute}_color')
assert current_color == 'black'
assert len(layer.edge_color) == shape[0]
np.testing.assert_allclose(color_array, layer_color)
# With no data selected changing color has no effect
setattr(layer, f'current_{attribute}_color', 'blue')
current_color = getattr(layer, f'current_{attribute}_color')
assert current_color == 'blue'
np.testing.assert_allclose(color_array, layer_color)
# Select data and change edge color of selection
selected_data = {0, 1}
layer.selected_data = {0, 1}
current_color = getattr(layer, f'current_{attribute}_color')
assert current_color == 'black'
setattr(layer, f'current_{attribute}_color', 'green')
colorarray_green = transform_color(['green'] * len(layer.selected_data))
color_array[list(selected_data)] = colorarray_green
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(color_array, layer_color)
# Add new shape and test its color
new_shape = np.random.random((1, 4, 2))
layer.selected_data = set()
setattr(layer, f'current_{attribute}_color', 'blue')
layer.add(new_shape)
color_array = np.vstack([color_array, transform_color('blue')])
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] + 1
np.testing.assert_allclose(color_array, layer_color)
# Check removing data adjusts colors correctly
layer.selected_data = {0, 2}
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] - 1
np.testing.assert_allclose(
layer_color,
np.vstack((color_array[1], color_array[3:])),
)
# set the color directly
setattr(layer, f'{attribute}_color', 'black')
color_array = np.tile([[0, 0, 0, 1]], (len(layer.data), 1))
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(color_array, layer_color)
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_single_shape_properties(attribute):
"""Test creating single shape with properties"""
shape = (4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer_kwargs = {f'{attribute}_color': 'red'}
layer = Shapes(data, **layer_kwargs)
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == 1
np.testing.assert_allclose([1, 0, 0, 1], layer_color[0])
color_cycle_str = ['red', 'blue']
color_cycle_rgb = [[1, 0, 0], [0, 0, 1]]
color_cycle_rgba = [[1, 0, 0, 1], [0, 0, 1, 1]]
@pytest.mark.parametrize("attribute", ['edge', 'face'])
@pytest.mark.parametrize(
"color_cycle",
[color_cycle_str, color_cycle_rgb, color_cycle_rgba],
)
def test_color_cycle(attribute, color_cycle):
"""Test setting edge/face color with a color cycle list"""
# create Shapes using list color cycle
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'shape_type': _make_cycled_properties(['A', 'B'], shape[0])}
shapes_kwargs = {
'properties': properties,
f'{attribute}_color': 'shape_type',
f'{attribute}_color_cycle': color_cycle,
}
layer = Shapes(data, **shapes_kwargs)
assert layer.properties == properties
color_array = transform_color(
list(islice(cycle(color_cycle), 0, shape[0]))
)
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(layer_color, color_array)
# Add new shape and test its color
new_shape = np.random.random((1, 4, 2))
layer.selected_data = {0}
layer.add(new_shape)
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] + 1
np.testing.assert_allclose(
layer_color,
np.vstack((color_array, transform_color('red'))),
)
# Check removing data adjusts colors correctly
layer.selected_data = {0, 2}
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] - 1
np.testing.assert_allclose(
layer_color,
np.vstack((color_array[1], color_array[3:], transform_color('red'))),
)
# refresh colors
layer.refresh_colors(update_color_mapping=True)
# test adding a shape with a new property value
layer.selected_data = set()
current_properties = layer.current_properties
current_properties['shape_type'] = np.array(['new'])
layer.current_properties = current_properties
new_shape_2 = np.random.random((1, 4, 2))
layer.add(new_shape_2)
color_cycle_map = getattr(layer, f'{attribute}_color_cycle_map')
assert 'new' in color_cycle_map
np.testing.assert_allclose(
color_cycle_map['new'], np.squeeze(transform_color(color_cycle[0]))
)
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_add_color_cycle_to_empty_layer(attribute):
"""Test adding a shape to an empty layer when edge/face color is a color cycle
See: https://github.com/napari/napari/pull/1069
"""
default_properties = {'shape_type': np.array(['A'])}
color_cycle = ['red', 'blue']
shapes_kwargs = {
'properties': default_properties,
f'{attribute}_color': 'shape_type',
f'{attribute}_color_cycle': color_cycle,
}
layer = Shapes(**shapes_kwargs)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_bbox_canvas.ipynb (unless otherwise specified).
__all__ = ['points2bbox_coords', 'coords_scaled', 'BBoxCanvas', 'BBoxVideoCanvas']
# Internal Cell
import io
import attr
from math import log
from pubsub import pub
from attr import asdict
from pathlib import Path
from copy import deepcopy
from enum import IntEnum
from typing import Dict, Optional, List, Any, Tuple
from abc import ABC, abstractmethod
from pydantic import root_validator
from .base import BaseState
from .docs.utils import is_building_docs
from .mltypes import BboxCoordinate, BboxVideoCoordinate
from ipycanvas import MultiCanvas as IMultiCanvas, Canvas, hold_canvas
from ipywidgets import Image, Label, Layout, HBox, VBox, Output
from PIL import Image as PILImage
# Internal Cell
if not is_building_docs():
class MultiCanvas(IMultiCanvas):
pass
else:
class MultiCanvas(Image): # type: ignore
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
image = PILImage.new('RGB', (100, 100), (255, 255, 255))
b = io.BytesIO()
image.save(b, format='PNG')
self.value = b.getvalue()
def __getitem__(self, key):
return self
def draw_image(self, image, x=0, y=0, width=None, height=None):
self.value = image.value
self.width = width
self.height = height
def __getattr__(self, name):
ignored = [
'flush',
'fill_rect',
'stroke_rect',
'stroke_rects',
'on_mouse_move',
'on_mouse_down',
'on_mouse_up',
'clear',
'on_client_ready',
'stroke_styled_line_segments'
]
if name in ignored:
def wrapper(*args, **kwargs):
return self._ignored(*args, **kwargs)
return wrapper
return object.__getattribute__(self, name)
@property
def caching(self):
return False
@caching.setter
def caching(self, value):
pass
@property
def size(self):
return (self.width, self.height)
def _ignored(self, *args, **kwargs):
pass
# Internal Cell
def draw_bg(canvas, color='rgb(236,240,241)'):
with hold_canvas(canvas):
canvas.fill_style = color
canvas.fill_rect(0, 0, canvas.size[0], canvas.size[1])
# Internal Cell
def draw_bounding_box(canvas, coord: BboxCoordinate, color='white', line_width=1,
border_ratio=2, clear=False, stroke_color='black'):
with hold_canvas(canvas):
if clear:
canvas.clear()
line_width = line_width or log(canvas.height) / 5
gap = line_width * border_ratio
# paint and draw the middle stroked rectangle
canvas.line_width = gap
canvas.stroke_style = color
canvas.stroke_rect(coord.x + gap / 2, coord.y + gap / 2,
coord.width - gap, coord.height - gap)
# paint and draw the external stroked rectangle
canvas.line_width = line_width
canvas.stroke_style = stroke_color
canvas.stroke_rect(coord.x, coord.y, coord.width, coord.height)
# paint and draw the internal stroked rectangle
canvas.line_width = line_width
canvas.stroke_style = stroke_color
canvas.stroke_rect(coord.x + gap, coord.y + gap,
coord.width - 2 * gap, coord.height - 2 * gap)
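# Illustrative note (added, not in the original source): with line_width=1 and
# border_ratio=2 the gap is 2 px, so the calls above draw an outer stroke at
# (x, y, width, height), a 2 px colored stroke inset by 1 px, and an inner
# stroke inset by 2 px, producing the dark/light/dark outline used for bboxes.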
# Internal Cell
class BoundingBox:
def __init__(self):
self.color = 'white'
self.line_width = 1
self.border_ratio = 2
self.stroke_color = 'black'
def _empty_bbox(self) -> Dict[str, List[int]]:
return {'x': [], 'y': [], 'width': [], 'height': []}
def _stroke_rects(self, canvas: Canvas, bbox: Dict[str, List[int]],
line_width: float, color: str):
canvas.line_width = line_width
canvas.stroke_style = color
canvas.stroke_rects(bbox['x'], bbox['y'], bbox['width'], bbox['height'])
def draw(self, canvas: Canvas, coords: List[BboxCoordinate], clear: bool = False):
with hold_canvas(canvas):
if clear:
canvas.clear()
mid_rect = self._empty_bbox()
inter_rect = self._empty_bbox()
ext_rect = self._empty_bbox()
line_width = self.line_width or log(canvas.height) / 5
gap = line_width * self.border_ratio
for coord in coords:
mid_rect['x'].append(coord.x + gap / 2)
mid_rect['y'].append(coord.y + gap / 2)
mid_rect['width'].append(coord.width - gap)
mid_rect['height'].append(coord.height - gap)
ext_rect['x'].append(coord.x)
ext_rect['y'].append(coord.y)
ext_rect['width'].append(coord.width)
ext_rect['height'].append(coord.height)
inter_rect['x'].append(coord.x + gap)
inter_rect['y'].append(coord.y + gap)
inter_rect['width'].append(coord.width - 2 * gap)
inter_rect['height'].append(coord.height - 2 * gap)
# paint and draw the middle stroked rectangle
self._stroke_rects(canvas, mid_rect, gap, self.color)
# paint and draw the external stroked rectangle
self._stroke_rects(canvas, ext_rect, line_width, self.stroke_color)
# paint and draw the internal stroked rectangle
self._stroke_rects(canvas, inter_rect, line_width, self.stroke_color)
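# Design note (added): unlike draw_bounding_box above, BoundingBox.draw batches
# every bbox into three stroke_rects calls (one per nesting level) inside a
# single hold_canvas block, so redrawing many boxes costs a constant number of
# canvas messages instead of three stroke_rect calls per box.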
# Internal Cell
from PIL import Image as pilImage
# can we do this without reading image?
def get_image_size(path):
pil_im = pilImage.open(path)
return pil_im.width, pil_im.height
# Internal Cell
@attr.define
class ImageCanvas:
image_widget: Image
x: int
y: int
width: int
height: int
scale: float
# Internal Cell
class ImageCanvasPrototype(ABC):
@abstractmethod
def prepare_canvas(self, canvas: Canvas, file: str) -> ImageCanvas:
pass
# Internal Cell
class CanvasScaleMixin:
def _calc_scale(
self,
width_canvas: int,
height_canvas: int,
width_img: float,
height_img: float
) -> float:
ratio_canvas = float(width_canvas) / height_canvas
ratio_img = float(width_img) / height_img
if ratio_img > ratio_canvas:
# wider than the canvas, scale to canvas width
return width_canvas / width_img
# taller than the canvas, scale to canvas height
return height_canvas / height_img
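# Worked example (illustrative, not part of the original source): for an
# 800x600 canvas (ratio 1.33) and a 1600x900 image (ratio 1.78), the image is
# wider than the canvas, so scale = 800 / 1600 = 0.5 and the image is drawn at
# 800x450, filling the width while preserving the aspect ratio.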
# Internal Cell
class ScaledImage(ImageCanvasPrototype, CanvasScaleMixin):
def prepare_canvas(self, canvas: Canvas, file: str) -> ImageCanvas:
image = Image.from_file(file)
width_img, height_img = get_image_size(file)
scale = self._calc_scale(
int(canvas.width),
int(canvas.height),
width_img,
height_img
)
image_width = width_img * min(1, scale)
image_height = height_img * min(1, scale)
return ImageCanvas(
image_widget=image,
x=0,
y=0,
width=image_width,
height=image_height,
scale=scale
)
# Internal Cell
class FitImage(ImageCanvasPrototype):
def prepare_canvas(self, canvas: Canvas, file: str) -> ImageCanvas:
image = Image.from_file(file)
return ImageCanvas(
image_widget=image,
x=0,
y=0,
width=canvas.width,
height=canvas.height,
scale=1
)
# Internal Cell
class ImageRenderer:
def __init__(
self,
clear: bool = False,
has_border: bool = False,
fit_canvas: bool = False
):
self.clear = clear
self.has_border = has_border
self.fit_canvas = fit_canvas
if fit_canvas:
self._strategy = FitImage() # type: ImageCanvasPrototype
else:
self._strategy = ScaledImage()
def render(self, canvas: Canvas, file: str) -> Tuple[int, int, float]:
with hold_canvas(canvas):
if self.clear:
canvas.clear()
image_canvas = self._strategy.prepare_canvas(canvas, file)
if self.has_border:
canvas.stroke_rect(x=0, y=0, width=image_canvas.width, height=image_canvas.height)
image_canvas.width -= 2
image_canvas.height -= 2
image_canvas.x, image_canvas.y = 1, 1
canvas.draw_image(
image_canvas.image_widget,
image_canvas.x,
image_canvas.y,
image_canvas.width,
image_canvas.height
)
return image_canvas.width, image_canvas.height, image_canvas.scale
# Cell
def points2bbox_coords(start_x, start_y, end_x, end_y) -> Dict[str, float]:
min_x, max_x = sorted((start_x, end_x))
min_y, max_y = sorted((start_y, end_y))
return {'x': min_x, 'y': min_y, 'width': max_x - min_x, 'height': max_y - min_y}
# Cell
def coords_scaled(bbox_coords: List[float], image_scale: float):
return [value * image_scale for value in bbox_coords]
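# Illustrative usage sketch (added; not part of the autogenerated notebook export
# and never called): shows how points2bbox_coords and coords_scaled fit together
# when turning a mouse drag into a rectangle and rescaling it.
def _bbox_helpers_example():
    # A drag from (10, 40) to (30, 20): the corners are sorted automatically.
    coords = points2bbox_coords(10, 40, 30, 20)
    assert coords == {'x': 10, 'y': 20, 'width': 20, 'height': 20}
    # Rescale the [x, y, width, height] values, e.g. back to image space at 50% zoom.
    scaled = coords_scaled([coords['x'], coords['y'], coords['width'], coords['height']], 0.5)
    assert scaled == [5.0, 10.0, 10.0, 10.0]
    return coords, scaled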
# Internal Cell
class BBoxLayer(IntEnum):
bg = 0
image = 1
box = 2
highlight = 3
drawing = 4
# Internal Cell
class BBoxCanvasState(BaseState):
image_path: Optional[str]
bbox_coords: List[BboxCoordinate] = []
image_scale: float = 1
image_height: Optional[int] = None
image_width: Optional[int] = None
bbox_selected: Optional[int]
height: Optional[int]
width: Optional[int]
fit_canvas: bool = False
@root_validator
def set_height(cls, values):
if not values.get('image_height'):
values['image_height'] = values.get('height')
if not values.get('image_width'):
values['image_width'] = values.get('width')
return values
# Internal Cell
class BBoxCanvasGUI(HBox):
debug_output = Output(layout={'border': '1px solid black'})
def __init__(
self,
state: BBoxCanvasState,
has_border: bool = False,
drawing_enabled: bool = True
):
super().__init__()
self._state = state
self._start_point = ()
self.is_drawing = False
self.has_border = has_border
self.canvas_bbox_coords: Dict[str, Any] = {}
self.drawing_enabled = drawing_enabled
# do not stick bbox to borders
self.padding = 2
# Define each of the children...
self._image = Image(layout=Layout(display='flex',
justify_content='center',
align_items='center',
align_content='center',
overflow='hidden'))
if not drawing_enabled:
self.multi_canvas = MultiCanvas(
len(BBoxLayer),
width=self._state.width,
height=self._state.height
)
self.children = [VBox([self.multi_canvas])]
else:
self.multi_canvas = MultiCanvas(
len(BBoxLayer),
width=self._state.width,
height=self._state.height
)
self.im_name_box = Label()
children = [VBox([self.multi_canvas, self.im_name_box])]
self.children = children
draw_bg(self.multi_canvas[BBoxLayer.bg])
# link drawing events
self.multi_canvas[BBoxLayer.drawing].on_mouse_move(self._update_pos)
self.multi_canvas[BBoxLayer.drawing].on_mouse_down(self._start_drawing)
self.multi_canvas[BBoxLayer.drawing].on_mouse_up(self._stop_drawing)
@property
def highlight(self) -> BboxCoordinate:
return self._state.bbox_coords[self._state.bbox_selected]
@highlight.setter
def highlight(self, index: int):
self.clear_layer(BBoxLayer.highlight)
# unhighlight when double click
if self._state.bbox_coords and self._state.bbox_selected == index:
self._state.set_quietly('bbox_selected', None)
return
_bbox_coords = list(asdict(self._state.bbox_coords[index]).values())
_bbox_coords_scaled = coords_scaled(_bbox_coords,
self._state.image_scale)
bbox_coords = BboxCoordinate(*_bbox_coords_scaled)
draw_bounding_box(
self.multi_canvas[BBoxLayer.highlight],
bbox_coords,
stroke_color='black',
border_ratio=3,
color='yellow'
)
self._state.set_quietly('bbox_selected', index)
@debug_output.capture(clear_output=True)
def _update_pos(self, x, y):
# print(f"-> BBoxCanvasGUI::_update_post({x}, {y})")
if self.is_drawing:
self.canvas_bbox_coords = points2bbox_coords(*self._start_point, x, y)
self.draw_bbox(self.canvas_bbox_coords)
# bbox should not cross the canvas border:
if self._invalid_coords(x, y):
print(' !! Out of canvas border !!')
self._stop_drawing(x, y)
# print(f"<- BBoxCanvasGUI::_update_post({x}, {y})")
def _invalid_coords(self, x, y) -> bool:
return (
self.canvas_bbox_coords["x"] + self.canvas_bbox_coords["width"] >
self.multi_canvas.width - self.padding or
self.canvas_bbox_coords["y"] + self.canvas_bbox_coords["height"] >
self.multi_canvas.height - self.padding or
self.canvas_bbox_coords["x"] < self.padding or
self.canvas_bbox_coords["y"] < self.padding)
@debug_output.capture(clear_output=True)
def _stop_drawing(self, x, y):
# print(f"-> BBoxCanvasGUI::_stop_drawing({x}, {y})")
self.is_drawing = False
# if something is drawn
if self.canvas_bbox_coords:
# if the bbox is too small to be visible, discard it:
if (self.canvas_bbox_coords['width'] < 10 or
self.canvas_bbox_coords['height'] < 10):
self.clear_layer(BBoxLayer.drawing)
print(" !! too small bbox drawn !!")
else: # otherwise, save bbox values to backend
tmp_bbox_coords = deepcopy(self._state.bbox_coords)
tmp_bbox_coords.append(
BboxCoordinate(
**{k: v / self._state.image_scale for k, v in self.canvas_bbox_coords.items()} # noqa: E501
)
)
self._state.bbox_coords = tmp_bbox_coords
self.canvas_bbox_coords = {}
# print(f"<- BBoxCanvasGUI::_stop_drawing({x}, {y})")
def draw_bbox(self, canvas_bbox_coords: dict, color='white'):
# print('-> Observe canvas_coords: ', canvas_bbox_coords)
if not canvas_bbox_coords:
self.clear_layer(BBoxLayer.box)
self._state.bbox_coords = []
return
coords = BboxCoordinate(*canvas_bbox_coords.values())
draw_bounding_box(
self.multi_canvas[BBoxLayer.drawing],
coords,
color='white',
border_ratio=2,
clear=True
)
# print('<- Observe canvas_coords')
def clear_layer(self, layer: int):
self.multi_canvas[layer].clear()
@debug_output.capture(clear_output=True)
def _start_drawing(self, x, y):
# print(f"-> BBoxCanvasGUI::_start_drawing({x}, {y})")
self._start_point = (x, y)
self.is_drawing = True
# print(f"<- BBoxCanvasGUI::_start_drawing({x}, {y})")
# needed to support voila
# https://ipycanvas.readthedocs.io/en/latest/advanced.html#ipycanvas-in-voila
def observe_client_ready(self, cb=None):
self.multi_canvas.on_client_ready(cb)
# Internal Cell
class BBoxVideoCanvasGUI(BBoxCanvasGUI):
debug_output = Output(layout={'border': '1px solid black'})
def __init__(
self,
state: BBoxCanvasState,
has_border: bool = False,
drawing_enabled: bool = True
):
super().__init__(state, has_border, drawing_enabled)
@property
def highlight(self) -> BboxCoordinate:
return self._state.bbox_coords[self._state.bbox_selected]
@highlight.setter
def highlight(self, index: int):
self.clear_layer(BBoxLayer.highlight)
# unhighlight when double click
if self._state.bbox_coords and self._state.bbox_selected == index:
self._state.set_quietly('bbox_selected', None)
return
_bbox_coords = list(asdict(self._state.bbox_coords[index]).values())
_bbox_coords_scaled = coords_scaled(
_bbox_coords[:4], self._state.image_scale)
bbox_coords
index: int
:rtype: ndarray
"""
return self.__GetDataList()[index].Y
def GetXY(self, index: int) -> Tuple[ndarray, ndarray]:
"""Returns the data of spectrum for a specified index.
:type index: int
:rtype: Tuple[ndarray, ndarray]
"""
return self.__GetDataList()[index].XY
def GetBackground(self, index: int) -> ndarray:
"""Returns the background of spectrum for a specified index.
:type index: int
:rtype: ndarray
"""
return self.__GetDataList()[index].BackGround
def GetPeaks(self, index: int) -> PeakFunctionContainerList:
"""Returns the peaks of spectrum for a specified index.
:type index: int
:rtype: PeakFunctionContainerList
"""
return self.__GetDataList()[index].Peaks
def GetRecipe(self, index: int) -> Recipe:
"""Returns the recipe for a specified index.
:type index: int
:rtype: Recipe
"""
return self.__GetDataList()[index].Recipe
def GetSelectedRecipe(self) -> Recipe:
"""Returns the selected recipe.
:rtype: Recipe
"""
return deepcopy(self.__selected_recipe)
def SelectRecipe(self, recipe: Recipe):
"""Select a recipe.
:type recipe: Recipe
"""
if not isinstance(recipe, Recipe):
raise TypeError()
prev_recipe = self.__selected_recipe
self.__selected_recipe = recipe
event = RecipeSelectEvent(recipe, prev_recipe, id=self.__id)
self.__core_mgr.SendEvent(event)
def ApplyRecipe(self, recipe: Recipe, index_list: Iterable[int] = None):
"""Applies a recipe.
:type recipe: Recipe
:param index_list: Applies a recipe to the specified list of indexes. Defaults to None
:type index_list: Iterable[int], optional
"""
if not isinstance(recipe, Recipe):
raise TypeError()
data_list = self.__GetDataList()
index_list = self.GetSelection() if index_list is None else index_list
if len(index_list) == 0:
index_list = list(range(len(data_list)))
for index in index_list:
data = data_list[index]
data.Recipe = deepcopy(recipe)
event = DataContentsChangeEvent(index_list, data_list, recipe_changed_list=[True] * len(index_list), id=self.__id)
self.__core_mgr.SendEvent(event)
def GetMsg(self, index: int) -> str:
"""Returns the message for a specified index.
:type index: int
:rtype: str
"""
return self.__GetDataList()[index].Msg
def GetPath(self, index: int) -> str:
"""Returns the path for a specified index.
:type index: int
:rtype: str
"""
return self.__GetDataList()[index].Path
def GetSize(self, index: int) -> int:
"""Returns the size of spectrum for a specified index.
:type index: int
:rtype: int
"""
return self.__GetDataList()[index].GetSpectrumSize()
def GetDataSize(self) -> int:
"""Returns the size of data.
"""
return len(self.__GetDataList())
def GetMainSelection(self) -> int:
"""Return index of main selection.
:rtype: int
"""
return self.__main_selection[1]
def GetSelection(self) -> Tuple[int, ...]:
"""Returns a list of indexes for the selection.
:rtype: Tuple[int, ...]
"""
return tuple(self.__selection[1])
def Select(self, main_selection: Union[int, bool, None] = False, selection: Union[Iterable[int], bool, None] = False, operand: Optional[str] = None):
"""Change Data Selection.
:param main_selection: If the main selection is int, select it; if None, deselect the selected one; if False, no change.
:type main_selection: Union[int, False, None], optional
:param selection: If the selection is a list of int, change the selection according to the operand; if None, deselect the all selection; if False, no change.
:type selection: Union[Iterable[int], False, None], optional
:param operand: The possible choices are '|', '-', '^' and None. They represent the sum set, difference set, exclusive set, and no change, respectively. defaults to None
:type operand: Optional[str], optional
"""
if main_selection is False and selection is False:
raise ValueError()
if selection is False:
next_selection = self.__selection[1]
elif selection is None:
next_selection = set()
else:
selection = set(selection)
if operand is None:
next_selection = selection
elif operand == '|':
next_selection = self.__selection[1] | selection
elif operand == '-':
next_selection = self.__selection[1] - selection
elif operand == '^':
next_selection = self.__selection[1] ^ selection
else:
raise ValueError()
if main_selection is False:
next_main_selection = self.__main_selection[1]
else:
next_main_selection = main_selection
if next_main_selection is not None:
next_selection.add(main_selection)
self.__selection.append(next_selection)
self.__main_selection.append(next_main_selection)
event = DataSelectionChangeEvent(self.__main_selection[1], self.__main_selection[0], list(self.__selection[1]), list(self.__selection[0]))
self.__core_mgr.SendEvent(event)
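# Illustrative note (added): with a current selection of {1, 2} and
# selection={2, 3}, operand '|' yields {1, 2, 3}, '-' yields {1}, '^' yields
# {1, 3}, and operand None replaces the selection with {2, 3}.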
def GetIndexList(self, data_list: Iterable[DataContainer]) -> List[int]:
"""Returns the index list corresponding to the data list.
:type data_list: Iterable[DataContainer]
:rtype: List[int]
"""
index_dict = {data: n for n, data in enumerate(self.__GetDataList())}
return [index_dict[data] for data in data_list]
def ExecuteSpectrumFunction(self, index_list: Optional[Iterable[int]] = None):
"""Executes the recipe provided for the data specified in the index list.
:param index_list: If index_list is None, it will convert to all selections. Defaults to None
:type index_list: Optional[Iterable[int]], optional
:raises ValueError: If a function container reports an unknown return parameter key.
"""
data_list = self.__GetDataList()
index_list = list(range(self.GetDataSize())) if index_list is None else index_list
x_changed_list = []
y_changed_list = []
bg_changed_list = []
peaks_changed_list = []
msg_changed_list = []
for index in index_list:
data = data_list[index]
x_changed_list.append(False)
y_changed_list.append(False)
bg_changed_list.append(False)
peaks_changed_list.append(False)
msg_changed_list.append(False)
new_success_list = deepcopy(data.SuccessList)
new_recipe = deepcopy(data.Recipe)
for n in range(len(new_recipe)):
if new_success_list[n]:
continue
func_container = new_recipe[n]
x, y = data.XY
bg = data.BackGround
peaks = data.Peaks
try:
params = func_container.Execution(x, y, bg, peaks)
new_success_list[n] = True
msg = f'{func_container} executed successfully.'
msg_changed_list[-1] = True
except Exception as e:
params = []
new_success_list[n] = False
msg = '\n'.join(e.args)
msg_changed_list[-1] = True
LogError(msg)
break
for param, return_param in zip(params, func_container.SendReturnParams()):
if return_param == 'x':
x = param
x_changed_list[-1] = True
elif return_param == 'y':
y = param
y_changed_list[-1] = True
elif return_param == 'b':
bg = param
bg_changed_list[-1] = True
elif return_param == 'p':
if isinstance(param, PeakFunctionContainerBase):
param = PeakFunctionContainerList([param])
peaks = param
peaks_changed_list[-1] = True
else:
raise ValueError()
data.Append(Spectrum(x, y, bg, peaks), new_recipe, new_success_list, msg)
index_list = self.GetIndexList(data_list)
event = DataContentsChangeEvent(index_list, data_list, x_changed_list, y_changed_list, bg_changed_list, peaks_changed_list, [True] * len(index_list), msg_changed_list, id=self.__id)
self.__core_mgr.SendEvent(event)
def OnEvent(self, event):
event.Skip()
if event.GetId() == self.__id:
return
event_type = event.GetEventType()
if event_type == wxEVT_DATA_SELECTION_CHANGE:
prv_main_selection = event.GetPreviousMainSelection()
crt_main_selection = event.GetMainSelection()
self.__main_selection.append(prv_main_selection)
self.__main_selection.append(crt_main_selection)
prv_selection = event.GetPreviousSelection()
crt_selection = event.GetSelection()
self.__selection.append(set(prv_selection))
self.__selection.append(set(crt_selection))
elif event_type == wxEVT_DATA_CONTENTS_CHANGE:
index_list = event.GetIndexList()
data_list = event.GetDataList()
project = self.__GetProject()
project.SetDataList(data_list, index_list)
elif event_type == wxEVT_RECIPE_SELECT:
recipe = event.GetRecipe()
self.__selected_recipe = recipe
class SpectrumManager(Singleton):
"""Manager for drawing spectra
"""
def __init__(self, *args, **kw):
"""Default constructor
"""
super().__init__()
self.__core_mgr = kw['core_manager']
self.__id = NewIdRef()
self.__focus_panel = None
def Draw(self, need_bg=True, need_peaks=False, multi_draw_alpha=0.0):
"""Draws a spectrum.
:param need_bg: Whether the background needs to be drawn or not. Defaults to True
:type need_bg: bool, optional
:param need_peaks: Whether the peak needs to be drawn or not, defaults to False
:type need_peaks: bool, optional
:param multi_draw_alpha: Transparency of the drawing of spectra selected other than the main selection. Defaults to 0.0
:type multi_draw_alpha: float, optional
"""
spectrum_panel = self.__core_mgr.Get(PANEL_MANAGER).GetPanel(SPECTRUM_PANEL)
if spectrum_panel is None:
print('set spectrum panel first!!')
return
spectrum_panel.Clear()
data_mgr = self.__core_mgr.Get(DATA_MANAGER)
if len(data_mgr.GetSelection()) == 0:
spectrum_panel.canvas.draw()
return
main_selection = data_mgr.GetMainSelection()
selection = data_mgr.GetSelection()
line_list = []
bg_line_list = []
if main_selection is not None:
x, y = data_mgr.GetXY(main_selection)
line_list.append(Line2D(x, y, ls='', marker='.', ms=3, c='gray'))
if need_bg:
bg = data_mgr.GetBackground(main_selection)
if len(bg) != 0:
bg_line_list.append(Line2D(x, bg, ls='', marker='.', ms=3, c='gray'))
if need_peaks:
for peak in data_mgr.GetPeaks(main_selection):
p_v = peak.Execution(x)
spectrum_panel.main_ax.add_line(Line2D(x, p_v, c='orange'))
if multi_draw_alpha > 0:
for i in selection:
if i == main_selection:
continue
x, y = data_mgr.GetXY(i)
line_list.append(Line2D(x, y, alpha=multi_draw_alpha, ls='', marker='.', ms=3, c='gray'))
if need_bg:
bg = data_mgr.GetBackground(i)
bg_line_list.append(Line2D(x, bg, alpha=multi_draw_alpha, ls='', marker='.', ms=3, c='gray'))
for line in line_list:
spectrum_panel.main_ax.add_line(line)
if need_bg:
for line in bg_line_list:
spectrum_panel.bg_ax.add_line(line)
spectrum_panel.main_ax.autoscale()
spectrum_panel.bg_ax.autoscale()
spectrum_panel.canvas.draw()
def OnEvent(self, event):
event.Skip()
if event.GetId() == self.__id:
return
event_type = event.GetEventType()
if event_type == wxEVT_PANEL_SELECTION_CHANGE:
panel = event.GetPanel()
if panel is not None and panel.NeedDraw():
self.__focus_panel = panel
if event_type in [wxEVT_DATA_SELECTION_CHANGE, wxEVT_DATA_CONTENTS_CHANGE, wxEVT_PANEL_SELECTION_CHANGE, wxEVT_PANEL_VIEW]:
if self.__focus_panel is None or not self.__focus_panel.NeedDraw():
self.Draw()
else:
need_bg = self.__focus_panel.NeedBackgroundDraw()
need_peaks = self.__focus_panel.NeedPeaksDraw()
multi_alpha = self.__focus_panel.NeedMultiDraw()
self.Draw(need_bg, need_peaks, multi_alpha)
class EncodeManager(Singleton):
"""Manages parameters related to encode.
"""
def __init__(self, *args, **kw):
"""Default constructor
"""
super().__init__()
self.__core_mgr = kw['core_manager']
self.__io_mgr = kw['io_manager']
self.__id = NewIdRef()
self.__encoding = None
self.__delimiter = None
def GetEncoding(self) -> ChoiceContainer:
"""Get the encoding to use when reading the file. This value is deepcopied.
:rtype: ChoiceContainer
"""
return deepcopy(self.__encoding)
def SelectEncoding(self, encoding: Union[str, ChoiceContainer]):
"""Select the encoding to be used when reading the file.
:param encoding: type of encoding or instance of ChoiceContainer for encoding.
:type encoding: Union[str, ChoiceContainer]
"""
if isinstance(encoding, str):
encoding_type = encoding
encoding = deepcopy(self.__encoding)
encoding.SetValue(encoding_type)
if not isinstance(encoding, ChoiceContainer):
raise TypeError()
prev_encoding = self.__encoding
self.__encoding = encoding
event = EncodeEvent(encoding=encoding, previous_encoding=prev_encoding, id=self.__id)
self.__core_mgr.SendEvent(event)
def GetDelimiter(self) -> ChoiceContainer:
"""Delimiter used to read files. This value is deepcopied.
:rtype: ChoiceContainer
"""
return deepcopy(self.__delimiter)
def SelectDelimiter(self, delimiter: Union[str, ChoiceContainer]):
"""Select the delimiter to be used when reading the file.
:param delimiter: type of delimiter or instance of ChoiceContainer for delimiter.
:type delimiter: Union[str, ChoiceContainer]
| |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates random traffic for SUMO simulation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import os
from absl import logging
import numpy as np
import sumolib
from simulation_research.traffic import file_util
from simulation_research.traffic import map_visualizer
TimeSamplerGammaKTheta = collections.namedtuple(
'TimeSampleGammaKTheta', ['k', 'theta'])
TimeSamplerGammaAlphaBeta = collections.namedtuple(
'TimeSampleGammaAlphaBeta', ['alpha', 'beta'])
TimeSamplerGammaMeanStd = collections.namedtuple(
'TimeSampleGammaMeanStd', ['mean', 'std'])
DemandsWithPath = collections.namedtuple(
'DemandsWithPath', ['time', 'origin', 'destination', 'num_cars', 'route'])
DemandsWithTAZExit = collections.namedtuple(
'DemandsWithTAZExit', ['time', 'origin', 'num_cars'])
FREEWAY_EDGE_TYPES = ['highway.motorway', 'highway.motorway_carpool']
ARTERIAL_EDGE_TYPES = ['highway.motorway_link', 'highway.primary',
'highway.primary_link', 'highway.secondary',
'highway.secondary_link', 'highway.trunk',
'highway.trunk_link']
class RandomTrafficGenerator(object):
"""Generates random traffic using inhomogeneous Poisson models."""
def __init__(self, sumo_net=None):
self._net = sumo_net
self._map_visualizer = map_visualizer.MapVisualizer(sumo_net)
def get_freeway_input_output(self, figure_path=None):
"""Gets freeway inputs and outputs of the map.
Args:
figure_path: The figure path for the freeway and input/output edges. If it
is set as None, then no output figure.
Returns:
input_output_pairs: A list of input-output pairs.
"""
freeway_edges = self._net.filterEdges(FREEWAY_EDGE_TYPES)
(freeway_input_edges,
freeway_output_edges) = self._net.getInputOutputEdges(freeway_edges)
if figure_path is not None:
self._map_visualizer.plot_edges(
[(freeway_edges, 'darkgreen', 1),
(freeway_input_edges, 'lime', 1),
(freeway_output_edges, 'red', 1)],
output_figure_path=figure_path)
input_output_pairs = list(itertools.product(freeway_input_edges,
freeway_output_edges))
return input_output_pairs
def get_arterial_input_output(self, figure_path=None):
"""Gets freeway inputs and outputs of the map.
Args:
figure_path: The figure path for the arterial roads and input/output
edges. If it is set as None, then no output figure.
Returns:
input_output_pairs: A list of input-output pairs.
"""
freeway_edges = self._net.filterEdges(FREEWAY_EDGE_TYPES)
(freeway_input_edges,
freeway_output_edges) = self._net.getInputOutputEdges(freeway_edges)
arterial_edges = self._net.filterEdges(ARTERIAL_EDGE_TYPES)
(arterial_input_edges,
arterial_output_edges) = self._net.getInputOutputEdges(arterial_edges)
if figure_path is not None:
self._map_visualizer.plot_edges(
[(freeway_edges, 'darkgreen', 1),
(freeway_input_edges, 'lime', 1),
(freeway_output_edges, 'red', 1),
(arterial_edges, 'b', 0.5),
(arterial_input_edges, 'lime', 0.9),
(arterial_output_edges, 'red', 0.9)],
output_figure_path=figure_path)
# Creates input-->output pairs.
input_output_pairs = (
list(itertools.product(freeway_input_edges, arterial_output_edges)) +
list(itertools.product(arterial_input_edges, freeway_output_edges)) +
list(itertools.product(arterial_input_edges, arterial_output_edges)))
return input_output_pairs
def setup_shortest_routes(self,
input_output_pairs,
edge_type_list=None,
vehicle_type_list='passenger',
routes_file=None,
figures_folder=None):
"""Generates the routes on freeways only.
Args:
input_output_pairs: Input-->output pairs of edges.
edge_type_list: Restrict the type of edges.
vehicle_type_list: Restrict the type of vehicles.
routes_file: The name of the output route file.
figures_folder: Whether to create figures for the routes. If it is set as
None, then no output figures. Since there chould be many figures for
all the routes, individual figures are named, for example
"edgeidfrom_edgeidto_route.pdf", automatically.
Returns:
A list of routes. Each entry has `edge_from`, `edge_to`, `path_edges`,
`edges_ids`, `route_length`, `route_id`.
"""
routes = []
route_counter = 0
for from_to_pair in input_output_pairs:
valid_path = False
edge_from, edge_to = from_to_pair
path_edges, route_length = self._net.getRestrictedShortestPath(
edge_from,
edge_to,
vehicleClass=vehicle_type_list,
edgeType=edge_type_list)
# Regardless of whether there is a path between them.
if figures_folder is not None:
selected_edges = [([edge_from], 'lime', 1), ([edge_to], 'red', 1)]
if route_length < float('inf'):
valid_path = True
route_counter += 1
edges_ids = [edge.getID() for edge in path_edges]
edges_ids = ' '.join(edges_ids)
route_id = (edge_from.getID() + '_to_' +
edge_to.getID() + '_' + str(route_counter))
token = ' <route id="%s" edges="%s"/>' % (route_id, edges_ids)
if routes_file:
file_util.append_line_to_file(routes_file, token)
route = {}
route['edge_from'] = edge_from
route['edge_to'] = edge_to
route['path_edges'] = path_edges
route['edges_ids'] = edges_ids
route['route_length'] = route_length
route['route_id'] = route_id
routes.append(route)
if figures_folder is not None and valid_path:
selected_edges = [(path_edges, 'darkblue', 1)] + selected_edges
figure_path = os.path.join(
figures_folder,
(edge_from.getID() + '_' + edge_to.getID() + '_route.pdf'))
self._map_visualizer.plot_edges(
selected_edges, output_figure_path=figure_path)
if routes_file:
file_util.append_line_to_file(routes_file, '')
return routes
def generate_incomplete_routes_flow(self,
time_point,
time_step_size,
incomplete_routes_demands,
routes_file):
"""Generates incomplete routes.
All traffic flow should be sorted by the departure time.
Args:
time_point: the time point for scheduled demands.
time_step_size: time step size for the demands.
incomplete_routes_demands: incomplete_routes_demands =
[('700010432', '706447588#1', 1.2), ('700010432', '5018655', 1)].
routes_file: output demand file.
"""
for edge_from, edge_to, rate in incomplete_routes_demands:
flow_id = edge_from + '_to_' + edge_to + '_' + str(time_point)
num_cars = np.random.poisson(time_step_size * rate)
token = (' <flow id="%s" begin="%d" end="%d" number="%d" ' %
(flow_id, time_point, time_point + time_step_size, num_cars))
token += ('from="%s" to="%s" ' % (edge_from, edge_to))
token += 'departPos="base" departLane="best" departSpeed="max"/>'
if routes_file:
file_util.append_line_to_file(routes_file, token)
def generate_routes_flow(self,
time_point,
time_step_size,
routes,
routes_demands,
routes_file):
"""Generates traffic according to the demand rates.
Args:
time_point: The timestamp of the traffic.
time_step_size: Time step size.
routes: A list of routes.
routes_demands: A list of route indices and corresponding demand rates.
routes_file: Output file.
"""
for route_index, rate in routes_demands:
route_id = routes[route_index]['route_id']
flow_id = route_id + '_' + str(time_point)
num_cars = np.random.poisson(time_step_size * rate)
token = (' <flow id="%s" begin="%d" end="%d" number="%d" ' %
(flow_id, time_point, time_point + time_step_size, num_cars,))
token += ('route="%s" ' % (route_id))
token += 'departPos="base" departLane="best" departSpeed="max"/>'
if routes_file:
file_util.append_line_to_file(routes_file, token)
@classmethod
def generate_departure_time(cls, parameters, sample_size):
"""Generates random time points for trips.
Args:
parameters: This is in a `collections.namedtuple`. The components can be
different depends on the distribution.
1. `parameters.distribution` == 'gamma_k_theta', `k`, `theta`, see
definition in https://en.wikipedia.org/wiki/Gamma_distribution.
`k` is called shape, and `theta` is called scale.
2. `parameters.distribution` == `gamma_alpha_beta`, `alpha`, `beta`,
see definition https://en.wikipedia.org/wiki/Gamma_distribution.
`alpha` is called shape, and `beta` is called rate. Equivalently,
`k` = `alpha`, `theta` = 1 / `beta`.
3. `parameters.distribution` == 'gamma_mean_std', `mean`, `std`, the
corresponding `k`, `theta` will be computed via the `mean` and the
`std`. It is more straightforward to use this pair of
parameters.
sample_size: Size of random samples.
Returns:
Random time point samples.
"""
if isinstance(parameters, TimeSamplerGammaKTheta):
# Mean = shape * scale. Variance = shape * scale * scale.
# Mode = (shape - 1) * scale.
return np.random.gamma(parameters.k, parameters.theta, sample_size)
elif isinstance(parameters, TimeSamplerGammaAlphaBeta):
k = parameters.alpha
theta = 1 / parameters.beta
return np.random.gamma(k, theta, sample_size)
elif isinstance(parameters, TimeSamplerGammaMeanStd):
mean, std = parameters.mean, parameters.std
k = (mean / std) ** 2
theta = mean / k
return np.random.gamma(k, theta, sample_size)
else:
raise ValueError('Unknown trip time distribution.')
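# Worked example (illustrative, not from the original source): to draw departure
# times with a mean of 300 s and a std of 60 s, TimeSamplerGammaMeanStd(300, 60)
# gives k = (300 / 60) ** 2 = 25 and theta = 300 / 25 = 12, so the call reduces
# to np.random.gamma(25, 12, sample_size); mean = k * theta = 300 and
# std = sqrt(k) * theta = 60, as requested.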
@classmethod
def create_evacuation_shortest_path_demands(
cls,
edges,
departure_time_distribution_parameters,
cars_per_meter,
evacuation_edges,
evacuation_path_trees,
evacuation_path_length):
"""Creates car demands for a group of edges.
The generator has the following assumptions:
1. On average there are H houses on a unit length residential edge, and
the houses are uniformly distributed.
2. On average there are C cars for each house, and all houses have cars.
3. To simplify the model, we assume the cars on the same edge leave at the
same time.
If an edge has length L, it will generate floor(L * H * C) cars. Thus
the input constant `cars_per_meter` := H * C. This
constant can also be estimated using total number of cars of the city
divided by total length of the residential roads. Same calculation for the
roads in the parking area.
Args:
edges: Input edges. It has to be in sumolib.net.edge.Edge type, since
we need edge information from that.
departure_time_distribution_parameters: The time distribution parameters
for the vehicle departures. See `generate_departure_time` function
for more details.
cars_per_meter: Number of cars to be generated per unit length of
road.
evacuation_edges: The exits of the evacuation plan.
evacuation_path_trees: The shortest path tree from all roads to each exit.
This function assumes the vehicles choose the closest exit.
evacuation_path_length: The corresponding path length for the shortest
path trees above.
Returns:
zipped: The demands are zipped in DemandsWithPath. Each tuple has entries
1. departure time point, 2. departure road, 3. destination
(one of the evacuation exits), 4. number of cars leaving from that
road, 5. evacuation path
"""
if isinstance(edges, sumolib.net.edge.Edge):
edges = [edges]
origin_edges = []
num_cars_per_edge = []
evacuation_destination = []
evacuation_route = []
# Calculates the number of cars on each edge.
for edge in edges:
if not isinstance(edge, sumolib.net.edge.Edge):
raise ValueError('Edge has to be type sumolib.net.edge.Edge.')
num_cars = int(np.floor(edge.getLength() * cars_per_meter))
origin_edges.append(edge.getID())
# 0 car is acceptable, it will be discarded later when the file is
| |
# NC_optical_properties_v_alt_variable_coating.py
from pymiecoated import Mie
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
import calendar
from scipy.optimize import curve_fit
cloud_droplet_conc = 0.5
wavelength = 550 #nm
rBC_RI = complex(2.26,1.26)
min_coat = 0 #assumed minimum coating thickness for particles with LEO failure or outside of detection range = 0
max_coat = 100 #assumed maximum coating thickness for particles with LEO failure or outside of detection range = 100
savefig = False
show_distr_plots = False
#alt parameters
min_alt = 0
max_alt = 6000
alt_incr = 1000  # 800
#distr parameters
min_BC_VED = 80
max_BC_VED = 220
bin_incr = 10
flight_times = {
#'science 1' : [datetime(2015,4,5,9,43),datetime(2015,4,5,13,48),15.6500, 78.2200, 'Longyearbyen (sc1)'] , #longyearbyen
#'ferry 1' : [datetime(2015,4,6,9,0),datetime(2015,4,6,11,0),15.6500, 78.2200] ,
#'ferry 2' : [datetime(2015,4,6,15,0),datetime(2015,4,6,18,0),-16.6667, 81.6000] ,
'science 2' : [datetime(2015,4,7,16,31),datetime(2015,4,7,20,48),-62.338, 82.5014,'Alert (sc2-5)'] , #Alert
'science 3' : [datetime(2015,4,8,13,51),datetime(2015,4,8,16,43),-62.338, 82.5014,'Alert (sc2-5)'] , #Alert
'science 4' : [datetime(2015,4,8,17,53),datetime(2015,4,8,21,22),-70.338, 82.5014,'Alert (sc2-5)'] , #Alert
'science 5' : [datetime(2015,4,9,13,50),datetime(2015,4,9,17,47),-62.338, 82.0,'Alert (sc2-5)'] , #Alert
##'ferry 3' : [datetime(2015,4,10,14,0),datetime(2015,4,10,17,0),-75.338, 81] ,
'science 6' : [datetime(2015,4,11,15,57),datetime(2015,4,11,21,16),-90.9408, 80.5,'Eureka (sc6-7)'] , #eureka
'science 7' : [datetime(2015,4,13,15,14),datetime(2015,4,13,20,52),-95, 80.1,'Eureka (sc6-7)'] , #eureka
#'science 8' : [datetime(2015,4,20,15,49),datetime(2015,4,20,19,49),-133.7306, 67.1,'Inuvik (sc8-10)'], #inuvik
#'science 9' : [datetime(2015,4,20,21,46),datetime(2015,4,21,1,36),-133.7306, 69.3617,'Inuvik (sc8-10)'] , #inuvik
#'science 10' : [datetime(2015,4,21,16,07),datetime(2015,4,21,21,24),-131, 69.55,'Inuvik (sc8-10)'], #inuvik
}
#database connection
cnx = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', database='black_carbon')
cursor = cnx.cursor()
#define methods
def lognorm(x_vals, A, w, xc):
return A/(np.sqrt(2*math.pi)*w*x_vals)*np.exp(-(np.log(x_vals/xc))**2/(2*w**2))
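# Descriptive note (added): in this parameterization xc is the median of the
# underlying lognormal and w is the natural log of the geometric standard
# deviation, which is why the fits below report sigma = math.exp(popt[1]);
# A scales the overall amplitude. The peak of this curve lies at xc * exp(-w**2),
# which is what the brute-force search in find_dg below returns.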
def MieCalc(wavelength,core_dia,coat_th):
mie = Mie()
wl = wavelength
core_rad = core_dia/2 #nm
shell_thickness = coat_th
size_par = 2*math.pi*core_rad*1/wl
#Refractive indices PSL 1.59-0.0i rBC 2.26- 1.26i shell 1.5-0.0i
core_RI = rBC_RI
shell_rad = core_rad + shell_thickness
shell_RI = complex(1.5,0.0)
mie.x = 2*math.pi*core_rad/wl
mie.m = core_RI
mie.y = 2*math.pi*shell_rad/wl
mie.m2 = shell_RI
abs = mie.qabs()
abs_xs_nm2 = abs*math.pi*shell_rad**2 #in nm^2
abs_xs = abs_xs_nm2*1e-14 #in cm^2
sca = mie.qsca()
sca_xs_nm2 = sca*math.pi*shell_rad**2 #in nm^2
sca_xs = sca_xs_nm2*1e-14 #in cm^2
ext_xs = sca_xs+abs_xs
return [abs_xs,sca_xs,ext_xs]
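# Usage note (added): MieCalc(550, 200, 50) returns [abs_xs, sca_xs, ext_xs] in
# cm^2 for a 200 nm rBC core (m = 2.26 + 1.26j) with a 50 nm shell
# (m = 1.5 + 0.0j) at 550 nm; the cross sections are the Mie efficiencies times
# the geometric area of the coated particle (pi * r_shell^2).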
def find_dg(A, w, xc):
fit_vals = {}
for bin_val in range (35,1000,1):
fit_val = lognorm(bin_val, A, w, xc)
fit_vals[bin_val] = fit_val
return max(fit_vals.iterkeys(), key=(lambda key: fit_vals[key]))
def fraction_sampled(A, w, xc):
fit_vals = []
fit_vals_m = []
for bin_val in range (35,1000,1):
fit_val = lognorm(bin_val, A, w, xc)
fit_vals.append(fit_val)
full_distr = np.sum(fit_vals)
for bin_val in range (min_BC_VED,max_BC_VED,1):
fit_val_m = lognorm(bin_val, A, w, xc)
fit_vals_m.append(fit_val_m)
sampled_distr = np.sum(fit_vals_m)
return sampled_distr/full_distr
def sampling_time_at_alt(start_time,end_time,min_alt,max_alt):
cursor.execute(('''SELECT ftd.UNIX_UTC_ts, ftd.alt
FROM polar6_flight_track_details ftd
JOIN polar6_fssp_cloud_data fssp on ftd.fssd_id = fssp.id
WHERE ftd.UNIX_UTC_ts >= %s and ftd.UNIX_UTC_ts < %s and fssp.FSSPTotalConc <=%s and ftd.alt >=%s and ftd.alt < %s ORDER BY ftd.UNIX_UTC_ts'''),
(start_time,end_time,cloud_droplet_conc,min_alt,max_alt))
alt_data = cursor.fetchall()
first_line = True
temp_list = []
interval_list = []
for line in alt_data:
current_ts = line[0]
alt = line[1]
if first_line == True:
prior_ts = current_ts
first_line = False
if (current_ts - prior_ts) <= 1:
temp_list.append(current_ts)
prior_ts = current_ts
else:
time_interval = (temp_list[-1]-temp_list[0]) #in sec
interval_list.append(time_interval)
temp_list = []
temp_list.append(current_ts)
prior_ts = current_ts
#add in last interval
if len(temp_list):
time_interval = (temp_list[-1]-temp_list[0]) #in sec
interval_list.append(time_interval)
total_sampling_time = np.sum(interval_list)
else:
total_sampling_time = 0
return total_sampling_time
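# Descriptive note (added): the query above returns flight-track records
# (nominally 1 Hz) in the altitude band with the cloud screen applied; the loop
# splits them wherever consecutive timestamps differ by more than 1 s and sums
# the lengths of the contiguous segments, so the result is the total in-band,
# cloud-free sampling time in seconds.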
def assign_min_max_coat(VED):
Dp_Dc = np.nan
optical_properties_max_coat = MieCalc(wavelength,VED,max_coat)
abs_xsec_max_coat = optical_properties_max_coat[0]
sca_xsec_max_coat = optical_properties_max_coat[1]
optical_properties_min_coat = MieCalc(wavelength,VED,min_coat)
abs_xsec_min_coat = optical_properties_min_coat[0]
sca_xsec_min_coat = optical_properties_min_coat[1]
return [Dp_Dc,abs_xsec_max_coat,sca_xsec_max_coat,abs_xsec_min_coat,sca_xsec_min_coat]
def assemble_bin_data(retrieved_records):
#set up data structure
LEO_successes = 0
LEO_failures = 0
bin_data = {
'mass':[],
'Dp_Dc':[],
'STP_correction_factor':[],
'sample_flow':[],
'abs_xsec_max_coat':[],
'sca_xsec_max_coat':[],
'abs_xsec_min_coat':[],
'sca_xsec_min_coat':[],
'abs_xsec_bare':[],
}
#parse each row in results
for row in retrieved_records:
mass = row[0]
coat = row[1]
LEO_amp = row[2]
sample_flow = row[3]
temperature = row[4] + 273.15 #convert to Kelvin
pressure = row[5]
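# Descriptive note (added): the next two lines convert the rBC mass in fg to a
# volume-equivalent diameter in nm assuming the rBC density of 1.8 g cm^-3 used
# in the formula (10**15 fg per g, 10**7 nm per cm), and build the factor that
# scales measured concentrations to standard temperature and pressure
# (273.15 K, 101325 Pa).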
VED = (((mass/(10**15*1.8))*6/math.pi)**(1/3.0))*10**7
STP_correction_factor = (101325/pressure)*(temperature/273.15)
#succesful LEO fitting and pos coating
if (0 <= LEO_amp < 45000):
LEO_successes += 1
if coat >0:
optical_properties = MieCalc(wavelength,VED,coat)
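# Descriptive note (added): 'bin' below is the module-level loop variable set in
# the main script (the lower edge of the current VED bin), so Dp/Dc is only
# recorded for cores in the 160-180 nm bin.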
if 160 <= bin < 180:
Dp_Dc = (VED+2*coat)/VED
else:
Dp_Dc = np.nan
opt_results = [Dp_Dc,optical_properties[0],optical_properties[1],optical_properties[0],optical_properties[1]] #[Dp_Dc,abs_xsec_max_coat,sca_xsec_max_coat,abs_xsec_min_coat,sca_xsec_min_coat]
else:
opt_results = assign_min_max_coat(VED)
#failed LEO fitting or neg coating, we calc a max and min case for these
else:
LEO_failures += 1
opt_results = assign_min_max_coat(VED)
bare_optical_properties = MieCalc(wavelength,VED,0.0)
abs_xsec_bare = bare_optical_properties[0]
bin_data['mass'].append(mass)
bin_data['Dp_Dc'].append(opt_results[0])
bin_data['STP_correction_factor'].append(STP_correction_factor)
bin_data['sample_flow'].append(sample_flow)
bin_data['abs_xsec_max_coat'].append(opt_results[1])
bin_data['sca_xsec_max_coat'].append(opt_results[2])
bin_data['abs_xsec_min_coat'].append(opt_results[3])
bin_data['sca_xsec_min_coat'].append(opt_results[4])
bin_data['abs_xsec_bare'].append(abs_xsec_bare)
bin_data_list = [bin_data,LEO_successes,LEO_failures]
return bin_data_list
def calc_bin_optical_properties(bin_start, binning_incr, bin_data_list,binned_data):
bin_data = bin_data_list[0]
LEO_successes = bin_data_list[1]
LEO_failures = bin_data_list[2]
bin_mid = bin_start + (binning_incr/2)
total_mass = np.sum(bin_data['mass']) #in fg
mean_sample_flow = np.nanmean(bin_data['sample_flow']) #in cm2/min
mean_STP_correction_factor = np.nanmean(bin_data['STP_correction_factor']) #no units
total_samping_time = sampling_time_at_alt(UNIX_start_time,UNIX_end_time,lower_alt,(lower_alt + alt_incr))
total_vol = mean_sample_flow*total_samping_time/60 #factor of 60 to convert minutes to secs, result is in cc
mass_conc = (total_mass/total_vol)*mean_STP_correction_factor #in fg/cm3
numb_conc = (len(bin_data['mass'])/total_vol)*mean_STP_correction_factor #in #/cm3
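# Descriptive note (added): the two lines below normalize the bin concentrations
# by the natural-log width of the bin, ln(upper) - ln(lower), giving the dM/dlnD
# and dN/dlnD values that the lognormal fits in fit_distrs operate on.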
bin_mass_conc_norm = mass_conc/(math.log((bin_start+binning_incr))-math.log(bin_start)) #normalize mass
bin_numb_conc_norm = numb_conc/(math.log((bin_start+binning_incr))-math.log(bin_start)) #normalize number
mean_Dp_Dc = np.nanmean(bin_data['Dp_Dc'])
bin_vol_abs_coeff_max = np.nanmean(bin_data['abs_xsec_max_coat']) * bin_numb_conc_norm #in cm-1 (cm2 * /cm3)
bin_vol_sca_coeff_max = np.nanmean(bin_data['sca_xsec_max_coat']) * bin_numb_conc_norm #in cm-1
bin_vol_abs_coeff_min = np.nanmean(bin_data['abs_xsec_min_coat']) * bin_numb_conc_norm #in cm-1
bin_vol_sca_coeff_min = np.nanmean(bin_data['sca_xsec_min_coat']) * bin_numb_conc_norm #in cm-1
bin_vol_abs_coeff_bare = np.nanmean(bin_data['abs_xsec_bare']) * bin_numb_conc_norm #in cm-1 - need to calc absorption enhancement
fraction_successful = LEO_successes*1.0/(LEO_successes+LEO_failures)
binned_data[bin_mid] = {
'bin_mass_conc':mass_conc,
'bin_numb_conc':numb_conc,
'bin_Dp_Dc':mean_Dp_Dc,
'bin_mass_conc_norm': bin_mass_conc_norm ,
'bin_numb_conc_norm': bin_numb_conc_norm ,
'bin_vol_abs_coeff_max': bin_vol_abs_coeff_max,
'bin_vol_sca_coeff_max': bin_vol_sca_coeff_max,
'bin_vol_abs_coeff_min': bin_vol_abs_coeff_min,
'bin_vol_sca_coeff_min': bin_vol_sca_coeff_min,
'bin_vol_abs_coeff_bare':bin_vol_abs_coeff_bare,
'fraction_successful': fraction_successful,
}
return binned_data
def fit_distrs(binned_data_dict,bin_increment):
#create bin and step size for extrapolating to the full distr
fit_bins = []
for x in range(50,1000,bin_increment):
fit_bins.append(x)
fit_concs = {}
bin_midpoints = binned_data_dict.keys()
number_concs_norm = []
mass_concs_norm = []
LEO_fractions = []
#fit the number binned data so we can extrapolate outside of the detection range
for key in bin_midpoints:
number_concs_norm.append(binned_data_dict[key]['bin_numb_conc_norm'])
mass_concs_norm.append(binned_data_dict[key]['bin_mass_conc_norm'])
LEO_fractions.append(binned_data_dict[key]['fraction_successful'])
try:
popt, pcov = curve_fit(lognorm, np.array(bin_midpoints), np.array(number_concs_norm))
integrated_number = 0
for bin_val in fit_bins:
fit_number_val = lognorm(bin_val, popt[0], popt[1], popt[2])
fit_concs[bin_val] = [fit_number_val]
un_normed_numb = fit_number_val*(math.log((bin_val+bin_increment/2))-math.log(bin_val-bin_increment/2))
integrated_number = integrated_number + un_normed_numb
Dg_number = find_dg(popt[0], popt[1], popt[2])
sigma_number = math.exp(popt[1])
print Dg_number
except Exception,e:
integrated_number = np.nan
for bin_val in fit_bins:
fit_concs[bin_val]= [np.nan]
print str(e)
print 'number fit failure'
#fit the mass binned data so we can extrapolate outside of the detection range
try:
popt, pcov = curve_fit(lognorm, np.array(bin_midpoints), np.array(mass_concs_norm))
for bin_val in fit_bins:
fit_mass_val = lognorm(bin_val, popt[0], popt[1], popt[2])
fit_concs[bin_val].append(fit_mass_val)
Dg_mass_result = find_dg(popt[0], popt[1], popt[2])
fraction_mass_meas = fraction_sampled(popt[0], popt[1], popt[2])
sigma_mass_result = math.exp(popt[1])
print Dg_mass_result
except Exception,e:
Dg_mass_result = np.nan
sigma_mass_result = np.nan
fraction_mass_meas = np.nan
for bin_val in fit_bins:
fit_concs[bin_val].append(np.nan)
print str(e)
print 'mass fit failure'
fitted_data = []
for key,val in fit_concs.iteritems():
fitted_data.append([key, val[0],val[1]])
fitted_data.sort()
return [fitted_data,Dg_mass_result,sigma_mass_result,fraction_mass_meas,integrated_number]
def plot_distrs(fitted_concs,binned_data_results):
#####plotting distrs if desired
#fitted data
fitted_bin_mids = [row[0] for row in fitted_concs]
fit_binned_number_conc_vals = [row[1] for row in fitted_concs]
fit_binned_mass_conc_vals = [row[2] for row in fitted_concs]
#Leo successful fraction data
LEO_pts = []
binned_distrs = []
for bin_midpt in binned_data_results:
binned_distrs.append([bin_midpt,binned_data_results[bin_midpt]['bin_numb_conc_norm'],binned_data_results[bin_midpt]['bin_mass_conc_norm'],binned_data_results[bin_midpt]['fraction_successful']])
LEO_fraction = binned_data_results[bin_midpt]['fraction_successful']
if LEO_fraction > 0.97:
LEO_pts.append(bin_midpt)
LEO_cutoff = min(LEO_pts or [np.nan])
bin_midpt = [row[0] for row in binned_distrs]
number_concs_norm = [row[1] for row in binned_distrs]
mass_concs_norm = [row[2] for row in binned_distrs]
LEO_fractions = [row[3] for row in binned_distrs]
#plots
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(bin_midpt,number_concs_norm, color = 'k',marker='o')
ax1.plot(fitted_bin_mids,fit_binned_number_conc_vals, color = 'k',marker=None, label = 'number')
ax1.scatter(bin_midpt,mass_concs_norm, color = 'b',marker='o')
ax1.plot(fitted_bin_mids,fit_binned_mass_conc_vals, color = 'b',marker=None, label = 'mass')
ax1.set_xscale('log')
ax1.set_xlabel('rBC core VED (nm)')
ax1.set_ylabel('d/dlog(VED)')
ax1.set_ylim(0,35)
ax1.set_xlim(40,700)
plt.legend()
ax2 = ax1.twinx()
ax2.scatter(bin_midpt,LEO_fractions, color = 'r',marker='s')
ax2.set_ylim(0,1)
ax2.set_xlim(40,700)
ax2.set_ylabel('fraction successful LEO fits', color='r')
ax2.axvspan(min_BC_VED, LEO_cutoff, alpha=0.15, color='yellow')
ax2.axvspan(LEO_cutoff, max_BC_VED, alpha=0.15, color='green')
ax2.fill([160,180,180,160],[0,0,1,1], fill=False, hatch='\\',color ='grey')
ax2.fill([130,220,220,130],[0,0,1,1], fill=False, hatch='//',color ='grey')
ax2.axvspan(35, min_BC_VED, alpha=0.15, color='grey')
ax2.axvspan(max_BC_VED, 1000, alpha=0.15, color='grey')
ax2.set_xticks([40,50,60,80,100,150,200,300,400,600])
ax2.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.show()
def add_vals_outside_range(fit_concs,binned_data_dict):
for row in fit_concs:
bin_mid = row[0]
if bin_mid > max_BC_VED or bin_mid < min_BC_VED:
bin_mass_conc_norm = row[2]
bin_numb_conc_norm = row[1]
fitted_optical_properties_max = MieCalc(wavelength,bin_mid,max_coat) #resturns [abs xsec, sca xsec, ext xsec]
fitted_optical_properties_min = MieCalc(wavelength,bin_mid,min_coat)
fitted_optical_properties_bare = MieCalc(wavelength,bin_mid,0.0)
bin_vol_abs_coeff_max = fitted_optical_properties_max[0] * bin_numb_conc_norm #in cm-1 (cm2 * /cm3)
bin_vol_sca_coeff_max = fitted_optical_properties_max[1] * bin_numb_conc_norm #in cm-1
bin_vol_abs_coeff_min = fitted_optical_properties_min[0] * bin_numb_conc_norm #in cm-1
bin_vol_sca_coeff_min = fitted_optical_properties_min[1] * bin_numb_conc_norm #in cm-1
bin_vol_abs_coeff_bare = fitted_optical_properties_bare[0] * bin_numb_conc_norm #in cm-1
binned_data_dict[bin_mid] = {
'bin_mass_conc': 0,
'bin_Dp_Dc': np.nan,
'bin_mass_conc_norm': bin_mass_conc_norm ,
'bin_numb_conc_norm': bin_numb_conc_norm ,
'bin_vol_abs_coeff_max': bin_vol_abs_coeff_max,
'bin_vol_sca_coeff_max': bin_vol_sca_coeff_max,
'bin_vol_abs_coeff_min': bin_vol_abs_coeff_min,
'bin_vol_sca_coeff_min': bin_vol_sca_coeff_min,
'bin_vol_abs_coeff_bare':bin_vol_abs_coeff_bare,
#'fraction_successful': fraction_successful,
}
return binned_data_dict
#calc optical parameters for each altitude
def calc_opti_params(binned_data_dict,Dg_mass,sigma_mass,plot_data,fraction_measured,BC_frac):
mass_concs_raw_sum = 0
mass_concs_sum = 0
vol_abs_coeff_sum_max = 0
vol_sca_coeff_sum_max = 0
vol_abs_coeff_sum_min = 0
vol_sca_coeff_sum_min = 0
vol_abs_coeff_sum_bare = 0
Dp_Dcs = []
for bin_mid in binned_data_dict: #integrate
Dp_Dcs.append(binned_data_dict[bin_mid]['bin_Dp_Dc'])
mass_concs_raw_sum = mass_concs_raw_sum + binned_data_dict[bin_mid]['bin_mass_conc']
mass_concs_sum = mass_concs_sum + binned_data_dict[bin_mid]['bin_mass_conc_norm']
vol_abs_coeff_sum_max = vol_abs_coeff_sum_max + binned_data_dict[bin_mid]['bin_vol_abs_coeff_max']
vol_sca_coeff_sum_max = vol_sca_coeff_sum_max + binned_data_dict[bin_mid]['bin_vol_sca_coeff_max']
vol_abs_coeff_sum_min = vol_abs_coeff_sum_min + binned_data_dict[bin_mid]['bin_vol_abs_coeff_min']
vol_sca_coeff_sum_min = vol_sca_coeff_sum_min + binned_data_dict[bin_mid]['bin_vol_sca_coeff_min']
vol_abs_coeff_sum_bare = vol_abs_coeff_sum_bare + binned_data_dict[bin_mid]['bin_vol_abs_coeff_bare']
Dp_Dc_mean = np.nanmean(Dp_Dcs)
MAC_max = vol_abs_coeff_sum_max*(10**11)/mass_concs_sum
MAC_min = vol_abs_coeff_sum_min*(10**11)/mass_concs_sum
SSA_max = vol_sca_coeff_sum_max/(vol_abs_coeff_sum_max+vol_sca_coeff_sum_max)
SSA_min = vol_sca_coeff_sum_min/(vol_abs_coeff_sum_min+vol_sca_coeff_sum_min)
AE_max = vol_abs_coeff_sum_max/vol_abs_coeff_sum_bare
AE_min = vol_abs_coeff_sum_min/vol_abs_coeff_sum_bare
mass_conc_total = mass_concs_raw_sum/fraction_measured
#add overall data to dict
mean_alt = lower_alt + alt_incr/2
if mean_alt not in plot_data:
plot_data[mean_alt] = {
'mass_concs' :[],
'Dp_Dcs' :[],
'Dgs_mass' :[],
'numb_frac_w_BC':[],
'sigmas_mass' :[],
'MAC_maxs' :[],
'MAC_mins' :[],
'SSA_maxs' :[],
'SSA_mins' :[],
'AE_maxs' :[],
'AE_mins' :[],
}
plot_data[mean_alt]['Dgs_mass' ].append(Dg_mass)
plot_data[mean_alt]['Dp_Dcs' ].append(Dp_Dc_mean)
plot_data[mean_alt]['sigmas_mass'].append(sigma_mass)
plot_data[mean_alt]['mass_concs'].append(mass_conc_total)
plot_data[mean_alt]['numb_frac_w_BC'].append(BC_frac)
plot_data[mean_alt]['MAC_maxs' ].append(MAC_max)
plot_data[mean_alt]['MAC_mins' ].append(MAC_min)
plot_data[mean_alt]['SSA_maxs' ].append(SSA_max)
plot_data[mean_alt]['SSA_mins' ].append(SSA_min)
plot_data[mean_alt]['AE_maxs' ].append(AE_max)
plot_data[mean_alt]['AE_mins' ].append(AE_min)
return plot_data
##start script
plot_data={}
for flight in flight_times:
print flight
lower_alt = min_alt
start_time = flight_times[flight][0]
end_time = flight_times[flight][1]
UNIX_start_time = calendar.timegm(start_time.utctimetuple())
UNIX_end_time = calendar.timegm(end_time.utctimetuple())
print
while (lower_alt + alt_incr) <= max_alt:
binned_data = {}
print lower_alt, lower_alt + alt_incr
for bin in range(min_BC_VED,max_BC_VED,bin_incr):
#retrieve the data for this bin
cursor.execute(('''SELECT bc.rBC_mass_fg, bc.coat_thickness_nm_jancalib, bc.LF_scat_amp, hk.sample_flow, ftd.temperature_C, ftd.BP_Pa
FROM polar6_coating_2015 bc
JOIN polar6_fssp_cloud_data fssp on bc.fssp_id = fssp.id
JOIN polar6_flight_track_details ftd on bc.flight_track_data_id = ftd.id
JOIN polar6_hk_data_2015 hk on bc.hk_data_id = hk.id
WHERE bc.rBC_mass_fg IS NOT NULL and bc.UNIX_UTC_ts >= %s and bc.UNIX_UTC_ts < %s and bc.particle_type = %s and fssp.FSSPTotalConc <=%s and ftd.alt >=%s and ftd.alt < %s and (POW(bc.rBC_mass_fg,(1/3.0))*101.994391398)>=%s and (POW( bc.rBC_mass_fg,(1/3.0))*101.994391398) <%s and hk.sample_flow >%s and hk.sample_flow <%s ORDER BY bc.UNIX_UTC_ts'''),
(UNIX_start_time,UNIX_end_time,'incand',cloud_droplet_conc,lower_alt, (lower_alt + alt_incr),bin, bin+bin_incr,100,200))
coat_data = cursor.fetchall()
#assemble the data for this bin
bin_data = assemble_bin_data(coat_data)
#calc the overall properties for this bin and add them to the dictionary for this alt
binned_data = calc_bin_optical_properties(bin,bin_incr,bin_data,binned_data)
#for this altitude, fit the mass and number distributions
distr_fit_results = fit_distrs(binned_data,bin_incr)
fit_conc_values = distr_fit_results[0]
Dg_mass = distr_fit_results[1]
sigma_mass = distr_fit_results[2]
fraction_mass_meas = distr_fit_results[3]
integrated_SP2_number = distr_fit_results[4]
if show_distr_plots == True:
plot_distrs(fit_conc_values,binned_data)
#add values from outside dectection range to the binned data
binned_data = add_vals_outside_range(fit_conc_values,binned_data)
#get UHSAS values
cursor.execute(('''SELECT AVG(uh.number_per_sccm)
FROM polar6_uhsas_total_number uh
JOIN polar6_fssp_cloud_data fssp on uh.fssp_id = fssp.id
JOIN polar6_flight_track_details ftd on uh.flight_track_data_id = ftd.id
WHERE uh.UNIX_UTC_ts >= %s and uh.UNIX_UTC_ts < %s and fssp.FSSPTotalConc <=%s and ftd.alt >=%s and ftd.alt < %s'''),
(UNIX_start_time,UNIX_end_time,cloud_droplet_conc,lower_alt, (lower_alt + alt_incr)))
uhsas_data = cursor.fetchall()
uhsas_number_conc = uhsas_data[0][0]
if uhsas_number_conc is None:
uhsas_number_conc = np.nan
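#number fraction of particles containing rBC: SP2-detected (incandescent)
#number divided by the UHSAS total number concentration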
numb_frac_w_BC = integrated_SP2_number/uhsas_number_conc
#calculate optical parameters for this altitude and add them to the overall dict
plot_data = calc_opti_params(binned_data,Dg_mass,sigma_mass,plot_data,fraction_mass_meas,numb_frac_w_BC)
lower_alt += alt_incr
cnx.close()
print 'next step . . .'
## make plots
plot_data_list = []
for mean_alt in plot_data:
mean_Dg | |
a non existing folder %s from %s.' % (
foldername, id_number)
log.warning(log_msg)
return HttpResponseForbidden(
'There is no such folder in %s' % escape(
revision.package.full_name))
revision, removed_attachments, removed_emptydirs = response
return render_json(request,
'json/%s_rmdir.json' % root, {
'revision': revision, 'path': foldername,
'removed_attachments': simplejson.dumps(removed_attachments),
'removed_dirs': simplejson.dumps(removed_emptydirs),
'foldername': foldername})
else:
revision.folder_remove(folder)
return render_json(request,
"json/folder_removed.json",
{'revision': revision, 'folder': folder})
@require_POST
@login_required
def switch_sdk(request, id_number, revision_number):
" switch SDK used to create XPI - sdk_id from POST "
revision = get_package_revision(id_number, 'a', revision_number)
if request.user.pk != revision.author.pk:
return HttpResponseForbidden('You are not the author of this Add-on')
sdk_id = request.POST.get('id', None)
sdk = get_object_or_404(SDK, id=sdk_id)
old_sdk = revision.sdk
log.info('Addon %s (%s) switched from Add-on Kit version %s to %s' % (
revision.package.full_name, revision.package.id_number,
old_sdk.version, sdk.version))
revision.sdk = sdk
revision.add_commit_message('Switched to Add-on Kit %s' % sdk.version)
revision.save()
return render_json(request,
"json/sdk_switched.json",
{'revision': revision, 'sdk': sdk,
'sdk_lib': revision.get_sdk_revision()})
@require_POST
@login_required
def upload_attachment(request, id_number, type_id,
revision_number=None, version_name=None):
""" Upload new attachment to the PackageRevision
"""
revision = get_package_revision(id_number, type_id, revision_number,
version_name)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to upload attachment to package (%s) "
"by non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden(
'You are not the author of this %s' % escape(
revision.package.get_type_name()))
file = request.FILES.get('upload_attachment')
filename = request.META.get('HTTP_X_FILE_NAME')
if not file:
log_msg = 'Path not found: %s, package: %s.' % (
filename, id_number)
log.error(log_msg)
return HttpResponseServerError('Path not found.')
content = file.read()
try:
attachment = revision.attachment_create_by_filename(
request.user, filename, content)
except ValidationError, e:
return HttpResponseForbidden('Validation errors.')
except Exception, e:
return HttpResponseForbidden(str(e))
return render_json(request,
"json/attachment_added.json",
{'revision': revision, 'attachment': attachment})
@require_POST
@login_required
def upload_attachments(request, id_number, type_id,
revision_number=None, version_name=None):
""" Upload new attachments to the PackageRevision
"""
revision = get_package_revision(id_number, type_id, revision_number,
version_name)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to upload attachment to package (%s) "
"by non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden(
'You are not the author of this %s' % escape(
revision.package.get_type_name()))
content = request.raw_post_data
filename = request.META.get('HTTP_X_FILE_NAME')
if not filename:
log_msg = 'Path not found: %s, package: %s.' % (
filename, id_number)
log.error(log_msg)
return HttpResponseServerError('Path not found.')
try:
attachment = revision.attachment_create_by_filename(
request.user, filename, content)
except ValidationError, e:
return HttpResponseForbidden('Validation errors.')
except Exception, e:
return HttpResponseForbidden(str(e))
return render_json(request,
"json/attachment_added.json",
{'revision': revision, 'attachment': attachment})
@require_POST
@login_required
def add_empty_attachment(request, id_number, type_id,
revision_number=None, version_name=None):
""" Add new empty attachment to the PackageRevision
"""
revision = get_package_revision(id_number, type_id, revision_number,
version_name)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to add attachment to package (%s) by "
"non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden(
'You are not the author of this %s' % escape(
revision.package.get_type_name()))
filename = request.POST.get('filename', False)
if not filename:
log_msg = 'Path not found: %s, package: %s.' % (
filename, id_number)
log.error(log_msg)
return HttpResponseServerError('Path not found.')
try:
attachment = revision.attachment_create_by_filename(request.user,
filename, '')
except ValidationError, e:
return HttpResponseForbidden('Validation error.')
except Exception, e:
return HttpResponseForbidden(str(e))
return render_json(request,
"json/attachment_added.json",
{'revision': revision, 'attachment': attachment})
@require_POST
@login_required
def revision_add_attachment(request, pk):
"""Add attachment, download if necessary
"""
revision = get_object_or_404(PackageRevision, pk=pk)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to add attachment to package (%s) by "
"non-owner (%s)" % (revision.package, request.user))
log.warning(log_msg)
return HttpResponseForbidden(
'You are not the author of this %s' % escape(
revision.package.get_type_name()))
url = request.POST.get('url', None)
filename = request.POST.get('filename', None)
log.debug(filename)
if not filename or filename == "":
log.error('Trying to create an attachment without name')
return HttpResponseForbidden('Path not found.')
content = ''
if url:
# validate url
field = URLField(verify_exists=True)
try:
url = field.clean(url)
except ValidationError, err:
log.debug('Invalid url provided (%s)\n%s' % (url,
'\n'.join(err.messages)))
return HttpResponseForbidden(("Loading attachment failed<br/>"
"%s") % '<br/>'.join(err.messages))
except Exception, err:
return HttpResponseForbidden(str(err))
att = urllib2.urlopen(url, timeout=settings.URLOPEN_TIMEOUT)
# validate filesize
att_info = att.info()
if 'content-length' in att_info.dict:
att_size = int(att_info.dict['content-length'])
if att_size > settings.ATTACHMENT_MAX_FILESIZE:
log.debug('File (%s) is too big (%db)' % (url, att_size))
return HttpResponseForbidden("Loading attachment failed<br/>"
"File is too big")
# download attachment's content
log.info('Downloading (%s)' % url)
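# read at most ATTACHMENT_MAX_FILESIZE + 1 bytes; receiving the full
# MAX + 1 bytes means the remote file exceeds the limit and is rejected below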
content = att.read(settings.ATTACHMENT_MAX_FILESIZE + 1)
if len(content) >= settings.ATTACHMENT_MAX_FILESIZE + 1:
log.debug('Downloaded file (%s) is too big' % url)
return HttpResponseForbidden("Loading attachment failed<br/>"
"File is too big")
log.debug('Downloaded %d, max %d' % (len(content),
settings.ATTACHMENT_MAX_FILESIZE))
att.close()
try:
attachment = revision.attachment_create_by_filename(
request.user, filename, content)
except ValidationError, err:
return HttpResponseForbidden('Validation error.<br/>%s' % str(err))
except Exception, err:
return HttpResponseForbidden(str(err))
return render_json(request,
"json/attachment_added.json",
{'revision': revision, 'attachment': attachment})
@require_POST
@login_required
def rename_attachment(request, id_number, type_id, revision_number):
"""
Rename an attachment in a PackageRevision
"""
revision = get_package_revision(id_number, type_id, revision_number)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to rename attachment in package (%s) "
"by non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden('You are not the author of this Package')
uid = request.POST.get('uid', '').strip()
try:
attachment = revision.attachments.get(pk=uid)
except:
log_msg = ('Attempt to rename a non existing attachment. attachment: '
'%s, package: %s.' % (uid, id_number))
log.warning(log_msg)
return HttpResponseForbidden(
'There is no such attachment in %s' % escape(
revision.package.full_name))
new_name = request.POST.get('new_filename')
new_ext = request.POST.get('new_ext') or attachment.ext
if not revision.validate_attachment_filename(new_name, new_ext):
return HttpResponseForbidden(
('Sorry, there is already an attachment in your add-on '
'with the name "%s.%s". Each attachment in your add-on '
'needs to have a unique name.') % (new_name, new_ext)
)
attachment.filename = new_name
attachment.ext = new_ext
attachment = revision.update(attachment)
return render_json(request,
"json/attachment_renamed.json",
{'revision': revision, 'attachment': attachment})
@require_POST
@login_required
def rmdir(request, pk, target, path):
"""
Remove a directory (attachments under 'data', modules under 'lib') from PackageRevision
"""
revision = get_object_or_404(PackageRevision, pk=pk)
if target not in ['data', 'lib']:
return HttpResponseForbidden('Unsupported target')
if target == 'lib':
return HttpResponseForbidden('not supported yet')
if target == 'data':
revision.attachment_rmdir(path)
else:
revision.modules_rmdir(path)
return render_json(request,
'%s_rmdir.json' % target, {'revision': revision, 'path': path})
@require_POST
@login_required
def remove_attachment(request, id_number, type_id, revision_number):
"""
Remove attachment from PackageRevision
"""
revision = get_package_revision(id_number, type_id, revision_number)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to remove attachment from package (%s) "
"by non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden('You are not the author of this Package')
uid = request.POST.get('uid', '').strip()
attachment = get_object_with_related_or_404(Attachment,
pk=uid, revisions=revision)
if not attachment:
log_msg = ('Attempt to remove a non existing attachment. attachment: '
'%s, package: %s.' % (uid, id_number))
log.warning(log_msg)
return HttpResponseForbidden(
'There is no such attachment in %s' % escape(
revision.package.full_name))
revision.attachment_remove(attachment)
return render_json(request,
"json/attachment_removed.json",
{'revision': revision, 'attachment': attachment})
def download_attachment(request, uid):
"""
Display attachment from PackageRevision
"""
attachment = get_object_with_related_or_404(Attachment, id=uid)
if not attachment.can_view(request.user):
log_msg = ("[security] Attempt to download private attachment (%s) by "
"non-owner (%s)" % (uid, request.user))
log.warning(log_msg)
return HttpResponseForbidden(
'You are not the author of this attachment.')
response = serve(request, attachment.path,
settings.UPLOAD_DIR, show_indexes=False)
response['Content-Disposition'] = 'filename=%s.%s' % (
attachment.filename, attachment.ext)
return response
@require_POST
@login_required
def save(request, id_number, type_id, revision_number=None,
version_name=None):
"""
Save package and modules
@TODO: check how dynamic module loading affects save
"""
revision = get_package_revision(id_number, type_id, revision_number,
version_name)
if request.user.pk != revision.author.pk:
log_msg = ("[security] Attempt to save package (%s) by "
"non-owner (%s)" % (id_number, request.user))
log.warning(log_msg)
return HttpResponseForbidden('You are not the author of this Package')
save_revision = False
save_package = False
start_version_name = revision.version_name
start_revision_message = revision.message
response_data = {}
package_full_name = request.POST.get('full_name', False)
version_name = request.POST.get('version_name', False)
# validate package_full_name and version_name
if version_name and not validator.is_valid(
'alphanum_plus', version_name):
return HttpResponseNotAllowed(escape(
validator.get_validation_message('alphanum_plus')))
# here we're checking if the *current* full_name is different than the
# revision's full_name
if package_full_name and package_full_name != revision.package.full_name:
try:
revision.set_full_name(package_full_name)
except ValidationError:
return HttpResponseNotAllowed(escape(
validator.get_validation_message('alphanum_plus_space')))
except IntegrityError:
return HttpResponseForbidden(
'You already have a %s with that name' % escape(
revision.package.get_type_name())
)
else:
save_package = True
save_revision = True
response_data['full_name'] = package_full_name
package_description = request.POST.get('package_description', False)
if package_description:
save_package = True
revision.package.description = package_description
response_data['package_description'] = package_description
changes = []
for mod in revision.modules.all():
if request.POST.get(mod.filename, False):
code = request.POST[mod.filename]
if mod.code != code:
mod.code = code
changes.append(mod)
for att in revision.attachments.all():
uid = str(att.pk)
if request.POST.get(uid):
att.data = request.POST[uid]
if att.changed():
changes.append(att)
attachments_changed = {}
if save_revision or changes:
try:
revision.save()
except ValidationError, err:
return HttpResponseForbidden(escape(err.__str__()))
if changes:
attachments_changed = simplejson.dumps(
revision.updates(changes, save=False))
revision_message = request.POST.get('revision_message', False)
if revision_message and revision_message != start_revision_message:
revision.message = revision_message
# save revision message without changing the revision
super(PackageRevision, revision).save()
response_data['revision_message'] = revision_message
if version_name and version_name != start_version_name \
and version_name != revision.package.version_name:
save_package = False
try:
revision.set_version(version_name)
except Exception, err:
| |
import numpy as np
from ...external.qt.QtGui import (QAction, QLabel, QCursor, QMainWindow,
QToolButton, QIcon, QMessageBox,
QMdiSubWindow)
from ...external.qt.QtCore import Qt, QRect
from .data_viewer import DataViewer
from ... import core
from ... import config
from ...clients.image_client import ImageClient
from ...clients.ds9norm import DS9Normalize
from ...external.modest_image import imshow
from ...clients.layer_artist import Pointer
from ...core.callback_property import add_callback
from .data_slice_widget import DataSlice
from ..mouse_mode import (RectangleMode, CircleMode, PolyMode,
ContrastMode, ContourMode, PathMode)
from ..glue_toolbar import GlueToolbar
from ..spectrum_tool import SpectrumTool
from .mpl_widget import MplWidget, defer_draw
from ..decorators import set_cursor
from ..qtutil import cmap2pixmap, load_ui, get_icon, nonpartial
from ..widget_properties import CurrentComboProperty, ButtonProperty
WARN_THRESH = 10000000 # warn when contouring large images
__all__ = ['ImageWidget']
class ImageWidget(DataViewer):
LABEL = "Image Viewer"
_property_set = DataViewer._property_set + \
'data attribute rgb_mode rgb_viz ratt gatt batt slice'.split()
attribute = CurrentComboProperty('ui.attributeComboBox',
'Current attribute')
data = CurrentComboProperty('ui.displayDataCombo',
'Current data')
rgb_mode = ButtonProperty('ui.rgb',
'RGB Mode?')
rgb_viz = Pointer('ui.rgb_options.rgb_visible')
def __init__(self, session, parent=None):
super(ImageWidget, self).__init__(session, parent)
self.central_widget = MplWidget()
self.label_widget = QLabel("", self.central_widget)
self.setCentralWidget(self.central_widget)
self.ui = load_ui('imagewidget', None)
self.option_widget = self.ui
self.ui.slice = DataSlice()
self.ui.slice_layout.addWidget(self.ui.slice)
self.client = ImageClient(self._data,
self.central_widget.canvas.fig,
artist_container=self._container)
self._spectrum_tool = SpectrumTool(self)
self._tweak_geometry()
self.make_toolbar()
self._connect()
self._init_widgets()
self.set_data(0)
self.statusBar().setSizeGripEnabled(False)
self.setFocusPolicy(Qt.StrongFocus)
self._slice_widget = None
def _tweak_geometry(self):
self.central_widget.resize(600, 400)
self.resize(self.central_widget.size())
self.ui.rgb_options.hide()
def make_toolbar(self):
result = GlueToolbar(self.central_widget.canvas, self, name='Image')
for mode in self._mouse_modes():
result.add_mode(mode)
cmap = _colormap_mode(self, self.client.set_cmap)
result.addWidget(cmap)
# connect viewport update buttons to client commands to
# allow resampling
cl = self.client
result.buttons['HOME'].triggered.connect(nonpartial(cl.check_update))
result.buttons['FORWARD'].triggered.connect(nonpartial(
cl.check_update))
result.buttons['BACK'].triggered.connect(nonpartial(cl.check_update))
self.addToolBar(result)
return result
def _mouse_modes(self):
axes = self.client.axes
def apply_mode(mode):
self.apply_roi(mode.roi())
def slice(mode):
self._extract_slice(mode.roi())
rect = RectangleMode(axes, roi_callback=apply_mode)
circ = CircleMode(axes, roi_callback=apply_mode)
poly = PolyMode(axes, roi_callback=apply_mode)
contrast = ContrastMode(axes, move_callback=self._set_norm)
contour = ContourMode(axes, release_callback=self._contour_roi)
spectrum = self._spectrum_tool.mouse_mode
path = PathMode(axes, roi_callback=slice)
def toggle_3d_modes(data):
is3d = data.ndim > 2
path.enabled = is3d
spectrum.enabled = is3d
add_callback(self.client, 'display_data', toggle_3d_modes)
self._contrast = contrast
return [rect, circ, poly, contour, contrast, spectrum, path]
def _extract_slice(self, roi):
"""
Extract a PV-like slice, given a path traced on the widget
"""
vx, vy = roi.to_polygon()
pv, x, y = _slice_from_path(vx, vy, self.data, self.attribute, self.slice)
if self._slice_widget is None:
self._slice_widget = PVSliceWidget(pv, x, y, self)
self._session.application.add_widget(self._slice_widget,
label='Custom Slice')
else:
self._slice_widget.set_image(pv, x, y)
result = self._slice_widget
result.axes.set_xlabel("Position Along Slice")
result.axes.set_ylabel(_slice_label(self.data, self.slice))
result.show()
def _init_widgets(self):
pass
@defer_draw
def add_data(self, data):
"""Private method to ingest new data into widget"""
self.client.add_layer(data)
self.add_data_to_combo(data)
self.set_data(self._data_index(data))
return True
@defer_draw
def add_subset(self, subset):
self.client.add_scatter_layer(subset)
assert subset in self.client.artists
def _data_index(self, data):
combo = self.ui.displayDataCombo
for i in range(combo.count()):
if combo.itemData(i) is data:
return i
return None
def add_data_to_combo(self, data):
""" Add a data object to the combo box, if not already present
"""
if not self.client.can_image_data(data):
return
combo = self.ui.displayDataCombo
label = data.label
pos = combo.findText(label)
if pos == -1:
combo.addItem(label, userData=data)
assert combo.findText(label) >= 0
@property
def ratt(self):
"""ComponentID assigned to R channel in RGB Mode"""
return self.ui.rgb_options.attributes[0]
@ratt.setter
def ratt(self, value):
att = list(self.ui.rgb_options.attributes)
att[0] = value
self.ui.rgb_options.attributes = att
@property
def gatt(self):
"""ComponentID assigned to G channel in RGB Mode"""
return self.ui.rgb_options.attributes[1]
@gatt.setter
def gatt(self, value):
att = list(self.ui.rgb_options.attributes)
att[1] = value
self.ui.rgb_options.attributes = att
@property
def batt(self):
"""ComponentID assigned to B channel in RGB Mode"""
return self.ui.rgb_options.attributes[2]
@batt.setter
def batt(self, value):
att = list(self.ui.rgb_options.attributes)
att[2] = value
self.ui.rgb_options.attributes = att
@defer_draw
def set_data(self, index):
if index is None:
return
if self.ui.displayDataCombo.count() == 0:
return
data = self.ui.displayDataCombo.itemData(index)
self.ui.slice.set_data(data)
self.client.set_data(data)
self.client.slice = self.ui.slice.slice
self.ui.displayDataCombo.setCurrentIndex(index)
self.set_attribute_combo(data)
self._update_window_title()
@property
def slice(self):
return self.client.slice
@slice.setter
def slice(self, value):
self.ui.slice.slice = value
@defer_draw
def set_attribute(self, index):
combo = self.ui.attributeComboBox
component_id = combo.itemData(index)
self.client.set_attribute(component_id)
self.ui.attributeComboBox.setCurrentIndex(index)
self._update_window_title()
def set_attribute_combo(self, data):
""" Update attribute combo box to reflect components in data"""
combo = self.ui.attributeComboBox
combo.blockSignals(True)
combo.clear()
fields = data.visible_components
index = 0
for i, f in enumerate(fields):
combo.addItem(f.label, userData=f)
if f == self.client.display_attribute:
index = i
combo.blockSignals(False)
combo.setCurrentIndex(index)
self.set_attribute(index)
def _connect(self):
ui = self.ui
ui.displayDataCombo.currentIndexChanged.connect(self.set_data)
ui.attributeComboBox.currentIndexChanged.connect(self.set_attribute)
ui.monochrome.toggled.connect(self._update_rgb_console)
ui.rgb_options.colors_changed.connect(self._update_window_title)
ui.rgb_options.current_changed.connect(
lambda: self._toolbars[0].set_mode(self._contrast))
ui.slice.slice_changed.connect(self._update_slice)
update_ui_slice = lambda val: setattr(ui.slice, 'slice', val)
add_callback(self.client, 'slice', update_ui_slice)
def _update_slice(self):
self.client.slice = self.ui.slice.slice
@defer_draw
def _update_rgb_console(self, is_monochrome):
if is_monochrome:
self.ui.rgb_options.hide()
self.ui.mono_att_label.show()
self.ui.attributeComboBox.show()
self.client.rgb_mode(False)
else:
self.ui.mono_att_label.hide()
self.ui.attributeComboBox.hide()
self.ui.rgb_options.show()
rgb = self.client.rgb_mode(True)
if rgb is not None:
self.ui.rgb_options.artist = rgb
def register_to_hub(self, hub):
super(ImageWidget, self).register_to_hub(hub)
self.client.register_to_hub(hub)
dc_filt = lambda x: x.sender is self.client._data
layer_present_filter = lambda x: x.data in self.client.artists
hub.subscribe(self,
core.message.DataCollectionAddMessage,
handler=lambda x: self.add_data_to_combo(x.data),
filter=dc_filt)
hub.subscribe(self,
core.message.DataCollectionDeleteMessage,
handler=lambda x: self.remove_data_from_combo(x.data),
filter=dc_filt)
hub.subscribe(self,
core.message.DataUpdateMessage,
handler=lambda x: self._sync_data_labels()
)
hub.subscribe(self,
core.message.ComponentsChangedMessage,
handler=lambda x: self.set_attribute_combo(x.data),
filter=layer_present_filter)
def unregister(self, hub):
for obj in [self, self.client]:
hub.unsubscribe_all(obj)
def remove_data_from_combo(self, data):
""" Remvoe a data object from the combo box, if present """
combo = self.ui.displayDataCombo
pos = combo.findText(data.label)
if pos >= 0:
combo.removeItem(pos)
def _set_norm(self, mode):
""" Use the `ContrastMouseMode` to adjust the transfer function """
clip_lo, clip_hi = mode.get_clip_percentile()
stretch = mode.stretch
return self.client.set_norm(clip_lo=clip_lo, clip_hi=clip_hi,
stretch=stretch,
bias=mode.bias, contrast=mode.contrast)
@set_cursor(Qt.WaitCursor)
def _contour_roi(self, mode):
""" Callback for ContourMode. Set edit_subset as new ROI """
im = self.client.display_data
att = self.client.display_attribute
if im is None or att is None:
return
if im.size > WARN_THRESH and not self._confirm_large_image(im):
return
roi = mode.roi(im[att])
if roi:
self.apply_roi(roi)
def _update_window_title(self):
if self.client.display_data is None:
title = ''
else:
data = self.client.display_data.label
a = self.client.rgb_mode()
if a is None: # monochrome mode
title = "%s - %s" % (self.client.display_data.label,
self.client.display_attribute.label)
else:
r = a.r.label if a.r is not None else ''
g = a.g.label if a.g is not None else ''
b = a.b.label if a.b is not None else ''
title = "%s Red = %s Green = %s Blue = %s" % (data, r, g, b)
self.setWindowTitle(title)
def _update_data_combo(self):
combo = self.ui.displayDataCombo
for i in range(combo.count()):
combo.setItemText(i, combo.itemData(i).label)
def _sync_data_labels(self):
self._update_window_title()
self._update_data_combo()
def __str__(self):
return "Image Widget"
def _confirm_large_image(self, data):
"""Ask user to confirm expensive contour operations
:rtype: bool. Whether the user wishes to continue
"""
warn_msg = ("WARNING: Image has %i pixels, and may render slowly."
" Continue?" % data.size)
title = "Contour large image?"
ok = QMessageBox.Ok
cancel = QMessageBox.Cancel
buttons = ok | cancel
result = QMessageBox.question(self, title, warn_msg,
buttons=buttons,
defaultButton=cancel)
return result == ok
def options_widget(self):
return self.option_widget
@defer_draw
def restore_layers(self, rec, context):
self.client.restore_layers(rec, context)
for artist in self.layers:
self.add_data_to_combo(artist.layer.data)
self.set_attribute_combo(self.client.display_data)
self._update_data_combo()
def paintEvent(self, event):
super(ImageWidget, self).paintEvent(event)
pos = self.central_widget.canvas.mapFromGlobal(QCursor.pos())
x, y = pos.x(), self.central_widget.canvas.height() - pos.y()
self._update_intensity_label(x, y)
def _intensity_label(self, x, y):
x, y = self.client.axes.transData.inverted().transform([x, y])
value = self.client.point_details(x, y)['value']
lbl = '' if value is None else "data: %s" % value
return lbl
def _update_intensity_label(self, x, y):
lbl = self._intensity_label(x, y)
self.label_widget.setText(lbl)
fm = self.label_widget.fontMetrics()
w, h = fm.width(lbl), fm.height()
g = QRect(20, self.central_widget.geometry().height() - h, w, h)
self.label_widget.setGeometry(g)
class ColormapAction(QAction):
def __init__(self, label, cmap, parent):
super(ColormapAction, self).__init__(label, parent)
self.cmap = cmap
pm = cmap2pixmap(cmap)
self.setIcon(QIcon(pm))
def _colormap_mode(parent, on_trigger):
# actions for each colormap
acts = []
for label, cmap in config.colormaps:
a = ColormapAction(label, cmap, parent)
a.triggered.connect(nonpartial(on_trigger, cmap))
acts.append(a)
# Toolbar button
tb = QToolButton()
tb.setWhatsThis("Set color scale")
tb.setToolTip("Set color scale")
icon = get_icon('glue_rainbow')
tb.setIcon(icon)
tb.setPopupMode(QToolButton.InstantPopup)
tb.addActions(acts)
return tb
class StandaloneImageWidget(QMainWindow):
"""
A simplified image viewer, without any brushing or linking,
but with the ability to adjust contrast and resample.
"""
def __init__(self, image, parent=None, **kwargs):
"""
:param image: Image to display (2D numpy array)
:param parent: Parent widget (optional)
:param kwargs: Extra keywords to pass to imshow
"""
super(StandaloneImageWidget, self).__init__(parent)
self.central_widget = MplWidget()
self.setCentralWidget(self.central_widget)
self._setup_axes()
self._im = None
self._norm = DS9Normalize()
self.make_toolbar()
self.set_image(image, **kwargs)
def _setup_axes(self):
self._axes = self.central_widget.canvas.fig.add_subplot(111)
self._axes.set_aspect('equal', adjustable='datalim')
def set_image(self, image, **kwargs):
"""
Update the image shown in the widget
"""
if self._im is not None:
self._im.remove()
self._im = None
kwargs.setdefault('origin', 'upper')
self._im = imshow(self._axes, image,
norm=self._norm, cmap='gray', **kwargs)
self._im_array = image
self._axes.set_xticks([])
self._axes.set_yticks([])
self._redraw()
@property
def axes(self):
"""
The Matplotlib axes object for this figure
"""
return self._axes
def show(self):
super(StandaloneImageWidget, self).show()
self._redraw()
def _redraw(self):
self.central_widget.canvas.draw()
def _set_cmap(self, cmap):
self._im.set_cmap(cmap)
self._redraw()
def mdi_wrap(self):
"""
Embed this widget in a QMdiSubWindow
"""
sub = QMdiSubWindow()
sub.setWidget(self)
self.destroyed.connect(sub.close)
sub.resize(self.size())
self._mdi_wrapper = sub
return sub
def _set_norm(self, mode):
""" Use the `ContrastMouseMode` to adjust the transfer function """
clip_lo, clip_hi = mode.get_clip_percentile()
stretch = mode.stretch
self._norm.clip_lo = clip_lo
self._norm.clip_hi = clip_hi
self._norm.stretch | |
# -*- coding: utf-8 -*-
import pprint
import sys
import re
import hashlib
import BaseThreadedModule
import Utils
import Decorators
@Decorators.ModuleDocstringParser
class ModifyFields(BaseThreadedModule.BaseThreadedModule):
"""
Simple module to insert/delete/change field values.
Configuration templates:
# Keep all fields listed in source_fields, discard all others.
- ModifyFields:
action: keep # <type: string; is: required>
source_fields: # <type: list; is: required>
receivers:
- NextModule
# Discard all fields listed in source_fields.
- ModifyFields:
action: delete # <type: string; is: required>
source_fields: # <type: list; is: required>
receivers:
- NextModule
# Concat all fields listed in source_fields.
- ModifyFields:
action: concat # <type: string; is: required>
source_fields: # <type: list; is: required>
target_field: # <type: string; is: required>
receivers:
- NextModule
# Insert a new field with "target_field" name and "value" as new value.
- ModifyFields:
action: insert # <type: string; is: required>
target_field: # <type: string; is: required>
value: # <type: string; is: required>
receivers:
- NextModule
# Replace field values matching string "old" in data dictionary with "new".
- ModifyFields:
action: string_replace # <type: string; is: required>
source_field: # <type: string; is: required>
old: # <type: string; is: required>
new: # <type: string; is: required>
max: # <default: -1; type: integer; is: optional>
receivers:
- NextModule
# Replace field values in data dictionary with self.getConfigurationValue['with'].
- ModifyFields:
action: replace # <type: string; is: required>
source_field: # <type: string; is: required>
regex: ['<[^>]*>', 're.MULTILINE | re.DOTALL'] # <type: list; is: required>
with: # <type: string; is: required>
receivers:
- NextModule
# Map a field value.
- ModifyFields:
action: map # <type: string; is: required>
source_field: # <type: string; is: required>
map: # <type: dictionary; is: required>
target_field: # <default: "$(source_field)_mapped"; type: string; is: optional>
keep_unmappable: # <default: False; type: boolean; is: optional>
receivers:
- NextModule
# Split source field to target fields based on key value pairs.
- ModifyFields:
action: key_value # <type: string; is: required>
line_separator: # <type: string; is: required>
kv_separator: # <type: string; is: required>
source_field: # <type: list; is: required>
target_field: # <default: None; type: None||string; is: optional>
prefix: # <default: None; type: None||string; is: optional>
receivers:
- NextModule
# Split source field to target fields based on key value pairs using regex.
- ModifyFields:
action: key_value_regex # <type: string; is: required>
regex: # <type: string; is: required>
source_field: # <type: list; is: required>
target_field: # <default: None; type: None||string; is: optional>
prefix: # <default: None; type: None||string; is: optional>
receivers:
- NextModule
# Split source field to array at separator.
- ModifyFields:
action: split # <type: string; is: required>
separator: # <type: string; is: required>
source_field: # <type: list; is: required>
target_field: # <default: None; type: None||string; is: optional>
receivers:
- NextModule
# Merge source fields to target field as list.
- ModifyFields:
action: merge # <type: string; is: required>
source_fields: # <type: list; is: required>
target_field: # <type: string; is: required>
receivers:
- NextModule
# Merge source field to target field as string.
- ModifyFields:
action: join # <type: string; is: required>
source_field: # <type: string; is: required>
target_field: # <type: string; is: required>
separator: # <default: ","; type: string; is: optional>
receivers:
- NextModule
# Cast field values to integer.
- ModifyFields:
action: cast_to_int # <type: string; is: required>
source_fields: # <type: list; is: required>
receivers:
- NextModule
# Cast field values to float.
- ModifyFields:
action: cast_to_float # <type: string; is: required>
source_fields: # <type: list; is: required>
receivers:
- NextModule
# Cast field values to string.
- ModifyFields:
action: cast_to_str # <type: string; is: required>
source_fields: # <type: list; is: required>
receivers:
- NextModule
# Cast field values to boolean.
- ModifyFields:
action: cast_to_bool # <type: string; is: required>
source_fields: # <type: list; is: required>
receivers:
- NextModule
# Create a hash from a field value.
# If target_fields is provided, it should have the same length as source_fields.
# If target_fields is not provided, source_fields will be replaced with the hashed value.
# Hash algorithm can be any of the in hashlib supported algorithms.
- ModifyFields:
action: hash # <type: string; is: required>
algorithm: sha1 # <default: "md5"; type: string; is: optional;>
salt: # <default: None; type: None||string; is: optional;>
source_fields: # <type: list; is: required>
target_fields: # <default: []; type: list; is: optional>
receivers:
- NextModule
"""
module_type = "modifier"
"""Set module type"""
def configure(self, configuration):
# Call parent configure method
BaseThreadedModule.BaseThreadedModule.configure(self, configuration)
# Set defaults
self.typecast_switch = { 'int': self.cast_to_int,
'integer': self.cast_to_int,
'float': self.cast_to_float,
'str': self.cast_to_str,
'string': self.cast_to_str,
'bool': self.cast_to_bool,
'boolean': self.cast_to_bool,
}
self.action = configuration['action']
# Precompile regex for replacement if defined
if 'regex' in configuration:
regex_pattern = configuration['regex']
regex_options = 0
if isinstance(regex_pattern, list):
i = iter(regex_pattern)
# Pattern is the first entry
regex_pattern = i.next()
# Regex options the second
try:
regex_options = eval(i.next())
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("RegEx error for options %s. Exception: %s, Error: %s" % (regex_options, etype, evalue))
self.gp.shutDown()
try:
self.regex = re.compile(regex_pattern, regex_options)
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("RegEx error for pattern %s. Exception: %s, Error: %s" % (regex_pattern, etype, evalue))
self.gp.shutDown()
self.source_field = self.getConfigurationValue('source_field') if "source_field" in self.configuration_data else []
self.source_fields = self.getConfigurationValue('source_fields') if "source_fields" in self.configuration_data else []
self.target_field = self.getConfigurationValue('target_field') if "target_field" in self.configuration_data else []
self.target_fields = self.getConfigurationValue('target_fields') if "target_fields" in self.configuration_data else []
# Call action specific configure method.
if "configure_%s_action" % self.action in dir(self):
getattr(self, "configure_%s_action" % self.action)()
# Get action specific method
try:
self.event_handler = getattr(self, "%s" % self.action)
except AttributeError:
etype, evalue, etb = sys.exc_info()
self.logger.error("ModifyFields action called that does not exist: %s. Exception: %s, Error: %s" % (self.action, etype, evalue))
self.gp.shutDown()
def configure_split_action(self):
self.separator = self.getConfigurationValue('separator')
def configure_key_value_action(self):
self.line_separator = self.getConfigurationValue('line_separator')
self.kv_separator = self.getConfigurationValue('kv_separator')
self.prefix = self.getConfigurationValue('prefix')
def configure_key_value_regex_action(self):
self.prefix = self.getConfigurationValue('prefix')
def configure_join_action(self):
self.separator = self.getConfigurationValue('separator')
def configure_anonymize_action(self):
self.configure_hash_action()
def configure_hash_action(self):
# Import murmur hashlib if configured.
self.salt = self.getConfigurationValue('salt') if self.getConfigurationValue('salt') else ""
self.algorithm = self.getConfigurationValue('algorithm')
if self.algorithm == "murmur":
try:
import mmh3
self.hash_func = mmh3.hash
except ImportError:
etype, evalue, etb = sys.exc_info()
self.logger.error("Exception: %s, Error: %s" % (etype, evalue))
self.gp.shutDown()
else:
try:
self.hashlib_func = getattr(hashlib, self.algorithm)
except AttributeError:
etype, evalue, etb = sys.exc_info()
self.logger.error("Exception: %s, Error: %s" % (etype, evalue))
self.gp.shutDown()
return
self.hash_func = self.hashlibFunc
def hashlibFunc(self, string):
return self.hashlib_func(string).hexdigest()
def handleEvent(self, event):
try:
event = self.event_handler(event)
except AttributeError:
etype, evalue, etb = sys.exc_info()
self.logger.error("ModifyFields action called that does not exist: %s. Exception: %s, Error: %s" % (self.action, etype, evalue))
self.gp.shutDown()
yield event
def keep(self,event):
"""
Field names not listed in self.configuration_data['source_fields'] will be deleted from data dictionary.
@param event: dictionary
@return: event: dictionary
"""
fields_to_del = set(event).difference(self.source_fields)
for field in fields_to_del:
# Do not delete internal event information.
if field == 'gambolputty':
continue
event.pop(field, None)
return event
def delete(self, event):
"""
Field names listed in ['source_fields'] will be deleted from data dictionary.
@todo: pypy seems to handle simple tight loops better than
- first building a set from data dictionary and
- then get common keys from ['source_fields'] and data via intersection
e.g.:
fields_to_check = self.getConfigurationValue('source_fields').intersection(set(data))
Still, if the field set is a large one, the above approach could be faster.
This problem affects this and some more methods in this class.
Maybe the code can be altered to take this into account.
@param event: dictionary
@return: event: dictionary
"""
for field in self.source_fields:
event.pop(field, None)
return event
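# A rough, untested sketch of the set-intersection alternative mentioned in
# the @todo above (same 'event' dict and 'self.source_fields' list as used
# elsewhere in this class):
#
#     for field in set(self.source_fields) & set(event):
#         event.pop(field, None)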
def insert(self, event):
"""
Insert a new field with a given value.
@param event: dictionary
@return: event: dictionary
"""
event[self.target_field] = self.getConfigurationValue('value', event)
return event
def concat(self, event):
"""
Field names listed in ['source_fields'] will be concatenated to a new string.
The result will be stored in ['target_field']
@param event: dictionary
@return: event: dictionary
"""
concat_str = ""
for field in self.source_fields:
try:
concat_str = "%s%s" % (concat_str,event[field])
except KeyError:
pass
event[self.target_field] = concat_str
return event
def replace(self, event):
"""
Field value in data dictionary will be replaced with ['with']
@param event: | |
"required": "number_b",
},
"number_d": {
"name": "The fourth parameter",
"about": "About the fourth parameter",
"type": "number",
"required": "number_b | number_c",
},
"number_e": {
"name": "The fifth parameter",
"about": "About the fifth parameter",
"type": "number",
"required": "number_b & number_d"
},
"number_f": {
"name": "The sixth parameter",
"about": "About the sixth parameter",
"type": "number",
"required": "not number_b"
}
}
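# In this spec, 'required' may be a plain boolean or a boolean expression
# over other arg keys (e.g. "number_b | number_c", "number_b & number_d",
# "not number_b"); the key is only required when the expression is True.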
args = {
"number_a": 123,
"number_b": 456,
}
validation_warnings = validation.validate(args, spec)
self.assertEqual(sorted(validation_warnings), [
(['number_c'], 'Key is missing from the args dict'),
(['number_d'], 'Key is missing from the args dict'),
])
args = {
"number_a": 123,
"number_b": 456,
"number_c": 1,
"number_d": 3,
"number_e": 4,
}
self.assertEqual([], validation.validate(args, spec))
args = {
"number_a": 123,
}
validation_warnings = validation.validate(args, spec)
self.assertEqual(sorted(validation_warnings), [
(['number_f'], 'Key is missing from the args dict')
])
def test_conditional_requirement_missing_var(self):
"""Validation: check AssertionError if expression is missing a var."""
from natcap.invest import validation
spec = {
"number_a": {
"name": "The first parameter",
"about": "About the first parameter",
"type": "number",
"required": True,
},
"number_b": {
"name": "The second parameter",
"about": "About the second parameter",
"type": "number",
"required": False,
},
"number_c": {
"name": "The third parameter",
"about": "About the third parameter",
"type": "number",
"required": "some_var_not_in_args",
}
}
args = {
"number_a": 123,
"number_b": 456,
}
with self.assertRaises(AssertionError) as cm:
validation_warnings = validation.validate(args, spec)
self.assertTrue('some_var_not_in_args' in str(cm.exception))
def test_conditional_requirement_not_required(self):
"""Validation: unrequired conditional requirement should always pass"""
from natcap.invest import validation
csv_a_path = os.path.join(self.workspace_dir, 'csv_a.csv')
csv_b_path = os.path.join(self.workspace_dir, 'csv_b.csv')
# initialize test CSV files
with open(csv_a_path, 'w') as csv:
csv.write('a,b,c')
with open(csv_b_path, 'w') as csv:
csv.write('1,2,3')
spec = {
"condition": {
"name": "A condition that determines requirements",
"about": "About the condition",
"type": "boolean",
"required": False,
},
"csv_a": {
"name": "Conditionally required CSV A",
"about": "About CSV A",
"type": "csv",
"required": "condition",
},
"csv_b": {
"name": "Conditonally required CSV B",
"about": "About CSV B",
"type": "csv",
"required": "not condition",
}
}
# because condition = True, it shouldn't matter that the
# csv_b parameter wouldn't pass validation
args = {
"condition": True,
"csv_a": csv_a_path,
"csv_b": 'x' + csv_b_path # introduce a typo
}
validation_warnings = validation.validate(args, spec)
self.assertEqual(validation_warnings, [])
def test_requirement_missing(self):
"""Validation: verify absolute requirement on missing key."""
from natcap.invest import validation
spec = {
"number_a": {
"name": "The first parameter",
"about": "About the first parameter",
"type": "number",
"required": True,
}
}
args = {}
self.assertEqual(
[(['number_a'], 'Key is missing from the args dict')],
validation.validate(args, spec))
def test_requirement_no_value(self):
"""Validation: verify absolute requirement without value."""
from natcap.invest import validation
spec = {
"number_a": {
"name": "The first parameter",
"about": "About the first parameter",
"type": "number",
"required": True,
}
}
args = {'number_a': ''}
self.assertEqual(
[(['number_a'], 'Input is required but has no value')],
validation.validate(args, spec))
args = {'number_a': None}
self.assertEqual(
[(['number_a'], 'Input is required but has no value')],
validation.validate(args, spec))
def test_invalid_value(self):
"""Validation: verify invalidity."""
from natcap.invest import validation
spec = {
"number_a": {
"name": "The first parameter",
"about": "About the first parameter",
"type": "number",
"required": True,
}
}
args = {'number_a': 'not a number'}
self.assertEqual(
[(['number_a'], ("Value 'not a number' could not be interpreted "
"as a number"))],
validation.validate(args, spec))
def test_conditionally_required_no_value(self):
"""Validation: verify conditional requirement when no value."""
from natcap.invest import validation
spec = {
"number_a": {
"name": "The first parameter",
"about": "About the first parameter",
"type": "number",
"required": True,
},
"string_a": {
"name": "The first parameter",
"about": "About the first parameter",
"type": "freestyle_string",
"required": "number_a",
}
}
args = {'string_a': None, "number_a": 1}
self.assertEqual(
[(['string_a'], 'Key is required but has no value')],
validation.validate(args, spec))
def test_conditionally_required_invalid(self):
"""Validation: verify conditional validity behavior when invalid."""
from natcap.invest import validation
spec = {
"number_a": {
"name": "The first parameter",
"about": "About the first parameter",
"type": "number",
"required": True,
},
"string_a": {
"name": "The first parameter",
"about": "About the first parameter",
"type": "option_string",
"required": "number_a",
"validation_options": {
"options": ['AAA', 'BBB']
}
}
}
args = {'string_a': "ZZZ", "number_a": 1}
self.assertEqual(
[(['string_a'], "Value must be one of: ['AAA', 'BBB']")],
validation.validate(args, spec))
def test_validation_exception(self):
"""Validation: Verify error when an unexpected exception occurs."""
from natcap.invest import validation
spec = {
"number_a": {
"name": "The first parameter",
"about": "About the first parameter",
"type": "number",
"required": True,
},
}
args = {'number_a': 1}
try:
# Patch in a new function that raises an exception into the
# validation functions dictionary.
patched_function = Mock(side_effect=ValueError('foo'))
validation._VALIDATION_FUNCS['number'] = patched_function
validation_warnings = validation.validate(args, spec)
finally:
# No matter what happens with this test, always restore the state
# of the validation functions dict.
validation._VALIDATION_FUNCS['number'] = (
validation.check_number)
self.assertEqual(
validation_warnings,
[(['number_a'], 'An unexpected error occurred in validation')])
def test_validation_other(self):
"""Validation: verify no error when 'other' type."""
from natcap.invest import validation
spec = {
"number_a": {
"name": "The first parameter",
"about": "About the first parameter",
"type": "other",
"required": True,
},
}
args = {'number_a': 1}
self.assertEqual([], validation.validate(args, spec))
def test_conditional_validity_recursive(self):
"""Validation: check that we can require from nested conditions."""
from natcap.invest import validation
spec = {}
previous_key = None
args = {}
for letter in string.ascii_uppercase[:10]:
key = 'arg_%s' % letter
spec[key] = {
'name': 'name ' + key,
'about': 'about ' + key,
'type': 'freestyle_string',
'required': previous_key
}
previous_key = key
args[key] = key
del args[previous_key] # delete the last addition to the dict.
self.assertEqual(
[(['arg_J'], 'Key is missing from the args dict')],
validation.validate(args, spec))
def test_spatial_overlap_error(self):
"""Validation: check that we return an error on spatial mismatch."""
from natcap.invest import validation
spec = {
'raster_a': {
'type': 'raster',
'name': 'raster 1',
'about': 'raster 1',
'required': True,
},
'raster_b': {
'type': 'raster',
'name': 'raster 2',
'about': 'raster 2',
'required': True,
},
'vector_a': {
'type': 'vector',
'name': 'vector 1',
'about': 'vector 1',
'required': True,
}
}
driver = gdal.GetDriverByName('GTiff')
filepath_1 = os.path.join(self.workspace_dir, 'raster_1.tif')
filepath_2 = os.path.join(self.workspace_dir, 'raster_2.tif')
reference_filepath = os.path.join(self.workspace_dir, 'reference.gpkg')
# Filepaths 1 and 2 are obviously outside of UTM zone 31N.
for filepath, geotransform, epsg_code in (
(filepath_1, [1, 1, 0, 1, 0, 1], 4326),
(filepath_2, [100, 1, 0, 100, 0, 1], 4326)):
raster = driver.Create(filepath, 3, 3, 1, gdal.GDT_Int32)
wgs84_srs = osr.SpatialReference()
wgs84_srs.ImportFromEPSG(epsg_code)
raster.SetProjection(wgs84_srs.ExportToWkt())
raster.SetGeoTransform(geotransform)
raster = None
gpkg_driver = gdal.GetDriverByName('GPKG')
vector = gpkg_driver.Create(reference_filepath, 0, 0, 0,
gdal.GDT_Unknown)
vector_srs = osr.SpatialReference()
vector_srs.ImportFromEPSG(32731) # WGS 84 / UTM zone 31S
layer = vector.CreateLayer('layer', vector_srs, ogr.wkbPoint)
new_feature = ogr.Feature(layer.GetLayerDefn())
new_feature.SetGeometry(ogr.CreateGeometryFromWkt('POINT (1 1)'))
new_feature = None
layer = None
vector = None
args = {
'raster_a': filepath_1,
'raster_b': filepath_2,
'vector_a': reference_filepath,
}
validation_warnings = validation.validate(
args, spec, {'spatial_keys': list(args.keys()),
'different_projections_ok': True})
self.assertEqual(len(validation_warnings), 1)
self.assertEqual(set(args.keys()), set(validation_warnings[0][0]))
self.assertTrue('Bounding boxes do not intersect' in
validation_warnings[0][1])
def test_spatial_overlap_error_undefined_projection(self):
"""Validation: check spatial overlap message when no projection"""
from natcap.invest import validation
spec = {
'raster_a': {
'type': 'raster',
'name': 'raster 1',
'about': 'raster 1',
'required': True,
},
'raster_b': {
'type': 'raster',
'name': 'raster 2',
'about': 'raster 2',
'required': True,
}
}
driver = gdal.GetDriverByName('GTiff')
filepath_1 = os.path.join(self.workspace_dir, 'raster_1.tif')
filepath_2 = os.path.join(self.workspace_dir, 'raster_2.tif')
raster_1 = driver.Create(filepath_1, 3, 3, 1, gdal.GDT_Int32)
wgs84_srs = osr.SpatialReference()
wgs84_srs.ImportFromEPSG(4326)
raster_1.SetProjection(wgs84_srs.ExportToWkt())
raster_1.SetGeoTransform([1, 1, 0, 1, 0, 1])
raster_1 = None
# don't define a projection for the second raster
driver.Create(filepath_2, 3, 3, 1, gdal.GDT_Int32)
args = {
'raster_a': filepath_1,
'raster_b': filepath_2
}
validation_warnings = validation.validate(
args, spec, {'spatial_keys': list(args.keys()),
'different_projections_ok': True})
expected = [(['raster_b'], 'Dataset must have a valid projection.')]
self.assertEqual(validation_warnings, expected)
def test_spatial_overlap_error_optional_args(self):
"""Validation: check for spatial mismatch with insufficient args."""
from natcap.invest import validation
spec = {
'raster_a': {
'type': 'raster',
'name': 'raster 1',
'about': 'raster 1',
'required': True,
},
'raster_b': {
'type': 'raster',
'name': 'raster 2',
'about': 'raster 2',
'required': False,
},
'vector_a': {
'type': 'vector',
'name': 'vector 1',
'about': 'vector 1',
'required': False,
}
}
driver = gdal.GetDriverByName('GTiff')
filepath_1 = os.path.join(self.workspace_dir, 'raster_1.tif')
filepath_2 = os.path.join(self.workspace_dir, 'raster_2.tif')
# Filepaths 1 and 2 do not overlap
for filepath, geotransform, epsg_code in (
(filepath_1, [1, 1, 0, 1, 0, 1], 4326),
(filepath_2, [100, 1, 0, 100, 0, 1], 4326)):
raster = driver.Create(filepath, 3, 3, 1, gdal.GDT_Int32)
wgs84_srs = osr.SpatialReference()
wgs84_srs.ImportFromEPSG(epsg_code)
raster.SetProjection(wgs84_srs.ExportToWkt())
raster.SetGeoTransform(geotransform)
raster = None
args = | |
in the nodes
graph = {}
for i in nodes_list:
bound = []
for j in range(len(distance_matrix)):
if (not j in exclusion_list) and (not i == j):
if distance_matrix[i][j] < cutoff:
bound.append(j)
graph[i] = bound
return graph # { i : [ a,b,c...], ... }
# vector stuff # TODO use numpy?
def makeCircleOnPlane(center, r, normal, points = 8):
"""
Calculate the points of a circle lying on an arbitrary plane
defined by the normal vector.
center : coords of center of the circle
r : radius
normal : normal of the plane where the circle lies
points : number of points for the circle
# http://www.physicsforums.com/showthread.php?t=123168
# P = R*cos(theta)*U + R*sin(theta)*(N x U) + c
# Where u is a unit vector from the centre of the circle
# to any point on the circumference; R is the radius;
# n is a unit vector perpendicular to the plane and c is the centre of the circle.
http://forums.create.msdn.com/forums/p/9551/50048.aspx
A coworker pointed out a trick to get a vector perpendicular to the normal vector:
simply swap two of the values, negate one of those, and zero the third.
So, if I have a normal vector of form Vector3(a, b, c), then one such vector that
is perpendicular to it is Vector3(b, -a, 0). Thus, there are six possible vectors
that are attainable by this method. The only trouble case is when the normal vector
contains elements whose values are zero, in which case you have to be a bit careful
which values you swap and negate. You just never want to end up with the zero vector.
"""
N = normal
U = array([N[1], -N[0], 0], 'f')
step = PI2/points
circle = []
for i in range(points):
theta = PI2-(step*i)
P = (r*cos(theta)*U)+(r*sin(theta))*(cross(N,U))+center
P = normalize(vector(center,P))*r
P = vecSum(P,center)
circle.append(P)
return circle
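# Example with hypothetical values: 8 points of a circle of radius 2.5 lying
# in the plane perpendicular to the x axis and centred on the origin
# (a normal such as [0, 0, 1] would break the U = [N[1], -N[0], 0] trick):
#   ring = makeCircleOnPlane(array([0., 0., 0.], 'f'), 2.5,
#                            array([1., 0., 0.], 'f'), points=8)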
def rotatePoint(pt,m,ax):
"""
Rotate point pt about the axis through the origin whose direction is ax[0:3],
by the angle ax[3] (radians), then translate the result by m.
pt = point that is rotated
ax = rotation axis (direction plus angle as the fourth element)
m  = translation added after the rotation
NOTE: earlier revisions modified pt in place (see the commented lines below);
this version returns a new array.
"""
# From Ludo
# point
x=pt[0]
y=pt[1]
z=pt[2]
# rotation pivot
u=ax[0]
v=ax[1]
w=ax[2]
ux=u*x
uy=u*y
uz=u*z
vx=v*x
vy=v*y
vz=v*z
wx=w*x
wy=w*y
wz=w*z
sa=sin(ax[3])
ca=cos(ax[3])
#pt[0]=(u*(ux+vy+wz)+(x*(v*v+w*w)-u*(vy+wz))*ca+(-wy+vz)*sa)+ m[0]
#pt[1]=(v*(ux+vy+wz)+(y*(u*u+w*w)-v*(ux+wz))*ca+(wx-uz)*sa)+ m[1]
#pt[2]=(w*(ux+vy+wz)+(z*(u*u+v*v)-w*(ux+vy))*ca+(-vx+uy)*sa)+ m[2]
p0 =(u*(ux+vy+wz)+(x*(v*v+w*w)-u*(vy+wz))*ca+(-wy+vz)*sa)+ m[0]
p1=(v*(ux+vy+wz)+(y*(u*u+w*w)-v*(ux+wz))*ca+(wx-uz)*sa)+ m[1]
p2=(w*(ux+vy+wz)+(z*(u*u+v*v)-w*(ux+vy))*ca+(-vx+uy)*sa)+ m[2]
#b = [pt, m, ax]
return array([ p0, p1, p2])
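# Worked example: rotating the point (1, 0, 0) by pi/2 around the z axis,
# with no translation, gives approximately (0, 1, 0):
#   rotatePoint(array([1., 0., 0.]), [0., 0., 0.], [0., 0., 1., pi/2])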
def atomsToVector(at1, at2=None, norm=0):
at1 = atomCoord(at1)
if at2: at2 = atomCoord(at2)
return vector(at1, at2, norm=norm)
def vector(p1 , p2 = None, norm = 0): # TODO use Numpy?
if not p2 == None:
vec = array([p2[0]-p1[0],p2[1]-p1[1],p2[2]-p1[2]],'f')
else:
vec = array([p1[0], p1[1], p1[2] ], 'f' )
if norm:
return normalize(vec)
else:
return vec
def norm(A): # TODO use Numpy
"Return vector norm"
return sqrt(sum(A*A))
def normalize(A): # TODO use Numpy
"Normalize the Vector"
return A/norm(A)
def calcPlane(p1, p2, p3):
# returns the plane containing the 3 input points
v12 = vector(p1,p2)
v13 = vector(p3,p2)
return normalize(cross(v12, v13))
def dot(vector1, vector2): # TODO remove and use Numpy
dot_product = 0.
for i in range(0, len(vector1)):
dot_product += (vector1[i] * vector2[i])
return dot_product
def vecAngle(v1, v2, rad=1): # TODO remove and use Numpy?
angle = dot(normalize(v1), normalize(v2))
#print angle, math.degrees(angle)
try:
if rad:
return acos(angle)
else:
return degrees(acos(angle))
except:
print "#vecAngle> CHECK TrottNormalization"
return 0
def vecSum(vec1, vec2): # TODO remove and use Numpy # TODO to be used in the PDBQT+ data!
return array([vec1[0]+vec2[0], vec1[1]+vec2[1], vec1[2]+vec2[2] ], 'f')
def intersect(a,b):
return list(set(a) & set(b))
def normValue(v, vmin, vmax, normrange=[0,10]):
# http://mathforum.org/library/drmath/view/60433.html
# min = A
# max = B
# v = x
# y = 1 + (x-A)*(10-1)/(B-A)
#return 1 + (v-vmin)*(10-1)/(vmax-vmin)
return normrange[0] + (v-vmin)*(normrange[1]-normrange[0])/(vmax-vmin)
#top = (v-vmin)(10-1)
#down = (vmax-vmin)
#x = 1 + top/down
#return x
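# Worked example: normValue(5, 0, 10, normrange=[0, 100]) rescales 5 from the
# range [0, 10] onto [0, 100] and returns 50.0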
def normProduct(a, b, mode = 'simple'):
if mode =='simple': return a*b
elif mode =='scaled': return (a*b)*(a+b)
def avgVector(vec_list, normalize=False):
# XXX NOT WORKING!!!
# http://devmaster.net/forums/topic/5443-average-direction-vector/
#weight = 1;
#average = vec[0];
#for (i = 1; i < n; ++i)
#{
# find angle between average and vec[i];
# angle *= weight / (weight + 1);
# average = rotate vec[i] towards average by angle;
# ++weight;
#}
print "avgVector> NOT WORKING!!!! NEVER TESTED"
weight = 1
average = vec_list[0]
for i in range(len(vec_list)-1):
angle = vecAngle(average, vec_list[i+1])
angle *= weight / (weight+1)
#average = rotatePoint(pt,m,ax)
average = rotatePoint(vec_list[i+1],m,ax)
# XXX m?
# XXX ax?
weight += 1
return average
def coplanar(plane, coord_list = [], reference = [0., 0., 0.], tolerance = 0.2):
""" return list of coordinates that are within <tolerance>
from the plane. If the reference is provided, vectors will be
calculated with <reference> as origin.
"""
coplane_list = []
for c in coord_list:
pos = vector(reference, c)
if abs(dot(plane, pos)) <= tolerance:
coplane_list.append(c)
return coplane_list
#####################################
######## END VECTOR STUFF ###########
#####################################
def getAtomsFromString(string,mol):
""" fast selection method for PMV
string should be something like A:THR276:O
mol is a PMV molecule instance
"""
try:
string = string.split(":")
chain = string[0]
res = string[1]
atom = string[2]
chain = mol.chains.get(chain)
res = chain.residues.get(res)
atoms = res.atoms.get(atom)
except:
print "getAtomsFromString> ERROR: something went wrong with ['%s']" % string
return False
return atoms[0]
def timerFunction(func,*arg):
t1=time.time()
res = apply(func,(arg))
print time.time()-t1
return res
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
http://codeliberates.blogspot.com/2008/05/detecting-cpuscores-in-python.html
http://www.artima.com/weblogs/viewpost.jsp?thread=230001
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if os.environ.has_key("NUMBER_OF_PROCESSORS"):
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]);
if ncpus > 0:
return ncpus
return 1 # Default
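# Note: on Python 2.6+ the same value is available as multiprocessing.cpu_count();
# this helper is kept for older interpreters.
#   ncpus = detectCPUs()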
#####################
# VALIDATORS
def validateEmail(string, localhost=False, exclude='', allowempty=0):
"""validate a string to be a valid email
it is possible to specify if 'localhost' is accepted
or a value that is not acceptable (i.e.
an example like '<EMAIL>')
"""
    string = str(string)
    if string.strip() == "":
        if allowempty:
            return True
        return False
    # honor the documented 'exclude' option (e.g. a placeholder example address)
    if exclude and string == exclude:
        return False
    if "@" in string:
        try:
            name, domain = string.split("@")
            # unless 'localhost' is accepted, the domain part must contain a dot
            if (not localhost) and (not "." in domain):
                return False
            else:
                splitted = domain.split('.')
                for s in splitted:
                    if not s.strip():
                        return False
                return True
        except:
            return False
    return False
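# Quick sanity sketch for the validator above (illustrative addresses, default args):
#   validateEmail("user@example.org")   -> True
#   validateEmail("user@localhost")     -> False unless localhost=True
#   validateEmail("", allowempty=1)     -> True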
def validatePosInt(value):
if value == '':
return Pmw.PARTIAL
for a in value:
if not a.isdigit():
return Pmw.ERROR
try:
if int(value) < 0:
return Pmw.ERROR
except:
return Pmw.ERROR
return Pmw.OK
def validateAscii(value, allowempty=0):
valid_chars = "-_.+(),%s%s" % (string.ascii_letters, string.digits)
    value = value.strip()
if value == '':
if allowempty:
return Pmw.OK
else:
return Pmw.PARTIAL
for a in value:
if not a in valid_chars:
return Pmw.ERROR
return Pmw.OK
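# These validators return Pmw's standard codes (Pmw.OK / Pmw.PARTIAL / Pmw.ERROR),
# so they can be attached to an entry widget; a minimal sketch, assuming a plain
# Pmw setup:
#   entry = Pmw.EntryField(parent, validate={'validator': validatePosInt})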
def validateWebLink(string, localhost=False, forcehttp=True):
""" validate a web link """
string=str(string).strip()
if forcehttp:
if not string.startswith('http://') and not string.startswith('https://'):
print "MISSING HTTP!!!"
return False
return True
# / validators
##################
###################### GUI
#
#
#from Tkinter import *
class PercentPie(Canvas):
""" Draws a percent pie configurable and modifiable.
Example:
root = Tk()
pie_1 = PercentPie(root, radius = 60)
pie_1.pack()
pie_1.set_percent(35)
"""
#def __init__(self, master, radius = 75, h = 100, w = 100, center = [0,0], pad = 3,
def __init__(self, master, radius = 75, pad = 3, border = 'black', fill = 'red', shadow=True):
# TODO add the coloring options (as RGB)
Canvas.__init__(self,master)
self.frame = Frame(self)
self.DEBUG = False
center = [0,0]
if shadow:
extra=4
else:
extra=2
w=pad+extra+radius
h=pad+extra+radius
#print 'R:%d, %dx%d' % (radius, w,h)
self.canvas = Canvas(self.frame, width = w, height = h)
# shadow
if shadow:
self.shadow = self.canvas.create_oval(center[0]+pad-1,
center[1]+pad-1,
center[0]+pad+3+radius,
center[1]+pad+3+radius,
fill = 'gray65', # COLOR HERE
outline = 'gray65')
# CIRCLE base and dark boundary
self.bg = self.canvas.create_oval(center[0]+pad-1,
center[1]+pad-1,
center[0]+pad+1+radius,
center[1]+pad+1+radius,
fill = 'red', # COLOR HERE
outline = 'black')
# CIRCLE halo color transition
self.bg = self.canvas.create_oval(center[0]+pad,
center[1]+pad,
center[0]+pad+radius,
center[1]+pad+radius,
outline = 'DarkSalmon') # COLOR HERE
# CIRCLE pie filler
self.arc = self.canvas.create_arc(center[0]+pad,
center[1]+pad,
center[0]+pad+radius,
center[1]+pad+radius,
start = 0, extent = | |
# Copyright (c) 2020 Nordic Semiconductor ASA
# SPDX-License-Identifier: Apache-2.0
"""
Like gen_kconfig_rest.py, but for generating an index of existing
devicetree bindings.
"""
import argparse
from collections import defaultdict
import glob
import io
import logging
import os
from pathlib import Path
import pprint
import re
import sys
import textwrap
from devicetree import edtlib
import gen_helpers
ZEPHYR_BASE = Path(__file__).parents[2]
GENERIC_OR_VENDOR_INDEPENDENT = 'Generic or vendor-independent'
UNKNOWN_VENDOR = 'Unknown vendor'
# Base properties that have documentation in 'dt-important-props'.
DETAILS_IN_IMPORTANT_PROPS = set('compatible label reg status interrupts'.split())
logger = logging.getLogger('gen_devicetree_rest')
class VndLookup:
"""
A convenience class for looking up information based on a
devicetree compatible's vendor prefix 'vnd'.
"""
def __init__(self, vendor_prefixes, bindings):
self.vnd2vendor = self.load_vnd2vendor(vendor_prefixes)
self.vnd2bindings = self.init_vnd2bindings(bindings)
self.vnd2ref_target = self.init_vnd2ref_target()
def vendor(self, vnd):
return self.vnd2vendor.get(vnd, UNKNOWN_VENDOR)
def bindings(self, vnd, default=None):
return self.vnd2bindings.get(vnd, default)
def target(self, vnd):
return self.vnd2ref_target.get(
vnd, self.vnd2ref_target[(UNKNOWN_VENDOR,)])
@staticmethod
def load_vnd2vendor(vendor_prefixes):
# Load the vendor-prefixes.txt file. Return a dict mapping 'vnd'
# vendor prefixes as they are found in compatible properties to
# each vendor's full name.
#
# For example, this line:
#
# vnd A stand-in for a real vendor
#
# Gets split into a key 'vnd' and a value 'A stand-in for a real
# vendor' in the return value.
#
# The 'None' key maps to GENERIC_OR_VENDOR_INDEPENDENT.
vnd2vendor = {
None: GENERIC_OR_VENDOR_INDEPENDENT,
}
vnd2vendor.update(edtlib.load_vendor_prefixes_txt(vendor_prefixes))
logger.info('found %d vendor prefixes in %s', len(vnd2vendor) - 1,
vendor_prefixes)
if logger.isEnabledFor(logging.DEBUG):
logger.debug('vnd2vendor=%s', pprint.pformat(vnd2vendor))
return vnd2vendor
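    # Illustrative result for a vendor-prefixes.txt containing only the example
    # line quoted above:
    #   {None: GENERIC_OR_VENDOR_INDEPENDENT, 'vnd': 'A stand-in for a real vendor'}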
def init_vnd2bindings(self, bindings):
# Take a 'vnd2vendor' map and a list of bindings and return a dict
# mapping 'vnd' vendor prefixes prefixes to lists of bindings. The
# bindings in each list are sorted by compatible. The keys in the
# return value are sorted by vendor name.
#
# Special cases:
#
# - The 'None' key maps to bindings with no vendor prefix
# in their compatibles, like 'gpio-keys'. This is the first key.
# - The (UNKNOWN_VENDOR,) key maps to bindings whose compatible
# has a vendor prefix that exists, but is not known,
# like 'somethingrandom,device'. This is the last key.
# Get an unsorted dict mapping vendor prefixes to lists of bindings.
unsorted = defaultdict(list)
generic_bindings = []
unknown_vendor_bindings = []
for binding in bindings:
vnd = compatible_vnd(binding.compatible)
if vnd is None:
generic_bindings.append(binding)
elif vnd in self.vnd2vendor:
unsorted[vnd].append(binding)
else:
unknown_vendor_bindings.append(binding)
# Key functions for sorting.
def vnd_key(vnd):
return self.vnd2vendor[vnd].casefold()
def binding_key(binding):
return binding.compatible
# Sort the bindings for each vendor by compatible.
# Plain dicts are sorted in CPython 3.6+, which is what we
# support, so the return dict's keys are in the same
# order as vnd2vendor.
#
# The unknown-vendor bindings being inserted as a 1-tuple key is a
# hack for convenience that ensures they won't collide with a
# known vendor. The code that consumes the dict below handles
# this.
vnd2bindings = {
None: sorted(generic_bindings, key=binding_key)
}
for vnd in sorted(unsorted, key=vnd_key):
vnd2bindings[vnd] = sorted(unsorted[vnd], key=binding_key)
vnd2bindings[(UNKNOWN_VENDOR,)] = sorted(unknown_vendor_bindings,
key=binding_key)
if logger.isEnabledFor(logging.DEBUG):
logger.debug('vnd2bindings: %s', pprint.pformat(vnd2bindings))
return vnd2bindings
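    # Sketch of the resulting shape (binding objects and the 'somevnd' prefix are
    # hypothetical):
    #   {None: [<gpio-keys>], 'somevnd': [<somevnd,dev-a>, <somevnd,dev-b>],
    #    (UNKNOWN_VENDOR,): [<unrecognized,device>]}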
def init_vnd2ref_target(self):
# The return value, vnd2ref_target, is a dict mapping vendor
# prefixes to ref targets for their relevant sections in this
# file, with these special cases:
#
# - The None key maps to the ref target for bindings with no
# vendor prefix in their compatibles, like 'gpio-keys'
# - The (UNKNOWN_VENDOR,) key maps to the ref target for bindings
# whose compatible has a vendor prefix that is not recognized.
vnd2ref_target = {}
for vnd in self.vnd2bindings:
if vnd is None:
vnd2ref_target[vnd] = 'dt_no_vendor'
elif isinstance(vnd, str):
vnd2ref_target[vnd] = f'dt_vendor_{vnd}'
else:
assert vnd == (UNKNOWN_VENDOR,), vnd
vnd2ref_target[vnd] = 'dt_unknown_vendor'
return vnd2ref_target
def main():
args = parse_args()
setup_logging(args.verbose)
bindings = load_bindings(args.dts_roots)
base_binding = load_base_binding()
vnd_lookup = VndLookup(args.vendor_prefixes, bindings)
dump_content(bindings, base_binding, vnd_lookup, args.out_dir,
args.turbo_mode)
def parse_args():
# Parse command line arguments from sys.argv.
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', default=0, action='count',
help='increase verbosity; may be given multiple times')
parser.add_argument('--vendor-prefixes', required=True,
help='vendor-prefixes.txt file path')
parser.add_argument('--dts-root', dest='dts_roots', action='append',
help='''additional DTS root directory as it would
be set in DTS_ROOTS''')
parser.add_argument('--turbo-mode', action='store_true',
help='Enable turbo mode (dummy references)')
parser.add_argument('out_dir', help='output files are generated here')
return parser.parse_args()
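# Example invocation (paths are illustrative):
#   python gen_devicetree_rest.py -v --vendor-prefixes dts/bindings/vendor-prefixes.txt \
#       --dts-root . doc/_build/rst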
def setup_logging(verbose):
if verbose >= 2:
log_level = logging.DEBUG
elif verbose == 1:
log_level = logging.INFO
else:
log_level = logging.ERROR
logging.basicConfig(format='%(filename)s:%(levelname)s: %(message)s',
level=log_level)
def load_bindings(dts_roots):
# Get a list of edtlib.Binding objects from searching 'dts_roots'.
if not dts_roots:
sys.exit('no DTS roots; use --dts-root to specify at least one')
binding_files = []
for dts_root in dts_roots:
binding_files.extend(glob.glob(f'{dts_root}/dts/bindings/**/*.yaml',
recursive=True))
bindings = edtlib.bindings_from_paths(binding_files, ignore_errors=True)
num_total = len(bindings)
# Remove bindings from the 'vnd' vendor, which is not a real vendor,
# but rather a stand-in we use for examples and tests when a real
# vendor would be inappropriate.
bindings = [binding for binding in bindings if
compatible_vnd(binding.compatible) != 'vnd']
logger.info('found %d bindings (ignored %d) in this dts_roots list: %s',
len(bindings), num_total - len(bindings), dts_roots)
return bindings
def load_base_binding():
# Make a Binding object for base.yaml.
#
# This helps separate presentation for properties common to all
# nodes from node-specific properties.
base_yaml = ZEPHYR_BASE / 'dts' / 'bindings' / 'base' / 'base.yaml'
base_includes = {"pm.yaml": os.fspath(ZEPHYR_BASE / 'dts' / 'bindings' / 'base'/ 'pm.yaml')}
if not base_yaml.is_file():
sys.exit(f'Expected to find base.yaml at {base_yaml}')
return edtlib.Binding(os.fspath(base_yaml), base_includes, require_compatible=False,
require_description=False)
def dump_content(bindings, base_binding, vnd_lookup, out_dir, turbo_mode):
# Dump the generated .rst files for a vnd2bindings dict.
# Files are only written if they are changed. Existing .rst
# files which would not be written by the 'vnd2bindings'
# dict are removed.
out_dir = Path(out_dir)
setup_bindings_dir(bindings, out_dir)
if turbo_mode:
write_dummy_index(bindings, out_dir)
else:
write_bindings_rst(vnd_lookup, out_dir)
write_orphans(bindings, base_binding, vnd_lookup, out_dir)
def setup_bindings_dir(bindings, out_dir):
# Make a set of all the Path objects we will be creating for
# out_dir / bindings / {binding_path}.rst. Delete all the ones that
# shouldn't be there. Make sure the bindings output directory
# exists.
paths = set()
bindings_dir = out_dir / 'bindings'
logger.info('making output subdirectory %s', bindings_dir)
bindings_dir.mkdir(parents=True, exist_ok=True)
for binding in bindings:
paths.add(bindings_dir / binding_filename(binding))
for dirpath, _, filenames in os.walk(bindings_dir):
for filename in filenames:
path = Path(dirpath) / filename
if path not in paths:
logger.info('removing unexpected file %s', path)
path.unlink()
def write_dummy_index(bindings, out_dir):
# Write out_dir / bindings.rst, with dummy anchors
# header
content = '\n'.join((
'.. _devicetree_binding_index:',
'.. _dt_vendor_zephyr:',
'',
'Dummy bindings index',
'####################',
'',
))
# build compatibles set and dump it
compatibles = {binding.compatible for binding in bindings}
content += '\n'.join((
f'.. dtcompatible:: {compatible}' for compatible in compatibles
))
write_if_updated(out_dir / 'bindings.rst', content)
def write_bindings_rst(vnd_lookup, out_dir):
# Write out_dir / bindings.rst, the top level index of bindings.
string_io = io.StringIO()
print_block(f'''\
.. _devicetree_binding_index:
Bindings index
##############
This page documents the available devicetree bindings.
See {zref('dt-bindings')} for an introduction to the Zephyr bindings
file format.
Vendor index
************
This section contains an index of hardware vendors.
Click on a vendor's name to go to the list of bindings for
that vendor.
.. rst-class:: rst-columns
''', string_io)
for vnd in vnd_lookup.vnd2bindings:
print(f'- :ref:`{vnd_lookup.target(vnd)}`', file=string_io)
print_block('''\
Bindings by vendor
******************
This section contains available bindings, grouped by vendor.
Within each group, bindings are listed by the "compatible" property
they apply to, like this:
**Vendor name (vendor prefix)**
.. rst-class:: rst-columns
- <compatible-A>
- <compatible-B> (on <bus-name> bus)
- <compatible-C>
- ...
The text "(on <bus-name> bus)" appears when bindings may behave
differently depending on the bus the node appears on.
For example, this applies to some sensor device nodes, which may
appear as children of either I2C or SPI bus nodes.
''', string_io)
for vnd, bindings in vnd_lookup.vnd2bindings.items():
if isinstance(vnd, tuple):
title = vnd[0]
else:
title = vnd_lookup.vendor(vnd).strip()
if isinstance(vnd, str):
title += f' ({vnd})'
underline = '=' * len(title)
print_block(f'''\
.. _{vnd_lookup.target(vnd)}:
{title}
{underline}
.. rst-class:: rst-columns
''', string_io)
for binding in bindings:
print(f'- :ref:`{binding_ref_target(binding)}`', file=string_io)
print(file=string_io)
write_if_updated(out_dir / 'bindings.rst', string_io.getvalue())
def write_orphans(bindings, base_binding, vnd_lookup, out_dir):
# Write out_dir / bindings / foo / binding_page.rst for each binding
# in 'bindings', along with any "disambiguation" pages needed when a
# single compatible string can be handled | |
**kwargs):
return self._call_func("empty_recycle_bin", *args, **kwargs)
def generate_tsig_key(self, *args, **kwargs):
return self._call_func("generate_tsig_key", *args, **kwargs)
def get_all_template_vendor_id(self, *args, **kwargs):
return self._call_func("get_all_template_vendor_id", *args, **kwargs)
def get_grid_revert_status(self, *args, **kwargs):
return self._call_func("get_grid_revert_status", *args, **kwargs)
def get_rpz_threat_details(self, *args, **kwargs):
return self._call_func("get_rpz_threat_details", *args, **kwargs)
def get_template_schema_versions(self, *args, **kwargs):
return self._call_func("get_template_schema_versions", *args, **kwargs)
def join(self, *args, **kwargs):
return self._call_func("join", *args, **kwargs)
def join_mgm(self, *args, **kwargs):
return self._call_func("join_mgm", *args, **kwargs)
def leave_mgm(self, *args, **kwargs):
return self._call_func("leave_mgm", *args, **kwargs)
def member_upgrade(self, *args, **kwargs):
return self._call_func("member_upgrade", *args, **kwargs)
def publish_changes(self, *args, **kwargs):
return self._call_func("publish_changes", *args, **kwargs)
def query_fqdn_on_member(self, *args, **kwargs):
return self._call_func("query_fqdn_on_member", *args, **kwargs)
def requestrestartservicestatus(self, *args, **kwargs):
return self._call_func("requestrestartservicestatus", *args, **kwargs)
def restartservices(self, *args, **kwargs):
return self._call_func("restartservices", *args, **kwargs)
def skip_member_upgrade(self, *args, **kwargs):
return self._call_func("skip_member_upgrade", *args, **kwargs)
def start_discovery(self, *args, **kwargs):
return self._call_func("start_discovery", *args, **kwargs)
def test_syslog_backup_server_connection(self, *args, **kwargs):
return self._call_func("test_syslog_backup_server_connection", *args,
**kwargs)
def test_syslog_connection(self, *args, **kwargs):
return self._call_func("test_syslog_connection", *args, **kwargs)
def upgrade(self, *args, **kwargs):
return self._call_func("upgrade", *args, **kwargs)
def upgrade_group_now(self, *args, **kwargs):
return self._call_func("upgrade_group_now", *args, **kwargs)
def upload_keytab(self, *args, **kwargs):
return self._call_func("upload_keytab", *args, **kwargs)
class GridCloudapi(InfobloxObject):
""" GridCloudapi: Grid Cloud API object.
Corresponds to WAPI object 'grid:cloudapi'
This object represents the Cloud Grid.
Fields:
allow_api_admins: Defines administrators who can perform cloud API
requests on the Grid Master. The valid value is NONE (no
administrator), ALL (all administrators), or LIST
(administrators on the ACL).
allowed_api_admins: The list of administrators who can perform cloud
API requests on the Cloud Platform Appliance.
enable_recycle_bin: Determines whether the recycle bin for deleted
cloud objects is enabled or not on the Grid Master.
gateway_config: Structure containing all the information related to
Gateway configuration for the Grid Master
"""
_infoblox_type = 'grid:cloudapi'
_fields = ['allow_api_admins', 'allowed_api_admins', 'enable_recycle_bin',
'gateway_config']
_search_for_update_fields = []
_updateable_search_fields = []
_all_searchable_fields = []
_return_fields = ['allow_api_admins', 'allowed_api_admins',
'enable_recycle_bin']
_remap = {}
_shadow_fields = ['_ref']
_custom_field_processing = {
'allowed_api_admins': GridCloudapiUser.from_dict,
}
class GridCloudapiCloudstatistics(InfobloxObject):
""" GridCloudapiCloudstatistics: Grid Cloud Statistics object.
Corresponds to WAPI object 'grid:cloudapi:cloudstatistics'
Represents the cloud statistics data.
Fields:
allocated_available_ratio: Ratio of allocated vs. available IPs
allocated_ip_count: Total number of IPs allocated by tenants.
available_ip_count: The total number of IP addresses available to
tenants. Only IP addresses in networks that are within a
delegation scope are counted.
fixed_ip_count: The total number of fixed IP addresses currently in
use by all tenants in the system.
floating_ip_count: The total number of floating IP addresses
currently in use by all tenants in the system.
tenant_count: Total number of tenant currently in the system.
tenant_ip_count: The total number of IP addresses currently in use
by all tenants in the system.
tenant_vm_count: The total number of VMs currently in use by all
tenants in the system.
"""
_infoblox_type = 'grid:cloudapi:cloudstatistics'
_fields = ['allocated_available_ratio', 'allocated_ip_count',
'available_ip_count', 'fixed_ip_count', 'floating_ip_count',
'tenant_count', 'tenant_ip_count', 'tenant_vm_count']
_search_for_update_fields = []
_updateable_search_fields = []
_all_searchable_fields = []
_return_fields = ['allocated_available_ratio', 'allocated_ip_count',
'available_ip_count', 'fixed_ip_count',
'floating_ip_count', 'tenant_count', 'tenant_ip_count',
'tenant_vm_count']
_remap = {}
_shadow_fields = ['_ref']
class Tenant(InfobloxObject):
""" Tenant: Grid Cloud API Tenant object.
Corresponds to WAPI object 'grid:cloudapi:tenant'
A Tenant object represents an abstract administrative concept in
Cloud Management Platforms, which encompasses all network elements
such as networks, zones, VMs, IP addresses (fixed and floating),
network views, default DNS view, and all related extensive
attributes.
Fields:
cloud_info: Structure containing all cloud API related information
for this object.
comment: Comment for the Grid Cloud API Tenant object; maximum 256
characters.
created_ts: The timestamp when the tenant was first created in the
system.
id: Unique ID associated with the tenant. This is set only when the
tenant is first created.
last_event_ts: The timestamp when the last event associated with the
tenant happened.
name: Name of the tenant.
network_count: Number of Networks associated with the tenant.
vm_count: Number of VMs associated with the tenant.
"""
_infoblox_type = 'grid:cloudapi:tenant'
_fields = ['cloud_info', 'comment', 'created_ts', 'id', 'last_event_ts',
'name', 'network_count', 'vm_count']
_search_for_update_fields = ['id', 'name']
_updateable_search_fields = ['comment', 'name']
_all_searchable_fields = ['comment', 'id', 'name']
_return_fields = ['comment', 'id', 'name']
_remap = {}
_shadow_fields = ['_ref']
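# Minimal usage sketch (assumes the standard infoblox-client connector; host and
# credentials are illustrative):
#   from infoblox_client import connector
#   conn = connector.Connector({'host': 'gm.example.org',
#                               'username': 'admin', 'password': 'secret'})
#   tenants = Tenant.search_all(conn, name='my-tenant')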
class GridCloudapiVm(InfobloxObject):
""" GridCloudapiVm: Grid Cloud API vm object.
Corresponds to WAPI object 'grid:cloudapi:vm'
A vm object represents a virtual machine which encompasses network
elements such as IP addresses (fixed and floating, private and
public), DNS names and all related extensive attributes.
Fields:
availability_zone: Availability zone of the VM.
cloud_info: Structure containing all the cloud API related
information for this object.
comment: Comment for the vm object; maximum 1024 characters.
elastic_ip_address: Elastic IP address associated with the VM's
primary interface.
extattrs: Extensible attributes associated with the object.For valid
values for extensible attributes, see the following information.
first_seen: The timestamp when the VM was first seen in the system.
hostname: Hostname part of the FQDN for the address associated with
the VM's primary interface.
id: Unique ID associated with the VM. This is set only when the VM
is first created.
kernel_id: Identifier of the kernel that this VM is running; maximum
128 characters.
last_seen: The timestamp when the last event associated with the VM
happened.
name: Name of the VM.
network_count: Number of Networks containing any address associated
with this VM.
operating_system: Guest Operating system that this VM is running;
maximum 128 characters.
primary_mac_address: MAC address associated with the VM's primary
interface.
subnet_address: Address of the network that is the container of the
address associated with the VM's primary interface.
subnet_cidr: CIDR of the network that is the container of the
address associated with the VM's primary interface.
subnet_id: Subnet ID of the network that is the container of the
address associated with the VM's primary interface.
tenant_name: Name of the tenant associated with the VM.
vm_type: VM type; maximum 64 characters.
vpc_address: Network address of the parent VPC.
vpc_cidr: Network CIDR of the parent VPC.
vpc_id: Identifier of the parent VPC.
vpc_name: Name of the parent VPC.
"""
_infoblox_type = 'grid:cloudapi:vm'
_fields = ['availability_zone', 'cloud_info', 'comment',
'elastic_ip_address', 'extattrs', 'first_seen', 'hostname',
'id', 'kernel_id', 'last_seen', 'name', 'network_count',
'operating_system', 'primary_mac_address', 'subnet_address',
'subnet_cidr', 'subnet_id', 'tenant_name', 'vm_type',
'vpc_address', 'vpc_cidr', 'vpc_id', 'vpc_name']
_search_for_update_fields = ['id', 'name']
_updateable_search_fields = ['comment', 'name']
_all_searchable_fields = ['comment', 'elastic_ip_address', 'id', 'name',
'primary_mac_address']
_return_fields = ['comment', 'extattrs', 'id', 'name']
_remap = {}
_shadow_fields = ['_ref']
class GridCloudapiVmaddress(InfobloxObject):
""" GridCloudapiVmaddress: Grid Cloud API VM address object.
Corresponds to WAPI object 'grid:cloudapi:vmaddress'
VM address is an abstract object that represents a virtual machine
running on the Cloud Management Platform.
Fields:
address: The IP address of the interface.
address_type: IP address type (Public, Private, Elastic, Floating,
...).
associated_ip: Reference to associated IPv4 or IPv6 address.
associated_object_types: Array of string denoting the types of
underlying objects IPv4/IPv6 - "A", "AAAA", "PTR", "HOST", "FA",
"RESERVATION", "UNMANAGED" + ("BULKHOST", "DHCP_RANGE",
"RESERVED_RANGE", "LEASE", "NETWORK", "BROADCAST", "PENDING"),
associated_objects: The list of references to the object (Host,
Fixed Address, RR, ...) that defines this IP.
cloud_info: Structure containing all the cloud API related
information. Only management platform "mgmt_platform" is updated
for this object.
dns_names: The list of all FQDNs associated with the IP address.
elastic_address: Elastic IP address associated with this private
address, if this address is a private address; otherwise empty.
interface_name: Name of the interface associated with this IP
address.
is_ipv4: Indicates whether the address is IPv4 or IPv6.
mac_address: The MAC address of the interface.
ms_ad_user_data: The Microsoft Active Directory user related
information.
network: The network to which this address belongs, in IPv4
Address/CIDR format.
network_view: Network view name of the delegated object.
port_id: Port identifier of the interface.
private_address: Private IP address associated with this public (or
elastic or floating) address, if this address is a public
address; otherwise empty.
private_hostname: Host part of the FQDN of this address if this
address is a private address; otherwise empty
public_address: Public IP address associated with this private
address, if this address is a private address; otherwise empty.
public_hostname: Host part of the FQDN of this address if this
address is a public (or elastic or floating) address; otherwise
empty
| |
# jieba/posseg/prob_trans.py (vendored under avrumnoor/NewsSummarizer's venv)
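# Transition table for jieba's part-of-speech HMM: keys are (position-state, POS-tag)
# pairs, where the position state is B/M/E/S (begin/middle/end/single-character word),
# and each value maps the allowed next (state, tag) pair to its log transition
# probability.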
P={('B', 'a'): {('E', 'a'): -0.0050648453069648755,
('M', 'a'): -5.287963037107507},
('B', 'ad'): {('E', 'ad'): -0.0007479013978476627,
('M', 'ad'): -7.198613337130562},
('B', 'ag'): {},
('B', 'an'): {('E', 'an'): 0.0},
('B', 'b'): {('E', 'b'): -0.06753917715798491,
('M', 'b'): -2.7286269787493125},
('B', 'bg'): {},
('B', 'c'): {('E', 'c'): -0.04442738163948101,
('M', 'c'): -3.1360307468646766},
('B', 'd'): {('E', 'd'): -0.04677309521554972,
('M', 'd'): -3.0857425240950174},
('B', 'df'): {('E', 'df'): 0.0},
('B', 'dg'): {},
('B', 'e'): {('E', 'e'): -0.05870760082677792,
('M', 'e'): -2.864396271492904},
('B', 'en'): {},
('B', 'f'): {('E', 'f'): -0.01914032315305203,
('M', 'f'): -3.965512906408017},
('B', 'g'): {},
('B', 'h'): {('E', 'h'): 0.0},
('B', 'i'): {('E', 'i'): -7.5100009018967535,
('M', 'i'): -0.0005477305718588087},
('B', 'in'): {('M', 'in'): 0.0},
('B', 'j'): {('E', 'j'): -0.2543207374058728,
('M', 'j'): -1.4936259227444613},
('B', 'jn'): {('M', 'jn'): 0.0},
('B', 'k'): {},
('B', 'l'): {('E', 'l'): -3.68054609970312, ('M', 'l'): -0.02553239955770466},
('B', 'ln'): {('M', 'ln'): 0.0},
('B', 'm'): {('E', 'm'): -0.1861592793620009,
('M', 'm'): -1.7727887176378618},
('B', 'mg'): {},
('B', 'mq'): {('E', 'mq'): -0.9316228008055788,
('M', 'mq'): -0.5007333022296009},
('B', 'n'): {('E', 'n'): -0.1386970766026951, ('M', 'n'): -2.04401015913586},
('B', 'ng'): {},
('B', 'nr'): {('E', 'nr'): -0.9187994754319938,
('M', 'nr'): -0.5091566178045187},
('B', 'nrfg'): {('E', 'nrfg'): -0.6898863081052478,
('M', 'nrfg'): -0.6964187211005602},
('B', 'nrt'): {('E', 'nrt'): -0.19889046807373303,
('M', 'nrt'): -1.7127985687430378},
('B', 'ns'): {('E', 'ns'): -0.30583032091031265,
('M', 'ns'): -1.333745856818688},
('B', 'nt'): {('E', 'nt'): -5.85632199504386,
('M', 'nt'): -0.0028658525153568088},
('B', 'nz'): {('E', 'nz'): -0.5116528585143264,
('M', 'nz'): -0.9150511614331327},
('B', 'o'): {('E', 'o'): -0.15615808279732152,
('M', 'o'): -1.9339496252366735},
('B', 'p'): {('E', 'p'): -0.0005180550387437946,
('M', 'p'): -7.565688085284925},
('B', 'q'): {('E', 'q'): -0.23327551934156823,
('M', 'q'): -1.5699064300606476},
('B', 'qe'): {('E', 'qe'): 0.0},
('B', 'qg'): {},
('B', 'r'): {('E', 'r'): -0.01687625300447763,
('M', 'r'): -4.090274052396829},
('B', 'rg'): {},
('B', 'rr'): {('E', 'rr'): -2.239526957026909,
('M', 'rr'): -0.11261887810814913},
('B', 'rz'): {('E', 'rz'): 0.0},
('B', 's'): {('E', 's'): -0.015465951118050396,
('M', 's'): -4.176837382501129},
('B', 't'): {('E', 't'): -0.06198448272867337,
('M', 't'): -2.8117033638171693},
('B', 'tg'): {},
('B', 'u'): {('E', 'u'): -2.170492159178564e-05,
('M', 'u'): -10.7379823741081},
('B', 'ud'): {},
('B', 'ug'): {},
('B', 'uj'): {},
('B', 'ul'): {},
('B', 'uv'): {},
('B', 'uz'): {},
('B', 'v'): {('E', 'v'): -0.012409266514635869,
('M', 'v'): -4.395510002912934},
('B', 'vd'): {('E', 'vd'): 0.0},
('B', 'vg'): {},
('B', 'vi'): {('M', 'vi'): 0.0},
('B', 'vn'): {('E', 'vn'): -0.01102649184048478,
('M', 'vn'): -4.512962732418981},
('B', 'vq'): {('E', 'vq'): 0.0},
('B', 'w'): {},
('B', 'x'): {},
('B', 'y'): {('E', 'y'): -0.0046236917718409775,
('M', 'y'): -5.378872763286892},
('B', 'yg'): {},
('B', 'z'): {('E', 'z'): -0.5543440297742611, ('M', 'z'): -0.854370382921828},
('B', 'zg'): {},
('E', 'a'): {('B', 'a'): -3.4301509494938407,
('B', 'ad'): -6.530359369866237,
('B', 'an'): -6.905890270156357,
('B', 'b'): -5.525983558928292,
('B', 'c'): -4.069370617491082,
('B', 'd'): -4.117261312051237,
('B', 'df'): -9.965267808870705,
('B', 'e'): -11.323391293023898,
('B', 'f'): -5.717589226727901,
('B', 'i'): -6.038496946808002,
('B', 'j'): -6.104838894944724,
('B', 'l'): -5.522952743340017,
('B', 'm'): -4.436632687260457,
('B', 'mq'): -8.361560571145588,
('B', 'n'): -1.479406075636878,
('B', 'nr'): -4.831656636089442,
('B', 'nrfg'): -8.451711668139886,
('B', 'nrt'): -7.1944663972050185,
('B', 'ns'): -5.321701425373129,
('B', 'nt'): -7.642880088580479,
('B', 'nz'): -6.519370248290642,
('B', 'o'): -9.78294625207675,
('B', 'p'): -6.0958541085369085,
('B', 'q'): -7.907487763972047,
('B', 'r'): -4.479759806376397,
('B', 'rr'): -13.520615870360118,
('B', 'rz'): -9.965267808870705,
('B', 's'): -6.188246664431055,
('B', 't'): -5.039464450291145,
('B', 'u'): -6.332203133863164,
('B', 'v'): -2.2068977276606287,
('B', 'vd'): -8.586141937229426,
('B', 'vi'): -11.574705721304804,
('B', 'vn'): -3.354802515634624,
('B', 'vq'): -12.827468689800172,
('B', 'y'): -8.458020837333152,
('B', 'z'): -6.33069369961431,
('S', 'a'): -4.841303829467445,
('S', 'ad'): -10.812565669257907,
('S', 'ag'): -7.868126690091467,
('S', 'an'): -10.812565669257907,
('S', 'b'): -7.3657577763437,
('S', 'c'): -3.544575125142281,
('S', 'd'): -3.378426427593927,
('S', 'dg'): -9.649414859452227,
('S', 'e'): -9.031979500627978,
('S', 'f'): -5.155641891921391,
('S', 'g'): -7.752294874566346,
('S', 'h'): -9.736426236441856,
('S', 'j'): -6.561217358226142,
('S', 'k'): -6.495966839906481,
('S', 'm'): -4.73967475678773,
('S', 'mg'): -11.574705721304804,
('S', 'n'): -4.191071250905458,
('S', 'ng'): -4.934829887478268,
('S', 'nr'): -6.3589938674209305,
('S', 'o'): -10.630244112463952,
('S', 'p'): -3.686853600315555,
('S', 'q'): -4.984600924703292,
('S', 'r'): -3.947717891287047,
('S', 'rg'): -12.827468689800172,
('S', 'tg'): -7.777612682550635,
('S', 'u'): -4.923872400185871,
('S', 'ud'): -5.816254702449806,
('S', 'ug'): -7.628971658534347,
('S', 'uj'): -1.5134881880027917,
('S', 'ul'): -4.426247674636356,
('S', 'uv'): -3.4773228981331137,
('S', 'uz'): -7.270640628100635,
('S', 'v'): -3.1548103981230424,
('S', 'vg'): -6.3809555343951985,
('S', 'vn'): -9.994255345743957,
('S', 'x'): -9.230156429211727,
('S', 'y'): -5.506941727527432},
('E', 'ad'): {('B', 'a'): -3.669490663077246,
('B', 'ad'): -5.577421564067343,
('B', 'an'): -7.408859517113575,
('B', 'b'): -6.442419001553947,
('B', 'c'): -5.341996757640598,
('B', 'd'): -4.558153015609841,
('B', 'df'): -9.172448109374933,
('B', 'f'): -6.6204021567493045,
('B', 'i'): -5.892163656378089,
('B', 'j'): -6.861564213566192,
('B', 'l'): -5.997872543403312,
('B', 'm'): -5.595897840234916,
('B', 'mq'): -8.824141415106718,
('B', 'n'): -2.6719094715397667,
('B', 'nr'): -5.990928071050501,
('B', 'nrfg'): -9.259459486364563,
('B', 'nrt'): -7.745331753734787,
('B', 'ns'): -6.245708707307894,
('B', 'nt'): -8.949304558060723,
('B', 'nz'): -7.482967489267296,
('B', 'o'): -9.577913217483097,
('B', 'p'): -5.436764589063194,
('B', 'q'): -9.259459486364563,
('B', 'r'): -5.405450875997045,
('B', 's'): -7.062234909028343,
('B', 't'): -6.152023223230571,
('B', 'u'): -6.379240099932416,
('B', 'v'): -0.6987018824779084,
('B', 'vd'): -9.018297429547674,
('B', 'vi'): -10.964207578602988,
('B', 'vn'): -3.0646831065707905,
('B', 'y'): -9.71144461010762,
('B', 'z'): -7.8506922693926136,
('S', 'a'): -5.554796164549365,
('S', 'ad'): -9.865595289934879,
('S', 'ag'): -9.172448109374933,
('S', 'b'): -8.884766036923152,
('S', 'c'): -4.4880047424923335,
('S', 'd'): -3.535874384412182,
('S', 'dg'): -10.271060398043042,
('S', 'e'): -10.964207578602988,
('S', 'f'): -5.483568655260997,
('S', 'g'): -8.712915779996493,
('S', 'h'): -9.865595289934879,
('S', 'j'): -7.725529126438608,
('S', 'k'): -7.275328124489052,
('S', 'm'): -5.537057340211983,
('S', 'n'): -5.825472281879416,
('S', 'ng'): -6.8290410218606326,
('S', 'nr'): -7.745331753734787,
('S', 'o'): -9.172448109374933,
('S', 'p'): -3.226155280913672,
('S', 'q'): -6.01544768822482,
('S', 'r'): -5.2772322222631685,
('S', 'tg'): -9.172448109374933,
('S', 'u'): -7.2147035026726165,
('S', 'ud'): -7.380688640146878,
('S', 'ug'): -7.968475305048997,
('S', 'uj'): -2.7833266366065983,
('S', 'ul'): -5.257097313854112,
('S', 'uv'): -3.6745970571518205,
('S', 'uz'): -8.884766036923152,
('S', 'v'): -2.244971905780595,
('S', 'vg'): -6.300768484490921,
('S', 'x'): -9.71144461010762,
('S', 'y'): -7.3006459324733415},
('E', 'ag'): {},
('E', 'an'): {('B', 'a'): -3.6052367516663226,
('B', 'ad'): -5.932878424497706,
('B', 'an'): -5.820082930352362,
('B', 'b'): -5.474131949776605,
('B', 'c'): -3.354245843284712,
('B', 'd'): -3.24087671226596,
('B', 'df'): -9.038958755220563,
('B', 'f'): -4.96708704885052,
('B', 'i'): -5.847111602740281,
('B', 'j'): -5.978687960529,
('B', 'l'): -5.428040842576338,
('B', 'm'): -4.790463513171203,
('B', 'mq'): -7.940346466552453,
('B', 'n'): -1.6296204338393674,
('B', 'nr'): -4.682249928530971,
('B', 'nrfg'): -8.191660894833358,
('B', 'nrt'): -7.304357699832456,
('B', 'ns'): -5.350079301106626,
('B', 'nt'): -7.572621686427135,
('B', 'nz'): -6.641063482422192,
('B', 'o'): -10.137571043888672,
('B', 'p'): -5.446223161659528,
('B', 'q'): -8.345811574660617,
('B', 'r'): -4.104484822089871,
('B', 's'): -6.353381409970411,
('B', 't'): -4.849304013194137,
('B', 'u'): -6.736373662226517,
('B', 'v'): -2.148688790579445,
('B', 'vd'): -9.038958755220563,
('B', 'vi'): -9.444423863328726,
('B', 'vn'): -3.0106802349898643,
('B', 'y'): -8.191660894833358,
('B', 'z'): -6.148586997324398,
('S', 'a'): -5.094145926969426,
('S', 'ad'): -10.137571043888672,
('S', 'ag'): -8.058129502208836,
('S', 'b'): -7.304357699832456,
('S', 'c'): -2.728434599968544,
('S', 'd'): -2.9349098473654345,
('S', 'dg'): -8.751276682768781,
('S', 'e'): -7.834985950894627,
('S', 'f'): -4.565417011710908,
('S', 'g'): -7.652664394100672,
('S', 'h'): -9.444423863328726,
('S', 'j'): -6.526653131244448,
('S', 'k'): -7.498513714273414,
('S', 'm'): -4.699491734965477,
('S', 'n'): -4.64040281859547,
('S', 'ng'): -5.100618441475043,
('S', 'nr'): -6.959517213540726,
('S', 'p'): -3.524186825509113,
('S', 'q'): -5.8068377036023415,
('S', 'r'): -3.5135058160887787,
('S', 'tg'): -6.641063482422192,
('S', 'u'): -4.478088828129051,
('S', 'ud'): -6.010436658843581,
('S', 'ug'): -7.364982321648891,
('S', 'uj'): -2.0789273316730545,
('S', 'ul'): -4.363019498344264,
('S', 'uv'): -4.271102986955375,
('S', 'uz'): -6.641063482422192,
('S', 'v'): -3.071957680290955,
('S', 'vg'): -6.026697179715361,
('S', 'x'): -9.444423863328726,
('S', 'y'): -5.22491615815262},
('E', 'b'): {('B', 'a'): -4.707673302574278,
('B', 'ad'): -7.800150133419737,
('B', 'an'): -6.283289297017788,
('B', 'b'): -4.557557781934221,
('B', 'c'): -6.218462133938539,
('B', 'd'): -5.577520032648095,
('B', 'f'): -5.6289933828534355,
('B', 'h'): -12.354027025020278,
('B', 'i'): -7.343391730924022,
('B', 'j'): -5.482935730409732,
('B', 'l'): -5.224729476090905,
('B', 'ln'): -12.354027025020278,
('B', 'm'): -4.711023389459561,
('B', 'mq'): -8.50387942331022,
('B', 'n'): -0.7488506003459426,
('B', 'nr'): -5.464435716665812,
('B', 'nrfg'): -9.175973194672332,
('B', 'nrt'): -7.653546659227862,
('B', 'ns'): -4.893536719194941,
('B', 'nt'): -6.744555229835318,
('B', 'nz'): -5.697300500841886,
('B', 'o'): -11.660879844460332,
('B', 'p'): -6.151491507832356,
('B', 'q'): -7.810732242750274,
('B', 'r'): -5.393679295918971,
('B', 'rz'): -10.967732663900387,
('B', 's'): -6.102123141854389,
('B', 't'): -6.11570239998077,
('B', 'u'): -6.614234112841044,
('B', 'v'): -2.302248786072008,
('B', 'vd'): -9.869120375232278,
('B', 'vn'): -2.882707963426864,
('B', 'vq'): -11.660879844460332,
('B', 'y'): -10.408116875964964,
('B', 'z'): -7.758907174885688,
('S', 'a'): -4.857374586851995,
('S', 'ag'): -8.164372282993853,
('S', 'b'): -5.649612670056171,
('S', 'bg'): -12.354027025020278,
('S', 'c'): -4.680338895752548,
('S', 'd'): -4.707673302574278,
('S', 'dg'): -10.274585483340442,
('S', 'f'): -4.603412292249869,
('S', 'g'): -7.748856839032187,
('S', 'h'): -8.210892298628746,
('S', 'j'): -6.216299970934044,
('S', 'k'): -6.424437881630383,
('S', 'm'): -4.805998055085264,
('S', 'n'): -3.636344972854637,
('S', 'ng'): -4.001944353667641,
('S', 'nr'): -7.278853209786451,
('S', 'o'): -10.967732663900387,
('S', 'p'): -3.903828702428319,
('S', 'q'): -4.428869512795575,
('S', 'r'): -5.6883433072378695,
('S', 'tg'): -8.328675334285128,
('S', 'u'): -5.63139723016483,
('S', 'ud'): -8.827666500404117,
('S', 'ug'): -8.690465378890632,
('S', 'uj'): -2.512468069106317,
('S', 'ul'): -7.463677896798524,
('S', 'uv'): -6.064311454111281,
('S', 'uz'): -8.023293684733947,
('S', 'v'): -2.7256344294304595,
('S', 'vg'): -5.935662089084067,
('S', 'vn'): -9.463655267124114,
('S', 'x'): -8.857519463553798,
('S', 'y'): -8.422201392295952,
('S', 'yg'): -12.354027025020278},
('E', 'bg'): {},
('E', 'c'): {('B', 'a'): -4.316505787725195,
('B', 'ad'): -5.657713206360876,
('B', 'an'): -7.810927842700653,
('B', 'b'): -4.974044769755475,
('B', 'c'): -4.096441559872942,
('B', 'd'): -3.4848930231958763,
('B', 'df'): -8.56861354439817,
('B', 'e'): -12.192954477374535,
('B', 'f'): -5.183093975455244,
('B', 'h'): -12.88610165793448,
('B', 'i'): -5.377862883255817,
('B', 'in'): -12.88610165793448,
('B', 'j'): -5.2189434026153325,
('B', 'l'): -5.076560333281069,
('B', 'm'): -3.738914100963566,
('B', 'mq'): -6.469369375422154,
('B', 'n'): -1.8458874846215843,
('B', 'nr'): -3.8640204354190053,
('B', 'nrfg'): -7.302605349152781,
('B', 'nrt'): -6.2985516431096835,
('B', 'ns'): -3.8921775165230805,
('B', 'nt'): -5.97734687861926,
('B', 'nz'): -5.659892647833808,
('B', 'o'): -9.66722583306628,
('B', | |
+ m.x915 - m.x936 - m.x943
+ m.x985 + m.x999 + m.x1013 - m.x1034 - m.x1041 + m.x1083 + m.x1097 + m.x1111 - m.x1132
- m.x1139 + m.x1181 + m.x1195 + m.x1209 - m.x1230 - m.x1237 + m.x1279 + m.x1293 + m.x1307
- m.x1328 - m.x1335 >= 0)
m.c3897 = Constraint(expr= m.x594 + m.x608 + m.x622 - m.x643 - m.x650 + m.x692 + m.x706 + m.x720 - m.x741 - m.x748
+ m.x790 + m.x804 + m.x818 - m.x839 - m.x846 + m.x888 + m.x902 + m.x916 - m.x937 - m.x944
+ m.x986 + m.x1000 + m.x1014 - m.x1035 - m.x1042 + m.x1084 + m.x1098 + m.x1112 - m.x1133
- m.x1140 + m.x1182 + m.x1196 + m.x1210 - m.x1231 - m.x1238 + m.x1280 + m.x1294 + m.x1308
- m.x1329 - m.x1336 >= -50)
m.c3898 = Constraint(expr= m.x595 + m.x609 + m.x623 - m.x644 - m.x651 + m.x693 + m.x707 + m.x721 - m.x742 - m.x749
+ m.x791 + m.x805 + m.x819 - m.x840 - m.x847 + m.x889 + m.x903 + m.x917 - m.x938 - m.x945
+ m.x987 + m.x1001 + m.x1015 - m.x1036 - m.x1043 + m.x1085 + m.x1099 + m.x1113 - m.x1134
- m.x1141 + m.x1183 + m.x1197 + m.x1211 - m.x1232 - m.x1239 + m.x1281 + m.x1295 + m.x1309
- m.x1330 - m.x1337 >= 0)
m.c3899 = Constraint(expr= m.x596 + m.x610 + m.x624 - m.x645 - m.x652 + m.x694 + m.x708 + m.x722 - m.x743 - m.x750
+ m.x792 + m.x806 + m.x820 - m.x841 - m.x848 + m.x890 + m.x904 + m.x918 - m.x939 - m.x946
+ m.x988 + m.x1002 + m.x1016 - m.x1037 - m.x1044 + m.x1086 + m.x1100 + m.x1114 - m.x1135
- m.x1142 + m.x1184 + m.x1198 + m.x1212 - m.x1233 - m.x1240 + m.x1282 + m.x1296 + m.x1310
- m.x1331 - m.x1338 >= 0)
m.c3900 = Constraint(expr= m.x611 + m.x625 - m.x653 + m.x709 + m.x723 - m.x751 + m.x807 + m.x821 - m.x849 + m.x905
+ m.x919 - m.x947 + m.x1003 + m.x1017 - m.x1045 + m.x1101 + m.x1115 - m.x1143 + m.x1199
+ m.x1213 - m.x1241 + m.x1297 + m.x1311 - m.x1339 >= 0)
m.c3901 = Constraint(expr= m.x612 + m.x626 - m.x654 + m.x710 + m.x724 - m.x752 + m.x808 + m.x822 - m.x850 + m.x906
+ m.x920 - m.x948 + m.x1004 + m.x1018 - m.x1046 + m.x1102 + m.x1116 - m.x1144 + m.x1200
+ m.x1214 - m.x1242 + m.x1298 + m.x1312 - m.x1340 >= 0)
m.c3902 = Constraint(expr= m.x613 + m.x627 - m.x655 + m.x711 + m.x725 - m.x753 + m.x809 + m.x823 - m.x851 + m.x907
+ m.x921 - m.x949 + m.x1005 + m.x1019 - m.x1047 + m.x1103 + m.x1117 - m.x1145 + m.x1201
+ m.x1215 - m.x1243 + m.x1299 + m.x1313 - m.x1341 >= 0)
m.c3903 = Constraint(expr= m.x614 + m.x628 - m.x656 + m.x712 + m.x726 - m.x754 + m.x810 + m.x824 - m.x852 + m.x908
+ m.x922 - m.x950 + m.x1006 + m.x1020 - m.x1048 + m.x1104 + m.x1118 - m.x1146 + m.x1202
+ m.x1216 - m.x1244 + m.x1300 + m.x1314 - m.x1342 >= 0)
m.c3904 = Constraint(expr= m.x615 + m.x629 - m.x657 + m.x713 + m.x727 - m.x755 + m.x811 + m.x825 - m.x853 + m.x909
+ m.x923 - m.x951 + m.x1007 + m.x1021 - m.x1049 + m.x1105 + m.x1119 - m.x1147 + m.x1203
+ m.x1217 - m.x1245 + m.x1301 + m.x1315 - m.x1343 >= 0)
m.c3905 = Constraint(expr= m.x616 + m.x630 - m.x658 + m.x714 + m.x728 - m.x756 + m.x812 + m.x826 - m.x854 + m.x910
+ m.x924 - m.x952 + m.x1008 + m.x1022 - m.x1050 + m.x1106 + m.x1120 - m.x1148 + m.x1204
+ m.x1218 - m.x1246 + m.x1302 + m.x1316 - m.x1344 >= -30)
m.c3906 = Constraint(expr= m.x617 + m.x631 - m.x659 + m.x715 + m.x729 - m.x757 + m.x813 + m.x827 - m.x855 + m.x911
+ m.x925 - m.x953 + m.x1009 + m.x1023 - m.x1051 + m.x1107 + m.x1121 - m.x1149 + m.x1205
+ m.x1219 - m.x1247 + m.x1303 + m.x1317 - m.x1345 >= 0)
m.c3907 = Constraint(expr= m.x632 + m.x639 + m.x730 + m.x737 + m.x828 + m.x835 + m.x926 + m.x933 + m.x1024 + m.x1031
+ m.x1122 + m.x1129 + m.x1220 + m.x1227 + m.x1318 + m.x1325 >= 0)
m.c3908 = Constraint(expr= m.x633 + m.x640 + m.x731 + m.x738 + m.x829 + m.x836 + m.x927 + m.x934 + m.x1025 + m.x1032
+ m.x1123 + m.x1130 + m.x1221 + m.x1228 + m.x1319 + m.x1326 >= 0)
m.c3909 = Constraint(expr= m.x634 + m.x641 + m.x732 + m.x739 + m.x830 + m.x837 + m.x928 + m.x935 + m.x1026 + m.x1033
+ m.x1124 + m.x1131 + m.x1222 + m.x1229 + m.x1320 + m.x1327 >= 0)
m.c3910 = Constraint(expr= m.x635 + m.x642 + m.x733 + m.x740 + m.x831 + m.x838 + m.x929 + m.x936 + m.x1027 + m.x1034
+ m.x1125 + m.x1132 + m.x1223 + m.x1230 + m.x1321 + m.x1328 >= 0)
m.c3911 = Constraint(expr= m.x636 + m.x643 + m.x734 + m.x741 + m.x832 + m.x839 + m.x930 + m.x937 + m.x1028 + m.x1035
+ m.x1126 + m.x1133 + m.x1224 + m.x1231 + m.x1322 + m.x1329 >= 0)
m.c3912 = Constraint(expr= m.x637 + m.x644 + m.x735 + m.x742 + m.x833 + m.x840 + m.x931 + m.x938 + m.x1029 + m.x1036
+ m.x1127 + m.x1134 + m.x1225 + m.x1232 + m.x1323 + m.x1330 >= 0)
m.c3913 = Constraint(expr= m.x638 + m.x645 + m.x736 + m.x743 + m.x834 + m.x841 + m.x932 + m.x939 + m.x1030 + m.x1037
+ m.x1128 + m.x1135 + m.x1226 + m.x1233 + m.x1324 + m.x1331 >= 0)
m.c3914 = Constraint(expr= m.x646 + m.x653 + m.x744 + m.x751 + m.x842 + m.x849 + m.x940 + m.x947 + m.x1038 + m.x1045
+ m.x1136 + m.x1143 + m.x1234 + m.x1241 + m.x1332 + m.x1339 >= 0)
m.c3915 = Constraint(expr= m.x647 + m.x654 + m.x745 + m.x752 + m.x843 + m.x850 + m.x941 + m.x948 + m.x1039 + m.x1046
+ m.x1137 + m.x1144 + m.x1235 + m.x1242 + m.x1333 + m.x1340 >= 0)
m.c3916 = Constraint(expr= m.x648 + m.x655 + m.x746 + m.x753 + m.x844 + m.x851 + m.x942 + m.x949 + m.x1040 + m.x1047
+ m.x1138 + m.x1145 + m.x1236 + m.x1243 + m.x1334 + m.x1341 >= 0)
m.c3917 = Constraint(expr= m.x649 + m.x656 + m.x747 + m.x754 + m.x845 + m.x852 + m.x943 + m.x950 + m.x1041 + m.x1048
+ m.x1139 + m.x1146 + m.x1237 + m.x1244 + m.x1335 + m.x1342 >= 0)
m.c3918 = Constraint(expr= m.x650 + m.x657 + m.x748 + m.x755 + m.x846 + m.x853 + m.x944 + m.x951 + m.x1042 + m.x1049
+ m.x1140 + m.x1147 + m.x1238 + m.x1245 + m.x1336 + m.x1343 >= 0)
m.c3919 = Constraint(expr= m.x651 + m.x658 + m.x749 + m.x756 + m.x847 + m.x854 + m.x945 + m.x952 + m.x1043 + m.x1050
+ m.x1141 + m.x1148 + m.x1239 + m.x1246 + m.x1337 + m.x1344 >= 0)
m.c3920 = Constraint(expr= m.x652 + m.x659 + m.x750 + m.x757 + m.x848 + m.x855 + m.x946 + m.x953 + m.x1044 + m.x1051
+ m.x1142 + m.x1149 + m.x1240 + m.x1247 + m.x1338 + m.x1345 >= 0)
m.c3921 = Constraint(expr= - m.x562 - m.x660 - m.x758 - m.x856 - m.x954 - m.x1052 - m.x1150 - m.x1248 <= 0)
m.c3922 = Constraint(expr= - m.x563 - m.x661 - m.x759 - m.x857 - m.x955 - m.x1053 - m.x1151 - m.x1249 <= 50)
m.c3923 = Constraint(expr= - m.x564 - m.x662 - m.x760 - m.x858 - m.x956 - m.x1054 - m.x1152 - m.x1250 <= 50)
m.c3924 = Constraint(expr= - m.x565 - m.x663 - m.x761 - m.x859 - m.x957 - m.x1055 - m.x1153 - m.x1251 <= 50)
m.c3925 = Constraint(expr= - m.x566 - m.x664 - m.x762 - m.x860 - m.x958 - m.x1056 - m.x1154 - m.x1252 <= 50)
m.c3926 = Constraint(expr= - m.x567 - m.x665 - m.x763 - m.x861 - m.x959 - m.x1057 - m.x1155 - m.x1253 <= 50)
m.c3927 = Constraint(expr= - m.x568 - m.x666 - m.x764 - m.x862 - m.x960 - m.x1058 - m.x1156 | |
'매도미체결수량', '매수미체결수량',
'공시', '정보제공', '언론사', '제목']:
item = QtWidgets.QTableWidgetItem(str(df[column][index]))
elif gubun in [ui_num['재무년도'], ui_num['재무분기'], ui_num['동업종비교']]:
try:
item = QtWidgets.QTableWidgetItem(str(df[column][index]))
except KeyError:
continue
elif column not in ['수익률', '등락율', '고저평균대비등락율', '체결강도',
'체결강도5분', '체결강도20분', '체결강도60분', '최고체결강도']:
item = QtWidgets.QTableWidgetItem(changeFormat(df[column][index]).split('.')[0])
else:
item = QtWidgets.QTableWidgetItem(changeFormat(df[column][index]))
if column in ['종목명', '호가종목명', '공시', '제목', '구분']:
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignLeft)
elif column in ['거래횟수', '추정예탁자산', '추정예수금', '보유종목수', '주문구분', '체결시간', '거래일자', '기간',
                                '일자', '매도미체결수량', '매수미체결수량', '정보제공', '언론사']:
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
else:
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)
if column == '체결수량':
if j == 0:
item.setIcon(self.icon_totalb)
elif j == 21:
item.setIcon(self.icon_totals)
elif column == '체결강도' and gubun in [ui_num['체결수량0'], ui_num['체결수량1']]:
if j == 0:
item.setIcon(self.icon_up)
elif j == 21:
item.setIcon(self.icon_down)
elif gubun in [ui_num['호가0'], ui_num['호가1']]:
if column == '증감':
if j == 0:
item.setIcon(self.icon_perb)
elif j == 21:
item.setIcon(self.icon_pers)
elif column == '잔량':
if j == 0:
item.setIcon(self.icon_totalb)
elif j == 21:
item.setIcon(self.icon_totals)
elif column == '호가':
if j == 0:
item.setIcon(self.icon_up)
elif j == 21:
item.setIcon(self.icon_down)
else:
if gubun == ui_num['호가0']:
hj_tableWidget = self.hoga_00_hj_tableWidget
else:
hj_tableWidget = self.hoga_01_hj_tableWidget
if hj_tableWidget.item(0, 0) is not None:
o = comma2int(hj_tableWidget.item(0, columns_hj.index('시가')).text())
h = comma2int(hj_tableWidget.item(0, columns_hj.index('고가')).text())
low = comma2int(hj_tableWidget.item(0, columns_hj.index('저가')).text())
if o != 0:
if df[column][index] == o:
item.setIcon(self.icon_open)
elif df[column][index] == h:
item.setIcon(self.icon_high)
elif df[column][index] == low:
item.setIcon(self.icon_low)
elif column == '등락율':
if j == 0:
item.setIcon(self.icon_up)
elif j == 21:
item.setIcon(self.icon_down)
else:
if gubun == ui_num['호가0']:
hj_tableWidget = self.hoga_00_hj_tableWidget
else:
hj_tableWidget = self.hoga_01_hj_tableWidget
if hj_tableWidget.item(0, 0) is not None:
uvi = comma2int(hj_tableWidget.item(0, columns_hj.index('UVI')).text())
dvi = comma2int(hj_tableWidget.item(0, columns_hj.index('DVI')).text())
if df[column][index] != 0:
if j < 11:
if df['호가'][index] == uvi:
item.setIcon(self.icon_vi)
else:
if df['호가'][index] == dvi:
item.setIcon(self.icon_vi)
if '수익률' in df.columns:
if df['수익률'][index] >= 0:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif gubun == ui_num['체결목록']:
if df['주문구분'][index] == '매수':
item.setForeground(color_fg_bt)
elif df['주문구분'][index] == '매도':
item.setForeground(color_fg_dk)
elif df['주문구분'][index] in ['매도취소', '매수취소']:
item.setForeground(color_fg_bc)
elif gubun in [ui_num['기업공시'], ui_num['기업뉴스']]:
cname = '공시' if gubun == ui_num['기업공시'] else '제목'
if '단기과열' in df[cname][index] or '투자주의' in df[cname][index] or \
'투자경고' in df[cname][index] or '투자위험' in df[cname][index] or \
'거래정지' in df[cname][index] or '환기종목' in df[cname][index] or \
'불성실공시' in df[cname][index] or '관리종목' in df[cname][index] or \
'정리매매' in df[cname][index] or '유상증자' in df[cname][index] or \
'무상증자' in df[cname][index]:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif gubun == ui_num['투자자']:
if column in ['등락율', '개인투자자', '외국인투자자', '기관계']:
if df[column][index] >= 0:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif gubun in [ui_num['재무년도'], ui_num['재무분기'], ui_num['동업종비교']]:
if '-' not in df[column][index] and column != '구분':
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif gubun == ui_num['체결강도']:
if column == '등락율':
if df[column][index] >= 0:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif '체결강도' in column:
if df[column][index] >= 100:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif gubun in [ui_num['체결수량0'], ui_num['체결수량1']]:
if column == '체결수량':
if j == 0:
if df[column][index] > df[column][21]:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif j == 21:
if df[column][index] > df[column][0]:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
else:
if gubun == ui_num['체결수량0']:
hg_tableWidget = self.hoga_00_hg_tableWidget
else:
hg_tableWidget = self.hoga_01_hg_tableWidget
if hg_tableWidget.item(0, 0) is not None and \
hg_tableWidget.item(10, columns_hg.index('호가')).text() != '':
c = comma2int(hg_tableWidget.item(10, columns_hg.index('호가')).text())
if df[column][index] > 0:
item.setForeground(color_fg_bt)
if df[column][index] * c > 90000000:
item.setBackground(color_bf_bt)
elif df[column][index] < 0:
item.setForeground(color_fg_dk)
if df[column][index] * c < -90000000:
item.setBackground(color_bf_dk)
elif column == '체결강도':
if df[column][index] >= 100:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif gubun in [ui_num['호가0'], ui_num['호가1']]:
if '증감' in column:
if j == 0:
if df[column][index] > 100:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif j == 21:
if df[column][index] > 100:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif df[column][index] > 0:
item.setForeground(color_fg_bt)
if df[column][index] * df['호가'][10] > 90000000:
item.setBackground(color_bf_bt)
elif df[column][index] < 0:
item.setForeground(color_fg_dk)
if df[column][index] * df['호가'][11] < -90000000:
item.setBackground(color_bf_dk)
elif column == '잔량':
if j == 0:
if df[column][index] > df[column][21]:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif j == 21:
if df[column][index] > df[column][0]:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif j < 11:
item.setForeground(color_fg_bt)
else:
item.setForeground(color_fg_dk)
elif column in ['호가', '등락율']:
if df['등락율'][index] > 0:
item.setForeground(color_fg_bt)
elif df['등락율'][index] < 0:
item.setForeground(color_fg_dk)
if column == '호가' and df[column][index] != 0:
if gubun == ui_num['호가0']:
hj_tableWidget = self.hoga_00_hj_tableWidget
else:
hj_tableWidget = self.hoga_01_hj_tableWidget
if hj_tableWidget.item(0, 0) is not None:
c = comma2int(hj_tableWidget.item(0, columns_hj.index('현재가')).text())
if j not in [0, 21] and df[column][index] == c:
item.setBackground(color_bf_bt)
if hj_tableWidget.item(0, columns_hj.index('매입가')).text() != '0':
gap = df[column][19] - df[column][20]
buyprice = comma2int(hj_tableWidget.item(0, columns_hj.index('매입가')).text())
if df[column][index] <= buyprice < df[column][index] + gap:
item.setBackground(color_bf_dk)
elif gubun in [ui_num['매도주문0'], ui_num['매도주문1'], ui_num['매수주문0'], ui_num['매수주문1']]:
item.setForeground(color_fg_bt)
item.setBackground(color_bg_bt)
tableWidget.setItem(j, i, item)
if len(df) < 13 and gubun in [ui_num['거래목록'], ui_num['잔고목록'], ui_num['체결목록']]:
tableWidget.setRowCount(13)
elif len(df) < 22 and gubun == ui_num['기업공시']:
tableWidget.setRowCount(22)
elif len(df) < 12 and gubun == ui_num['기업뉴스']:
tableWidget.setRowCount(12)
elif len(df) < 28 and gubun == ui_num['체결강도']:
tableWidget.setRowCount(28)
elif len(df) < 31 and gubun == ui_num['당일상세']:
tableWidget.setRowCount(31)
elif len(df) < 41 and gubun == ui_num['누적상세']:
tableWidget.setRowCount(41)
@QtCore.pyqtSlot(int)
def CellClicked_1(self, row):
item = self.hoga_00_hj_tableWidget.item(0, 0)
if item is None:
return
name = item.text()
if self.hoga_00_hj_tableWidget.item(0, columns_jg.index('보유수량')).text() == '':
return
jc = comma2int(self.hoga_00_hj_tableWidget.item(0, columns_jg.index('보유수량')).text())
if self.hoga_00_hg_tableWidget.item(row, columns_hg.index('호가')).text() == '':
return
hg = comma2int(self.hoga_00_hg_tableWidget.item(row, columns_hg.index('호가')).text())
bper = 0
if self.hoga_00_sell_radioButton_01.isChecked():
bper = 10
elif self.hoga_00_sell_radioButton_02.isChecked():
bper = 25
elif self.hoga_00_sell_radioButton_03.isChecked():
bper = 33
elif self.hoga_00_sell_radioButton_04.isChecked():
bper = 50
elif self.hoga_00_sell_radioButton_05.isChecked():
bper = 75
elif self.hoga_00_sell_radioButton_06.isChecked():
bper = 100
if bper == 0:
windowQ.put([2, '시스템 명령 오류 알림 - 매도비율을 선택하십시오.'])
return
oc = int(jc * (bper / 100))
if oc == 0:
oc = 1
order = ['매도', self.dict_code[name], name, hg, oc]
traderQ.put(order)
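    # Example (illustrative): holding 10 shares with the 50% sell-ratio button
    # checked sells int(10 * 0.5) = 5 shares at the clicked price level.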
@QtCore.pyqtSlot(int)
def CellClicked_2(self, row):
item = self.hoga_00_hj_tableWidget.item(0, 0)
if item is None:
return
name = item.text()
if self.hoga_00_hg_tableWidget.item(row, columns_hg.index('호가')).text() == '':
return
hg = comma2int(self.hoga_00_hg_tableWidget.item(row, columns_hg.index('호가')).text())
og = 0
if self.hoga_00_buy_radioButton_01.isChecked():
og = 100000
elif self.hoga_00_buy_radioButton_02.isChecked():
og = 500000
elif self.hoga_00_buy_radioButton_03.isChecked():
og = 1000000
elif self.hoga_00_buy_radioButton_04.isChecked():
og = 5000000
elif self.hoga_00_buy_radioButton_05.isChecked():
og = 10000000
elif self.hoga_00_buy_radioButton_06.isChecked():
og = 50000000
if og == 0:
windowQ.put([2, '시스템 명령 오류 알림 - 매수금액을 선택하십시오.'])
return
oc = int(og / hg)
order = ['매수', self.dict_code[name], name, hg, oc]
traderQ.put(order)
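    # Example (illustrative): with the 1,000,000-won buy button checked and a
    # clicked price of 25,000, the order size is int(1000000 / 25000) = 40 shares.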
@QtCore.pyqtSlot(int)
def CellClicked_3(self, row):
item = self.hoga_01_hj_tableWidget.item(0, 0)
if item is None:
return
name = item.text()
if self.hoga_01_hj_tableWidget.item(0, columns_jg.index('보유수량')).text() == '':
return
jc = comma2int(self.hoga_01_hj_tableWidget.item(0, columns_jg.index('보유수량')).text())
if self.hoga_01_hg_tableWidget.item(row, columns_hg.index('호가')).text() == '':
return
hg = comma2int(self.hoga_01_hg_tableWidget.item(row, columns_hg.index('호가')).text())
bper = 0
if self.hoga_01_sell_radioButton_01.isChecked():
bper = 10
elif self.hoga_01_sell_radioButton_02.isChecked():
bper = 25
elif self.hoga_01_sell_radioButton_03.isChecked():
bper = 33
elif self.hoga_01_sell_radioButton_04.isChecked():
bper = 50
elif self.hoga_01_sell_radioButton_05.isChecked():
bper = 75
elif self.hoga_01_sell_radioButton_06.isChecked():
bper = 100
if bper == 0:
windowQ.put([2, '시스템 명령 오류 알림 - 매도비율을 선택하십시오.'])
return
oc = int(jc * (bper / 100))
if oc == 0:
oc = 1
order = ['매도', self.dict_code[name], name, hg, oc]
traderQ.put(order)
@QtCore.pyqtSlot(int)
def CellClicked_4(self, row):
item = self.hoga_01_hj_tableWidget.item(0, 0)
if item is None:
return
name = item.text()
if self.hoga_01_hg_tableWidget.item(row, columns_hg.index('호가')).text() == '':
return
hg = comma2int(self.hoga_01_hg_tableWidget.item(row, columns_hg.index('호가')).text())
og = 0
if self.hoga_01_buy_radioButton_01.isChecked():
og = 100000
elif self.hoga_01_buy_radioButton_02.isChecked():
og = 500000
elif self.hoga_01_buy_radioButton_03.isChecked():
og = 1000000
elif self.hoga_01_buy_radioButton_04.isChecked():
og = 5000000
elif self.hoga_01_buy_radioButton_05.isChecked():
og = 10000000
elif self.hoga_01_buy_radioButton_06.isChecked():
og = 50000000
if og == 0:
windowQ.put([2, '시스템 명령 오류 알림 - 매수금액을 선택하십시오.'])
return
oc = int(og / hg)
order = ['매수', self.dict_code[name], name, hg, oc]
traderQ.put(order)
@QtCore.pyqtSlot(int, int)
def CellClicked_5(self, row, col):
if col > 1:
return
item = self.td_tableWidget.item(row, 0)
if item is None:
return
code = self.dict_code[item.text()]
self.PutTraderQ(code, col)
@QtCore.pyqtSlot(int, int)
def CellClicked_6(self, row, col):
if col > 1:
return
item = self.jg_tableWidget.item(row, 0)
if item is None:
return
code = self.dict_code[item.text()]
self.PutTraderQ(code, col)
@QtCore.pyqtSlot(int, int)
def CellClicked_7(self, row, col):
if col > 1:
return
item = self.cj_tableWidget.item(row, 0)
if item is None:
return
code = self.dict_code[item.text()]
self.PutTraderQ(code, col)
@QtCore.pyqtSlot(int, int)
def CellClicked_8(self, row, col):
if col > 1:
return
item = self.gj_tableWidget.item(row, 0)
if item is None:
return
code = self.dict_code[item.text()]
self.PutTraderQ(code, col)
@QtCore.pyqtSlot(int, int)
def CellClicked_9(self, row, col):
if col > 1:
return
item = self.dd_tableWidget.item(row, 1)
if item is None:
return
code = self.dict_code[item.text()]
self.PutTraderQ(code, col)
def PutTraderQ(self, code, col):
if self.mode2 != 0:
return
if self.mode1 == 0:
if self.mode0 == 1:
self.ButtonClicked_4(0)
if col == 0:
traderQ.put(f"현재가{ui_num['차트P1']} {code}")
elif col ==
0": {
"image_ver": "Cisco IOS Software, IOSv Software (VIOS-ADVENTERPRISEK9-M), Version 15.6(3)M2, RELEASE SOFTWARE (fc2)",
"uptime_in_curr_state": "1 day, 16 hours, 42 minutes",
"config_register": "0x0",
"curr_sw_state": "ACTIVE"
}
}
}
golden_output_iosv = {'execute.return_value': '''\
Redundant System Information :
------------------------------
Available system uptime = 0 minutes
Switchovers system experienced = 0
Standby failures = 0
Last switchover reason = unsupported
Hardware Mode = Simplex
Maintenance Mode = Disabled
Communications = Down Reason: Failure
Current Processor Information :
-------------------------------
Active Location = slot 0
Current Software state = ACTIVE
Uptime in current state = 1 day, 16 hours, 42 minutes
Image Version = Cisco IOS Software, IOSv Software (VIOS-ADVENTERPRISEK9-M), Version 15.6(3)M2, RELEASE SOFTWARE (fc2)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2017 by Cisco Systems, Inc.
Compiled Wed 29-Mar-17 14:05 by prod_rel_team
Configuration register = 0x0
Peer (slot: 0) information is not available because it is in 'DISABLED' state
'''}
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
redundancy_obj = ShowRedundancy(device=self.dev1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = redundancy_obj.parse()
def test_golden_iosv(self):
self.maxDiff = None
self.dev_iosv = Mock(**self.golden_output_iosv)
redundancy_obj = ShowRedundancy(device=self.dev_iosv)
parsed_output = redundancy_obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_iosv)
class TestShowInventory(unittest.TestCase):
dev1 = Device(name='empty')
dev_iosv = Device(name='iosv')
empty_output = {'execute.return_value': ''}
golden_parsed_output_iosv = {
'main': {
'chassis': {
'IOSv': {
'descr': 'IOSv chassis, Hw Serial#: 9K66Z7TOKAACDEQA24N7S, Hw Revision: 1.0',
'name': 'IOSv',
'pid': 'IOSv',
'sn': '9K66Z7TOKAACDEQA24N7S',
'vid': '1.0',
},
},
},
}
golden_output_iosv = {'execute.return_value': '''\
NAME: "IOSv", DESCR: "IOSv chassis, Hw Serial#: 9K66Z7TOKAACDEQA24N7S, Hw Revision: 1.0"
PID: IOSv , VID: 1.0, SN: 9K66Z7TOKAACDEQA24N7S
'''}
golden_parsed_output_2 = {
"main": {
"chassis": {
"WS-C6504-E": {
"name": "WS-C6504-E",
"descr": "Cisco Systems Cisco 6500 4-slot Chassis System",
"pid": "WS-C6504-E",
"vid": "V01",
"sn": "FXS1712Q1R8",
}
}
},
"slot": {
"CLK-7600 1": {
"other": {
"CLK-7600 1": {
"name": "CLK-7600 1",
"descr": "OSR-7600 Clock FRU 1",
"pid": "CLK-7600",
"vid": "",
"sn": "FXS170802GL",
}
}
},
"CLK-7600 2": {
"other": {
"CLK-7600 2": {
"name": "CLK-7600 2",
"descr": "OSR-7600 Clock FRU 2",
"pid": "CLK-7600",
"vid": "",
"sn": "FXS170802GL",
}
}
},
"FAN-MOD-4HS 1": {
"other": {
"FAN-MOD-4HS 1": {
"name": "FAN-MOD-4HS 1",
"descr": "High Speed Fan Module for CISCO7604 1",
"pid": "FAN-MOD-4HS",
"vid": "V01",
"sn": "DCH170900PF",
}
}
},
"PS 1 PWR-2700-AC/4": {
"other": {
"PS 1 PWR-2700-AC/4": {
"name": "PS 1 PWR-2700-AC/4",
"descr": "2700W AC power supply for CISCO7604 1",
"pid": "PWR-2700-AC/4",
"vid": "V03",
"sn": "APS1707008Y",
}
}
},
"PS 2 PWR-2700-AC/4": {
"other": {
"PS 2 PWR-2700-AC/4": {
"name": "PS 2 PWR-2700-AC/4",
"descr": "2700W AC power supply for CISCO7604 2",
"pid": "PWR-2700-AC/4",
"vid": "V03",
"sn": "APS17070093",
}
}
},
"1": {
"rp": {
"VS-SUP2T-10G": {
"name": "1",
"descr": "VS-SUP2T-10G 5 ports Supervisor Engine 2T 10GE w/ CTS Rev. 1.5",
"pid": "VS-SUP2T-10G",
"vid": "V05",
"sn": "SAL17152N0F",
"subslot": {
"0": {
"VS-F6K-MSFC5": {
"descr": "VS-F6K-MSFC5 CPU Daughterboard Rev. 2.0",
"name": "msfc sub-module of 1",
"pid": "VS-F6K-MSFC5",
"sn": "SAL17142D06",
"vid": "",
},
"VS-F6K-PFC4": {
"descr": "VS-F6K-PFC4 Policy Feature Card 4 Rev. 2.0",
"name": "VS-F6K-PFC4 Policy Feature Card 4 EARL sub-module of 1",
"pid": "VS-F6K-PFC4",
"sn": "SAL17163901",
"vid": "V03",
},
},
"4": {
"X2-10GB-SR": {
"descr": "X2 Transceiver 10Gbase-SR Te1/4",
"name": "Transceiver Te1/4",
"pid": "X2-10GB-SR",
"sn": "ONT170202T1",
"vid": "V06 ",
}
},
"5": {
"X2-10GB-SR": {
"descr": "X2 Transceiver 10Gbase-SR Te1/5",
"name": "Transceiver Te1/5",
"pid": "X2-10GB-SR",
"sn": "ONT1702033D",
"vid": "V06 ",
}
},
},
}
}
},
"2": {
"lc": {
"WS-X6816-10GE": {
"name": "2",
"descr": "WS-X6816-10GE CEF720 16 port 10GE Rev. 2.0",
"pid": "WS-X6816-10GE",
"vid": "V02",
"sn": "SAL17152QB3",
"subslot": {
"0": {
"WS-F6K-DFC4-E": {
"descr": "WS-F6K-DFC4-E Distributed Forwarding Card 4 Rev. 1.2",
"name": "WS-F6K-DFC4-E Distributed Forwarding Card 4 EARL sub-module of 2",
"pid": "WS-F6K-DFC4-E",
"sn": "SAL171846RF",
"vid": "V02",
}
},
"1": {
"X2-10GB-SR": {
"descr": "X2 Transceiver 10Gbase-SR Te2/1",
"name": "Transceiver Te2/1",
"pid": "X2-10GB-SR",
"sn": "ONT17020338",
"vid": "V06 ",
}
},
"2": {
"X2-10GB-SR": {
"descr": "X2 Transceiver 10Gbase-SR Te2/2",
"name": "Transceiver Te2/2",
"pid": "X2-10GB-SR",
"sn": "ONT1702020H",
"vid": "V06 ",
}
},
"3": {
"X2-10GB-SR": {
"descr": "X2 Transceiver 10Gbase-SR Te2/3",
"name": "Transceiver Te2/3",
"pid": "X2-10GB-SR",
"sn": "ONT170202UU",
"vid": "V06 ",
}
},
"4": {
"X2-10GB-SR": {
"descr": "X2 Transceiver 10Gbase-SR Te2/4",
"name": "Transceiver Te2/4",
"pid": "X2-10GB-SR",
"sn": "ONT170202T5",
"vid": "V06 ",
}
},
"5": {
"X2-10GB-SR": {
"descr": "X2 Transceiver 10Gbase-SR Te2/5",
"name": "Transceiver Te2/5",
"pid": "X2-10GB-SR",
"sn": "AGA1515XZE2",
"vid": "V05 ",
}
},
"6": {
"X2-10GB-SR": {
"descr": "X2 Transceiver 10Gbase-SR Te2/6",
"name": "Transceiver Te2/6",
"pid": "X2-10GB-SR",
"sn": "FNS153920YJ",
"vid": "V06 ",
}
},
"16": {
"X2-10GB-SR": {
"descr": "X2 Transceiver 10Gbase-SR Te2/16",
"name": "Transceiver Te2/16",
"pid": "X2-10GB-SR",
"sn": "ONT170201TT",
"vid": "V06 ",
}
},
},
}
}
},
"3": {
"lc": {
"WS-X6824-SFP": {
"name": "3",
"descr": "WS-X6824-SFP CEF720 24 port 1000mb SFP Rev. 1.0",
"pid": "WS-X6824-SFP",
"vid": "V01",
"sn": "SAL17152EG9",
"subslot": {
"0": {
"WS-F6K-DFC4-A": {
"descr": "WS-F6K-DFC4-A Distributed Forwarding Card 4 Rev. 1.0",
"name": "WS-F6K-DFC4-A Distributed Forwarding Card 4 EARL sub-module of 3",
"pid": "WS-F6K-DFC4-A",
"sn": "SAL171848KL",
"vid": "V04",
}
}
},
}
}
},
"4": {
"lc": {
"WS-X6748-GE-TX": {
"name": "4",
"descr": "WS-X6748-GE-TX CEF720 48 port 10/100/1000mb Ethernet Rev. 3.4",
"pid": "WS-X6748-GE-TX",
"vid": "V04",
"sn": "SAL14017TWF",
"subslot": {
"0": {
"WS-F6700-CFC": {
"descr": "WS-F6700-CFC Centralized Forwarding Card Rev. 4.1",
"name": "WS-F6700-CFC Centralized Forwarding Card EARL sub-module of 4",
"pid": "WS-F6700-CFC",
"sn": "SAL13516QS8",
"vid": "V06",
}
}
},
}
}
},
},
}
golden_output_2 = {'execute.return_value': '''
NAME: "WS-C6504-E", DESCR: "Cisco Systems Cisco 6500 4-slot Chassis System"
PID: WS-C6504-E , VID: V01, SN: FXS1712Q1R8
NAME: "CLK-7600 1", DESCR: "OSR-7600 Clock FRU 1"
PID: CLK-7600 , VID: , SN: FXS170802GL
NAME: "CLK-7600 2", DESCR: "OSR-7600 Clock FRU 2"
PID: CLK-7600 , VID: , SN: FXS170802GL
NAME: "1", DESCR: "VS-SUP2T-10G 5 ports Supervisor Engine 2T 10GE w/ CTS Rev. 1.5"
PID: VS-SUP2T-10G , VID: V05, SN: SAL17152N0F
NAME: "msfc sub-module of 1", DESCR: "VS-F6K-MSFC5 CPU Daughterboard Rev. 2.0"
PID: VS-F6K-MSFC5 , VID: , SN: SAL17142D06
NAME: "VS-F6K-PFC4 Policy Feature Card 4 EARL sub-module of 1", DESCR: "VS-F6K-PFC4 Policy Feature Card 4 Rev. 2.0"
PID: VS-F6K-PFC4 , VID: V03, SN: SAL17163901
NAME: "Transceiver Te1/4", DESCR: "X2 Transceiver 10Gbase-SR Te1/4"
PID: X2-10GB-SR , VID: V06 , SN: ONT170202T1
NAME: "Transceiver Te1/5", DESCR: "X2 Transceiver 10Gbase-SR Te1/5"
PID: X2-10GB-SR , VID: V06 , SN: ONT1702033D
NAME: "2", DESCR: "WS-X6816-10GE CEF720 16 port 10GE Rev. 2.0"
PID: WS-X6816-10GE , VID: V02, SN: SAL17152QB3
NAME: "WS-F6K-DFC4-E Distributed Forwarding Card 4 EARL sub-module of 2", DESCR: "WS-F6K-DFC4-E Distributed Forwarding Card 4 Rev. 1.2"
PID: WS-F6K-DFC4-E , VID: V02, SN: SAL171846RF
NAME: "Transceiver Te2/1", DESCR: "X2 Transceiver 10Gbase-SR Te2/1"
PID: X2-10GB-SR , VID: V06 , SN: ONT17020338
NAME: "Transceiver Te2/2", DESCR: "X2 Transceiver 10Gbase-SR Te2/2"
PID: X2-10GB-SR , VID: V06 , SN: ONT1702020H
NAME: "Transceiver Te2/3", DESCR: "X2 Transceiver 10Gbase-SR Te2/3"
PID: X2-10GB-SR , VID: V06 , SN: ONT170202UU
NAME: "Transceiver Te2/4", DESCR: "X2 Transceiver 10Gbase-SR Te2/4"
PID: X2-10GB-SR , VID: V06 , SN: ONT170202T5
NAME: "Transceiver Te2/5", DESCR: "X2 Transceiver 10Gbase-SR Te2/5"
PID: X2-10GB-SR , VID: V05 , SN: AGA1515XZE2
NAME: "Transceiver Te2/6", DESCR: "X2 Transceiver 10Gbase-SR Te2/6"
PID: X2-10GB-SR , VID: V06 , SN: FNS153920YJ
NAME: "Transceiver Te2/16", DESCR: "X2 Transceiver 10Gbase-SR Te2/16"
PID: X2-10GB-SR , VID: V06 , SN: ONT170201TT
NAME: "3", DESCR: "WS-X6824-SFP CEF720 24 port 1000mb SFP Rev. 1.0"
PID: WS-X6824-SFP , VID: V01, SN: SAL17152EG9
NAME: "WS-F6K-DFC4-A Distributed Forwarding Card 4 EARL sub-module of 3", DESCR: "WS-F6K-DFC4-A Distributed Forwarding Card 4 Rev. 1.0"
PID: WS-F6K-DFC4-A , VID: V04, SN: SAL171848KL
NAME: "4", DESCR: "WS-X6748-GE-TX CEF720 48 port 10/100/1000mb Ethernet Rev. 3.4"
PID: WS-X6748-GE-TX , VID: V04, SN: SAL14017TWF
NAME: "WS-F6700-CFC Centralized Forwarding Card EARL sub-module of 4", DESCR: "WS-F6700-CFC Centralized Forwarding Card Rev. 4.1"
PID: WS-F6700-CFC , VID: V06, SN: SAL13516QS8
NAME: "FAN-MOD-4HS 1", DESCR: "High Speed Fan Module for CISCO7604 1"
PID: FAN-MOD-4HS , VID: V01, SN: DCH170900PF
NAME: "PS 1 PWR-2700-AC/4", DESCR: "2700W AC power supply for CISCO7604 1"
PID: PWR-2700-AC/4 , VID: V03, SN: APS1707008Y
NAME: "PS 2 PWR-2700-AC/4", | |
# Source repository: RahulSajnani/DRACO-Weakly-Supervised-Dense-Reconstruction-And-Canonicalization-of-Objects
'''
Adapted from https://github.com/weiaicunzai/blender_shapenet_render
Author: <NAME>
'''
import sys
import os
import random
import pickle
import bpy
import mathutils
import numpy as np
import json
import glob
from mathutils import Matrix, Vector
import bpy_extras
import imageio
abs_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(abs_path))
from settings import *
import settings
def clear_mesh():
"""
Clears mesh in scene and deletes from .blend
"""
# Delete mesh
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
if obj.type == 'MESH':
obj.select_set(True)
bpy.ops.object.delete()
# Delete mesh from .blend file
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
def scene_setting_init(use_gpu):
"""
initialize blender setting configurations
"""
sce = bpy.context.scene.name
bpy.data.scenes[sce].render.engine = g_engine_type
bpy.data.scenes[sce].cycles.film_transparent = True
#output
# bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode
# bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth
# bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format
#dimensions
bpy.data.scenes[sce].render.resolution_x = g_resolution_x
bpy.data.scenes[sce].render.resolution_y = g_resolution_y
bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage
if use_gpu:
bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
bpy.data.scenes[sce].render.tile_y = g_hilbert_spiral
bpy.data.scenes[sce].cycles.device = 'GPU'
scene = bpy.context.scene
scene.cycles.device = 'GPU'
def node_setting_init():
"""
node settings for render rgb images
mainly for compositing the background images
https://blender.stackexchange.com/questions/180355/change-saturation-of-rendered-object-without-changing-the-background-image
"""
bpy.context.scene.render.use_compositing = True
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
for node in tree.nodes:
tree.nodes.remove(node)
# Creating nodes ###########################################
image_node = tree.nodes.new('CompositorNodeImage')
scale_node = tree.nodes.new('CompositorNodeScale')
alpha_over_node = tree.nodes.new('CompositorNodeAlphaOver')
render_layer_node = tree.nodes.new('CompositorNodeRLayers')
file_output_node = tree.nodes.new('CompositorNodeOutputFile')
# Nodes to fix saturation
mask_node = tree.nodes.new('CompositorNodeIDMask')
saturation_node = tree.nodes.new('CompositorNodeHueSat')
alpha_over_node_2 = tree.nodes.new('CompositorNodeAlphaOver')
nodes = {}
# Nodes for mask and depth
file_output_mask_node = tree.nodes.new('CompositorNodeOutputFile')
file_output_depth_node = tree.nodes.new('CompositorNodeOutputFile')
file_output_mask_node.format.color_mode = 'BW'
file_output_depth_node.format.color_mode = 'BW'
file_output_depth_node.format.color_depth = '16'
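# NOTE: the creation order of the nodes above matters -- set_image_path() below looks the output
# nodes up by position in the node tree (index 4 = RGB output, 8 = mask output, 9 = depth output).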
scale_node.space = g_scale_space
# Linking nodes #############################################
links.new(image_node.outputs[0], scale_node.inputs[0])
links.new(scale_node.outputs[0], alpha_over_node.inputs[1])
links.new(alpha_over_node.outputs[0], file_output_node.inputs[0])
# saturation fix
links.new(render_layer_node.outputs[0], saturation_node.inputs[0])
links.new(render_layer_node.outputs[0], alpha_over_node_2.inputs[1])
links.new(render_layer_node.outputs[3], mask_node.inputs[0])
links.new(mask_node.outputs[0], alpha_over_node_2.inputs[0])
links.new(saturation_node.outputs[0], alpha_over_node_2.inputs[2])
links.new(alpha_over_node_2.outputs[0], alpha_over_node.inputs[2])
# Depth and Mask links
links.new(render_layer_node.outputs[2], file_output_depth_node.inputs[0])
# links.new(render_layer_node.outputs[1], file_output_mask_node.inputs[0])
links.new(mask_node.outputs[0], file_output_mask_node.inputs[0])
# Setting values for nodes####################################
file_output_depth_node.format.file_format = 'OPEN_EXR'
file_output_node.file_slots[0].path = 'frame_########_Color_00.png' # blender placeholder #
file_output_depth_node.file_slots[0].path = 'frame_########_Depth_00.exr'
file_output_mask_node.file_slots[0].path = 'frame_########_Mask_00.png'
mask_node.index = 1
# hue
saturation_node.inputs[1].default_value = 0.5
# saturation
saturation_node.inputs[2].default_value = 1.3
# value factor
saturation_node.inputs[3].default_value = 2.0
saturation_node.inputs[4].default_value = 1.0
def init_all():
"""
init everything we need for rendering an image
"""
scene_setting_init(g_gpu_render_enable)
set_rendering_settings()
node_setting_init()
cam_obj = bpy.data.objects['Camera']
cam_obj.rotation_mode = g_rotation_mode
bpy.data.objects['Light'].data.energy = 0
clear_mesh()
add_light()
cam_obj = bpy.data.objects['Camera']
cam_obj.data.sensor_fit = 'VERTICAL'
# setting camera parameters for focal length ~= 617.1
cam_obj.data.lens = 50.0
cam_obj.data.sensor_width = 36.0
cam_obj.data.clip_start = 0.01
cam_obj.data.clip_end = 30
def set_image_path(new_path):
"""
set image output path to new_path
Args:
new rendered image output path
"""
file_output_node = bpy.context.scene.node_tree.nodes[4]
file_output_node.base_path = new_path
file_depth_node = bpy.context.scene.node_tree.nodes[9]
file_depth_node.base_path = new_path
file_mask_node = bpy.context.scene.node_tree.nodes[8]
file_mask_node.base_path = new_path
def camera_look_at_object(object_cam, object_target):
'''
Sets the camera quaternion automatically to look at shapenet object
'''
direction = object_target.location - object_cam.location
rot_quaternion = direction.to_track_quat('-Z', 'Y')
object_cam.rotation_quaternion = rot_quaternion
def add_light():
'''
Add scene lighting
'''
light_intensity = 2.0
# create light datablock, set attributes
light_data = bpy.data.lights.new(name="light_1", type='SUN')
light_data.energy = light_intensity
# create new object with our light datablock
light_object = bpy.data.objects.new(name="light_1", object_data=light_data)
light_object.rotation_mode = "QUATERNION"
# link light object
bpy.context.collection.objects.link(light_object)
# make it active
bpy.context.view_layer.objects.active = light_object
# light_object.data.cycles.cast_shadow = False
#change location
light_object.location = (0, 0, 10)
direction = mathutils.Vector((0.0,0.0, 0.0)) - light_object.location
light_object.rotation_quaternion = direction.to_track_quat('-Z', 'Y')
# dg = bpy.context.evaluated_depsgraph_get()
# dg.update()
light_data_2 = bpy.data.lights.new(name="light_2", type='SUN')
light_data_2.energy = light_intensity
# create new object with our light datablock
light_object_2 = bpy.data.objects.new(name="light_2", object_data=light_data_2)
# link light object
bpy.context.collection.objects.link(light_object_2)
# make it active
bpy.context.view_layer.objects.active = light_object_2
#change location
light_object_2.location = (0, 0, -10)
# Look at origin
direction_2 = mathutils.Vector((0.0, 0.0, 0.0)) - light_object_2.location
print("Lights")
print(light_object_2.rotation_quaternion)
light_object_2.rotation_mode = "QUATERNION"
light_object_2.rotation_quaternion = direction_2.to_track_quat('-Z', 'Y')
print(light_object_2.rotation_quaternion)
# light_object_2.data.cycles.cast_shadow = False
bpy.context.object.data.cycles.cast_shadow = False
# update scene, if needed
dg = bpy.context.evaluated_depsgraph_get()
dg.update()
def get_calibration_matrix_K_from_blender(camd):
'''
https://blender.stackexchange.com/questions/38009/3x4-camera-matrix-from-blender-camera
Function to obtain camera intrinsics
'''
# camd.sensor_fit = "VERTICAL"
f_in_mm = camd.lens
scene = bpy.context.scene
resolution_x_in_px = scene.render.resolution_x
resolution_y_in_px = scene.render.resolution_y
scale = scene.render.resolution_percentage / 100
sensor_width_in_mm = camd.sensor_width
sensor_height_in_mm = camd.sensor_height
pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
if (camd.sensor_fit == 'VERTICAL'):
# print('vertical')
# the sensor height is fixed (sensor fit is vertical),
# the sensor width is effectively changed with the pixel aspect ratio
s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
s_v = resolution_y_in_px * scale / sensor_height_in_mm
else: # 'HORIZONTAL' and 'AUTO'
# the sensor width is fixed (sensor fit is horizontal),
# the sensor height is effectively changed with the pixel aspect ratio
pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
s_u = resolution_x_in_px * scale / sensor_width_in_mm
s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm
# Parameters of intrinsic calibration matrix K
alpha_u = f_in_mm * s_u
alpha_v = f_in_mm * s_v
u_0 = resolution_x_in_px * scale / 2
v_0 = resolution_y_in_px * scale / 2
skew = 0 # only use rectangular pixels
K = Matrix(
((alpha_u, skew, u_0),
( 0 , alpha_v, v_0),
( 0 , 0, 1 )))
return K
def get_camera_intrinsics(cam_obj):
'''
https://www.rojtberg.net/1601/from-blender-to-opencv-camera-and-back/
'''
# get the relevant data
cam = cam_obj.data
scene = bpy.context.scene
# assume image is not scaled
assert scene.render.resolution_percentage == 100
# assume angles describe the horizontal field of view
assert cam.sensor_fit != 'VERTICAL'
f_in_mm = cam.lens
sensor_width_in_mm = cam.sensor_width
# print(f_in_mm, sensor_width_in_mm)
w = scene.render.resolution_x
h = scene.render.resolution_y
pixel_aspect = scene.render.pixel_aspect_y / scene.render.pixel_aspect_x
f_x = f_in_mm / sensor_width_in_mm * w
f_y = f_x * pixel_aspect
# yes, shift_x is inverted.
c_x = w * (0.5 - cam.shift_x)
# and shift_y is still a percentage of width..
c_y = h * 0.5 + w * cam.shift_y
K = np.array([[f_x, 0, c_x],
[0, f_y, c_y],
[0, 0, 1]])
return K
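# Editor's note: init_all() sets the camera's sensor_fit to 'VERTICAL', which the assert above
# rejects, so get_calibration_matrix_K_from_blender() is the variant compatible with this script's
# camera setup. Usage sketch with illustrative numbers (lens = 50 mm and sensor_width = 36 mm as
# in init_all(); a 640 px wide render is assumed here, not taken from settings.py):
#
#   cam_obj = bpy.data.objects['Camera']
#   K = get_calibration_matrix_K_from_blender(cam_obj.data)   # 3x3 intrinsics matrix
#   # alpha_u = f_x = 50 / 36 * 640 ~= 888.9 px for the assumed resolution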
def set_camera_extrinsics(cam_obj, car_obj, location):
'''
Sets camera location and look at car_obj
'''
cam_obj.location = location
camera_look_at_object(cam_obj, car_obj)
def set_rendering_settings():
'''
Sets rendering settings for background
'''
bpy.context.scene.render.film_transparent = True
# bpy.context.scene.render.image_settings.color_mode = 'RGBA'
bpy.context.scene.render.resolution_percentage = 100
cam_obj = bpy.data.objects['Camera']
bpy.context.scene.camera = cam_obj
bpy.context.scene.cycles.samples = 250
bpy.context.scene.frame_end = 1
# bpy.context.scene.use_denoising = True
bpy.data.worlds["World"].node_tree.nodes["Background"].inputs[0].default_value = (1, 1, 1, 1)
bpy.data.worlds["World"].node_tree.nodes["Background"].inputs[1].default_value = 0.02
bpy.context.scene.view_settings.look = 'Medium Contrast'
bpy.context.scene.render.image_settings.color_depth = '16'
bpy.context.scene.cycles.max_bounces = 4
bpy.context.scene.cycles.caustics_reflective = False
bpy.context.scene.cycles.caustics_refractive = False
bpy.context.scene.cycles.sample_clamp_indirect = 0
bpy.context.scene.view_layers["View Layer"].use_pass_object_index = True
bpy.context.scene.view_layers["View Layer"].use_pass_z = True
bpy.context.scene.cycles.device = 'GPU'
def gen_helical_path(num_images):
'''
Function to generate helical path around object
'''
highest_z = 1 + 3*random.random()
base_radius = 1 + 0.5*np.random.random()
u = np.linspace( -highest_z*np.pi, highest_z*np.pi, num_images)
radius = base_radius + 5 * np.abs(np.sin(u)) #+ 0.5*np.random.random(num_images)
x = radius * (np.cos(u))
y = radius * (np.sin(u))
z = u / np.pi
return x, y, z
# plot(x,y,z,'r');
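# Usage sketch (illustrative, not the original driver code): the helical coordinates are meant to
# be consumed together with set_camera_extrinsics(); the way the target object is obtained below
# is an assumption.
#
#   cam_obj = bpy.data.objects['Camera']
#   target = bpy.context.selected_objects[0]          # assumed handle to the imported model
#   xs, ys, zs = gen_helical_path(num_images=30)
#   for frame, location in enumerate(zip(xs, ys, zs)):
#       set_camera_extrinsics(cam_obj, target, mathutils.Vector(location))
#       bpy.context.scene.frame_set(frame)
#       bpy.ops.render.render(write_still=False)      # the compositor file-output nodes save the frames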
def fix_shapenet_lighting(category_object):
'''
Fix lighting and material issues
'''
category_object.data.use_auto_smooth = False
category_object.modifiers.data.modifiers.new(name = "edgesplit",type="EDGE_SPLIT")
category_object.pass_index = 1.0
for material in list(category_object.data.materials):
# Roughness
material.node_tree.nodes["Principled BSDF"].inputs[7].default_value = 0.6
# Specular
material.node_tree.nodes["Principled BSDF"].inputs[5].default_value = 0
# Metallic
material.node_tree.nodes["Principled BSDF"].inputs[4].default_value = 0.2
def convert_extrinsics_2_unity(camera_json):
'''
Convert camera extrinsics from blender to unity
https://gist.github.com/piranha771/e97c773fc050bc6387d36a080c4dd132
NOTE: this conversion has not been thoroughly tested.
'''
camera_json_unity = camera_json.copy()
cam_position = camera_json['position']
cam_rotation = camera_json['rotation']
camera_json_unity['position']['x'] = -cam_position['x']
camera_json_unity['position']['y'] = cam_position['z']
camera_json_unity['position']['z'] = cam_position['y']
camera_json_unity['rotation']['z'] = -cam_rotation['y']
camera_json_unity['rotation']['y'] = cam_rotation['z']
return camera_json_unity
def project_by_object_utils(cam, point):
'''
https://blender.stackexchange.com/questions/38009/3x4-camera-matrix-from-blender-camera
Projects 3D point to image plane and returns its location
'''
scene = bpy.context.scene
co_2d = bpy_extras.object_utils.world_to_camera_view(scene, cam, point)
render_scale = scene.render.resolution_percentage / 100
render_size = (
int(scene.render.resolution_x * render_scale),
int(scene.render.resolution_y * render_scale),
)
return Vector((co_2d.x * render_size[0], render_size[1] - co_2d.y * render_size[1]))
def get_3x4_RT_matrix_from_blender(cam):
'''
https://blender.stackexchange.com/questions/38009/3x4-camera-matrix-from-blender-camera
'''
# bcam stands for blender camera
R_bcam2cv = Matrix(
((1, 0, 0),
(0, -1, 0),
(0, 0, -1)))
# Transpose since the rotation is object rotation,
# and we want coordinate rotation
# R_world2bcam = cam.rotation_euler.to_matrix().transposed()
# T_world2bcam = -1*R_world2bcam * location
#
# Use matrix_world instead to account for all constraints
location, rotation = cam.matrix_world.decompose()[0:2]
R_world2bcam = rotation.to_matrix().transposed()
# Convert camera location to translation vector used in coordinate changes
# T_world2bcam = -1*R_world2bcam*cam.location
# Use location from matrix_world to account for constraints:
T_world2bcam = -1*R_world2bcam @ location
# Build the coordinate transform matrix from world to computer vision camera
# NOTE: Use * instead of
"""
Module for managing container and VM images
.. versionadded:: 2014.7.0
"""
import logging
import os
import pprint
import shlex
import uuid
import salt.syspaths
import salt.utils.kickstart
import salt.utils.path
import salt.utils.preseed
import salt.utils.stringutils
import salt.utils.validate.path
import salt.utils.yast
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
CMD_MAP = {
"yum": ("yum", "rpm"),
"deb": ("debootstrap",),
"pacman": ("pacman",),
}
EPEL_URL = (
"http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm"
)
def __virtual__():
"""
By default, this will be available on all platforms; but not all distros
will necessarily be supported
"""
return True
def bootstrap(
platform,
root,
img_format="dir",
fs_format="ext2",
fs_opts=None,
arch=None,
flavor=None,
repo_url=None,
static_qemu=None,
img_size=None,
mount_dir=None,
pkg_cache=None,
pkgs=None,
exclude_pkgs=None,
epel_url=EPEL_URL,
):
"""
Create an image for a specific platform.
Please note that this function *MUST* be run as root, as images that are
created make files belonging to root.
platform
Which platform to use to create the image. Currently supported platforms
are rpm, deb and pacman.
root
Local path to create the root of the image filesystem.
img_format
Which format to create the image in. By default, just copies files into
a directory on the local filesystem (``dir``). Future support will exist
for ``sparse``.
fs_format
When using a non-``dir`` ``img_format``, which filesystem to format the
image to. By default, ``ext2``.
fs_opts
When using a non-``dir`` ``img_format``, a dict of opts may be
specified.
arch
Architecture to install packages for, if supported by the underlying
bootstrap tool. Currently only used for deb.
flavor
Which flavor of operating system to install. This correlates to a
specific directory on the distribution repositories. For instance,
``wheezy`` on Debian.
repo_url
Mainly important for Debian-based repos. Base URL for the mirror to
install from. (e.x.: http://ftp.debian.org/debian/)
static_qemu
Local path to the static qemu binary required for this arch.
(e.x.: /usr/bin/qemu-amd64-static)
pkg_confs
The location of the conf files to copy into the image, to point the
installer to the right repos and configuration.
img_size
If img_format is not ``dir``, then the size of the image must be
specified.
mount_dir
If img_format is not ``dir``, then the image must be mounted somewhere.
If the ``mount_dir`` is not specified, then it will be created at
``/opt/salt-genesis.<random_uuid>``. This directory will be unmounted
and removed when the process is finished.
pkg_cache
This points to a directory containing a cache of package files to be
copied to the image. It does not need to be specified.
pkgs
A list of packages to be installed on this image. For RedHat, this
will include ``yum``, ``centos-release`` and ``iputils`` by default.
exclude_pkgs
A list of packages to be excluded. If you do not want to install the
defaults, you need to include them in this list.
epel_url
The URL to download the EPEL release package from.
CLI Examples:
.. code-block:: bash
salt myminion genesis.bootstrap pacman /root/arch
salt myminion genesis.bootstrap rpm /root/redhat
salt myminion genesis.bootstrap deb /root/wheezy arch=amd64 \
flavor=wheezy static_qemu=/usr/bin/qemu-x86_64-static
"""
if img_format not in ("dir", "sparse"):
raise SaltInvocationError('The img_format must be "sparse" or "dir"')
if img_format == "dir":
# We can just use the root as the root
if not __salt__["file.directory_exists"](root):
try:
__salt__["file.mkdir"](root)
except Exception as exc: # pylint: disable=broad-except
return {"Error": salt.utils.stringutils.to_unicode(pprint.pformat(exc))}
elif img_format == "sparse":
if not img_size:
raise SaltInvocationError("An img_size must be specified for a sparse file")
if not mount_dir:
mount_dir = "/opt/salt-genesis.{}".format(uuid.uuid4())
__salt__["file.mkdir"](mount_dir, "root", "root", "755")
__salt__["cmd.run"](("fallocate", "-l", img_size, root), python_shell=False)
_mkpart(root, fs_format, fs_opts, mount_dir)
loop1 = __salt__["cmd.run"]("losetup -f")
log.debug("First loop device is %s", loop1)
__salt__["cmd.run"]("losetup {} {}".format(loop1, root))
loop2 = __salt__["cmd.run"]("losetup -f")
log.debug("Second loop device is %s", loop2)
start = str(2048 * 2048)
__salt__["cmd.run"]("losetup -o {} {} {}".format(start, loop2, loop1))
__salt__["mount.mount"](mount_dir, loop2)
_populate_cache(platform, pkg_cache, mount_dir)
if mount_dir:
root = mount_dir
if pkgs is None:
pkgs = []
if exclude_pkgs is None:
exclude_pkgs = []
if platform in ("rpm", "yum"):
_bootstrap_yum(
root, pkgs=pkgs, exclude_pkgs=exclude_pkgs, epel_url=epel_url,
)
elif platform == "deb":
_bootstrap_deb(
root,
arch=arch,
flavor=flavor,
repo_url=repo_url,
static_qemu=static_qemu,
pkgs=pkgs,
exclude_pkgs=exclude_pkgs,
)
elif platform == "pacman":
_bootstrap_pacman(
root, img_format=img_format, pkgs=pkgs, exclude_pkgs=exclude_pkgs,
)
if img_format != "dir":
blkinfo = __salt__["disk.blkid"](loop2)
__salt__["file.replace"](
"{}/boot/grub/grub.cfg".format(mount_dir),
"ad4103fa-d940-47ca-8506-301d8071d467", # This seems to be the default
blkinfo[loop2]["UUID"],
)
__salt__["mount.umount"](root)
__salt__["cmd.run"]("losetup -d {}".format(loop2))
__salt__["cmd.run"]("losetup -d {}".format(loop1))
__salt__["file.rmdir"](mount_dir)
def _mkpart(root, fs_format, fs_opts, mount_dir):
"""
Make a partition, and make it bootable
.. versionadded:: 2015.8.0
"""
__salt__["partition.mklabel"](root, "msdos")
loop1 = __salt__["cmd.run"]("losetup -f")
log.debug("First loop device is %s", loop1)
__salt__["cmd.run"]("losetup {} {}".format(loop1, root))
part_info = __salt__["partition.list"](loop1)
start = str(2048 * 2048) + "B"
end = part_info["info"]["size"]
__salt__["partition.mkpart"](loop1, "primary", start=start, end=end)
__salt__["partition.set"](loop1, "1", "boot", "on")
part_info = __salt__["partition.list"](loop1)
loop2 = __salt__["cmd.run"]("losetup -f")
log.debug("Second loop device is %s", loop2)
start = start.rstrip("B")
__salt__["cmd.run"]("losetup -o {} {} {}".format(start, loop2, loop1))
_mkfs(loop2, fs_format, fs_opts)
__salt__["mount.mount"](mount_dir, loop2)
__salt__["cmd.run"](
(
"grub-install",
"--target=i386-pc",
"--debug",
"--no-floppy",
"--modules=part_msdos linux",
"--boot-directory={}/boot".format(mount_dir),
loop1,
),
python_shell=False,
)
__salt__["mount.umount"](mount_dir)
__salt__["cmd.run"]("losetup -d {}".format(loop2))
__salt__["cmd.run"]("losetup -d {}".format(loop1))
return part_info
def _mkfs(root, fs_format, fs_opts=None):
"""
Make a filesystem using the appropriate module
.. versionadded:: 2015.8.0
"""
if fs_opts is None:
fs_opts = {}
if fs_format in ("ext2", "ext3", "ext4"):
__salt__["extfs.mkfs"](root, fs_format, **fs_opts)
elif fs_format in ("btrfs",):
__salt__["btrfs.mkfs"](root, **fs_opts)
elif fs_format in ("xfs",):
__salt__["xfs.mkfs"](root, **fs_opts)
def _populate_cache(platform, pkg_cache, mount_dir):
"""
If a ``pkg_cache`` directory is specified, then use it to populate the
disk image.
"""
if not pkg_cache:
return
if not os.path.isdir(pkg_cache):
return
if platform == "pacman":
cache_dir = "{}/var/cache/pacman/pkg".format(mount_dir)
__salt__["file.mkdir"](cache_dir, "root", "root", "755")
__salt__["file.copy"](pkg_cache, cache_dir, recurse=True, remove_existing=True)
def _bootstrap_yum(
root, pkg_confs="/etc/yum*", pkgs=None, exclude_pkgs=None, epel_url=EPEL_URL,
):
"""
Bootstrap an image using the yum tools
root
The root of the image to install to. Will be created as a directory if
it does not exist. (e.x.: /root/arch)
pkg_confs
The location of the conf files to copy into the image, to point yum
to the right repos and configuration.
pkgs
A list of packages to be installed on this image. For RedHat, this
will include ``yum``, ``centos-release`` and ``iputils`` by default.
exclude_pkgs
A list of packages to be excluded. If you do not want to install the
defaults, you need to include them in this list.
epel_url
The URL to download the EPEL release package from.
TODO: Set up a pre-install overlay, to copy files into /etc/ and so on,
which are required for the install to work.
"""
if pkgs is None:
pkgs = []
elif isinstance(pkgs, str):
pkgs = pkgs.split(",")
default_pkgs = ("yum", "centos-release", "iputils")
for pkg in default_pkgs:
if pkg not in pkgs:
pkgs.append(pkg)
if exclude_pkgs is None:
exclude_pkgs = []
elif isinstance(exclude_pkgs, str):
exclude_pkgs = exclude_pkgs.split(",")
for pkg in exclude_pkgs:
if pkg in pkgs:  # skip silently if an excluded package is not in the install list
    pkgs.remove(pkg)
_make_nodes(root)
release_files = [rf for rf in os.listdir("/etc") if rf.endswith("release")]
__salt__["cmd.run"](
"cp /etc/resolv/conf {rfs} {root}/etc".format(
root=shlex.quote(root), rfs=" ".join(release_files)
)
)
__salt__["cmd.run"](
"cp -r {rfs} {root}/etc".format(
root=shlex.quote(root), rfs=" ".join(release_files)
)
)
__salt__["cmd.run"](
"cp -r {confs} {root}/etc".format(
root=shlex.quote(root), confs=shlex.quote(pkg_confs)
)
)
yum_args = [
"yum",
"install",
"--installroot={}".format(shlex.quote(root)),
"-y",
] + pkgs
__salt__["cmd.run"](yum_args, python_shell=False)
if "epel-release" not in exclude_pkgs:
__salt__["cmd.run"](
("rpm", "--root={}".format(shlex.quote(root)), "-Uvh", epel_url),
python_shell=False,
)
def _bootstrap_deb(
root, arch, flavor, repo_url=None, static_qemu=None, pkgs=None, exclude_pkgs=None,
):
"""
Bootstrap an image using the Debian tools
root
The root of the image to install to. Will be created as a directory if
it does not exist. (e.x.: /root/wheezy)
arch
Architecture of the target image. (e.x.: amd64)
flavor
Flavor of Debian to install. (e.x.: wheezy)
repo_url
Base URL for the mirror to install from.
(e.x.: http://ftp.debian.org/debian/)
static_qemu
Local path to the static qemu binary required for this arch.
(e.x.: /usr/bin/qemu-amd64-static)
pkgs
A list of packages to be installed on this image.
exclude_pkgs
A list of packages to be excluded.
"""
if repo_url is None:
repo_url = "http://ftp.debian.org/debian/"
if not salt.utils.path.which("debootstrap"):
log.error("Required tool debootstrap is not installed.")
return False
if static_qemu and not salt.utils.validate.path.is_executable(static_qemu):
log.error("Required tool qemu not present/readable at: %s", static_qemu)
return False
if isinstance(pkgs, (list, tuple)):
pkgs = ",".join(pkgs)
if isinstance(exclude_pkgs, (list, tuple)):
exclude_pkgs = ",".join(exclude_pkgs)
deb_args = ["debootstrap", "--foreign", "--arch", shlex.quote(arch)]
if pkgs:
deb_args += ["--include", shlex.quote(pkgs)]
if exclude_pkgs:
deb_args += ["--exclude", shlex.quote(exclude_pkgs)]
deb_args += [
shlex.quote(flavor),
shlex.quote(root),
shlex.quote(repo_url),
]
__salt__["cmd.run"](deb_args, python_shell=False)
if static_qemu:
__salt__["cmd.run"](
"cp {qemu} {root}/usr/bin/".format(
qemu=shlex.quote(static_qemu), root=shlex.quote(root)
)
)
env = {
"DEBIAN_FRONTEND": "noninteractive",
"DEBCONF_NONINTERACTIVE_SEEN": "true",
"LC_ALL": "C",
"LANGUAGE": "C",
"LANG": "C",
"PATH": "/sbin:/bin:/usr/bin",
}
__salt__["cmd.run"](
"chroot {root} /debootstrap/debootstrap --second-stage".format(
root=shlex.quote(root)
), env=env)
from bs4 import BeautifulSoup
import requests
import os.path
import re
import pandas as pd
import numpy as np
import csv
import json
from datetime import datetime
from tqdm import tqdm
from collections import defaultdict
from collections import Counter
from IPython.core.display import HTML
import time
import nltk
import heapq
# custom libraries
from data_collection import *
from tsv_management import *
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem import PorterStemmer
########################################################################################################################################################
# search_engine.py #
# #
# library of function useful to process every document for the search, initialize the search engine, and perform the actual search #
# #
########################################################################################################################################################
##########################################################################
# #
# functions to preprocess the text to have it ready for a search #
# #
##########################################################################
def preprocessing(text):
'''
this function preprocesses a string to prepare it either for the inverted
index creation or for the search of the query
in details, here we:
- tokenize the string (with a regex tokenizer)
- convert the words to lowercase
- remove the english stopwords
- stem the words using Porter stemmer
input: string to preprocess
output: preprocessed list of words
'''
# initialize tokenizer
tokenizer = RegexpTokenizer(r'\w+')
# initialize the stemmer
porter = PorterStemmer()
# tokenize the text
word_list = tokenizer.tokenize(text)
processed_text = []
for word in word_list:
if word.lower() not in stopwords.words('english'):
stemmed_word = porter.stem(word)
processed_text.append(stemmed_word)
return(processed_text)
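# Quick usage sketch (requires the NLTK 'stopwords' corpus, e.g. via nltk.download('stopwords');
# the sentence is illustrative and the output shown is what a recent NLTK Porter stemmer produces):
#
#   >>> preprocessing("powerful demons are fighting in the city")
#   ['power', 'demon', 'fight', 'citi']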
def list_preprocessing( list_of_strings ):
'''
this function preprocesses every string in a list of strings
input: list of strings
output: None
non-explicit output: every string in the list is preprocessed and becomes a list of words
'''
for i in tqdm(range(len(list_of_strings)), desc="Preprocessing documents"):
if list_of_strings[i] is not None:
list_of_strings[i] = preprocessing(list_of_strings[i])
else:
list_of_strings[i] = []
return
##########################################################################
# #
# functions to create and manage a vocabulary that maps each word #
# of our anime descriptions to an integer number #
# #
##########################################################################
def vocabulary_generation():
'''
NOTE: there are two different tqdm progress bars called in this function
this function generates a vocabulary using all the anime descriptions
and saves everything in a json file
'''
# retrieving the descriptions of all the anime
description = column_retrieval('animeDescription')
# preprocessing every description
list_preprocessing( description )
# generating a vocabulary of words that associates every word to an integer
vocabulary = vocabulary_creation(description)
# saving the vocabulary to the disk
with open("content/vocabulary.json", 'w') as f:
json.dump(vocabulary, f)
return
def vocabulary_creation(list_of_lists):
'''
here we create a vocabulary of all words from a list of lists of words
input: a list that contains lists of words
output: dictionary that associates words to integers starting from 0
'''
# initializing the set of all the words
set_of_words = set()
for words_list in list_of_lists:
# adding the words to the set of all the words
set_of_words.update(words_list)
# initializing the vocabulary
vocabulary = {}
for i in range(len(set_of_words)):
# assigning to a random word the value i
vocabulary[ set_of_words.pop() ] = i
return( vocabulary )
def vocabulary_retrieval():
'''
this function reads the vocabulary from the disk
and returns it as a dictionary
input: None
output: vocabulary dictionary
'''
term_dict = json_loading('./content/vocabulary.json')
return(term_dict)
def json_loading(path):
'''
this function parses a json file in path and returns it
input: json file path
output: data retrieved from the json file
'''
with open(path) as json_file:
data = json.load(json_file)
return(data)
def vocabulary_inversion(vocabulary):
'''
reverses the input vocabulary
input: dictionary {key1:value1, ...}
output: dictionary {value1:key, ...}
'''
inverted_vocabulary = {value : key for (key, value) in vocabulary.items()}
return(inverted_vocabulary)
def vocabulary_conversion(words_list, vocabulary):
'''
this function converts a list of words according to a certain vocabulary
input: (list of words to convert, vocabulary)
output: list of word ids according to the vocabulary
'''
ids = []
for word in words_list:
if word in vocabulary.keys():
ids.append(vocabulary[word])
else:
ids = []
break
return(ids)
#################################################################################################################################################################################
# #############################################################################
# the following set of functions are used in the simple unranked search engine #############################################################################
# which only performs a conjunctive search on the words of the query #############################################################################
# #############################################################################
#################################################################################################################################################################################
##########################################################################
# #
# functions to create and manage the inverted index #
# #
##########################################################################
def unranked_inverted_index_creation(list_of_documents, vocabulary):
'''
this function builds an inverted index using a list of documents and a vocabulary
NOTE: for simplicity of search, every word in our inverted index will
belong to a dummy 0 document that contains every word
(our anime documents are indexed starting from 1)
NOTE: because we only consider document_ids in increasing order,
our inverted index is automatically sorted
input: (list of the (preprocessed) documents, vocabulary of terms)
output: list containing the inverted index
NOTE: inverted_index[i] will be the inverted list
associated with the word vocabulary[i]
'''
number_of_words = len(vocabulary)
# initializing the inverted index list with lists that contain 0
inverted_index = []
for i in range(number_of_words):
inverted_index.append(list([0]))
for i in tqdm(range(len(list_of_documents)), desc="Building the inverted index"):
# our documents start from 1
document_id = i+1
document = list_of_documents[i]
for word in document:
# converting the word to its id according to the vocabulary
word_id = vocabulary[word]
if document_id not in inverted_index[word_id]: # if the document id isn't already associated to the current word id
inverted_index[word_id].append(document_id) # then we add it to the corresponding list
return ( inverted_index )
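# Toy example (illustrative): with documents = [['dog', 'cat'], ['cat']] and
# vocabulary = {'dog': 0, 'cat': 1}, this function returns [[0, 1], [0, 1, 2]]:
# word 0 ('dog') appears in document 1, word 1 ('cat') in documents 1 and 2,
# and every inverted list starts with the dummy document 0.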
def unranked_inverted_index_generation():
'''
NOTE: there are three different tqdm progress bars called in this function
this function generates an inverted index using all the anime descriptions
and saves everything in a json file
'''
# retrieving the descriptions of all the anime
description = column_retrieval('animeDescription')
# preprocessing every description
list_preprocessing( description )
# retrieving the vocabulary from the disk
vocabulary = vocabulary_retrieval()
# generating an inverted index list
inverted_index = unranked_inverted_index_creation(description, vocabulary)
# saving the inverted index to the disk
with open("content/unranked_inverted_index.json", 'w') as f:
json.dump(inverted_index, f)
return
def unranked_inverted_index_retrieval():
'''
this function reads the unranked inverted index from the disk
and returns it as a list
input: None
output: inverted index list
'''
inverted_index = json_loading('./content/unranked_inverted_index.json')
return(inverted_index)
###############################################################################################
# #
# functions used to intersect two or more elements of the inverted index #
# #
###############################################################################################
def intersection_pointers(inverted_words):
'''
NOTE: this function assumes that exists a 'universal' document indexed by 0
so that the intersection will never be empty
and we won't have to do several check on the list lengths
computes the intersection on the elements of the inverted index
input: list of ordered lists of integers
output: a list containing the intersection among the elements of the input lists
NOTE: this algorithm compares the last element of every list instead of the first
so that the last element (which is the first of every list) will always be a match
'''
number_of_words = len(inverted_words)
# an array of indices that points to the last element of every list in inverted_words
pointers = list( map(lambda x: len(x) - 1, inverted_words) )
# creating output set
intersection = []
# j will the index used to navigate the elments of inverted_words
while( pointers[0] >= 0): # the algorithm stops when the first list has been scanned completely
current_element = inverted_words[0][pointers[0]] # we always start comparing the pointed element of the first list
j = 1 # with the pointed element of the second list
while( j < number_of_words): # this cycle only ends when a common element is found
# thus the need for the common 0 element
if current_element > inverted_words[j][ pointers[j] ]: # if the pointed element of this list is smaller than the current element
current_element = decrement_some_pointers(inverted_words, j, pointers) # then I decrement all the previous lists' pointers to match this one
j = 1 # and I restart the cycle from the second list
elif current_element < inverted_words[j][ pointers[j] ]: # if the pointed element of this list is bigger than the current element
j += decrement_one_pointers(inverted_words[j], current_element, pointers, j)
from math import e, pi, sqrt
from numpy import cos, sin
from numpy import arcsin as asin
from numpy import arccos as acos
from numpy import log as ln
import tensorflow as tf
import random
from OperatorChecker import *
# this is an example of how quantum operator rotations can be mapped to rotations on the bloch sphere
# this code is not fully optimized, but its intended purpose is to demonstrate how one would go about doing these calculations by hand
class RotationMatrixModel(tf.Module): # this class extends the tf module class, and it is used by operator_to_rotation to define a tf model to solve for the 3d rotation matrix corresponding to an operator
def __init__(self, **kwargs):
super().__init__(**kwargs)
# initialize the x, y, and z angles to random values
self.x = tf.Variable(random.random(), dtype='float32')
self.y = tf.Variable(random.random(), dtype='float32')
self.z = tf.Variable(random.random(), dtype='float32')
def __call__(self, initial_vector): # function that returns the initial state rotated by the matrix
matrix = [[tf.math.cos(self.x)*tf.math.cos(self.y), tf.math.cos(self.x)*tf.math.sin(self.y)*tf.math.sin(self.z) - tf.math.sin(self.x)*tf.math.cos(self.z), tf.math.cos(self.x)*tf.math.sin(self.y)*tf.math.cos(self.z) + tf.math.sin(self.x)*tf.math.sin(self.z) ],
[tf.math.sin(self.x)*tf.math.cos(self.y), tf.math.sin(self.x)*tf.math.sin(self.y)*tf.math.sin(self.z) + tf.math.cos(self.x)*tf.math.cos(self.z), tf.math.sin(self.x)*tf.math.sin(self.y)*tf.math.cos(self.z) - tf.math.cos(self.x)*tf.math.sin(self.z) ],
[-tf.math.sin(self.y), tf.math.cos(self.y)*tf.math.sin(self.z), tf.math.cos(self.y)*tf.math.cos(self.z) ]] # rotation matrix stored as a tf tensor
return tf.reshape((matrix @ initial_vector), [3, 1]) # return the matrix product of the rotation matrix and the initial state
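# Editor's note: the matrix built above is the standard Z-Y-X Euler composition
# R = Rz(self.x) @ Ry(self.y) @ Rx(self.z), so the three trainable variables are the rotation
# angles about the z, y and x axes respectively -- which is why operator_to_rotation() below
# returns them reordered as [Rot_x, Rot_y, Rot_z] = [model.z, model.y, model.x].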
class RotationConversions():
def operator_to_updated_state(self, operator, theta_init, phi_init):
'''
operator_to_updated_state(operator, theta_init, phi_init)
Description:
- this function takes a quantum operator (corresponding to a qbit gate), and the initial qbit state defined by the angles theta and phi
- theta and phi define the state based on some point on the bloch sphere in spherical coordinates
- the statevector of the qbit is defined as [cos(theta/2), sin(theta/2) * e^(i*phi)]
- the function returns the state after being acted on by the gate (in terms of the new theta and phi values)
Parameters:
- operator <type 'list'>: linear, hermitian matrix representing the quantum operator
- theta_init <type 'float'>: initial value for the theta component of the quantum state (must be between 0.0 and pi/2)
- phi_init <type 'float'>: initial value for the phi component of the quantum state (must be between 0.0 and pi/2)
Returns:
- [theta_updated, phi_updated] <type 'list'>: list storing the updated values for theta and phi after being operated on by 'operator'
Example:
>>> rc = RotationConversions()
>>> pauli_z = [[1, 0], [0, -1]] # pauli z gate
>>> print(rc.operator_to_updated_state(pauli_z, 1, 1)) # operate on the initial state of ['theta': 1, 'phi': 1]
[-1.0, 1.0]
'''
# make sure that the operator is unitary and hermitian, and throw an ArithmeticError if it is not
if (not OperatorChecker.check_hermitian(operator)):
raise ArithmeticError("Passed operator is not hermitian")
if (not OperatorChecker.check_unitary(operator)):
raise ArithmeticError("Passed operator is not unitary")
i = 1.0j # define the imaginary unit
a0 = cos(theta_init/2) # generate the initial complex statevector using the initial theta and phi angles
b0 = e**(i*phi_init) * sin(theta_init/2)
operator_a = operator[0][0] * a0 + operator[0][1] * b0 # calculate the result of operating on the statevector
operator_b = operator[1][0] * a0 + operator[1][1] * b0
phase = asin((operator_a/abs(operator_a)).imag) # calculate the relative phases of the resulting statevector
a1 = (operator_a / e**(i*phase)) # adjust by the relative phases
b1 = (operator_b / e**(i*phase))
theta1 = (-2 * (acos(a1)).real) # calculate the new theta and phi values
if (sin(theta1 / 2) == 0): # avoid divide by 0
phi1 = (-i * ln(b1 / 1E-15))
else:
phi1 = (-i * ln(b1 / sin(theta1 / 2)))
return [theta1.real.round(10), phi1.real.round(10)] # return the updated theta and phi values, rounded to 10 decimal places for readability
def operator_to_rotation(self, operator, print_optimization_loss=False, epochs=300, num_of_vectors=3):
'''
operator_to_rotation(operator, print_optimization_loss=False, epochs=300, num_of_vectors=3)
Description:
- this function takes a quantum operator (corresponding to a qbit gate)
- the function uses tensorflow to find the spacial rotations along the x, y, and z axes of the bloch sphere that corresponds to the operator acting on a qbit state state
Parameters:
- operator <type 'list'>: linear, hermitian matrix representing the quantum operator
- print_optimization_loss=False <type 'bool'>: boolean value that determines if the function will print out the loss of the tf model as it optimizes to find the spacial rotations
- epochs=300 <type: 'int'>: number of epochs that the tf model will optimize for
- num_of_vectors=3 <type 'int'>: number of quantum statevectors that the tf model will optimize for (higher means more accurate but slower, lower means less accurate but faster)
Returns:
- [RotX, RotY, RotZ] <type 'list'>: list storing the spacial rotations along each axis corresponding to the passed operator
Example:
>>> rc = RotationConversions()
>>> pauli_z = [[1, 0], [0, -1]] # pauli z gate
>>> print(rc.operator_to_rotation(pauli_z)) # solve for the spacial rotation of the pauli z gate
[0.0, 0.0, 3.14159]
'''
# make sure that the operator is unitary and hermitian, and throw an ArithmeticError if it is not
if (not OperatorChecker.check_hermitian(operator)):
raise ArithmeticError("Passed operator is not hermitian")
if (not OperatorChecker.check_unitary(operator)):
raise ArithmeticError("Passed operator is not unitary")
def generate_vector_rotations(quantity): # this function is used to generate a given number of random pair of vectors such that one is the result of operating on the other with the operator
vector_pairs = []
for i in range(quantity):
# define an arbitrary initial state for the statevector to be in, defined by the two angles on the bloch sphere (just not along any of the axes on the bloch sphere to avoid division by 0)
theta0 = random.random() * pi / 2
phi0 = random.random() * pi / 2
# calculate the new statevector in terms of the angles after applying the operator
theta1, phi1 = self.operator_to_updated_state(operator, theta0, phi0)
# get the x, y, and z coords of the original state
x0 = sin(theta0) * cos(phi0)
y0 = sin(theta0) * sin(phi0)
z0 = cos(theta0)
# get the x, y, and z coords of the updated state
x1 = sin(theta1) * cos(phi1)
y1 = sin(theta1) * sin(phi1)
z1 = cos(theta1)
vector_pairs.append([[x0, y0, z0], [x1, y1, z1]])
return vector_pairs # return the list of vector pairs
# generate the list of vector pairs that will be used to find the 3d spacial rotation that corresponds to this operator
vector_pairs = generate_vector_rotations(num_of_vectors)
initials = []
targets = []
for i in range(num_of_vectors):
initials.append(vector_pairs[i][0])
targets.append(vector_pairs[i][1])
# use the gradient decent tools in tensorflow to solve for the rotation matrix to map between the two states in 3d space
model = RotationMatrixModel() # instantiate the model that will be optimized to find the spacial rotations
def loss(target, predicted): # loss function that finds the square of the difference between the predicted and target vectors (the initial vector)
return tf.square(target[0, 0] - predicted[0, 0]) + tf.square(target[1, 0] - predicted[1, 0]) + tf.square(target[2, 0] - predicted[2, 0]) # calculate the loss by adding the errors in each dimension
def train(initials, targets, print_loss, learning_rate): # function to optimize the model using gradient descent, thus solving for the angles that produce the rotated state
with tf.GradientTape() as t:
# trainable variables are automatically tracked by GradientTape
current_loss = tf.reduce_sum([loss(tf.reshape(target, [3, 1]), model(tf.reshape(initial, [3, 1]))) for target, initial in zip(initials, targets)])
if (print_optimization_loss):
print("Loss: " + str(current_loss.numpy()))
# use GradientTape to calculate the gradients with respect to x, y, and z
dx, dy, dz = t.gradient(current_loss, [model.x, model.y, model.z])
# subtract the gradient scaled by the learning rate
model.x.assign_sub(learning_rate * dx)
model.y.assign_sub(learning_rate * dy)
model.z.assign_sub(learning_rate * dz)
# train the model to solve for the angles
if (print_optimization_loss):
print("Solving for the rotation matrix...")
for i in range(epochs): # training loop that iterates epochs times
train(initials, targets, print_optimization_loss, learning_rate=0.08) # optimise using gradient descent
# return the solution state stored in the model (Rot_x, Rot_y, Rot_z)
return [model.z.numpy(), model.y.numpy(), model.x.numpy()]
'''
# example usage
i = 1.0j
h = 1/sqrt(2)
operator0 = [ [0, -i],
[i, 0] ] # y
operator1 = [ [1, 0],
[0, -1] ] # z
operator2 = [ [0, 1],
# Source repository: Qu4tro/i3expo
#!/usr/bin/env python3
from xdg.BaseDirectory import xdg_config_home
from PIL import Image, ImageDraw
from threading import Thread
from i3expo.debounce import Debounce
from i3expo.geometry import Geometry, Dimension
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import logging
import math
import argparse
import time
import traceback
import sys
import signal
import copy
import i3ipc
import pygame
import ctypes
import configparser
global_updates_running = True
global_knowledge = {'active': -1}
i3 = i3ipc.Connection()
screenshot_lib = '/usr/share/i3expo/prtscn.so'
grab = ctypes.CDLL(screenshot_lib)
parser = argparse.ArgumentParser(
description="Display an overview of all open workspaces")
parser.add_argument("-v", "--verbose",
help="Print more program data", action='store_true')
parser.add_argument("-i", "--interval",
help="Update interval in seconds (default: 1s)")
parser.add_argument("-d", "--dedicated",
help="Launch on a dedicated workspace", action="store_true")
args = parser.parse_args()
loop_interval = 100.0
config = None
config_path = os.path.join(xdg_config_home, "i3expo", "config")
update_debounced = None
def signal_quit(signal, frame):
logging.info("Shutting down...")
pygame.display.quit()
pygame.quit()
i3.main_quit()
sys.exit(0)
def signal_reload(signal, frame):
global loop_interval
logging.info("Reloading config")
read_config()
loop_interval = config.getfloat('Daemon', 'forced_update_interval')
def signal_show(signal, frame):
global global_updates_running
logging.info("Showing UI")
if not global_updates_running:
global_updates_running = True
elif should_show_ui():
global_updates_running = False
update_debounced.reset()
source = i3.get_tree().find_focused().workspace().name
if args.dedicated:
i3.command('workspace i3expod-temporary-workspace')
ui_thread = Thread(target=show_ui, args=[source])
ui_thread.daemon = True
ui_thread.start()
def should_show_ui():
return len(global_knowledge) - 1 > 1
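# global_knowledge holds one entry per known workspace plus the 'active' key, so the UI is only
# worth showing when at least two workspaces are known.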
def get_color(raw):
return pygame.Color(raw)
def read_config():
global config
converters = {'color': get_color}
pygame.display.init()
disp_info = pygame.display.Info()
defaults = {
'Capture': {
'screenshot_width': disp_info.current_w,
'screenshot_height': disp_info.current_h,
'screenshot_offset_x': 0,
'screenshot_offset_y': 0,
'screenshot_delay': 0.2
},
'UI': {
'window_width': disp_info.current_w,
'window_height': disp_info.current_h,
'bgcolor': 'gray20',
'frame_active_color': '#5a6da4',
'frame_inactive_color': '#93afb3',
'frame_missing_color': '#ffe6d0',
'tile_missing_color': 'gray40',
'grid_x': 3,
'workspaces': 9,
'padding_percent_x': 5,
'padding_percent_y': 5,
'spacing_percent_x': 4,
'spacing_percent_y': 4,
'frame_width_px': 3,
'names_show': True,
'names_font': 'verdana', # list available fonts with pygame.font.get_fonts()
'names_fontsize': 25,
'names_color': 'white',
'highlight_percentage': 20
},
'Daemon': {
'forced_update_interval': 10.0,
'debounce_period': 1.0,
}
}
pygame.display.quit()
config = configparser.ConfigParser(
converters=converters
)
config.read_dict(defaults)
root_dir = os.path.dirname(config_path)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if os.path.exists(config_path):
config.read(config_path)
else:
with open(config_path, 'w') as f:
config.write(f)
def grab_screen():
logging.debug("Grabbing screen")
x1 = config.getint('Capture', 'screenshot_offset_x')
y1 = config.getint('Capture', 'screenshot_offset_y')
x2 = config.getint('Capture', 'screenshot_width')
y2 = config.getint('Capture', 'screenshot_height')
w, h = x2-x1, y2-y1
size = w * h
objlength = size * 3
grab.getScreen.argtypes = []
result = (ctypes.c_ubyte*objlength)()
grab.getScreen(x1, y1, w, h, result)
return (w, h, result)
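# Note: the tuple returned by grab_screen() is (width, height, raw_buffer), where
# raw_buffer is a flat ctypes array of width * height * 3 RGB bytes; it is
# consumed by process_image() below.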
def process_image(raw_img):
pil = Image.frombuffer(
'RGB', (raw_img[0], raw_img[1]), raw_img[2], 'raw', 'RGB', 0, 1)
return pygame.image.fromstring(pil.tobytes(), pil.size, pil.mode)
def update_workspace(workspace):
# logging.debug("Update workspace %s", workspace.num)
if workspace.num not in global_knowledge.keys():
global_knowledge[workspace.num] = {
'name': None,
'screenshot': None,
'windows': {},
'last_update': 0,
'state': 0
}
global_knowledge[workspace.num]['name'] = workspace.name
global_knowledge['active'] = workspace.num
def init_knowledge():
for workspace in i3.get_tree().workspaces():
update_workspace(workspace)
def on_workspace(i3, e):
global global_updates_running, loop_interval
# global_updates_running = True
update_state(i3, rate_limit_period=loop_interval, force=True)
def tree_hash(workspace):
state = 0
for con in workspace.leaves():
f = 31 if con.focused else 0 # so focus change can be detected
state += con.id % (con.rect.x + con.rect.y + con.rect.width + con.rect.height + f)
logging.debug("Tree hash for workspace %s: %s", workspace.num, state)
return state
def tree_has_changed(workspace):
state = tree_hash(workspace)
if global_knowledge[workspace.num]['state'] == state:
return False
else:
global_knowledge[workspace.num]['state'] = state
return True
def should_update(rate_limit_period, current_workspace, force):
if not global_updates_running:
return False
elif rate_limit_period is not None and time.time() - global_knowledge[current_workspace.num]['last_update'] <= rate_limit_period:
return False
elif force:
update_debounced.reset()
tree_has_changed(current_workspace)
return True
elif not tree_has_changed(current_workspace):
return False
return True
def update_state(i3, e=None, rate_limit_period=None, force=False):
# Prevent the screenshot from being taken too fast and capturing
# the still-unchanged workspace instead of the new one
time.sleep(config.getfloat('Capture', 'screenshot_delay'))
global last_update
root = i3.get_tree()
current_workspace = root.find_focused().workspace()
update_workspace(current_workspace)
if should_update(rate_limit_period, current_workspace, force):
logging.debug("Update state for workspace %s", current_workspace.num)
workspaces = [w.num for w in root.workspaces()]
deleted = []
for num in global_knowledge:
if type(num) is int and num not in workspaces:
deleted.append(num)
for num in deleted:
del global_knowledge[num]
global_knowledge[current_workspace.num]['screenshot'] = grab_screen()
global_knowledge[current_workspace.num]['last_update'] = time.time()
def get_hovered_frame(mpos, frames):
for frame in frames:
if mpos[0] > frame['ul'].x \
and mpos[0] < frame['br'].x \
and mpos[1] > frame['ul'].y \
and mpos[1] < frame['br'].y:
return frame['ws_num']
return None
def show_ui(source):
global global_updates_running
try:
window_width = config.getint('UI', 'window_width')
window_height = config.getint('UI', 'window_height')
pygame.display.init()
pygame.font.init()
screen = pygame.display.set_mode(
(window_width, window_height), pygame.FULLSCREEN)
pygame.display.set_caption('i3expo')
geometry = init_geometry(screen)
tiles = init_tiles(screen)
draw_tiles(screen, tiles, geometry)
pygame.display.flip()
input_loop(screen, source, tiles, geometry.grid.x)
except Exception:
logging.exception("Failed to show UI")
finally:
logging.info("Closing UI")
pygame.display.quit()
pygame.display.init() # Allows for faster launching
global_updates_running = True
def input_loop(screen, source, tiles, columns):
running = True
use_mouse = True
selected_id = 0
while running:
if global_updates_running:
logging.info("Global updates is running")
break
if not pygame.display.get_init():
logging.info("Display is not initialised")
break
jump = False
kbdmove = (0, 0)
for event in pygame.event.get():
if event.type == pygame.QUIT:
logging.info("Received pygame.QUIT")
running = False
elif event.type == pygame.MOUSEMOTION:
use_mouse = True
elif event.type == pygame.KEYDOWN:
use_mouse = False
if event.key == pygame.K_UP or event.key == pygame.K_k:
kbdmove = (0, -1)
if event.key == pygame.K_DOWN or event.key == pygame.K_j:
kbdmove = (0, 1)
if event.key == pygame.K_LEFT or event.key == pygame.K_h:
kbdmove = (-1, 0)
if event.key == pygame.K_RIGHT or event.key == pygame.K_l:
kbdmove = (1, 0)
if event.key == pygame.K_RETURN:
jump = True
if event.key == pygame.K_ESCAPE:
logging.debug("ESCAPE key pressed")
running = False
pygame.event.clear()
break
elif event.type == pygame.MOUSEBUTTONUP:
use_mouse = True
if event.button == 1:
jump = True
pygame.event.clear()
break
if use_mouse:
mpos = pygame.mouse.get_pos()
active_frame = get_hovered_frame(mpos, tiles)
logging.debug("Mouse selected: %s", active_frame)
elif kbdmove != (0, 0):
if kbdmove[0] != 0:
selected_id += kbdmove[0]
elif kbdmove[1] != 0:
selected_id += kbdmove[1] * columns
if selected_id >= len(tiles):
selected_id -= len(tiles)
elif selected_id < 0:
selected_id += len(tiles)
active_frame = tiles[selected_id]['ws_num']
logging.debug("Keyboard selected: %s", active_frame)
if jump:
if active_frame in global_knowledge.keys():
logging.info('Switching to workspace %s', active_frame)
i3.command(f'workspace number {active_frame}')
break
elif not running and args.dedicated:
logging.info('Exiting expo and switching to workspace %s', source)
i3.command('workspace ' + source)
for tile in tiles:
if tile['active'] and not tile['ws_num'] == active_frame:
screen.blit(tile['mouseoff'], (tile['ul'].x, tile['ul'].y))
tile['active'] = False
if not tile['active'] and tile['ws_num'] == active_frame:
screen.blit(tile['mouseon'], (tile['ul'].x, tile['ul'].y))
tile['active'] = True
pygame.display.update()
pygame.time.wait(25)
def init_geometry(screen):
g = Geometry()
workspaces = config.getint('UI', 'workspaces')
max_grid_x = config.getint('UI', 'grid_x')
padding_x = config.getint('UI', 'padding_percent_x')
padding_y = config.getint('UI', 'padding_percent_y')
spacing_x = config.getint('UI', 'spacing_percent_x')
spacing_y = config.getint('UI', 'spacing_percent_y')
frame_width = config.getint('UI', 'frame_width_px')
g.total.x = screen.get_width()
g.total.y = screen.get_height()
logging.debug('total_x=%s total_y=%s', g.total.x, g.total.y)
n_workspaces = min(workspaces, len(global_knowledge) - 1)
g.grid.x = min(max_grid_x, n_workspaces)
g.grid.y = math.ceil(n_workspaces / max_grid_x)
logging.debug('grid_x=%s grid_y=%s', g.grid.x, g.grid.y)
g.pad.x = round(g.total.x * padding_x / 100)
g.pad.y = round(g.total.y * padding_y / 100)
logging.debug('pad_x=%s pad_y=%s', g.pad.x, g.pad.y)
g.space.x = round(g.total.x * spacing_x / 100)
g.space.y = round(g.total.y * spacing_y / 100)
logging.debug('space_x=%s space_y=%s', g.space.x, g.space.y)
g.outer.x = round(
(g.total.x - 2 * g.pad.x - g.space.x * (g.grid.x - 1)) / g.grid.x)
g.outer.y = round(
(g.total.y - 2 * g.pad.y - g.space.y * (g.grid.y - 1)) / g.grid.y)
logging.debug('shot_outer_x=%s shot_outer_y=%s', g.outer.x, g.outer.y)
g.offset.x = g.outer.x + g.space.x
g.offset.y = g.outer.y + g.space.y
logging.debug('offset_delta_x=%s offset_delta_y=%s',
g.offset.x, g.offset.y)
g.inner.x = g.outer.x - 2 * frame_width
g.inner.y = g.outer.y - 2 * frame_width
g.pad.x = max(g.pad.x, (g.total.x - g.space.x *
(g.grid.x - 1) - g.outer.x * g.grid.x) / 2)
g.pad.y = max(g.pad.y, (g.total.y - g.space.y *
(g.grid.y - 1) - g.outer.y * g.grid.y) / 2)
g.frame = frame_width
return g
def autosize_image(g, image):
result = Dimension()
offset = Dimension()
image_size = image.get_rect().size
image_dim = Dimension(image_size[0], image_size[1])
ratio = g.inner / image_dim
if ratio.x < ratio.y:
result.set(g.inner.x, round(ratio.x * image_dim.y))
offset.set(0, round((g.inner.y - result.y) / 2))
else:
result.set(round(ratio.y * image_dim.x), g.inner.y)
offset.set(round((g.inner.x - result.x) / 2), 0)
resized = pygame.transform.smoothscale(image, (result.x, result.y))
return (resized, result, offset)
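# Worked example for autosize_image() with illustrative numbers: fitting a
# 1920x1080 screenshot into a 300x200 inner tile gives ratios
# (300/1920, 200/1080) = (0.156, 0.185); the smaller x-ratio wins, so the image
# is scaled to 300x169 and centred vertically with an offset of (0, 16).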
def draw_tiles(screen, tiles, g):
highlight_percentage = config.getint('UI', 'highlight_percentage')
bgcolor = config.getcolor('UI', 'bgcolor')
screen.fill(bgcolor)
for idx, t in enumerate(tiles):
x = math.floor(idx % g.grid.x)
y = math.floor(idx / g.grid.x)
origin = Dimension(
g.pad.x + g.offset.x * x,
g.pad.y + g.offset.y * y
)
result = Dimension()
offset = Dimension()
if t['screenshot']:
t['img'] = process_image(t['screenshot'])
(image, result, offset) = autosize_image(g, t['img'])
t['ul'] = origin + g.frame + offset
t['br'] = origin + g.frame + offset + result
screen.fill(t['frame'],
(
origin.x + offset.x,
origin.y + offset.y,
result.x + g.frame * 2,
result.y + g.frame * 2,
))
if t['tile']:
screen.fill(t['tile'],
(
origin.x + g.frame + offset.x,
origin.y + g.frame + offset.y,
result.x,
result.y,
))
screen.blit(image, (origin.x + g.frame + offset.x,
origin.y + g.frame + offset.y))
mouseoff = screen.subsurface(
(origin.x + g.frame + offset.x, origin.y +
g.frame + offset.y, result.x, result.y)
)
51], # Was too bright yellow
[166, 86, 40],
[247, 129, 191],
[153, 153, 153],
])/255.
# Steal Kelly's colors from https://gist.github.com/ollieglass/f6ddd781eeae1d24e391265432297538, removing
# black: '222222', off-white: 'F2F3F4', mid-grey: '848482',
kellycolors = ['F3C300', '875692', 'F38400', 'A1CAF1', 'BE0032', 'C2B280', '008856', 'E68FAC', '0067A5', 'F99379', '604E97', 'F6A600', 'B3446C', 'DCD300', '882D17', '8DB600', '654522', 'E25822', '2B3D26']
for c,color in enumerate(kellycolors):
kellycolors[c] = list(hex2rgb(color))
kellycolors = np.array(kellycolors)
if basis == 'colorbrewer' and ncolors<=len(colorbrewercolors):
colors = colorbrewercolors[:ncolors]
elif basis == 'kelly' and ncolors<=len(kellycolors):
colors = kellycolors[:ncolors]
else: # Too many colors, calculate instead
## Calculate sliding limits if none provided
if limits is None:
colorrange = 1-1/float(ncolors**0.5)
limits = [0.5-colorrange/2, 0.5+colorrange/2]
## Calculate primitives and dot locations
primitive = np.linspace(limits[0], limits[1], nsteps) # Define primitive color vector
x, y, z = np.meshgrid(primitive, primitive, primitive) # Create grid of all possible points
dots = np.transpose(np.array([x.flatten(), y.flatten(), z.flatten()])) # Flatten into an array of dots
ndots = nsteps**3 # Calculate the number of dots
## Start from the colorbrewer colors
if basis=='colorbrewer' or basis=='kelly':
indices = [] # Initialize the array
if basis == 'colorbrewer': basiscolors = colorbrewercolors
elif basis == 'kelly': basiscolors = kellycolors
for color in basiscolors:
rgbdistances = dots - color # Calculate the distance in RGB space
totaldistances = np.linalg.norm(rgbdistances,axis=1)
closest = np.argmin(totaldistances)
indices.append(closest)
else:
indices = [0]
## Calculate the distances
for pt in range(ncolors-len(indices)): # Loop over each point
totaldistances = np.inf+np.zeros(ndots) # Initialize distances
for ind in indices: # Loop over each existing point
rgbdistances = dots - dots[ind] # Calculate the distance in RGB space
totaldistances = np.minimum(totaldistances, np.linalg.norm(rgbdistances,axis=1)) # Calculate the minimum Euclidean distance
maxindex = np.argmax(totaldistances) # Find the point that maximizes the minimum distance
indices.append(maxindex) # Append this index
colors = dots[indices,:]
## Wrap up -- turn color array into a list, or reverse
if hueshift: colors = shifthue(colors, hueshift=hueshift) # Shift hue if requested
output = _processcolors(colors=colors, asarray=asarray, ashex=ashex, reverse=reverse)
## For plotting -- optional
if demo:
ax = scatter3d(colors[:,0], colors[:,1], colors[:,2], c=output, s=200, depthshade=False, lw=0, figkwargs={'facecolor':'w'})
ax.set_xlabel('Red', fontweight='bold')
ax.set_ylabel('Green', fontweight='bold')
ax.set_zlabel('Blue', fontweight='bold')
ax.set_xlim((0,1))
ax.set_ylim((0,1))
ax.set_zlim((0,1))
return output
def midpointnorm(vcenter=0, vmin=None, vmax=None):
'''
Alias to Matplotlib's TwoSlopeNorm. Used to place the center of the colormap
somewhere other than the center of the data.
Args:
vcenter (float): the center of the colormap (0 by default)
vmin (float): the minimum of the colormap
vmax (float): the maximum of the colormap
**Example**::
data = pl.rand(10,10) - 0.2
pl.pcolor(data, cmap='bi', norm=sc.midpointnorm())
New in version 1.2.0.
'''
from matplotlib.colors import TwoSlopeNorm
norm = TwoSlopeNorm(vcenter=vcenter, vmin=vmin, vmax=vmax)
return norm
def colormapdemo(cmap=None, n=None, smoothing=None, randseed=None, doshow=True):
'''
Demonstrate a color map using simulated elevation data, shown in both 2D and
3D. The argument can be either a colormap itself or a string describing a
colormap.
**Examples**::
sc.colormapdemo('inferno') # Use a registered Matplotlib colormap
sc.colormapdemo('parula') # Use a registered Sciris colormap
sc.colormapdemo(sc.alpinecolormap(), n=200, smoothing=20, randseed=2942) # Use a colormap object
Version: 2019aug22
'''
# Set data
if n is None: n = 100
if smoothing is None: smoothing = 40
if randseed is None: randseed = 8
if cmap is None: cmap = 'parula' # For no particular reason
maxheight = 1
horizontalsize = 4
pl.seed(randseed)
kernel = np.array([0.25,0.5,0.25])
data = pl.randn(n,n)
for s in range(smoothing): # Quick-and-dirty-and-slow smoothing
for i in range(n): data[:,i] = np.convolve(data[:,i],kernel,mode='same')
for i in range(n): data[i,:] = np.convolve(data[i,:],kernel,mode='same')
data -= data.min()
data /= data.max()
data *= maxheight
# Plot in 2D
fig1 = pl.figure(figsize=(10,8))
X = np.linspace(0,horizontalsize,n)
pcl = pl.pcolor(X, X, data, cmap=cmap, linewidth=0, antialiased=False, shading='auto')
cb2 = fig1.colorbar(pcl)
cb2.set_label('Height (km)',horizontalalignment='right', labelpad=50)
pl.xlabel('Position (km)')
pl.ylabel('Position (km)')
if doshow:
pl.show()
# Plot in 3D
fig2,ax2 = fig3d(returnax=True, figsize=(18,8))
ax2.view_init(elev=45, azim=30)
X = np.linspace(0,horizontalsize,n)
X, Y = np.meshgrid(X, X)
surf = ax2.plot_surface(X, Y, data, rstride=1, cstride=1, cmap=cmap, linewidth=0, antialiased=False)
cb = fig2.colorbar(surf)
cb.set_label('Height (km)',horizontalalignment='right', labelpad=50)
pl.xlabel('Position (km)')
pl.ylabel('Position (km)')
if doshow:
pl.show()
return {'2d':fig1, '3d':fig2}
##############################################################################
#%% Colormaps
##############################################################################
__all__ += ['alpinecolormap', 'bicolormap', 'parulacolormap', 'turbocolormap', 'bandedcolormap', 'orangebluecolormap']
def alpinecolormap(apply=False):
"""
This function generates a map based on ascending height. Based on data from
Kazakhstan.
**Test case**::
sc.colormapdemo('alpine')
**Usage example**::
import sciris as sc
import pylab as pl
pl.imshow(pl.randn(20,20), interpolation='none', cmap=sc.alpinecolormap())
Version: 2014aug06
"""
# Set parameters
water = np.array([3,18,59])/256.
desert = np.array([194,175,160*0.6])/256.
forest1 = np.array([61,86,46])/256.
forest2 = np.array([61,86,46])/256.*1.2
rock = np.array([119,111,109])/256.*1.3
snow = np.array([243,239,238])/256.
breaks = [0.0,0.5,0.7,0.8,0.9,1.0]
# Create dictionary
cdict = {'red': ((breaks[0], water[0], water[0]),
(breaks[1], desert[0], desert[0]),
(breaks[2], forest1[0], forest1[0]),
(breaks[3], forest2[0], forest2[0]),
(breaks[4], rock[0], rock[0]),
(breaks[5], snow[0], snow[0])),
'green': ((breaks[0], water[1], water[1]),
(breaks[1], desert[1], desert[1]),
(breaks[2], forest1[1], forest1[1]),
(breaks[3], forest2[1], forest2[1]),
(breaks[4], rock[1], rock[1]),
(breaks[5], snow[1], snow[1])),
'blue': ((breaks[0], water[2], water[2]),
(breaks[1], desert[2], desert[2]),
(breaks[2], forest1[2], forest1[2]),
(breaks[3], forest2[2], forest2[2]),
(breaks[4], rock[2], rock[2]),
(breaks[5], snow[2], snow[2]))}
# Make map
cmap = mplc.LinearSegmentedColormap('alpine', cdict, 256)
if apply:
pl.set_cmap(cmap)
return cmap
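# Note on the cdict format used above (and in bicolormap() below): for each
# channel, LinearSegmentedColormap expects (breakpoint, value_below, value_above)
# tuples, where value_below is the channel value approaching the breakpoint from
# the left and value_above the value leaving it to the right. Repeating the same
# number gives a continuous ramp; different numbers create a hard step.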
def bicolormap(gap=0.1, mingreen=0.2, redbluemix=0.5, epsilon=0.01, demo=False, apply=False):
"""
This function generates a two-color map: blue for negative and red for
positive changes, with grey in the middle. The input argument sets how much
of a color gap there is between the red scale and the blue one.
Args:
gap: sets how big of a gap between red and blue color scales there is (0=no gap; 1=pure red and pure blue)
mingreen: how much green to include at the extremes of the red-blue color scale
redbluemix: how much red to mix with the blue and vice versa at the extremes of the scale
epsilon: what fraction of the colormap to make gray in the middle
**Examples**::
bicolormap(gap=0,mingreen=0,redbluemix=1,epsilon=0) # From pure red to pure blue with white in the middle
bicolormap(gap=0,mingreen=0,redbluemix=0,epsilon=0.1) # Red -> yellow -> gray -> turquoise -> blue
bicolormap(gap=0.3,mingreen=0.2,redbluemix=0,epsilon=0.01) # Red and blue with a sharp distinction between them
Version: 2013sep13
"""
mng=mingreen; # Minimum amount of green to add into the colors
mix=redbluemix; # How much red to mix with the blue and vice versa
eps=epsilon; # How much of the center of the colormap to make gray
omg=1-gap # omg = one minus gap
cdict = {'red': ((0.00000, 0.0, 0.0),
(0.5-eps, mix, omg),
(0.50000, omg, omg),
(0.5+eps, omg, 1.0),
(1.00000, 1.0, 1.0)),
'green': ((0.00000, mng, mng),
(0.5-eps, omg, omg),
(0.50000, omg, omg),
(0.5+eps, omg, omg),
(1.00000, mng, mng)),
'blue': ((0.00000, 1.0, 1.0),
(0.5-eps, 1.0, omg),
(0.50000, omg, omg),
(0.5+eps, omg, mix),
(1.00000, 0.0, 0.0))}
cmap = mplc.LinearSegmentedColormap('bi',cdict,256)
if apply:
pl.set_cmap(cmap)
def demoplot(): # pragma: no cover
from pylab import figure, subplot, imshow, colorbar, rand, show
maps=[]
maps.append(bicolormap()) # Default ,should work for most things
maps.append(bicolormap(gap=0,mingreen=0,redbluemix=1,epsilon=0)) # From pure red to pure blue with white in the middle
maps.append(bicolormap(gap=0,mingreen=0,redbluemix=0,epsilon=0.1)) # Red -> yellow -> gray -> turquoise -> blue
maps.append(bicolormap(gap=0.3,mingreen=0.2,redbluemix=0,epsilon=0.01)) # Red and blue with a sharp distinction between them
nexamples=len(maps)
figure(figsize=(5*nexamples,4))
for m in range(nexamples):
subplot(1,nexamples,m+1)
imshow(rand(20,20),cmap=maps[m],interpolation='none');
colorbar()
show()
if demo: demoplot()
return cmap
def parulacolormap(apply=False):
'''
Create a map similar to Viridis, but brighter. Set apply=True to use
immediately.
**Demo and example**::
cmap = sc.parulacolormap()
sc.colormapdemo(cmap=cmap)
Version: 2019aug22
'''
data = [[0.2422,0.1504,0.6603], [0.2444,0.1534,0.6728], [0.2464,0.1569,0.6847], [0.2484,0.1607,0.6961], [0.2503,0.1648,0.7071], [0.2522,0.1689,0.7179], [0.2540,0.1732,0.7286], [0.2558,0.1773,0.7393],
[0.2576,0.1814,0.7501], [0.2594,0.1854,0.7610], [0.2611,0.1893,0.7719], [0.2628,0.1932,0.7828], [0.2645,0.1972,0.7937], [0.2661,0.2011,0.8043], [0.2676,0.2052,0.8148], [0.2691,0.2094,0.8249],
[0.2704,0.2138,0.8346], [0.2717,0.2184,0.8439], [0.2729,0.2231,0.8528], [0.2740,0.2280,0.8612], [0.2749,0.2330,0.8692], [0.2758,0.2382,0.8767], [0.2766,0.2435,0.8840], [0.2774,0.2489,0.8908],
[0.2781,0.2543,0.8973], [0.2788,0.2598,0.9035], [0.2794,0.2653,0.9094], [0.2798,0.2708,0.9150], [0.2802,0.2764,0.9204], [0.2806,0.2819,0.9255], [0.2809,0.2875,0.9305], [0.2811,0.2930,0.9352],
[0.2813,0.2985,0.9397], [0.2814,0.3040,0.9441], [0.2814,0.3095,0.9483], [0.2813,0.3150,0.9524], [0.2811,0.3204,0.9563], [0.2809,0.3259,0.9600], [0.2807,0.3313,0.9636], [0.2803,0.3367,0.9670],
[0.2798,0.3421,0.9702], [0.2791,0.3475,0.9733], [0.2784,0.3529,0.9763], [0.2776,0.3583,0.9791], [0.2766,0.3638,0.9817], [0.2754,0.3693,0.9840], [0.2741,0.3748,0.9862], [0.2726,0.3804,0.9881],
[0.2710,0.3860,0.9898], [0.2691,0.3916,0.9912], [0.2670,0.3973,0.9924], [0.2647,0.4030,0.9935], [0.2621,0.4088,0.9946], [0.2591,0.4145,0.9955], [0.2556,0.4203,0.9965], [0.2517,0.4261,0.9974],
[0.2473,0.4319,0.9983], [0.2424,0.4378,0.9991], [0.2369,0.4437,0.9996], [0.2311,0.4497,0.9995], [0.2250,0.4559,0.9985], [0.2189,0.4620,0.9968], [0.2128,0.4682,0.9948], [0.2066,0.4743,0.9926],
[0.2006,0.4803,0.9906], [0.1950,0.4861,0.9887], [0.1903,0.4919,0.9867], [0.1869,0.4975,0.9844], [0.1847,0.5030,0.9819], [0.1831,0.5084,0.9793], [0.1818,0.5138,0.9766], [0.1806,0.5191,0.9738],
[0.1795,0.5244,0.9709], [0.1785,0.5296,0.9677], [0.1778,0.5349,0.9641], [0.1773,0.5401,0.9602], [0.1768,0.5452,0.9560], [0.1764,0.5504,0.9516], [0.1755,0.5554,0.9473], [0.1740,0.5605,0.9432],
[0.1716,0.5655,0.9393], [0.1686,0.5705,0.9357], [0.1649,0.5755,0.9323], [0.1610,0.5805,0.9289], [0.1573,0.5854,0.9254], [0.1540,0.5902,0.9218], [0.1513,0.5950,0.9182], [0.1492,0.5997,0.9147],
[0.1475,0.6043,0.9113], [0.1461,0.6089,0.9080], [0.1446,0.6135,0.9050], [0.1429,0.6180,0.9022], [0.1408,0.6226,0.8998], [0.1383,0.6272,0.8975], [0.1354,0.6317,0.8953], [0.1321,0.6363,0.8932],
[0.1288,0.6408,0.8910], [0.1253,0.6453,0.8887], [0.1219,0.6497,0.8862], [0.1185,0.6541,0.8834], [0.1152,0.6584,0.8804], [0.1119,0.6627,0.8770], [0.1085,0.6669,0.8734], [0.1048,0.6710,0.8695],
[0.1009,0.6750,0.8653], [0.0964,0.6789,0.8609], [0.0914,0.6828,0.8562], [0.0855,0.6865,0.8513], [0.0789,0.6902,0.8462], [0.0713,0.6938,0.8409], [0.0628,0.6972,0.8355], [0.0535,0.7006,0.8299],
[0.0433,0.7039,0.8242], [0.0328,0.7071,0.8183], [0.0234,0.7103,0.8124], [0.0155,0.7133,0.8064], [0.0091,0.7163,0.8003], [0.0046,0.7192,0.7941], [0.0019,0.7220,0.7878], [0.0009,0.7248,0.7815],
[0.0018,0.7275,0.7752], [0.0046,0.7301,0.7688], [0.0094,0.7327,0.7623], [0.0162,0.7352,0.7558], [0.0253,0.7376,0.7492], [0.0369,0.7400,0.7426], [0.0504,0.7423,0.7359], [0.0638,0.7446,0.7292],
[0.0770,0.7468,0.7224], [0.0899,0.7489,0.7156], [0.1023,0.7510,0.7088], [0.1141,0.7531,0.7019], [0.1252,0.7552,0.6950], [0.1354,0.7572,0.6881], [0.1448,0.7593,0.6812], [0.1532,0.7614,0.6741],
[0.1609,0.7635,0.6671], [0.1678,0.7656,0.6599], [0.1741,0.7678,0.6527], [0.1799,0.7699,0.6454], [0.1853,0.7721,0.6379], [0.1905,0.7743,0.6303], [0.1954,0.7765,0.6225], [0.2003,0.7787,0.6146],
[0.2061,0.7808,0.6065], [0.2118,0.7828,0.5983], [0.2178,0.7849,0.5899], [0.2244,0.7869,0.5813], [0.2318,0.7887,0.5725], [0.2401,0.7905,0.5636], [0.2491,0.7922,0.5546], [0.2589,0.7937,0.5454],
[0.2695,0.7951,0.5360], [0.2809,0.7964,0.5266], [0.2929,0.7975,0.5170], [0.3052,0.7985,0.5074], [0.3176,0.7994,0.4975],
import datetime as dtm
from profile_plot import profile_plot
import matplotlib.pyplot as plt
from matplotlib.font_manager import fontManager, FontProperties
from matplotlib import ticker, cm
import sys
import pandas as pd
import numpy as np
import os
import re
from dateutil import parser
import errno
from shutil import copyfile
import subprocess
import argparse
import textwrap
RED = (228/256., 26/256., 28/256.)
BLUE = (55/256., 126/256., 184/256.)
plt.style.use(['seaborn-paper','seaborn-colorblind'])
def process_stations(station_file):
sd=pd.read_csv(station_file,names=["id","x","y","dist_km","elev_navd","name","depth_mllw"],header=0,dtype={"id":pd.StringDtype()})
for i in range(sd.shape[0]):
station=sd["id"][i]
if station.endswith(".0"):
station = station[:-2]
sd.at[i,"id"]=station
sd=sd.set_index('id')
sd.dist_km=sd.dist_km/1000.0
return sd
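# Illustrative station file row (hypothetical values) matching the columns read
# above -- id, x, y, dist_km, elev_navd, name, depth_mllw. Note that dist_km is
# stored in metres in the file and converted to km by the division above:
#   657,576000.0,4215000.0,45000.0,-1.2,San Pablo Bay,10.5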
def process_cruise(path):
print ("process_cruise")
cruisefile = open(path,'r')
cruisetxt = cruisefile.readlines()[2:]
cruisefile.close()
cruiselines = [line.strip().split(",") for line in cruisetxt if (line != "\n")]
cruise_data = {}
for entry in cruiselines:
time = dtm.datetime.strptime("%s %s" % (entry[0],entry[1]), "%m/%d/%Y %H:%M")
station = entry[2]
if station.endswith(".0"):
station = station[:-2]
if not station in cruise_data.keys():
cruise_data[station] = ([],[],time)
depth = float(entry[3])
salinity = float(entry[4])
cruise_data[station][0].append(depth)
cruise_data[station][1].append(salinity)
for station in cruise_data.keys():
time = cruise_data[station][2]
depth = np.array(cruise_data[station][0])
salinity = np.array(cruise_data[station][1])
depthorder = np.argsort(depth)
depth = depth[depthorder]
salinity = salinity[depthorder]
cruise_data[station] = (depth,salinity,time)
return cruise_data
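# The mapping returned by process_cruise() has the form
#   cruise_data[station_id] == (depths, salinities, cast_time)
# where depths (m) and salinities (psu) are NumPy arrays sorted by increasing
# depth and cast_time is the datetime of the cast.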
def process_xyt(path,casts,base_time):
print ("process_cruise")
cruisefile = open(path,'r')
cruisetxt = cruisefile.readlines()
cruisefile.close()
cruiselines = [line.strip().split() for line in cruisetxt if (line != "\n")]
cruise_data = {}
for entry in cruiselines:
castno = int(entry[0])
salt = float(entry[1])
depth = -float(entry[2])
elapsed = 24.*3600.*float(entry[4])
time = base_time + dtm.timedelta(seconds=elapsed)
station = casts[castno][4]
if not station in cruise_data.keys():
cruise_data[station] = ([],[],time)
cruise_data[station][0].append(depth)
cruise_data[station][1].append(salt)
for station in cruise_data.keys():
time = cruise_data[station][2]
depth = np.array(cruise_data[station][0])
salinity = np.array(cruise_data[station][1])
depthorder = np.argsort(depth)
depth = depth[depthorder]
salinity = salinity[depthorder]
cruise_data[station] = (depth,salinity,time)
return cruise_data
def match_cruise(time, station, x, z, times, data):
times = np.array(times)
ndxR = np.searchsorted( times, time)
ndxL = max(ndxR - 1,0)
if not (time >= times[0] and time <= times[-1]):
raise ValueError("Time %s (in days) is not in model file spanning from %s to %s" % (time, times[0], times[-1]))
wl = (times[ndxR] - time)/(times[ndxR] - times[ndxL])
wr = 1 - wl
station_ndx = station.data_index
profile = wl*data[ndxL,:,station_ndx] + wr*data[ndxR,:,station_ndx]
xx = x[:,station_ndx]
zz = z[:,station_ndx]
ndx_farleft = max(ndxL-2, 0)
ndx_farright = min(ndxR+3, len(times))
surrounding_profiles = [(time, profile)]
for n in range(ndx_farleft,ndx_farright):
t = times[n]
vals = data[n,:,station_ndx]
surrounding_profiles.append((t, vals))
return zz, surrounding_profiles
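# Worked example of the time interpolation in match_cruise() (illustrative
# numbers): with model snapshots at times [0.0, 0.25, 0.5] days and a cast at
# 0.3 days, searchsorted gives ndxR = 2 and ndxL = 1, so
# wl = (0.5 - 0.3) / (0.5 - 0.25) = 0.8 and wr = 0.2, and the interpolated
# profile is 0.8 * data[1] + 0.2 * data[2].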
def do_depth_plot(station,cruise_data,surrounding_profiles,ax,xlabel,ylabel,add_legend = False):
profiles = []
all_lines = []
col = None
i = 0
for i,prof in enumerate(surrounding_profiles):
p = np.array(prof[1])
zz = np.array(prof[0])
p = np.ma.masked_where(np.isnan(p),p)
z_masked = np.ma.masked_where(np.isnan(p),zz)
linestyle = "solid"
if (i == 0):
col = BLUE
label = "Model"
wide = 2
else:
col = "0.55"
wide = 1
label = "Model +/- 3 hr" if label == "Model" else "_nolegend_"
linestyle = "--"
line, = ax.plot(p,z_masked,color = col, linewidth = wide, linestyle = linestyle)
i += 1
all_lines.append(line)
depth,salinity,time = cruise_data
line, = ax.plot(salinity,depth,color = RED, label = "Observed", linewidth = 2)
all_lines.append(line)
ax.set_ylim(max(z_masked),0)
min_data,max_data = ax.get_xlim()
xcenter = (min_data+max_data)/2
xrange = max_data - min_data
if xrange <8.0:
print (" > 8")
#ax.set_xlim(max(0,min_data-3.5), min(35,max_data+3.5))
if xlabel != None:
ax.set_xlabel(xlabel, size = 14)
if ylabel != None:
ax.set_ylabel('Depth (m)', size = 14)
if add_legend:
leg=ax.legend((all_lines[0],all_lines[1],all_lines[-1]),('Model','Model +/- 3 hr','Observed'),loc='lower left',\
shadow=True, fancybox=True)
ltext = leg.get_texts() # all the text.Text instance in the legend
llines = leg.get_lines() # all the lines.Line2D instance in the legend
#frame.set_facecolor('0.80') # set the frame face color to light gray
#ax.setp(ltext, fontsize='small') # the legend text fontsize
#ax.setp(llines, linewidth=1.5) # the legend linewidth
#ax.set_xlim(0,35)
def longitudinal(cruise_data,station_data,ax,context_label=None,add_labels=False,xlabel=None,xmin=None,xmax=None,max_depth=None):
print ("Longitudinal")
base_date = dtm.datetime(2017,4,18)
maxdepth = 0
stations = []
station_dists = []
bedx=[]
bed=[]
for item in cruise_data.keys():
if (station_data.loc[item].dist_km > 0.0):
#print "Station %s" % item
#print cruise_data[item]
maxdepth=max(maxdepth, max(cruise_data[item][0]))
stations.append(item)
station_dists.append(station_data.loc[item].dist_km)
bedx.append(station_data.loc[item].dist_km)
bed.append( -max(cruise_data[item][0]))
station_dists = np.array(station_dists)
stations = np.array(stations)
sorted_dists = np.argsort(station_dists)
stations = stations[sorted_dists]
station_dists = station_dists[sorted_dists]
nstation = len(station_dists)
ndepth = int(maxdepth + 1)
salt = np.ones((ndepth,nstation),dtype=float) * np.nan
zloc = np.ones((ndepth,nstation),dtype=float) * np.nan
from scipy.interpolate import griddata
for i in range(nstation):
item = stations[i]
depth,salinity,time = cruise_data[item]
salt[:,i] = griddata(depth,salinity,np.arange(ndepth,dtype=float))
if np.isnan(salt[0,i]): salt[0,i] = salt[1,i]
#zloc[0:len(salinity),i] = depth
xloc,zloc = np.meshgrid(station_dists, np.arange(ndepth,dtype=float))
im, cs, ttxt = profile_plot(xloc,zloc,salt,ax,context_label,add_labels,xlabel,xmin,xmax,max_depth)
return cs
def model_data_for_longitude(cruise_data,station_data,x, z, times, model_data, base_date):
maxdepth = 0
stations = []
station_dists = []
# todo: this is boilerplate
for item in cruise_data.keys():
if (station_data[item].dist_km > 0.0):
maxdepth=max(maxdepth, max(cruise_data[item][0]))
stations.append(item)
station_dists.append(station_data[item].dist_km)
station_dists = np.array(station_dists)
stations = np.array(stations)
sorted_dists = np.argsort(station_dists)
stations = stations[sorted_dists]
station_dists = station_dists[sorted_dists]
nstation = len(station_dists)
ndepth = int(maxdepth + 1)
long_data = {}
for station_id in stations:
cruise_profile = cruise_data[station_id]
cruise_time = cruise_profile[2]
rt = (cruise_time - base_date).total_seconds()/(24*3600)
zz,profiles = match_cruise(rt, station_data[station_id], x, z, times, model_data)
prof = profiles[0]
long_data[station_id] = (zz,prof[1],prof[0])
return long_data
def cruise_xyt(path,station_data,base_time,outfile):
print ("cruise_xyt")
cruisefile = open(path,'r')
cruisetxt = cruisefile.readlines()[2:]
cruisefile.close()
cruiselines = [line.strip().split(",") for line in cruisetxt if (line != "\n")]
cruise_locs = []
processed = []
casts = {}
for entry in cruiselines:
if len(entry) < 2: continue
time = dtm.datetime.strptime("%s %s" % (entry[0],entry[1]), "%m/%d/%Y %H:%M")
elapsed = (time - base_time).total_seconds()
station = entry[2]
if station.endswith(".0"):
station = station[:-2]
if not station in processed:
sd=station_data.loc[station]
processed.append(station)
cruise_locs.append((sd.x,sd.y,elapsed,sd.name,station))
with open(outfile,"w") as out:
out.write("Cruise cast model requests\n%s\n" % len(cruise_locs))
for i,loc in enumerate(cruise_locs):
jj = i+1
locentries = (jj,loc[0],loc[1],loc[2],loc[3])
out.write("%s %s %s %s ! %s\n" % locentries)
#out.write("%s %s %s ! %s\n" % loc)
#print (locentries)
casts[jj] = loc
return casts
def gen_profile_plot(base_date,cruise_time,survey_file,model_file,station_file,xytfile):
filename = survey_file
station_data = process_stations(station_file)
cruise_data = process_cruise(filename)
casts = cruise_xyt(filename,station_data,base_date,xytfile)
model_data = process_xyt(model_file,casts,base_date)
fig, (ax0,ax1) = plt.subplots(2,1,sharex=True)
fig.set_size_inches(10,6)
context = cruise_time.strftime("USGS: %d-%b-%Y")
longitudinal(cruise_data,station_data,ax0,context_label = context ,xmin=20,xmax=104,max_depth=30)
cs=longitudinal(model_data,station_data,ax1,context_label = "Model",add_labels=True,
xlabel="Distance from Golden Gate (km)",xmin=20,xmax=104,max_depth=30)
# shared colorbar
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
cb = fig.colorbar(cs, cax=cbar_ax,shrink=0.01)
cb.set_label("Salinity (psu)", size = 14)
plt.savefig("salinity_profile_"+cruise_time.strftime("%m_%d_%Y"),dpi=300)
#plt.show()
def main(base_date,cruise_time,obs_file,model_file,station_file,xytfile):
filename = obs_file
station_data = process_stations(station_file)
cruise_data = process_cruise(filename)
casts = cruise_xyt(filename,station_data,base_date,xytfile)
model_data = process_xyt(model_file,casts,base_date)
fig, axes = plt.subplots(2,2,sharex=True)
#x,z,times,model_data = process_data(station_data,model_outfile)
choices = ["657","649","2","3"]
#choices = ["10","13","14","15"]
nchoice = len(choices)
for ichoice in range(nchoice):
ax = axes[ichoice%2,int(ichoice/2)]
#pdb.set_trace()
choice = choices[ichoice]
cruise_profile = cruise_data[choice]
cruise_time = cruise_profile[2]
station = station_data.loc[choice]
model_profile = model_data[choice]
#ax = axes[ichoice%2,ichoice/2]
title = station.name + "(%s km) " % np.round(station.dist_km)
ax.set_title(title)
xlabel = "Salinity (psu)" if ichoice in (1,3) else None
ylabel = "Depth (m)" if ichoice in (0,1) else None
print ("ichoice: %s %s" % (ichoice,xlabel))
#add_legend = (ichoice == (nchoice - 1))
add_legend = (ichoice == 0)
surrounding_profiles = [model_profile]
do_depth_plot(station,cruise_profile, surrounding_profiles,ax,xlabel,ylabel,add_legend)
plt.show()
def gen_station_xyt(base_date,cruise_time,survey_file,station_file,xytfile):
filename = survey_file
station_data = process_stations(station_file)
cruise_data = process_cruise(filename)
casts = cruise_xyt(filename,station_data,base_date,xytfile)
def create_arg_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
prog="cruise.py",
description=textwrap.dedent(
"""
Loop over a number of USGS Polaris cruise water quality data files in a folder, read the observed
salinity data, generate a station.xyt file, and extract SCHISM model salinity from the output nc files.
Finally, plot and compare the observed and modeled transect salinity profiles along the centerline
sampled by the USGS Polaris cruise.
Inputs: SCHISM model base time, a path containing SCHISM output files and a path
containing USGS Polaris water quality data files.
Outputs: A PNG file comparing observed and modeled salinity profiles.
USGS polaris cruise data should have csv format like below:
Date,Time,Station Number,Depth,Salinity,Temperature
MM/DD/YYYY,24 hr.,,[meters],[psu],[°C]
6/22/2017,7:20,2,1,0.14,22.48
6/22/2017,7:20,2,2,0.13,22.48
6/22/2017,7:20,2,3,0.13,22.48
6/22/2017,7:20,2,4,0.13,22.48
......
Here is an example command:
python cruise.py --data_path ./ --start 04/18/2017 --schism_output_path I:\\itp\\hist_2017\\
Your system environment should include the SCHISM postprocessing tool path.
You can get help by typing $ cruise.py --help
"""))
parser.add_argument('--data_path', default=None,required=True,
help='path containing downloaded USGS cruise water quality data')
parser.add_argument('--start', type=str,required=True,
help='Starting date and time basis for SCHISM model output')
parser.add_argument('--schism_output_path', default=None,required=True,
help='path containing SCHISM output data')
return parser
if __name__== "__main__":
usgs_cruise_file_lst=[]
aug_parser = create_arg_parser()
args = aug_parser.parse_args()
data_folder=args.data_path
base_date=parser.parse(args.start)
schism_output_folder=args.schism_output_path
schism_vgrid_in=os.path.join(schism_output_folder,"vgrid.in")
if not(os.path.exists(schism_vgrid_in)):
raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), schism_vgrid_in)
schism_output_in=os.path.join(schism_output_folder,"read_output_xyt.in")
if not(os.path.exists(schism_output_in)):
raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), schism_output_in)
station_file="usgs_cruise_stations.csv"
if not(os.path.exists(os.path.join(data_folder,station_file))):
raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), os.path.join(data_folder,station_file))
# External Dependencies
from re import L
from PIL import ImageDraw, Image
from PIL.ImageOps import autocontrast
import hashlib
import math
import os
import time
# Internal file class dependencies
from . import View
from seedsigner.helpers import B, QR, Keyboard, TextEntryDisplay, mnemonic_generation
from seedsigner.models import DecodeQR, DecodeQRStatus, QRType, EncodeQR, Settings, Seed
from seedsigner.models import SeedStorage
class SeedToolsView(View):
ALPHABET = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
def __init__(self, disp, q) -> None:
View.__init__(self, disp, q)
# Gather words and seed display information
self.words = []
self.letters = []
self.possible_alphabet = []
self.possible_words = []
self.selected_possible_words_index = 0
self.seed_length = 12 # Default to 12, Valid values are 11, 12, 23 and 24
self.seed_qr_image = None
self.seed_entropy_image = None
# Dice information
self.roll_number = 1
self.dice_selected = 0
self.roll_data = ""
self.dice_seed_phrase = []
# Gather passphrase display information
self.passphrase = ""
self.pass_lower = "abcdefghijklmnopqrstuvwxyz"
self.pass_upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.pass_number = "0123456789"
self.pass_symbol = "!\"#$%&'()*+,=./;:<>?@[]|-_`~"
self.pass_letter = ""
self.pass_case_toggle = "lower"
###
### Display Gather Words Screen
###
def display_manual_seed_entry(self, num_words):
self.seed_length = num_words
self.reset()
cur_word = 1
while len(self.words) < self.seed_length:
initial_letters = ["a"]
if len(self.words) >= cur_word:
initial_letters = list(self.words[cur_word - 1]) # zero-indexed
ret_val = self.draw_seed_word_keyboard_entry(num_word=cur_word, initial_letters=initial_letters)
if ret_val == Keyboard.KEY_PREVIOUS_PAGE:
# Reload previous word
cur_word -= 1
if cur_word == 0:
# Exit this UI
return []
else:
# We've backed `cur_word` up, so re-enter loop
continue
if len(self.words) < cur_word:
self.words.append(ret_val.strip())
else:
self.words[cur_word - 1] = ret_val.strip()
cur_word += 1
return self.words
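# Illustrative usage (an assumption, not shown in this excerpt): a controller
# would collect a mnemonic from the user roughly like
#   view = SeedToolsView(disp, q)
#   words = view.display_manual_seed_entry(num_words=12)
# where disp and q are the same objects the base View constructor receives.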
def draw_seed_word_keyboard_entry(self, num_word, initial_letters=["a"]):
def render_possible_matches(highlight_word=None):
""" Internal helper method to render the KEY 1, 2, 3 word candidates.
(has access to all vars in the parent's context)
"""
# Clear the right panel
View.draw.rectangle((keyboard_width, text_entry_display_height, View.canvas_width, View.canvas_height), fill="black")
if not self.possible_words:
return
row_height = 26
x = keyboard_width + 10
y = text_entry_display_height - int(row_height / 2)
highlighted_row = 3
num_possible_rows = 11
list_starting_index = self.selected_possible_words_index - highlighted_row
word_font = View.ROBOTOCONDENSED_REGULAR_22
for row, i in enumerate(range(list_starting_index, list_starting_index + num_possible_rows)):
if i < 0:
# We're near the top of the list, not enough items to fill above the highlighted row
continue
if row == highlighted_row:
# Leave the highlighted row to be rendered below
continue
if len(self.possible_words) <= i:
break
View.draw.text((x, y + row * row_height), self.possible_words[i], fill=View.color, font=word_font)
# Render the SELECT outline
if highlight_word:
fill_color = View.color
font_color = "black"
else:
fill_color = "#111"
font_color = View.color
View.draw.rounded_rectangle((keyboard_width + 4, y + (3 * row_height) - 2, 250, y + (4 * row_height) + 2), outline=View.color, fill=fill_color, radius=5, width=1)
if self.possible_words:
word_font = View.ROBOTOCONDENSED_BOLD_24
View.draw.text((x, y + 3 * row_height), self.possible_words[self.selected_possible_words_index], fill=font_color, font=word_font)
render_possible_matches_arrows()
def render_possible_matches_arrows():
# Render the up/down arrow buttons for KEY1 and KEY3
row_height = 26
arrow_button_width = 25
arrow_padding = 5
key_x = View.canvas_width - arrow_button_width
key_y = text_entry_display_height - int(row_height / 2) + int(0.75 * row_height)
background_color = "#111"
arrow_color = View.color
if arrow_up_is_active:
background_color = View.color
arrow_color = "#111"
View.draw.rounded_rectangle((key_x, key_y, 250, key_y + row_height), outline=View.color, fill=background_color, radius=5, width=1)
View.draw.polygon(
[(key_x + int(arrow_button_width)/2 + 1, key_y + arrow_padding), # centered top point
(View.canvas_width - arrow_padding + 1, key_y + row_height - arrow_padding), # bottom right point
(key_x + arrow_padding + 1, key_y + row_height - arrow_padding)], # bottom left point
fill=arrow_color
)
background_color = "#111"
arrow_color = View.color
if arrow_down_is_active:
background_color = View.color
arrow_color = "#111"
key_y = text_entry_display_height - int(row_height / 2) + int(5.25 * row_height)
View.draw.rounded_rectangle((key_x, key_y, 250, key_y + row_height), outline=View.color, fill=background_color, radius=5, width=1)
View.draw.polygon(
[(key_x + int(arrow_button_width)/2 + 1, key_y + row_height - arrow_padding), # bottom centered point
(View.canvas_width - arrow_padding + 1, key_y + arrow_padding), # right top point
(key_x + arrow_padding + 1, key_y + arrow_padding)], # left top point
fill=arrow_color
)
# Clear the screen
View.draw.rectangle((0,0, View.canvas_width,View.canvas_height), fill="black")
self.render_previous_button()
previous_button_is_active = False
arrow_up_is_active = False
arrow_down_is_active = False
# Have to ensure that we don't carry any effects from a previous run
# TODO: This shouldn't be a member var
self.possible_alphabet = "abcdefghijklmnopqrstuvwxyz"
# Set up the keyboard params
keyboard_width = 120
text_entry_display_height = 39
# TODO: support other BIP39 languages/charsets
keyboard = Keyboard(View.draw,
charset=self.possible_alphabet,
rows=5,
cols=6,
rect=(0,text_entry_display_height + 1, keyboard_width,240),
auto_wrap=[Keyboard.WRAP_LEFT, Keyboard.WRAP_RIGHT])
# Render the top text entry display
self.letters = initial_letters
text_entry_display = TextEntryDisplay(
View.draw,
rect=(self.previous_button_width,0, View.canvas_width,text_entry_display_height),
font=View.ROBOTOCONDENSED_BOLD_26,
font_color=View.color,
cur_text=f"{num_word}: " + "".join(self.letters)
)
text_entry_display.render()
# Initialize the current matches
self.possible_words = []
if len(self.letters) > 1:
self.letters.append(" ") # "Lock in" the last letter as if KEY_PRESS
self.calc_possible_alphabet()
keyboard.update_active_keys(active_keys=self.possible_alphabet)
keyboard.set_selected_key(selected_letter=self.letters[-2])
else:
keyboard.set_selected_key(selected_letter=self.letters[-1])
keyboard.render_keys()
render_possible_matches()
View.DispShowImage()
# Start the interactive update loop
while True:
input = View.buttons.wait_for(
[B.KEY_UP, B.KEY_DOWN, B.KEY_RIGHT, B.KEY_LEFT, B.KEY_PRESS, B.KEY1, B.KEY2, B.KEY3],
check_release=True,
release_keys=[B.KEY_PRESS, B.KEY2]
)
if previous_button_is_active:
if input == B.KEY_PRESS:
# User clicked the "back" arrow
return Keyboard.KEY_PREVIOUS_PAGE
elif input == B.KEY_UP:
input = Keyboard.ENTER_BOTTOM
# Re-render it without the highlight
previous_button_is_active = False
self.render_previous_button()
elif input == B.KEY_DOWN:
input = Keyboard.ENTER_TOP
# Re-render it without the highlight
previous_button_is_active = False
self.render_previous_button()
elif input in [B.KEY_RIGHT, B.KEY_LEFT]:
# no action in this context
continue
ret_val = keyboard.update_from_input(input)
if ret_val in Keyboard.EXIT_DIRECTIONS:
self.render_previous_button(highlight=True)
previous_button_is_active = True
elif ret_val in Keyboard.ADDITIONAL_KEYS:
if input == B.KEY_PRESS and ret_val == Keyboard.KEY_BACKSPACE["code"]:
self.letters = self.letters[:-2]
self.letters.append(" ")
# Reactivate keys after deleting last letter
self.calc_possible_alphabet()
keyboard.update_active_keys(active_keys=self.possible_alphabet)
keyboard.render_keys()
# Update the right-hand possible matches area
render_possible_matches()
elif ret_val == Keyboard.KEY_BACKSPACE["code"]:
# We're just hovering over DEL but haven't clicked. Show blank (" ")
# in the live text entry display at the top.
self.letters = self.letters[:-1]
self.letters.append(" ")
# Has the user made a final selection of a candidate word?
final_selection = None
if input == B.KEY1 and self.possible_words:
# Scroll the list up
self.selected_possible_words_index -= 1
if self.selected_possible_words_index < 0:
self.selected_possible_words_index = 0
if not arrow_up_is_active:
# Flash the up arrow as selected
arrow_up_is_active = True
# Update the right-hand possible matches area
render_possible_matches()
elif input == B.KEY2:
if self.possible_words:
final_selection = self.possible_words[self.selected_possible_words_index]
elif input == B.KEY3 and self.possible_words:
# Scroll the list down
self.selected_possible_words_index += 1
if self.selected_possible_words_index >= len(self.possible_words):
self.selected_possible_words_index = len(self.possible_words) - 1
if not arrow_down_is_active:
# Flash the down arrow as selected
arrow_down_is_active = True
# Update the right-hand possible matches area
render_possible_matches()
if input is not B.KEY1 and arrow_up_is_active:
# Deactivate the arrow and redraw
arrow_up_is_active = False
render_possible_matches_arrows()
if input is not B.KEY3 and arrow_down_is_active:
# Deactivate the arrow and redraw
arrow_down_is_active = False
render_possible_matches_arrows()
if final_selection:
# Animate the selection storage, then return the word to the caller
self.letters = list(final_selection + " ")
render_possible_matches(highlight_word=final_selection)
text_entry_display.render(f"{num_word}: " + "".join(self.letters))
View.DispShowImage()
return final_selection
elif input == B.KEY_PRESS and ret_val in self.possible_alphabet:
# User has locked in the current letter
if self.letters[-1] != " ":
# We'll save that locked in letter next but for now update the
# live text entry display with blank (" ") so that we don't try
# to autocalc matches against a second copy of the letter they
# just selected. e.g. They KEY_PRESS on "s" to build "mus". If
# we advance the live block cursor AND display "s" in it, the
# current word would then be "muss" with no matches. If "mus"
# can get us to our match, we don't want it to disappear right
# as we KEY_PRESS.
self.letters.append(" ")
else:
# clicked same letter twice in a row. Because of the above, an
# immediate second click of the same letter would lock in "ap "
# (note the space) instead of "app". So we replace that trailing
# space with the correct repeated letter and then, as above,
# append a trailing blank.
self.letters = self.letters[:-1]
self.letters.append(ret_val)
self.letters.append(" ")
# Recalc and deactivate keys after advancing
self.calc_possible_alphabet()
keyboard.update_active_keys(active_keys=self.possible_alphabet)
# Update the right-hand possible matches area
render_possible_matches()
if len(self.possible_alphabet) == 1:
# If | |
import math
import cv2
import numpy as np
import scipy.ndimage.morphology as sc_morph
import skimage.color as sk_color
import skimage.exposure as sk_exposure
import skimage.feature as sk_feature
import skimage.filters as sk_filters
import skimage.morphology as sk_morphology
def rgb_to_grayscale(np_img):
"""
Convert an RGB NumPy array to a grayscale NumPy array.
Shape (h, w, c) to (h, w).
Args:
np_img: RGB Image as a NumPy array.
Returns:
Grayscale image as NumPy array with shape (h, w).
"""
# Another common RGB ratio possibility: [0.299, 0.587, 0.114]
grayscale = np.dot(np_img[..., :3], [0.2125, 0.7154, 0.0721])
grayscale = grayscale.astype(np.uint8)
return grayscale
def obtain_complement(np_img):
"""
Obtain the complement of an image as a NumPy array.
Args:
np_img: Image as a NumPy array.
Returns:
Complement image as Numpy array.
"""
return 255 - np_img
def filter_black_to_white(rgb):
r = rgb[:, :, 0] == 0
g = rgb[:, :, 1] == 0
b = rgb[:, :, 2] == 0
result = r & g & b
rgb[result] = 255
return rgb
def filter_hysteresis_threshold(np_img, low=50, high=100):
"""
Apply two-level (hysteresis) threshold to an image as a NumPy array, returning a binary image.
Args:
np_img: Image as a NumPy array.
low: Low threshold.
high: High threshold.
Returns:
uint8 NumPy array where 255 represents a pixel above the hysteresis threshold.
"""
hyst = sk_filters.apply_hysteresis_threshold(np_img, low, high)
hyst = (255 * hyst).astype(np.uint8)
return hyst
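# Illustrative sketch (not part of the original module): chaining the helpers
# above into a simple mask-building step. The input image and the threshold
# values here are assumptions chosen for demonstration only.
def example_complement_hysteresis_mask(np_rgb, low=50, high=100):
    """Grayscale an RGB image, take its complement, then apply hysteresis thresholding."""
    gray = rgb_to_grayscale(np_rgb)
    complement = obtain_complement(gray)
    return filter_hysteresis_threshold(complement, low=low, high=high)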
def filter_otsu_threshold(np_img):
"""
Compute Otsu threshold on image as a NumPy array and return binary image based on pixels above threshold.
Args:
np_img: Image as a NumPy array.
Returns:
uint8 NumPy array where 255 represents a pixel above the Otsu threshold.
"""
otsu_thresh_value = sk_filters.threshold_otsu(np_img)
otsu = np_img > otsu_thresh_value
otsu = otsu.astype(np.uint8) * 255
return otsu
def filter_local_otsu_threshold(np_img, disk_size=3):
"""
Compute local Otsu threshold for each pixel and return binary image based on pixels being less than the
local Otsu threshold.
Args:
np_img: Image as a NumPy array.
disk_size: Radius of the disk structuring element used to compute the Otsu threshold for each pixel.
Returns:
uint8 NumPy array where 255 represents a pixel whose value is below its local Otsu threshold.
"""
local_otsu = sk_filters.rank.otsu(np_img, sk_morphology.disk(disk_size))
local_otsu = (np_img < local_otsu).astype(np.uint8) * 255
return local_otsu
def filter_closing(np_img, kernel_size, iterations=1):
"""
Close an RGB image. Closing is a dilation followed by an erosion.
Closing can be used to remove small holes.
Args:
np_img: rgb image as a numpy array.
kernel_size: size of kernel which is convolved with the image (should be odd)
iterations: How many times to repeat.
"""
kernel = np.ones((kernel_size, kernel_size))
result = cv2.morphologyEx(np_img, cv2.MORPH_CLOSE, kernel, iterations=iterations)
return result
def filter_binary_closing(np_img, disk_size=3, iterations=1, output_type="uint8"):
"""
Close a binary object (bool, float, or uint8). Closing is a dilation followed by an erosion.
Closing can be used to remove small holes.
Args:
np_img: Binary image as a NumPy array.
disk_size: Radius of the disk structuring element used for closing.
iterations: How many times to repeat.
output_type: Type of array to return (bool, float, or uint8).
Returns:
NumPy array (bool, float, or uint8) following binary closing.
"""
if np_img.dtype == "uint8":
np_img = np_img / 255
result = sc_morph.binary_closing(
np_img, sk_morphology.disk(disk_size), iterations=iterations
)
if output_type == "bool":
pass
elif output_type == "float":
result = result.astype(float)
else:
result = result.astype("uint8") * 255
return result
def filter_opening(np_img, kernel_size=3, iterations=1):
"""
Open an RGB image. Opening is an erosion followed by a dilation.
Opening can be used to remove small objects.
Args:
np_img: rgb image as a numpy array.
kernel_size: size of kernel which is convolved with the image (should be odd)
iterations: How many times to repeat.
"""
kernel = np.ones((kernel_size, kernel_size))
result = cv2.morphologyEx(np_img, cv2.MORPH_OPEN, kernel, iterations=iterations)
return result
def filter_binary_opening(np_img, disk_size=3, iterations=1, output_type="uint8"):
"""
Open a binary object (bool, float, or uint8). Opening is an erosion followed by a dilation.
Opening can be used to remove small objects.
Args:
np_img: Binary image as a NumPy array.
disk_size: Radius of the disk structuring element used for opening.
iterations: How many times to repeat.
output_type: Type of array to return (bool, float, or uint8).
Returns:
NumPy array (bool, float, or uint8) following binary opening.
"""
if np_img.dtype == "uint8":
np_img = np_img / 255
result = sc_morph.binary_opening(
np_img, sk_morphology.disk(disk_size), iterations=iterations
)
if output_type == "bool":
pass
elif output_type == "float":
result = result.astype(float)
else:
result = result.astype("uint8") * 255
return result
def filter_entropy(np_img, neighborhood=9, threshold=5):
"""
Filter image based on entropy (complexity).
Args:
np_img: Image as a NumPy array.
neighborhood: Neighborhood size (defines height and width of 2D array of 1's).
threshold: Threshold value.
Returns:
uint8 NumPy array where 255 represents a pixel whose local entropy exceeds the threshold.
"""
entr = (
sk_filters.rank.entropy(np_img, np.ones((neighborhood, neighborhood))) > threshold
)
entr = entr.astype(np.uint8) * 255
return entr
def filter_canny(np_img, sigma=1, low_threshold=0, high_threshold=25):
"""
Filter image based on Canny algorithm edges.
Args:
np_img: Image as a NumPy array.
sigma: Width (std dev) of Gaussian.
low_threshold: Low hysteresis threshold value.
high_threshold: High hysteresis threshold value.
Returns:
uint8 NumPy array representing the Canny edge map (255 on edges, 0 elsewhere).
"""
can = sk_feature.canny(
np_img, sigma=sigma, low_threshold=low_threshold, high_threshold=high_threshold
)
can = can.astype(np.uint8) * 255
return can
def filter_contrast_stretch(np_img, low=40, high=60):
"""
Filter image (gray or RGB) using contrast stretching to increase contrast in image based on the intensities in
a specified range.
Args:
np_img: Image as a NumPy array (gray or RGB).
low: Range low value (0 to 255).
high: Range high value (0 to 255).
Returns:
Image as NumPy array with contrast enhanced.
"""
low_p, high_p = np.percentile(np_img, (low * 100 / 255, high * 100 / 255))
cons_stretch = sk_exposure.rescale_intensity(np_img, in_range=(low_p, high_p))
return cons_stretch
def filter_histogram_equalization(np_img, nbins=256):
"""
Filter image (gray or RGB) using histogram equalization to increase contrast in image.
Args:
np_img: Image as a NumPy array (gray or RGB).
nbins: Number of histogram bins.
Returns:
uint8 NumPy array with contrast enhanced by histogram equalization.
"""
# if uint8 type and nbins is specified, convert to float so that nbins can be a value besides 256
if np_img.dtype == np.uint8 and nbins != 256:
np_img = np_img / 255
hist_equ = sk_exposure.equalize_hist(np_img, nbins=nbins)
hist_equ = (hist_equ * 255).astype(np.uint8)
return hist_equ
def filter_adaptive_equalization(np_img, nbins=256, clip_limit=0.01):
"""
Filter image (gray or RGB) using adaptive equalization to increase contrast in image, where contrast in local regions
is enhanced.
Args:
np_img: Image as a NumPy array (gray or RGB).
nbins: Number of histogram bins.
clip_limit: Clipping limit where higher value increases contrast.
Returns:
uint8 NumPy array with contrast enhanced by adaptive equalization.
"""
adapt_equ = sk_exposure.equalize_adapthist(np_img, nbins=nbins, clip_limit=clip_limit)
adapt_equ = (adapt_equ * 255).astype(np.uint8)
return adapt_equ
def filter_local_equalization(np_img, disk_size=50):
"""
Filter image (gray) using local equalization, which uses local histograms based on the disk structuring element.
Args:
np_img: Image as a NumPy array.
disk_size: Radius of the disk structuring element used for the local histograms
Returns:
NumPy array with contrast enhanced using local equalization.
"""
local_equ = sk_filters.rank.equalize(np_img, selem=sk_morphology.disk(disk_size))
return local_equ
def filter_rgb_to_hed(np_img):
"""
Filter RGB channels to HED (Hematoxylin - Eosin - Diaminobenzidine) channels.
Args:
np_img: RGB image as a NumPy array.
Returns:
uint8 NumPy array with HED channels rescaled to the 0-255 range.
"""
hed = sk_color.rgb2hed(np_img)
hed = (sk_exposure.rescale_intensity(hed, out_range=(0, 255))).astype(np.uint8)
return hed
def filter_rgb_to_hsv(np_img):
"""
Filter RGB channels to HSV (Hue, Saturation, Value).
Args:
np_img: RGB image as a NumPy array.
Returns:
Image as NumPy array in HSV representation.
"""
hsv = sk_color.rgb2hsv(np_img)
return hsv
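# Usage sketch (not part of the original module): converting an RGB image to HSV and
# extracting the hue channel with the helpers in this file. skimage's rgb2hsv returns
# floats in [0, 1], so the hue values returned here are also in [0, 1].
def _demo_hue_channel(rgb_img):
    """Return the hue channel of an RGB image as a 2-D float array."""
    hsv = filter_rgb_to_hsv(rgb_img)
    return filter_hsv_to_h(hsv)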
def filter_hsv_to_h(hsv):
"""
Obtain hue values from an HSV image.
Args:
hsv: HSV image as a NumPy array.
Returns:
NumPy array of hue values (channel 0 of the HSV representation).
"""
h = hsv[:, :, 0]
return h
# Repository: sflis/plms
import os
import sys
import atexit
from subprocess import call
import subprocess
from multiprocessing import Process
import zmq
import time
from time import gmtime, strftime
from datetime import datetime
import inspect
import math
import collections
import pickle
from daemon import Daemon
import utils
from utils import parse, bcolors
from job import Job, job_process
from message import Message, RetMessage
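# Usage sketch (illustrative only, not part of the original module): building a PMLSconf
# tuple and constructing the server. All paths, ports, and limits below are hypothetical
# placeholders; PLMSServer writes <conf_path>/<scheduler_name>.conf on first use.
def _example_server(scheduler_name="demo", conf_path="/tmp/plms_conf"):
    conf = PLMSServer.PMLSconf(tcp_addr="127.0.0.1",
                               tcp_port="5555",
                               logs_path="/tmp/plms_logs",
                               n_proc_limit=4,
                               time_limit=3600,
                               load_state="False",
                               socket_path="/tmp")
    return PLMSServer(scheduler_name, conf_path, conf)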
#===================================================================================================
#++++++Class: Server++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#===================================================================================================
class PLMSServer(Daemon):
''' The PLMSServer (python local micro scheduler server) is a simple scheduler class that handles
job scheduling on local machines through communication
with a client.
'''
PMLSconf = collections.namedtuple("PMLSconf","tcp_addr, tcp_port, logs_path, n_proc_limit, time_limit, load_state, socket_path")
def __init__(self,
scheduler_name ,#name of the scheduler
conf_path ,#path to the configuration file
conf = None #configuration if no configuration file exists yet
):
import socket
#Getting and setting host name and scheduler name
self.host = socket.gethostname()
self.scheduler_name = scheduler_name
self.configure_file = os.path.join(conf_path, scheduler_name+".conf")
#initializing log output buffer (a string)
self.log_output = ""
self.hold_output = True
self.version_major = utils.VERSION_MAJOR
self.version_minor = utils.VERSION_MINOR
self.version_patch = utils.VERSION_PATCH
#if no configuration is given and a configuration file is found
#the configuration is read from the conf file.
if(os.path.isfile(self.configure_file) and conf == None):
self.log("Found configure file, loading configuration")
conf_file = open(self.configure_file,'r')
conf = conf_file.readlines()
socket_path = utils.parse(conf, "socket_path")
logs_path = utils.parse(conf, "logs_path")
self.n_proc_limit = int(utils.parse(conf, "n_proc_limit"))
self.proc_time_limit = int(utils.parse(conf, "proc_time_limit"))
self.tcp_addr = utils.parse(conf, "tcp_address")
self.tcp_port = utils.parse(conf, "tcp_port")
if(utils.parse(conf, "load_state") == "True" or utils.parse(conf, "load_state") == "true"):
init = True
else:
init = False
else:
if(conf == None):
print("No previous configuration found or given. Please provide PMLS configuration")
raise RuntimeError("No previous configuration found or given. Please provide PMLS configuration")
self.log("No previous configuration. Generating default configuration...")
self.n_proc_limit = conf.n_proc_limit
self.proc_time_limit = conf.time_limit
self.tcp_addr = conf.tcp_addr
self.tcp_port = conf.tcp_port
logs_path = conf.logs_path
socket_path = conf.socket_path
init = False
f = open(self.configure_file,'w')
f.write("#Micro python scheduler configuration file \n")
f.write("#This file was created automatically when the scheduler with this name was\n")
f.write("#was started for the first time. This file will be read each time the \n")
f.write("#scheduler is started and the settings will be configured from this file. \n")
f.write("tcp_address: %s\n"%conf.tcp_addr)
f.write("tcp_port: %s\n"%conf.tcp_port)
f.write("socket_path: %s\n"%conf.socket_path)
f.write("logs_path: %s\n"%conf.logs_path)
f.write("n_proc_limit: %d\n"%conf.n_proc_limit)
f.write("proc_time_limit: %d\n"%conf.time_limit)
f.write("load_state: %s\n"%conf.load_state)
#self.client_socket_name = socket_path+"/pmls_client_"+scheduler_name
#path to an ipc socket for communications with the running jobs
self.job_socket_name = socket_path+"/plms_job_"+scheduler_name+"_at_"+self.host
self.job_start_socket_name = socket_path+"/plms_job_start_"+scheduler_name+"_at_"+self.host
#path to the file which saves the state of the scheduler server when it
#shuts down.
self.statistics_file = conf_path+"/plms_stat_"+scheduler_name+".pkl"
self.client_socket_name = socket_path+"/plms_client_"+scheduler_name+"_at_"+self.host
self.default_log_path = os.path.join(logs_path,scheduler_name+'/')
utils.ensure_dir(self.default_log_path)
#Daemonizing the server
Daemon.__init__(self, '/tmp/plms_'+scheduler_name+'.pid',
stdout=conf_path+"/"+scheduler_name+".log",
stderr=conf_path+"/"+scheduler_name+".log"
)
self.queue = list()
self.jobs = list()
self.finished_jobs = list()
self.job_finish_status = list()
self.all_jobs = dict()
self.id_count = 0
self.quit = False
self.logging = True
self.commands = {'SUBMIT_JOBS' :self.command_SUBMIT_JOBS,
'CONFIGURE' :self.command_CONFIGURE,
'REMOVE_JOBS' :self.command_REMOVE_JOBS,
'STOP' :self.command_STOP,
'AVG_LOAD' :self.command_AVG_LOAD,
'PING' :self.command_PING,
'REQUEST_JOBS' :self.command_REQUEST_JOBS,
}
if(init):
self.load_state()
atexit.register(self.finish)
#___________________________________________________________________________________________________
def load_state(self):
''' Loads the state of the scheduler from a previous session.
'''
self.log("Loading scheduler state")
if(not os.path.isfile(self.statistics_file)):
self.log("No state file found...")
self.log("Failed to initiate previus scheduler state...")
return
self.log("loading previous state from %s"%self.statistics_file)
state = pickle.load(open(self.statistics_file))
self.queue = state["queue"]
self.finished_jobs = state["finished"]
self.id_count = state["id_count"]
if("all_jobs" in state.keys()):
self.all_jobs = state["all_jobs"]
#___________________________________________________________________________________________________
def init_sockets(self):
''' Initializes and binds to sockets for client-server communication and
job-server communication (ipc sockets).
'''
self.log("Initilizing sockets")
self.context = zmq.Context()
self.client_socket = self.context.socket(zmq.REP)
self.job_socket = self.context.socket(zmq.REP)
self.job_start_socket = self.context.socket(zmq.REP)
self.log("Binding to client socket: ipc://%s"%(self.client_socket_name))
self.client_socket.bind("ipc://%s"%(self.client_socket_name))
self.log("Binding to jobb socket: ipc://"+self.job_socket_name)
self.job_socket.bind("ipc://"+self.job_socket_name)
self.log("Binding to jobb start socket: ipc://"+self.job_start_socket_name)
self.job_start_socket.bind("ipc://"+self.job_start_socket_name)
#___________________________________________________________________________________________________
def command_SUBMIT_JOBS(self, msg):
''' Processes and submits a list of jobs.
'''
return_msg = RetMessage(server = self, status = "SUCCES")
ids = self.parse_job_submit_list(msg)
if(len(ids)>0):
self.log("Submited %d jobs"%len(ids))
else:
return_msg.status = "FAIL\n"
self.log("Failed to submit jobs")
return_msg.msg['job_ids'] = ids
return return_msg
#___________________________________________________________________________________________________
def command_REQUEST_QUEUE(self, msg):
pass
#___________________________________________________________________________________________________
def command_CONFIGURE(self, msg):
''' Processes configuration commands to the scheduler
server.
'''
return_msg = RetMessage(server = self,status = "SUCCES")
if(msg.opt[0] == "NPROC"):
if(msg.msg["n-proc"] != None):
self.n_proc_limit = int(msg.msg["n-proc"])
return_msg.msg['n-proc'] = self.n_proc_limit
else:
self.log("Failed to configure: unrecongnized option %s"%msg.opt[0])
return_msg.status = "FAIL\n"
return return_msg
#___________________________________________________________________________________________________
def command_REMOVE_JOBS(self, msg):
if(msg.opt[0] == "ALL"):
n = self.remove_jobs(None, msg.user)
return_msg = "SUCCESS\n" + str(n)
elif(msg.opt[0] == "LIST"):
n = self.remove_jobs(msg.msg["job_ids"], msg.user)
return_msg = "SUCCESS\n" + str(n)
else:
return_msg = "FAIL\n"
return return_msg
#___________________________________________________________________________________________________
def command_AVG_LOAD(self, msg):
f = open("/proc/loadavg",'r')
return f.read()
#___________________________________________________________________________________________________
def command_STOP(self, msg):
''' Processes the stop command message
'''
return_msg = RetMessage(server = self,status = "SUCCES")
ids = list()
#Getting job ids of the running jobs
for j in self.jobs:
ids.append(j[1].id)
#Stopping the scheduler 'NOW' terminates any running jobs
if(msg.opt[0] == "NOW"):
n = self.remove_jobs(ids, "unknown")
return_msg.msg['msg'] = "Stopping scheduler..."
return_msg.msg['job_ids'] = ids
self.log("Stopping scheduler now!")
self.quit = True
#Stopping the scheduler 'GENTLE' exits the scheduler when the last running job stops.
elif(msg.opt[0] == "GENTLE"):
return_msg.msg['msg'] = "Stopping scheduler gently..."
return_msg.msg['job_ids'] = ids
self.quit = True
self.log("Stopping scheduler gently.")
else:
return_msg.status = "FAIL"
return_msg.error = "Unknown command"
return return_msg
#___________________________________________________________________________________________________
def command_PING(self, msg):
return_msg = "SUCCESS\n"
return_msg +=self.scheduler_name+"\n"
return_msg +=self.host+"\n"
return return_msg
#___________________________________________________________________________________________________
def command_REQUEST_JOBS(self, msg):
'''Returns a message of the requested job or a list of requested jobs.
'''
return_msg = RetMessage(server = self,status = "SUCCES")
if(msg.opt == None):
return_msg.msg['jobs'] = self.all_jobs
elif(msg.opt[0] in self.all_jobs.keys()):
return_msg.msg['jobs'] = self.all_jobs[msg.opt[0]]
else:
return_msg.status = "FAIL"
return_msg.msg["error"] = "Job id %d not found"%msg.opt[0]
return return_msg
#___________________________________________________________________________________________________
def recv_commands(self):
'''
Receives requests from the client through the ipc socket
'''
# If no message is waiting, recv will throw an exception
try:
msg = self.client_socket.recv_pyobj(flags=zmq.DONTWAIT)
except:
return
self.log("Recieved command from client: %s"%msg.cmd)
if(msg.cmd in self.commands.keys()):
return_msg = self.commands[msg.cmd](msg)
else:
return_msg = "FAIL\n"
self.log("Returning message to client")
self.client_socket.send_pyobj(return_msg)
#___________________________________________________________________________________________________
def parse_job_submit_list(self, msg):
self.log("Parsing job submit list")
ids = list()
if(msg.opt[0] == 'SIMPLE'):
for j in msg.msg["cmd_list"]:
ids.append(self.add_job(j, msg.user,
self.default_log_path + str(self.id_count)+".out",
self.default_log_path + str(self.id_count)+".err",
env = msg.msg["env"],
current_dir = msg.msg["wdir"],
shell = msg.msg["shell"]))
elif(msg.opt[0] == 'SIMPLE_LOG'):
log_out_path = msg.msg["log_out_path"]
log_err_path = msg.msg["log_err_path"]
for j in msg.msg["cmd_list"]:
ids.append(self.add_job(j, msg.user,
log_out_path + str(self.id_count)+".out",
log_err_path + str(self.id_count)+".err",
env = msg.msg["env"],
current_dir = msg.msg["current_dir"],
shell = msg.msg["shell"]))
elif(msg.opt[0] == 'JOB_DESCRIPTION'):
log_out = msg.msg["outlog"]
log_err = msg.msg["errlog"]
cmd = msg.msg["executable"]
cmd += " "+msg.msg["args"]
ids.append(self.add_job(cmd, msg.user, log_out, log_err, env = msg.msg["env"], shell = msg.msg["shell"]))
return ids
#___________________________________________________________________________________________________
def log(self, msg):
'''This function provides basic log functionality for the server'''
import ntpath
frame,filename,line_number,function_name,lines,index=\
inspect.getouterframes(inspect.currentframe())[1]
s = datetime.now().strftime('%Y-%m-%d %H:%M:%S')+" %s:%d in %s : %s"%(ntpath.basename(filename),line_number,function_name,msg)
if(self.hold_output):
self.log_output += s+"\n"
else:
print(s)
#___________________________________________________________________________________________________
def add_job(self, cmd, user, log_out , log_err, env, current_dir = None, shell = False ):
job = Job(self.id_count,
cmd,
time.localtime(),
user,
log_out,
log_err,
env,
'',
current_dir,
shell)
self.queue.append(job)
self.all_jobs[self.id_count] = job
self.id_count +=1
return self.id_count-1
#___________________________________________________________________________________________________
def remove_jobs(self, ids, user):
n_jobs_removed = 0
terminated = list()
removed = list()
not_removed = list()
if(ids == None):
n_jobs_removed = len(self.queue)
for j in self.queue:
j.status = "removed"
j.end_time = time.time()
j.cpu_time = float("nan")
self.queue = list()
for j in self.jobs:
j[0].terminate()
j[1].status = "terminated"
j[1].end_time = time.time()
j[1].cpu_time = float("nan")
self.finished_jobs.append(j[1])
n_jobs_removed += 1
self.log("Removed job %d"%j[1].id)
self.jobs = list()
else:
queue = list()
for j in self.queue:
if(j.id not in ids):
queue.append(j)
else:
j.status = "removed"
j.end_time = time.time()
j.cpu_time = float("nan")
self.finished_jobs.append(j)
n_jobs_removed +=1
self.queue = queue
jobs = list()
for j in self.jobs:
if(j[1].id not in ids):
jobs.append(j)
else:
#Sending SIGTERM signal to job
j[0].terminate()
#To avoid zombie processes we also join the job process
j[0].join()
j[1].status = "terminated"
j[1].end_time = time.time()
j[1].cpu_time = float("nan")
self.finished_jobs.append(j[1])
#self.jobs.remove(j)
n_jobs_removed +=1
self.log("Removed job %d"%j[1].id)
self.jobs = jobs
return n_jobs_removed
#___________________________________________________________________________________________________
def pause_jobs(self, ids, user):
jobs2pause = list()
for j in list(self.jobs):
if(j.id in ids):
pass
#___________________________________________________________________________________________________
def shuffle_queue(self):
pass
#___________________________________________________________________________________________________
def check_jobs(self):
'''
Checks if the jobs in the job list are running and starts new jobs from the queue
when available job slots open.
'''
job_message = ""
while(True):
# If no message is waiting, recv will throw an exception
try:
message = self.job_socket.recv(flags=zmq.DONTWAIT)
except:
break
#send back an 'OK'
self.job_socket.send("OK")
lines = message.splitlines()
#print(lines)
job_id = int(lines[0])
self.job_finish_status += [(job_id,lines[1:])]
#Removing finished jobs from running list
for j in self.jobs:
for s in self.job_finish_status:
if(not j[0].is_alive() and j[1].id == s[0]):
#print(self.job_finish_status)
#print(j[1].end_time)
j[1].exit_status = int(parse(s[1], "Status"))
# Repository: SidneyCodes/textgames
#importing the time module
import time
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("\tA JOURNEY OF OPTIONS")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
coin = 0
life = 7
briefcase = []
line = ""
def intro(): #defining a function with name intro
print("What is your first name?") #user input firstname
firstname = input(">>> ")
print("What is your last name?") #user input last name
lastname = input(">>> ")
print("")
welcome(firstname,lastname) #calling the welcome function
def welcome(firstname,lastname): #defining function with arguments
print("To " + firstname +" "+lastname+", ")
print("")
print("We are pleased to inform you that you have been accepted in Hogwarts School of Wizcraft and Wizardy \U0001f600.")
time.sleep(1)
print("\nWe have sent you a list of neccessary equipments that you should carry. ")
time.sleep(1)
print("\nYou have to board the train at Station from Platform 9 3/4")
time.sleep(1)
print("\nI look forward to see you in the campus.")
time.sleep(1)
print("")
print("\nFrom : ")
time.sleep(1)
print("Deputy Headmistress")
time.sleep(1)
print("Hogwarts School of Witchcraft and Wizardry")
time.sleep(0.5)
print("")
print("Do you accept? (y/n)") #user input for choice
accept = input(">>> ")
if accept == "y": #if y go to next function
Stage1(firstname)
elif accept == "n":
Misschance(firstname)
def Misschance(firstname): #called when the player declines the offer
sure = input("Are you sure? y/n ")
if sure == "n":
Stage1(firstname)
else:
print("So you don't want to go to Hogwarts, huh? You miss a lifetime experience.")
Exit()
def Exit(): #exit function
print("Are you sure you want to exit the game? (y/n)")
exit = input(">>> ")
time.sleep(2)
if exit == 'y':
print("Come back again if you change your mind. Bye. ")
print("\U0001f600 \U0001f600 \U0001f600 \U0001f600 \U0001f600 \U0001f600") #assigning emoji
else:
intro()
def Stage1(firstname):
print("")
print("Great! Welcome aboard " + firstname + " now that you have accepted the offer, ")
time.sleep(2)
print("you need to earn coins for your school shopping.")
time.sleep(2)
print("You can get some magic coins by fulfilling different goals and missions in order to receive rewards.")
time.sleep(2)
print("These rewards can be earned by completing a Mystry test")
time.sleep(2)
print("For each correct answer you will earn 20 magic coins which you will use for your shopping.")
time.sleep(1.5)
print("Are you ready for the test? (y/n)")
accept = input(">>> ")
if accept == "y":
game1(coin)
elif accept == "n":
print("\n You cannot proceed further without taking the test?")
time.sleep(1.5)
print("So, are you ready for the test? (y/n)")
accept = input(">>> ")
game1(coin)
def game1(coin):
print("Answer the statement in true or false: ")
print ("\n In space, you cannot cry.")
choice = input(">>> ")
if choice == 'true':
coin += 20 #If correct, the user gets twenty coins
else:
coin += 0
print ("\nThere is no word that rhymes with Orange.")
choice = input(">>> ")
if choice == 'true':
coin += 20
else:
coin += 0
print ("\nA sneeze is faster than an eye blink")
choice = input(">>> ")
if choice == 'true':
coin += 20
print ("\nFingernails grow faster than hair")
choice = input(">>> ")
if choice == 'false':
coin += 20
print ("\nA rabbit eats its own poops")
choice = input(">>> ")
if choice == 'true':
coin += 20
else:
coin += 0
print ("\nBravo you have finished the test. You got " + str(coin) + " coins out of 100")
time.sleep(1)
print("Here is a wallet for you to hold the coins.")
time.sleep(2)
print(f" \n Happy Shopping! ")
print("\U0001F911 \U0001F911 \U0001F911 \U0001F911 \U0001F911 \U0001F911 ")
briefcase.append('wallet')
time.sleep (1.5)
Shop1()
def Shop1(): #to buy wand
print("\nYou travel to Diagon Alley to do your school shopping.")
time.sleep(0.8)
print("Do you know where to find your wand?")
print("")
print("A. Weasley's Shop")
print("B. Gamphor's Wands")
print("C. Ollivanders: Makers of Fine Wands since 305 B.C.")
print("")
wandshop = input("A, B, or C? ")
if wandshop == "A":
print("\nOops!! Weasley's Shop isn't in Diagon Alley \U0001F606 ")
time.sleep(3)
Shop1()
elif wandshop == "B":
print("\nGamphore's Wands isn't in Diagon Alley, Silly!")
time.sleep(3)
Shop1()
elif wandshop == "C":
print("You're headed on the right track to be a great witch or wizard.")
print("\U0001f600")
time.sleep(3)
Wand(coin)
#red is a comment
def Wand(coin):
print("Welcome to Ollivander's Wand Shop!!!")
print("")
print("Are you most:")
print("A. Kind and Generous")
print("B. Tenacious")
print("C. A good leader")
print("D. Full of life")
print("")
wood = input(">>> ")
print("")
if wood == "A":
print("You recieve an Ash Wand, 13 4/17 inches, with a Unicorn Tail Hair core.")
print("")
briefcase.append('wand')
coin += -10
print(f"You spent 10 coins on your wand. \nYou have {briefcase} in your briefcase now.")
time.sleep(1)
Shop2()
elif wood == "B":
print("You receive an Ivy Wand, 9 2/9 inches, with a Dragon Heartstring core.")
print("")
briefcase.append('wand')
print(f"You spent 10 coins on your wand . You have {briefcase} in your briefcase now.")
time.sleep(1)
Shop2()
elif wood == "C":
print("You receive a Holly Wand, 11 1/2 inches, with a Phoenix Feather core.")
print("")
briefcase.append('wand')
print(f"You spent 10 coins on your wand . You have {briefcase} in your briefcase now.")
time.sleep(1)
Shop2()
elif wood == "D":
print("You receive a Birch Wand, 10 3/4 inches, with a Dragon Heartstring core.")
print("")
time.sleep(5)
briefcase.append('wand')
print(f"You spent 10 coins on your wand . You have {briefcase} in your briefcase now.")
time.sleep(1)
Shop2()
def Shop2():
print("\nWhere do you want to go next?")
time.sleep(2)
print("A. To buy uniform")
print("B. To buy books")
print("C. Go to Platform")
shop = input(">>>")
if shop == "A":
Uniform(coin)
elif shop == "B":
print("You will get all your books at Hogwarts Library.")
Shop2()
else:
Station()
def Uniform(coin):
print("\nWelcome to Madam Malkin's Robes for All Occasions Shop.")
print("")
print("\nAre you a first year student at Hogwarts? (y/n)")
time.sleep(0.5)
choice = input(">>>")
if choice == "y":
print("Perfect! Let me get you your uniform.")
time.sleep(1.5)
print("Here it is:\nOne winter cloak.\nThree sets of plain work robes.\nOne plain pointed hat for day wear.\nOne pair of protective gloves.")
time.sleep(1)
briefcase.append('uniform')
print(f"You spent 20 coins in your uniform! Now you have {briefcase} in your briefcase.")
time.sleep(5)
Station()
else:
print("Dont lie to her! ")
Uniform(coin)
def Station():
print("\n Now you need to get onto the platform")
print("")
print("\n \t WELCOME TO PUBLIC STATION.")
time.sleep(1)
print("")
print("\nDo you know how to get to Platform 9 3/4?")
print("A. Cast a spell to reach Hogwarts")
print("B. Run at the barrier between platforms nine and ten")
print("C. Fly a car to Hogwarts.")
print("")
platform = input(">>> ")
if platform == "A":
print(f"You have not learn to cast a spell yet. Silly!")
time.sleep(1.5)
Station()
elif platform == "B":
print("Wow, that was a nice run.You make it onto the platform.")
time.sleep(1)
print("Get ready to board the train.")
time.sleep(5)
HogwartsExpress()
elif platform == "C":
print("You don't own a flying car yet. Sorry!")
time.sleep(1.5)
Station()
def HogwartsExpress():
print("\n\tHOGWARTS EXPRESS")
print("\nHogwarts Express welcomes all the students on board. Safe travels!! ")
print("\nYou sit in a compartment and make friends with a boy and a girl who call themselves Luna and Felix.")
time.sleep(2.564)
print(f"You introduce yourself and starts talking with them")
time.sleep(1.78)
print(f"\n Felix: Hello, nice meeting you. Which Hogwarts house do you want to be? ")
time.sleep(1.876)
print("\nYou answer....")
print("<NAME>, bravest of the Hogwarts Four")
print("<NAME>, fairest of the Hogwarts Four")
print("<NAME>, clever of the Hogwarts Four")
print("<NAME>, greatest of the Hogwarts four")
print("")
houses = input("A, B, C or D? >>>")
if houses == "A" or "B" or "C" or "D":
print("Luna: Sounds cool.Now I can't wait to reach Hogwarts. ")
time.sleep(2.87654)
Sortinghat()
def Sortinghat():
print("\nIn the misty moonlight, you reach the beautiful Great Hall")
time.sleep(2.456)
print("of Hogwarts for the Welcoming Feast.")
time.sleep(4)
print("The room is filled with Hogwarts students, Hogwarts Staff and Hogwarts Ghost.")
time.sleep(4)
print(".... and there you hear the voice of an man speaking.")
time.sleep(4)
print("\n<NAME>: The very best of evenings to you!")
time.sleep(3)
print("Welcome to our new students, welcome, to our old students!")
time.sleep(3)
print("Another year full of magical education awaits you.")
time.sleep(3)
print("\nLET THE SORTING HAT CEREMONY BEGIN!!!")
print("")
time.sleep(3)
print("________________________________________")
print("\nThe ceremony's purpose is to assign first years to one of the four")
print("school Houses: Gryffindor, Hufflepuff, Ravenclaw, or Slytherin.")
print("It is done through the use of the famous Hogwarts Sorting Hat. The Sorting Hat's ")
print("decision is final.")
print("_________________________________________")
time.sleep(5)
print("The Sorting Hat started calling out students names.")
time.sleep(2)
print("\n")
print("")
print("<NAME>")
print("Hat: Griffindor!")
time.sleep(3)
print("\n")
print("<NAME>")
time.sleep(2.6785)
print("Hat: Ravenclaw!")
print("")
time.sleep(1.34)
print(f"Hat: Hello there ")
time.sleep(3)
print("Do you pride yourself on your")
print("")
print("A. Benevolence")
print("B. Intelligence")
print("C. Courageousness")
print("D. Knavishness")
print("")
pride = input(">>> ")
if pride == "A":
house = "Hufflepuff!"
elif pride == "B":
house = "Ravenclaw!"
elif pride == "C":
house = "Gryffindor!"
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import time
from datetime import datetime
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from desktop.conf import TIME_ZONE
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.json_utils import JSONEncoderForHTML
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str, smart_unicode
from desktop.lib.rest.http_client import RestException
from desktop.lib.view_util import format_duration_in_millis
from desktop.log.access import access_warn
from desktop.models import Document, Document2
from hadoop.fs.hadoopfs import Hdfs
from liboozie.oozie_api import get_oozie
from liboozie.credentials import Credentials
from liboozie.submission2 import Submission
from oozie.conf import OOZIE_JOBS_COUNT, ENABLE_CRON_SCHEDULING, ENABLE_V2
from oozie.forms import RerunForm, ParameterForm, RerunCoordForm, RerunBundleForm, UpdateCoordinatorForm
from oozie.models import Workflow as OldWorkflow, Job, utc_datetime_format, Bundle, Coordinator, get_link, History as OldHistory
from oozie.models2 import History, Workflow, WORKFLOW_NODE_PROPERTIES
from oozie.settings import DJANGO_APPS
from oozie.utils import convert_to_server_timezone
def get_history():
if ENABLE_V2.get():
return History
else:
return OldHistory
def get_workflow():
if ENABLE_V2.get():
return Workflow
else:
return OldWorkflow
LOG = logging.getLogger(__name__)
"""
Permissions:
A Workflow/Coordinator/Bundle can:
* be accessed only by its owner or a superuser or by a user with 'dashboard_jobs_access' permissions
* be submitted/modified only by its owner or a superuser
Permissions checking happens by calling:
* check_job_access_permission()
* check_job_edition_permission()
"""
def _get_workflows(user):
return [{
'name': workflow.name,
'owner': workflow.owner.username,
'value': workflow.uuid,
'id': workflow.id
} for workflow in [d.content_object for d in Document.objects.get_docs(user, Document2, extra='workflow2')]
]
def manage_oozie_jobs(request, job_id, action):
if request.method != 'POST':
raise PopupException(_('Use a POST request to manage an Oozie job.'))
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
response = {'status': -1, 'data': ''}
try:
oozie_api = get_oozie(request.user)
params = None
if action == 'change':
pause_time_val = request.POST.get('pause_time')
if request.POST.get('clear_pause_time') == 'true':
pause_time_val = ''
end_time_val = request.POST.get('end_time')
if end_time_val:
end_time_val = convert_to_server_timezone(end_time_val, TIME_ZONE.get())
if pause_time_val:
pause_time_val = convert_to_server_timezone(pause_time_val, TIME_ZONE.get())
params = {'value': 'endtime=%s' % (end_time_val) + ';'
'pausetime=%s' % (pause_time_val) + ';'
'concurrency=%s' % (request.POST.get('concurrency'))}
elif action == 'ignore':
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'type': 'action',
'scope': ','.join(job.aggreate(request.POST.get('actions').split())),
}
response['data'] = oozie_api.job_control(job_id, action, parameters=params)
response['status'] = 0
if 'notification' in request.POST:
request.info(_(request.POST.get('notification')))
except RestException, ex:
ex_message = ex.message
if ex._headers.get('oozie-error-message'):
ex_message = ex._headers.get('oozie-error-message')
msg = "Error performing %s on Oozie job %s: %s." % (action, job_id, ex_message)
LOG.exception(msg)
response['data'] = _(msg)
return JsonResponse(response)
def bulk_manage_oozie_jobs(request):
if request.method != 'POST':
raise PopupException(_('Use a POST request to manage the Oozie jobs.'))
response = {'status': -1, 'data': ''}
if 'job_ids' in request.POST and 'action' in request.POST:
jobs = request.POST.get('job_ids').split()
response = {'totalRequests': len(jobs), 'totalErrors': 0, 'messages': ''}
oozie_api = get_oozie(request.user)
for job_id in jobs:
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
try:
oozie_api.job_control(job_id, request.POST.get('action'))
except RestException, ex:
LOG.exception("Error performing bulk operation for job_id=%s", job_id)
response['totalErrors'] = response['totalErrors'] + 1
response['messages'] += str(ex)
return JsonResponse(response)
def show_oozie_error(view_func):
def decorate(request, *args, **kwargs):
try:
return view_func(request, *args, **kwargs)
except RestException, ex:
LOG.exception("Error communicating with Oozie in %s", view_func.__name__)
detail = ex._headers.get('oozie-error-message', ex)
if 'Max retries exceeded with url' in str(detail) or 'Connection refused' in str(detail):
detail = _('The Oozie server is not running')
raise PopupException(_('An error occurred with Oozie.'), detail=detail)
return wraps(view_func)(decorate)
@show_oozie_error
def list_oozie_workflows(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
if request.GET.get('format') == 'json':
just_sla = request.GET.get('justsla') == 'true'
if request.GET.get('startcreatedtime'):
kwargs['filters'].extend([('startcreatedtime', request.GET.get('startcreatedtime'))])
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
wf_list = oozie_api.get_workflows(**kwargs)
json_jobs = wf_list.jobs
total_jobs = wf_list.total
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_job(job.id) for job in json_jobs]
response = massaged_oozie_jobs_for_json(json_jobs, request.user, just_sla)
response['total_jobs'] = total_jobs
return JsonResponse(response, encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_workflows.mako', request, {
'user': request.user,
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_coordinators(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
if request.GET.get('format') == 'json':
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
co_list = oozie_api.get_coordinators(**kwargs)
json_jobs = co_list.jobs
total_jobs = co_list.total
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_coordinator(job.id) for job in json_jobs]
response = massaged_oozie_jobs_for_json(json_jobs, request.user)
response['total_jobs'] = total_jobs
return JsonResponse(response, encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_coordinators.mako', request, {
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
'enable_cron_scheduling': enable_cron_scheduling,
})
@show_oozie_error
def list_oozie_bundles(request):
kwargs = {'cnt': OOZIE_JOBS_COUNT.get(), 'filters': []}
if not has_dashboard_jobs_access(request.user):
kwargs['filters'].append(('user', request.user.username))
oozie_api = get_oozie(request.user)
if request.GET.get('format') == 'json':
if request.GET.get('offset'):
kwargs['offset'] = request.GET.get('offset')
json_jobs = []
total_jobs = 0
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
bundle_list = oozie_api.get_bundles(**kwargs)
json_jobs = bundle_list.jobs
total_jobs = bundle_list.total
if request.GET.get('type') == 'progress':
json_jobs = [oozie_api.get_coordinator(job.id) for job in json_jobs]
response = massaged_oozie_jobs_for_json(json_jobs, request.user)
response['total_jobs'] = total_jobs
return JsonResponse(response, encoder=JSONEncoderForHTML)
return render('dashboard/list_oozie_bundles.mako', request, {
'jobs': [],
'has_job_edition_permission': has_job_edition_permission,
})
@show_oozie_error
def list_oozie_workflow(request, job_id):
oozie_workflow = check_job_access_permission(request, job_id)
oozie_coordinator = None
if request.GET.get('coordinator_job_id'):
oozie_coordinator = check_job_access_permission(request, request.GET.get('coordinator_job_id'))
oozie_bundle = None
if request.GET.get('bundle_job_id'):
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
if oozie_coordinator is not None:
setattr(oozie_workflow, 'oozie_coordinator', oozie_coordinator)
if oozie_bundle is not None:
setattr(oozie_workflow, 'oozie_bundle', oozie_bundle)
oozie_parent = oozie_workflow.get_parent_job_id()
if oozie_parent:
oozie_parent = check_job_access_permission(request, oozie_parent)
workflow_data = None
credentials = None
doc = None
hue_workflow = None
workflow_graph = 'MISSING' # default to prevent loading the graph tab for deleted workflows
full_node_list = None
if ENABLE_V2.get():
try:
# To update with the new History document model
hue_coord = get_history().get_coordinator_from_config(oozie_workflow.conf_dict)
hue_workflow = (hue_coord and hue_coord.workflow) or get_history().get_workflow_from_config(oozie_workflow.conf_dict)
if hue_coord and hue_coord.workflow: hue_coord.workflow.document.doc.get().can_read_or_exception(request.user)
if hue_workflow: hue_workflow.document.doc.get().can_read_or_exception(request.user)
if hue_workflow:
full_node_list = hue_workflow.nodes
workflow_id = hue_workflow.id
wid = {
'id': workflow_id
}
doc = Document2.objects.get(type='oozie-workflow2', **wid)
new_workflow = get_workflow()(document=doc)
workflow_data = new_workflow.get_data()
else:
try:
workflow_data = Workflow.gen_workflow_data_from_xml(request.user, oozie_workflow)
except Exception, e:
LOG.exception('Graph data could not be generated from Workflow %s: %s' % (oozie_workflow.id, e))
workflow_graph = ''
credentials = Credentials()
except:
LOG.exception("Error generating full page for running workflow %s" % job_id)
else:
history = get_history().cross_reference_submission_history(request.user, job_id)
hue_coord = history and history.get_coordinator() or get_history().get_coordinator_from_config(oozie_workflow.conf_dict)
hue_workflow = (hue_coord and hue_coord.workflow) or (history and history.get_workflow()) or get_history().get_workflow_from_config(oozie_workflow.conf_dict)
if hue_coord and hue_coord.workflow: Job.objects.can_read_or_exception(request, hue_coord.workflow.id)
if hue_workflow: Job.objects.can_read_or_exception(request, hue_workflow.id)
if hue_workflow:
workflow_graph = hue_workflow.gen_status_graph(oozie_workflow)
full_node_list = hue_workflow.node_list
else:
workflow_graph, full_node_list = get_workflow().gen_status_graph_from_xml(request.user, oozie_workflow)
parameters = oozie_workflow.conf_dict.copy()
for action in oozie_workflow.actions:
action.oozie_coordinator = oozie_coordinator
action.oozie_bundle = oozie_bundle
if request.GET.get('format') == 'json':
return_obj = {
'id': oozie_workflow.id,
'status': oozie_workflow.status,
'progress': oozie_workflow.get_progress(full_node_list),
'graph': workflow_graph,
'actions': massaged_workflow_actions_for_json(oozie_workflow.get_working_actions(), oozie_coordinator, oozie_bundle)
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
oozie_slas = []
if oozie_workflow.has_sla:
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'id': oozie_workflow.id,
'parent_id': oozie_workflow.id
}
oozie_slas = oozie_api.get_oozie_slas(**params)
return render('dashboard/list_oozie_workflow.mako', request, {
'oozie_workflow': oozie_workflow,
'oozie_coordinator': oozie_coordinator,
'oozie_bundle': oozie_bundle,
'oozie_parent': oozie_parent,
'oozie_slas': oozie_slas,
'hue_workflow': hue_workflow,
'hue_coord': hue_coord,
'parameters': parameters,
'has_job_edition_permission': has_job_edition_permission,
'workflow_graph': workflow_graph,
'layout_json': json.dumps(workflow_data['layout'], cls=JSONEncoderForHTML) if workflow_data else '',
'workflow_json': json.dumps(workflow_data['workflow'], cls=JSONEncoderForHTML) if workflow_data else '',
'credentials_json': json.dumps(credentials.credentials.keys(), cls=JSONEncoderForHTML) if credentials else '',
'workflow_properties_json': json.dumps(WORKFLOW_NODE_PROPERTIES, cls=JSONEncoderForHTML),
'doc1_id': doc.doc.get().id if doc else -1,
'subworkflows_json': json.dumps(_get_workflows(request.user), cls=JSONEncoderForHTML),
'can_edit_json': json.dumps(doc is None or doc.doc.get().is_editable(request.user))
})
@show_oozie_error
def list_oozie_coordinator(request, job_id):
kwargs = {'cnt': 50, 'filters': []}
kwargs['offset'] = request.GET.get('offset', 1)
if request.GET.getlist('status'):
kwargs['filters'].extend([('status', status) for status in request.GET.getlist('status')])
oozie_coordinator = check_job_access_permission(request, job_id, **kwargs)
# Cross reference the submission history (if any)
coordinator = get_history().get_coordinator_from_config(oozie_coordinator.conf_dict)
try:
if not ENABLE_V2.get():
coordinator = get_history().objects.get(oozie_job_id=job_id).job.get_full_node()
except:
LOG.exception("Ignoring error getting oozie job coordinator for job_id=%s", job_id)
oozie_bundle = None
if request.GET.get('bundle_job_id'):
try:
oozie_bundle = check_job_access_permission(request, request.GET.get('bundle_job_id'))
except:
LOG.exception("Ignoring error getting oozie bundle for job_id=%s", job_id)
if request.GET.get('format') == 'json':
actions = massaged_coordinator_actions_for_json(oozie_coordinator, oozie_bundle)
return_obj = {
'id': oozie_coordinator.id,
'status': oozie_coordinator.status,
'progress': oozie_coordinator.get_progress(),
'nextTime': format_time(oozie_coordinator.nextMaterializedTime),
'endTime': format_time(oozie_coordinator.endTime),
'actions': actions,
'total_actions': oozie_coordinator.total
}
return JsonResponse(return_obj, encoder=JSONEncoderForHTML)
oozie_slas = []
if oozie_coordinator.has_sla:
oozie_api = get_oozie(request.user, api_version="v2")
params = {
'id': oozie_coordinator.id,
'parent_id': oozie_coordinator.id
}
oozie_slas = oozie_api.get_oozie_slas(**params)
enable_cron_scheduling = ENABLE_CRON_SCHEDULING.get()
update_coord_form = | |
# Repository: pmassolino/hw-sike
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Implementation by <NAME>,
# hereby denoted as "the implementer".
#
# To the extent possible under law, the implementer has waived all copyright
# and related or neighboring rights to the source code in this file.
# http://creativecommons.org/publicdomain/zero/1.0/
import time
import random
import sys
from zedboard_sidh import *
import sike_core_utils
import SIDH_round2_spec
import sidh_fp2
import sike_fpga_constants_v128
import sike_fpga_constants_v256
tests_prom_folder = "../assembler/"
sike_core_mac_ram_start_address = 0x00000;
sike_core_mac_ram_last_address = 0x07FFF;
sike_core_base_alu_ram_start_address = 0x0C000;
sike_core_base_alu_ram_last_address = 0x0C3FF;
sike_core_keccak_core_start_address = 0x0D000;
sike_core_keccak_core_last_address = 0x0D007;
sike_core_reg_program_counter_address = 0x0E000;
sike_core_reg_status_address = 0x0E001;
sike_core_reg_operands_size_address = 0x0E002;
sike_core_reg_prime_line_equal_one_address = 0x0E003;
sike_core_reg_prime_address_address = 0x0E004;
sike_core_reg_prime_plus_one_address_address = 0x0E005;
sike_core_reg_prime_line_address_address = 0x0E006;
sike_core_reg_2prime_address_address = 0x0E007;
sike_core_reg_initial_stack_address_address = 0x0E008;
sike_core_reg_flag_address = 0x0E009;
sike_core_reg_scalar_address_address = 0x0E00A;
sike_core_mac_ram_prime_address = 0x00000;
sike_core_mac_ram_prime_plus_one_address = 0x00001;
sike_core_mac_ram_prime_line_address = 0x00002;
sike_core_mac_ram_2prime_address = 0x00003;
sike_core_mac_ram_const_r_address = 0x00004;
sike_core_mac_ram_const_r2_address = 0x00005;
sike_core_mac_ram_const_1_address = 0x00006;
sike_core_mac_ram_inv_4_mont_address = 0x00007;
sike_core_mac_ram_sidh_xpa_mont_address = 0x00008;
sike_core_mac_ram_sidh_xpai_mont_address = 0x00009;
sike_core_mac_ram_sidh_xqa_mont_address = 0x0000A;
sike_core_mac_ram_sidh_xqai_mont_address = 0x0000B;
sike_core_mac_ram_sidh_xra_mont_address = 0x0000C;
sike_core_mac_ram_sidh_xrai_mont_address = 0x0000D;
sike_core_mac_ram_sidh_xpb_mont_address = 0x0000E;
sike_core_mac_ram_sidh_xpbi_mont_address = 0x0000F;
sike_core_mac_ram_sidh_xqb_mont_address = 0x00010;
sike_core_mac_ram_sidh_xqbi_mont_address = 0x00011;
sike_core_mac_ram_sidh_xrb_mont_address = 0x00012;
sike_core_mac_ram_sidh_xrbi_mont_address = 0x00013;
sike_core_base_alu_ram_sike_s_start_address = 0x000FB;
sike_core_base_alu_ram_sike_sk_start_address = 0x0011B;
sike_core_base_alu_ram_sike_m_start_address = 0x0013B;
sike_core_base_alu_ram_sike_ss_start_address = 0x0015B;
sike_core_base_alu_ram_sike_c1_start_address = 0x0017B;
sike_core_base_alu_ram_sike_message_length_address = 0x0019B;
sike_core_base_alu_ram_sike_shared_secret_length_address = 0x0019C;
sike_core_base_alu_ram_oa_mask_address = 0x0019D;
sike_core_base_alu_ram_ob_mask_address = 0x0019E;
sike_core_base_alu_ram_oa_bits_address = 0x0019F;
sike_core_base_alu_ram_ob_bits_address = 0x001A0;
sike_core_base_alu_ram_prime_size_bits_address = 0x001A1;
sike_core_base_alu_ram_splits_alice_start_address = 0x001A2;
sike_core_base_alu_ram_max_row_alice_address = 0x002D0;
sike_core_base_alu_ram_splits_bob_start_address = 0x002D1;
sike_core_base_alu_ram_max_row_bob_address = 0x003FF;
sike_core_mac_ram_input_function_start_address = 0x00014;
sike_core_mac_ram_output_function_start_address = 0x00024;
test_program_start_fp_inv_test = 1;
test_program_start_fp2_inv_test = 27;
test_program_start_j_inv_test = 55;
test_program_start_get_A_test = 83;
test_program_start_inv_2_way_test = 113;
test_program_start_ladder_3_pt_test = 149;
test_program_start_xDBLe_test = 181;
test_program_start_get_4_isog_test = 213;
test_program_start_eval_4_isog_test = 257;
test_program_start_xTPLe_test = 299;
test_program_start_get_3_isog_test = 331;
test_program_start_eval_3_isog_test = 363;
test_program_start_get_2_isog_test = 395;
test_program_start_eval_2_isog_test = 423;
def load_program(zedboard, prom_file_name, base_word_size, base_word_size_signed_number_words):
prom_file = open(prom_file_name, 'r')
program = []
prom_file.seek(0, 2)
prom_file_size = prom_file.tell()
prom_file.seek(0)
while (prom_file.tell() != prom_file_size):
program += [sike_core_utils.load_list_value_VHDL_MAC_memory_as_integer(prom_file, base_word_size, base_word_size_signed_number_words, 1, False)]
print("Loading program into SIKE core:" + str(prom_file_name))
zedboard.write_program_prom(0, program)
print("Reading program uploaded into SIKE core")
program_written = zedboard.read_program_prom(0, len(program))
print("Verifying program uploaded into SIKE core")
if(program_written == program):
return True
print(program)
print(program_written)
return False
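# Usage sketch (illustrative only, not part of the original script): loading an assembled
# PROM image into the core. The file name and word-size arguments are hypothetical
# placeholders; real values come from the chosen SIKE parameter set.
def _example_load_program(zedboard):
    prom_file_name = tests_prom_folder + "test_sidh_program.prom"  # hypothetical file
    return load_program(zedboard, prom_file_name,
                        base_word_size=64,
                        base_word_size_signed_number_words=4)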
def load_constants(zedboard, param):
number_of_words = param[4]
base_word_size = param[1]
extended_word_size = param[2]
prime = param[5]
prime_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, param[5])
prime_plus_one_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, param[7])
prime_line_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, param[8])
prime2 = param[10]
prime2_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, param[10])
r_mod_prime_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, param[17])
r2_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, param[18])
constant_1_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, param[19])
constant_inv_4_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, param[20])
error_computation = False
fp2 = sidh_fp2.sidh_fp2(prime)
oa = param[11]
ob = param[12]
oa_bits = param[15]
ob_bits = param[16]
oa_mask = param[13]
ob_mask = param[14]
prime_size_bits = param[6]
sike_message_length = param[39]
sike_shared_secret_length = param[40]
alice_splits = param[33]
alice_max_row = param[34]
alice_max_int_points = param[35]
bob_splits = param[36]
bob_max_row = param[37]
bob_max_int_points = param[38]
starting_position_stack_sidh_core = param[41]
enable_special_prime_line_arithmetic = param[9]
alice_gen_points_mont = param[21:27]
bob_gen_points_mont = param[27:33]
alice_gen_points = param[42:48]
bob_gen_points = param[48:54]
test_value_xpa_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, alice_gen_points_mont[0])
test_value_xpai_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, alice_gen_points_mont[1])
test_value_xqa_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, alice_gen_points_mont[2])
test_value_xqai_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, alice_gen_points_mont[3])
test_value_xra_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, alice_gen_points_mont[4])
test_value_xrai_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, alice_gen_points_mont[5])
test_value_xpb_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, bob_gen_points_mont[0])
test_value_xpbi_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, bob_gen_points_mont[1])
test_value_xqb_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, bob_gen_points_mont[2])
test_value_xqbi_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, bob_gen_points_mont[3])
test_value_xrb_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, bob_gen_points_mont[4])
test_value_xrbi_mont_list = sike_core_utils.integer_to_list(extended_word_size, number_of_words, bob_gen_points_mont[5])
zedboard.write_mac_ram_operand(sike_core_mac_ram_prime_address, prime_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_prime_plus_one_address, prime_plus_one_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_prime_line_address, prime_line_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_2prime_address, prime2_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_const_r_address, r_mod_prime_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_const_r2_address, r2_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_const_1_address, constant_1_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_inv_4_mont_address, constant_inv_4_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xpa_mont_address, test_value_xpa_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xpai_mont_address, test_value_xpai_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xqa_mont_address, test_value_xqa_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xqai_mont_address, test_value_xqai_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xra_mont_address, test_value_xra_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xrai_mont_address, test_value_xrai_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xpb_mont_address, test_value_xpb_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xpbi_mont_address, test_value_xpbi_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xqb_mont_address, test_value_xqb_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xqbi_mont_address, test_value_xqbi_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xrb_mont_address, test_value_xrb_mont_list, number_of_words)
zedboard.write_mac_ram_operand(sike_core_mac_ram_sidh_xrbi_mont_address, test_value_xrbi_mont_list, number_of_words)
zedboard.write_package(sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_sike_message_length_address, sike_message_length)
zedboard.write_package(sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_sike_shared_secret_length_address, sike_shared_secret_length)
zedboard.write_package(sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_oa_mask_address, oa_mask)
zedboard.write_package(sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_ob_mask_address, ob_mask)
zedboard.write_package(sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_oa_bits_address, oa_bits)
zedboard.write_package(sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_ob_bits_address, ob_bits)
zedboard.write_package(sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_prime_size_bits_address, prime_size_bits)
start_address = sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_splits_alice_start_address
for i in range(0, len(alice_splits)):
zedboard.write_package(start_address+i, alice_splits[i])
for i in range(len(alice_splits), 302):
zedboard.write_package(start_address+i, 0)
zedboard.write_package(sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_max_row_alice_address, alice_max_row)
start_address = sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_splits_bob_start_address
for i in range(0, len(bob_splits)):
zedboard.write_package(start_address+i, bob_splits[i])
for i in range(len(bob_splits), 302):
zedboard.write_package(start_address+i, 0)
zedboard.write_package(sike_core_base_alu_ram_start_address + sike_core_base_alu_ram_max_row_bob_address, bob_max_row)
zedboard.write_package(sike_core_reg_operands_size_address, number_of_words - 1)
zedboard.write_package(sike_core_reg_prime_line_equal_one_address, enable_special_prime_line_arithmetic)
zedboard.write_package(sike_core_reg_prime_address_address, 0)
zedboard.write_package(sike_core_reg_prime_plus_one_address_address, 1)
zedboard.write_package(sike_core_reg_prime_line_address_address, 2)
zedboard.write_package(sike_core_reg_2prime_address_address, 3)
zedboard.write_package(sike_core_reg_scalar_address_address, 0)
zedboard.write_package(sike_core_reg_initial_stack_address_address, starting_position_stack_sidh_core)
def test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, starting_program_address, debug_mode=False):
values_to_load_list = [sike_core_utils.integer_to_list(extended_word_size, number_of_words, each_value) for each_value in values_to_load]
for i in range(len(values_to_load_list)):
zedboard.write_mac_ram_operand(sike_core_mac_ram_input_function_start_address + i, values_to_load_list[i], number_of_words)
zedboard.write_package(sike_core_reg_program_counter_address, starting_program_address)
time.sleep(0.1)
while(not zedboard.isFree()):
time.sleep(0.1)
computed_test_value_o_list = [zedboard.read_mac_ram_operand(sike_core_mac_ram_output_function_start_address + i, number_of_words) for i in range(len(expected_output))]
computed_test_value_o = [sike_core_utils.list_to_integer(extended_word_size, number_of_words, x) for x in computed_test_value_o_list]
error_computation = False
for i in range(len(computed_test_value_o)):
if(computed_test_value_o[i] != expected_output[i]):
error_computation = True
break
if((debug_mode) or (error_computation)):
print("Error in computation ")
print("Values loaded")
for each_value in values_to_load:
print(each_value)
print("")
print("Expected values")
for each_value in expected_output:
print(each_value)
print("")
print("Computed values")
for each_value in computed_test_value_o:
print(each_value)
print("")
return True
return False
def test_sidh_function_fp_inv(zedboard, param, number_of_tests, debug_mode=False):
load_constants(zedboard, param)
number_of_words = param[4]
base_word_size = param[1]
extended_word_size = param[2]
prime = param[5]
fp2 = sidh_fp2.sidh_fp2(prime)
error_computation = False
# Fixed test
tests_already_performed = 0
fixed_tests = [[1, 2], [prime - 2, prime - 1]]
for test in fixed_tests:
test_value_1 = test[0]
test_value_2 = test[1]
values_to_load = [test_value_1, test_value_2]
expected_output = [pow(test_value_1, prime-2, prime), pow(test_value_2, prime-2, prime)]
error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_fp_inv_test, debug_mode)
tests_already_performed += 2
if(error_computation):
break
# Random tests
if(not error_computation):
for i in range(tests_already_performed, number_of_tests, 2):
if(((i %(1000)) == 0)):
print(i)
test_value_1 = random.randint(0, prime)
test_value_2 = random.randint(0, prime)
values_to_load = [test_value_1, test_value_2]
expected_output = [pow(test_value_1, prime-2, prime), pow(test_value_2, prime-2, prime)]
error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_fp_inv_test, debug_mode)
if(error_computation):
break
return error_computation
def test_all_sidh_function_fp_inv(zedboard, sike_fpga_constants, number_of_tests, only_one_parameter=None):
error_computation = False
if(only_one_parameter != None):
all_testing_parameters = sike_fpga_constants[only_one_parameter:only_one_parameter+1]
else:
all_testing_parameters = sike_fpga_constants
for param in all_testing_parameters:
print("Testing SIDH function fp inv " + param[0])
error_computation = test_sidh_function_fp_inv(zedboard, param, number_of_tests, debug_mode=False)
if(error_computation):
break
def test_sidh_function_fp2_inv(zedboard, param, number_of_tests, debug_mode=False):
load_constants(zedboard, param)
number_of_words = param[4]
base_word_size = param[1]
extended_word_size = param[2]
prime = param[5]
fp2 = sidh_fp2.sidh_fp2(prime)
error_computation = False
# Fixed test
tests_already_performed = 0
fixed_tests = [1, prime-1]
for test_value_1 in fixed_tests:
for test_value_1i in fixed_tests:
for test_value_2 in fixed_tests:
for test_value_2i in fixed_tests:
values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i]
expected_value_1 = fp2([test_value_1, test_value_1i])**(-1)
expected_value_2 = fp2([test_value_2, test_value_2i])**(-1)
expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_2.polynomial()[0], expected_value_2.polynomial()[1]]
error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_fp2_inv_test, debug_mode)
tests_already_performed += 2
if(error_computation):
break
if(error_computation):
break
if(error_computation):
break
if(error_computation):
break
# Random tests
if(not error_computation):
for i in range(tests_already_performed, number_of_tests, 2):
if(((i %(1000)) == 0)):
print(i)
test_value_1 = random.randint(1, prime)
test_value_1i = random.randint(1, prime)
test_value_2 = random.randint(1, prime)
test_value_2i = random.randint(1, prime)
values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i]
expected_value_1 = fp2([test_value_1, test_value_1i])**(-1)
expected_value_2 = fp2([test_value_2, test_value_2i])**(-1)
expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1], expected_value_2.polynomial()[0], expected_value_2.polynomial()[1]]
error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_fp2_inv_test, debug_mode)
if(error_computation):
break
return error_computation
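# In the Fp2 tests above each field element is loaded as a coefficient pair [a, ai]
# representing a + ai*i, and the expected inverse is read back via .polynomial()[0]
# and .polynomial()[1]. Illustrative host-side use of the same pattern (commented out;
# assumes sidh_fp2 is importable and p is the field prime used above):
#     fp2 = sidh_fp2.sidh_fp2(p)
#     inv = fp2([3, 5]) ** (-1)          # same construction as expected_value_1
#     print(inv.polynomial()[0], inv.polynomial()[1])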
def test_all_sidh_function_fp2_inv(zedboard, sike_fpga_constants, number_of_tests, only_one_parameter=None):
error_computation = False
if(only_one_parameter != None):
all_testing_parameters = sike_fpga_constants[only_one_parameter:only_one_parameter+1]
else:
all_testing_parameters = sike_fpga_constants
for param in all_testing_parameters:
print("Testing SIDH function fp2 inv " + param[0])
error_computation = test_sidh_function_fp2_inv(zedboard, param, number_of_tests, debug_mode=False)
if(error_computation):
break
def test_sidh_function_j_invariant(zedboard, param, number_of_tests, debug_mode=False):
load_constants(zedboard, param)
number_of_words = param[4]
base_word_size = param[1]
extended_word_size = param[2]
prime = param[5]
fp2 = sidh_fp2.sidh_fp2(prime)
error_computation = False
# Fixed test
tests_already_performed = 0
fixed_tests = [1, prime-1]
for test_value_1 in fixed_tests:
for test_value_1i in fixed_tests:
for test_value_2 in fixed_tests:
for test_value_2i in fixed_tests:
values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i]
expected_value_1 = SIDH_round2_spec.j_invariant(fp2, fp2([test_value_1, test_value_1i]), fp2([test_value_2, test_value_2i]))
expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1]]
error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_j_inv_test, debug_mode)
tests_already_performed += 2
if(error_computation):
break
if(error_computation):
break
if(error_computation):
break
if(error_computation):
break
# Random tests
if(not error_computation):
for i in range(tests_already_performed, number_of_tests):
if(((i %(1000)) == 0)):
print(i)
test_value_1 = random.randint(0, prime)
test_value_1i = random.randint(0, prime)
test_value_2 = random.randint(0, prime)
test_value_2i = random.randint(0, prime)
values_to_load = [test_value_1, test_value_1i, test_value_2, test_value_2i]
expected_value_1 = SIDH_round2_spec.j_invariant(fp2, fp2([test_value_1, test_value_1i]), fp2([test_value_2, test_value_2i]))
expected_output = [expected_value_1.polynomial()[0], expected_value_1.polynomial()[1]]
error_computation = test_single_sidh_function(zedboard, base_word_size, extended_word_size, number_of_words, values_to_load, expected_output, test_program_start_j_inv_test, debug_mode)
if(error_computation):
break
return error_computation
def test_all_sidh_function_j_invariant(zedboard, sike_fpga_constants, number_of_tests, only_one_parameter=None):
error_computation = False
if(only_one_parameter != None):
all_testing_parameters = sike_fpga_constants[only_one_parameter:only_one_parameter+1]
else:
all_testing_parameters = sike_fpga_constants
for param in all_testing_parameters:
print("Testing SIDH function j invariant " + param[0])
error_computation = test_sidh_function_j_invariant(zedboard, param, number_of_tests, debug_mode=False)
if(error_computation):
break
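# Illustrative (commented) driver call for the j-invariant test above, assuming a
# connected `zedboard` handle and a populated `sike_fpga_constants` parameter table
# as used elsewhere in this module; not part of the original code:
#     error = test_sidh_function_j_invariant(zedboard, sike_fpga_constants[0], 1000)
#     print('j-invariant test failed' if error else 'j-invariant test passed')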
def test_sidh_function_get_A(zedboard, param, | |
# Source repository: AnnieL01/pike
#
# Copyright (c) 2013, EMC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Module Name:
#
# smb2.py
#
# Abstract:
#
# SMB2 support
#
# Authors: <NAME> (<EMAIL>)
#
"""
SMB2 packet definitions
This module contains definitions of SMB2 packet frames and
associated constants and data types.
Packet field names are derived by taking the name from MS-SMB2 and
making it PEP-8 compliant. For example, FooBarBaz becomes foo_bar_baz.
This makes it simple to correlate the code with the spec while
maintaining a clear visual distinction between values and types.
"""
import array
import core
import nttime
import re
import ntstatus
# Dialects constants
class Dialect(core.ValueEnum):
DIALECT_SMB2_WILDCARD = 0x02FF
DIALECT_SMB2_002 = 0x0202
DIALECT_SMB2_1 = 0x0210
DIALECT_SMB3_0 = 0x0300
DIALECT_SMB3_0_2 = 0x0302
DIALECT_SMB3_1_1 = 0x0311
Dialect.import_items(globals())
# Flag constants
class Flags(core.FlagEnum):
SMB2_FLAGS_NONE = 0x00000000
SMB2_FLAGS_SERVER_TO_REDIR = 0x00000001
SMB2_FLAGS_ASYNC_COMMAND = 0x00000002
SMB2_FLAGS_RELATED_OPERATIONS = 0x00000004
SMB2_FLAGS_SIGNED = 0x00000008
SMB2_FLAGS_DFS_OPERATIONS = 0x10000000
SMB2_FLAGS_REPLAY_OPERATION = 0x20000000
Flags.import_items(globals())
# Command constants
class CommandId(core.ValueEnum):
SMB2_NEGOTIATE = 0x0000
SMB2_SESSION_SETUP = 0x0001
SMB2_LOGOFF = 0x0002
SMB2_TREE_CONNECT = 0x0003
SMB2_TREE_DISCONNECT = 0x0004
SMB2_CREATE = 0x0005
SMB2_CLOSE = 0x0006
SMB2_FLUSH = 0x0007
SMB2_READ = 0x0008
SMB2_WRITE = 0x0009
SMB2_LOCK = 0x000a
SMB2_IOCTL = 0x000b
SMB2_CANCEL = 0x000c
SMB2_ECHO = 0x000d
SMB2_QUERY_DIRECTORY = 0x000e
SMB2_CHANGE_NOTIFY = 0x000f
SMB2_QUERY_INFO = 0x0010
SMB2_SET_INFO = 0x0011
SMB2_OPLOCK_BREAK = 0x0012
CommandId.import_items(globals())
# Share Capabilities
class ShareCaps(core.FlagEnum):
SMB2_SHARE_CAP_DFS = 0x00000008
SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY = 0x00000010
SMB2_SHARE_CAP_SCALEOUT = 0x00000020
SMB2_SHARE_CAP_CLUSTER = 0x00000040
ShareCaps.import_items(globals())
# Share flags
class ShareFlags(core.FlagEnum):
SMB2_SHAREFLAG_MANUAL_CACHING = 0x00000000
SMB2_SHAREFLAG_AUTO_CACHING = 0x00000010
SMB2_SHAREFLAG_VDO_CACHING = 0x00000020
SMB2_SHAREFLAG_NO_CACHING = 0x00000030
SMB2_SHAREFLAG_DFS = 0x00000001
SMB2_SHAREFLAG_DFS_ROOT = 0x00000002
SMB2_SHAREFLAG_RESTRICT_EXCLUSIVE_OPENS = 0x00000100
SMB2_SHAREFLAG_FORCE_SHARED_DELETE = 0x00000200
SMB2_SHAREFLAG_ALLOW_NAMESPACE_CACHING = 0x00000400
SMB2_SHAREFLAG_ACCESS_BASED_DIRECTORY_ENUM = 0x00000800
SMB2_SHAREFLAG_FORCE_LEVELII_OPLOCK = 0x00001000
SMB2_SHAREFLAG_ENABLE_HASH_V1 = 0x00002000
SMB2_SHAREFLAG_ENABLE_HASH_V2 = 0x00004000
SMB2_SHAREFLAG_ENCRYPT_DATA = 0x00008000
ShareFlags.import_items(globals())
# Misc
RELATED_FID = (2**64-1,2**64-1)
UNSOLICITED_MESSAGE_ID = (2**64-1)
class Smb2(core.Frame):
_request_table = {}
_response_table = {}
_notification_table = {}
# Decorators to register class as request/response/notification payload
request = core.Register(_request_table, 'command_id', 'structure_size')
response = core.Register(_response_table, 'command_id', 'structure_size')
notification = core.Register(_notification_table, 'command_id', 'structure_size')
def __init__(self, parent, context=None):
core.Frame.__init__(self, parent, context)
self.credit_charge = None
self.channel_sequence = 0
self.status = None
self.command = None
self.credit_request = None
self.credit_response = None
self.flags = SMB2_FLAGS_NONE
self.next_command = 0
self.message_id = None
self.async_id = None
self.session_id = 0
self.tree_id = 0
self._command = None
if parent is not None:
parent.append(self)
def _children(self):
return [self._command] if self._command is not None else []
def _encode(self, cur):
cur.encode_bytes('\xfeSMB')
cur.encode_uint16le(64)
cur.encode_uint16le(self.credit_charge)
if self.flags & SMB2_FLAGS_SERVER_TO_REDIR:
cur.encode_uint32le(self.status)
else:
cur.encode_uint16le(self.channel_sequence)
cur.encode_uint16le(0)
if self.command is None:
self.command = self._command.command_id
cur.encode_uint16le(self.command)
if self.flags & SMB2_FLAGS_SERVER_TO_REDIR:
cur.encode_uint16le(self.credit_response)
else:
cur.encode_uint16le(self.credit_request)
cur.encode_uint32le(self.flags)
# Set NextCommand to 0 for now
next_command_hole = cur.hole.encode_uint32le(0)
cur.encode_uint64le(self.message_id)
if self.flags & SMB2_FLAGS_ASYNC_COMMAND:
cur.encode_uint64le(self.async_id)
else:
cur.encode_uint32le(0xfeff) # default process id
cur.encode_uint32le(self.tree_id)
cur.encode_uint64le(self.session_id)
# Set Signature to 0 for now
signature_hole = cur.hole.encode_bytes([0]*16)
# Encode command body
self._command.encode(cur)
# If we are not last command in chain
if not self.is_last_child():
# Add padding
cur.align(self.start, 8)
cur.trunc()
# Calculate next_command
self.next_command = cur - self.start
else:
self.next_command = 0
next_command_hole(self.next_command)
# Calculate and backpatch signature
if not hasattr(self, "signature"):
if self.flags & SMB2_FLAGS_SIGNED:
digest = self.context.signing_digest()
key = self.context.signing_key(self.session_id)
self.signature = digest(key, self.start[:cur])[:16]
else:
self.signature = array.array('B',[0]*16)
signature_hole(self.signature)
def _decode(self, cur):
if (cur.decode_bytes(4).tostring() != '\xfeSMB'):
raise core.BadPacket()
if (cur.decode_uint16le() != 64):
raise core.BadPacket()
self.credit_charge = cur.decode_uint16le()
# Look ahead and decode flags first
self.flags = Flags((cur + 8).decode_uint32le())
if self.flags & SMB2_FLAGS_SERVER_TO_REDIR:
self.status = ntstatus.Status(cur.decode_uint32le())
self.channel_sequence = None
else:
self.channel_sequence = cur.decode_uint16le()
# Ignore reserved
cur.decode_uint16le()
self.status = None
self.command = CommandId(cur.decode_uint16le())
if self.flags & SMB2_FLAGS_SERVER_TO_REDIR:
self.credit_response = cur.decode_uint16le()
self.credit_request = None
else:
self.credit_request = cur.decode_uint16le()
self.credit_response = None
# Skip over flags
cur += 4
self.next_command = cur.decode_uint32le()
self.message_id = cur.decode_uint64le()
if self.flags & SMB2_FLAGS_ASYNC_COMMAND:
self.async_id = cur.decode_uint64le()
self.tree_id = None
else:
# Ignore reserved
cur.decode_uint32le()
self.tree_id = cur.decode_uint32le()
self.async_id = None
self.session_id = cur.decode_uint64le()
self.signature = cur.decode_bytes(16)
# Peek ahead at structure_size
structure_size = (cur+0).decode_uint16le()
key = (self.command, structure_size)
if self.flags & SMB2_FLAGS_SERVER_TO_REDIR:
# Distinguish unsolicited response, error response, normal response
if self.message_id == UNSOLICITED_MESSAGE_ID:
if key in Smb2._notification_table:
cls = Smb2._notification_table[key]
else:
raise core.BadPacket()
elif key in Smb2._response_table:
cls = Smb2._response_table[key]
if self.status not in cls.allowed_status and \
structure_size == ErrorResponse.structure_size:
cls = ErrorResponse
else:
cls = ErrorResponse
else:
cls = Smb2._request_table[key]
# Figure out limit of command data
if self.next_command:
end = self.start + self.next_command
else:
end = cur.upperbound
self._command = cls(self)
with cur.bounded(cur, end):
self._command.decode(cur)
# Advance to next frame or end of data
cur.advanceto(end)
def verify(self, digest, key):
if self.flags & SMB2_FLAGS_SIGNED:
message = self.start[:self.end]
# Zero out signature in message
message[12*4:12*4+16] = array.array('B',[0]*16)
# Calculate signature
signature = digest(key, message)[:16]
# Check that signatures match
if signature != self.signature:
raise core.BadPacket()
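# Illustrative (commented) use of verify(), mirroring the signing logic in _encode
# above; `smb2_frame` is a decoded Smb2 frame, `digest` a callable like
# digest(key, data) -> bytes and `key` the session signing key -- all assumptions,
# not part of this module:
#     try:
#         smb2_frame.verify(digest, key)
#     except core.BadPacket:
#         print('SMB2 signature mismatch')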
class Command(core.Frame):
def __init__(self, parent):
core.Frame.__init__(self, parent)
parent._command = self
def _encode_pre(self, cur):
core.Frame._encode_pre(self, cur)
cur.encode_uint16le(self.structure_size)
def _decode_pre(self, cur):
core.Frame._decode_pre(self, cur)
if cur.decode_uint16le() != self.structure_size:
raise core.BadPacket()
@Smb2.request
class Request(Command):
pass
@Smb2.response
class Response(Command):
allowed_status = [ntstatus.STATUS_SUCCESS]
@Smb2.notification
class Notification(Command):
pass
class ErrorResponse(Command):
structure_size = 9
_context_table = {}
error_context = core.Register(_context_table, 'error_id', 'parent_status')
special_statuses = [ntstatus.STATUS_STOPPED_ON_SYMLINK,
ntstatus.STATUS_BUFFER_TOO_SMALL]
def __init__(self, parent):
super(ErrorResponse,self).__init__(parent)
parent._command = self
self.byte_count = None
self.error_context_count = 0
self.error_data = None
self._error_contexts = []
def _children(self):
return self._error_contexts
def append(self, e):
self._error_contexts.append(e)
def _decode(self, cur):
self.error_context_count = cur.decode_uint8le()
# Ignore Reserved
cur.decode_uint8le()
self.byte_count = cur.decode_uint32le()
end = cur + self.byte_count
# SMB 3.1.1+ Error context handling
if self.error_context_count > 0:
for ix in xrange(self.error_context_count):
cur.align(self.parent.start, 8)
data_length = cur.decode_uint32le()
error_id = cur.decode_uint32le()
parent_status = self.parent.status
if parent_status not in self.special_statuses:
parent_status = None
key = (error_id, parent_status)
ctx = self._context_table[key]
with cur.bounded(cur, end):
ctx(self, data_length).decode(cur)
elif self.byte_count > 0:
# compatibility shim for older dialects
error_id = 0
parent_status = self.parent.status
if parent_status not in self.special_statuses:
parent_status = None
key = (error_id, parent_status)
ctx = self._context_table[key]
with cur.bounded(cur, end):
self.error_data = ctx(self, self.byte_count)
self.error_data.decode(cur)
class ErrorId(core.ValueEnum):
SMB2_ERROR_ID_DEFAULT = 0x0
ErrorId.import_items(globals())
@ErrorResponse.error_context
class ErrorResponseContext(core.Frame):
def __init__(self, parent, data_length):
core.Frame.__init__(self, parent)
self.data_length = data_length
self.error_data = None
if parent is not None:
parent.append(self)
class ErrorResponseDefault(ErrorResponseContext):
error_id = SMB2_ERROR_ID_DEFAULT
parent_status = None
def _decode(self, cur):
self.error_data = cur.decode_bytes(self.data_length)
class ErrorResponseDefaultBufferSize(ErrorResponseContext):
error_id = SMB2_ERROR_ID_DEFAULT
parent_status = ntstatus.STATUS_BUFFER_TOO_SMALL
def _decode(self, cur):
self.error_data = cur.decode_uint32le()
self.minimum_buffer_length = self.error_data
class SymbolicLinkErrorResponse(ErrorResponseContext):
error_id = SMB2_ERROR_ID_DEFAULT
parent_status = ntstatus.STATUS_STOPPED_ON_SYMLINK
def _decode(self, cur):
end = cur + self.data_length
self.sym_link_length = cur.decode_uint32le()
self.sym_link_error_tag = cur.decode_uint32le()
self.reparse_tag = cur.decode_uint32le()
if self.sym_link_error_tag != 0x4C4D5953:
raise core.BadPacket()
reparse_data = GetReparsePointResponse._reparse_tag_map[self.reparse_tag]
with cur.bounded(cur, end):
self.error_data = reparse_data(self)
self.error_data.decode(cur)
class Cancel(Request):
command_id = SMB2_CANCEL
structure_size = 4
def _encode(self, cur):
# Reserved
cur.encode_uint16le(0)
# Negotiate constants
class SecurityMode(core.FlagEnum):
SMB2_NEGOTIATE_NONE = 0x0000
SMB2_NEGOTIATE_SIGNING_ENABLED = 0x0001
SMB2_NEGOTIATE_SIGNING_REQUIRED = 0x0002
SecurityMode.import_items(globals())
class GlobalCaps(core.FlagEnum):
SMB2_GLOBAL_CAP_DFS = 0x00000001
SMB2_GLOBAL_CAP_LEASING = 0x00000002
SMB2_GLOBAL_CAP_LARGE_MTU = 0x00000004
SMB2_GLOBAL_CAP_MULTI_CHANNEL = 0x00000008
SMB2_GLOBAL_CAP_PERSISTENT_HANDLES = 0x00000010
SMB2_GLOBAL_CAP_DIRECTORY_LEASING = 0x00000020
SMB2_GLOBAL_CAP_ENCRYPTION = 0x00000040
GlobalCaps.import_items(globals())
class NegotiateContextType(core.ValueEnum):
SMB2_PREAUTH_INTEGRITY_CAPABILITIES = 0x0001
SMB2_ENCRYPTION_CAPABILITIES = 0x0002
NegotiateContextType.import_items(globals())
class HashAlgorithms(core.ValueEnum):
SMB2_SHA_512 = 0x0001
HashAlgorithms.import_items(globals())
class NegotiateRequest(Request):
command_id = SMB2_NEGOTIATE
structure_size = 36
def __init__(self, parent):
Request.__init__(self, parent)
self.security_mode = 0
self.capabilities = 0
self.client_guid = [0]*16
self.dialects = []
self.negotiate_contexts_count = None
self.negotiate_contexts_offset = None
self.negotiate_contexts_alignment_skew = 0
self.negotiate_contexts = []
def _children(self):
return self.negotiate_contexts
def _has_negotiate_contexts(self):
return (self.negotiate_contexts or
self.negotiate_contexts_offset is | |
# Source repository: IbHansen/ModelFlow
# -*- coding: utf-8 -*-
"""
This is a module for testing new features of the model class, but in a smaler file.
Created on Sat Sep 29 06:03:35 2018
@author: hanseni
"""
import sys
import time
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
from sympy import sympify,Symbol
from collections import defaultdict, namedtuple
import numpy as np
import scipy as sp
import networkx as nx
import os
from subprocess import run
import webbrowser as wb
import seaborn as sns
import ipywidgets as ip
import inspect
from itertools import chain, zip_longest
import fnmatch
from IPython.display import SVG, display, Image, Math ,Latex, Markdown
try:
from numba import jit
except:
print('Numba not available')
import itertools
from collections import namedtuple
from dataclasses import dataclass, field, asdict
import sys
import time
import re
# print(f'name:{__name__} and package={__package__}!-' )
__package__ = 'ModelFlow'
import modelpattern as pt
from modelclass import model, ttimer, insertModelVar
from modelvis import vis
import modelmanipulation as mp
import modeldiff as md
from modelmanipulation import split_frml,udtryk_parse,find_statements,un_normalize_model,explode
from modelclass import model, ttimer, insertModelVar
from modelinvert import targets_instruments
import modeljupyter as mj
import modelvis as mv
import modelmf
from modelhelp import tovarlag
class newmodel(model):
def __call__(self, *args, **kwargs ):
''' Runs a model.
By default, a straight model is calculated by *xgenr*; a simultaneous model is solved by *sim*.
:sim: If False, forces the model to be calculated (not solved); if True, forces simulation.
:setbase: If True, place the result in model.basedf
:setlast: If False, don't place the results in model.lastdf
If the model property previousbase is true, the previous run is used as basedf.
'''
if hasattr(self,'oldkwargs'):
newkwargs = {**self.oldkwargs,**kwargs}
else:
newkwargs = kwargs
self.oldkwargs = newkwargs.copy()
if self.save:
if self.previousbase and hasattr(self,'lastdf'):
self.basedf = self.lastdf.copy(deep=True)
if self.maxlead >= 1:
outdf = self.newtonstack_un_normalized(*args, **newkwargs )
elif self.oldkwargs.get('sim2',True):
outdf = self.sim2d(*args, **newkwargs )
else:
outdf = self.sim1d( *args, **newkwargs)
if self.save:
if (not hasattr(self,'basedf')) or kwargs.get('setbase',False) :
self.basedf = outdf.copy(deep=True)
if kwargs.get('setlast',True) :
self.lastdf = outdf.copy(deep=True)
return outdf
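# Illustrative (commented) call of the solver wrapper above, assuming `mmodel` is a
# newmodel instance and `df` a DataFrame containing all model variables; the keyword
# names simply mirror sim2d below and are not a guaranteed public API:
#     res = mmodel(df, '2020q1', '2021q4', antal=200, conv=['Y'], silent=1)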
@property
def showstartnr(self):
self.findpos()
variabler=[x for x in sorted(self.allvar.keys())]
return {v:self.allvar[v]['startnr'] for v in variabler}
def sim2d(self, databank, start='', slut='', silent=0,samedata=0,alfa=1.0,stats=False,first_test=1,
antal=1,conv=[],absconv=0.01,relconv=0.0000000000000001,
dumpvar=[],ldumpvar=False,dumpwith=15,dumpdecimal=5,chunk=None,ljit=False,timeon=False,
fairopt={'fairantal':1},**kwargs):
'''Evaluates this model on a databank from start to slut (means end in Danish).
First it finds the values in the Dataframe, then creates the evaluator function through the *outeval* function
(:func:`modelclass.model.fouteval`)
then it evaluates the function and returns the values to the Dataframe in the databank.
The text for the evaluator function is placed in the model property **make_los_text**
where it can be inspected
in case of problems.
'''
starttimesetup=time.time()
fairantal = {**fairopt,**kwargs}.get('fairantal',1)
sol_periode = self.smpl(start,slut,databank)
if self.maxlag and not (self.current_per[0]+self.maxlag) in databank.index :
print('***** Warning: You are solving the model before all lags are available')
print('Maxlag:',self.maxlag,'First solveperiod:',self.current_per[0],'First dataframe index',databank.index[0])
sys.exit()
if not silent : print ('Will start calculating: ' + self.name)
if not self.eqcolumns(self.genrcolumns,databank.columns):
databank=insertModelVar(databank,self) # fill all Missing value with 0.0
for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]:
databank.loc[:,i]=databank.loc[:,i].astype('O') # Make sure columns with matrixes are of this type
newdata = True
else:
newdata = False
if ljit:
if newdata or not hasattr(self,'pro2d_jit'):
if not silent: print(f'Create compiled solving function for {self.name}')
self.make_los_text2d_jit = self.outsolve2dcunk(databank,chunk=chunk,ljit=ljit, debug=kwargs.get('debug',1))
exec(self.make_los_text2d_jit,globals()) # creates the los function
self.pro2d_jit,self.solve2d_jit,self.epi2d_jit = make_los(self.funks,self.errfunk)
self.pro2d,self.solve2d,self.epi2d = self.pro2d_jit,self.solve2d_jit,self.epi2d_jit
else:
if newdata or not hasattr(self,'pro2d_nojit'):
if not silent: print(f'Create solving function for {self.name}')
self.make_los_text2d_nojit = self.outsolve2dcunk(databank,chunk=chunk,ljit=ljit, debug=kwargs.get('debug',1))
exec(self.make_los_text2d_nojit,globals()) # creates the los function
self.pro2d_nojit,self.solve2d_nojit,self.epi2d_nojit = make_los(self.funks,self.errfunk)
self.pro2d,self.solve2d,self.epi2d = self.pro2d_nojit,self.solve2d_nojit,self.epi2d_nojit
values = databank.values.copy() #
self.genrcolumns = databank.columns.copy()
self.genrindex = databank.index.copy()
convvar = [conv.upper()] if isinstance(conv,str) else [c.upper() for c in conv] if conv != [] else list(self.endogene)
convplace=[databank.columns.get_loc(c) for c in convvar] # this is how convergence is measured
convergence = True
if ldumpvar:
self.dumplist = []
self.dump = convvar if dumpvar == [] else [v for v in self.vlist(dumpvar) if v in self.endogene]
dumpplac = [databank.columns.get_loc(v) for v in self.dump]
ittotal = 0
endtimesetup=time.time()
starttime=time.time()
for fairiteration in range(fairantal):
if fairantal >=2:
print(f'Fair-Taylor iteration: {fairiteration}')
for self.periode in sol_periode:
row=databank.index.get_loc(self.periode)
if ldumpvar:
self.dumplist.append([fairiteration,self.periode,int(0)]+[values[row,p]
for p in dumpplac])
itbefore = [values[row,c] for c in convplace]
self.pro2d(values, values, row , 1.0 )
for iteration in range(antal):
with ttimer(f'Evaluate {self.periode}/{iteration} ',timeon) as t:
self.solve2d(values, values, row , alfa )
ittotal += 1
if ldumpvar:
self.dumplist.append([fairiteration,self.periode, int(iteration+1)]+[values[row,p]
for p in dumpplac])
if iteration > first_test:
itafter=[values[row,c] for c in convplace]
convergence = True
for after,before in zip(itafter,itbefore):
# print(before,after)
if before > absconv and abs(after-before)/abs(before) > relconv:
convergence = False
break
if convergence:
break
else:
itbefore=itafter
self.epi2d(values, values, row , 1.0 )
if not silent:
if not convergence :
print(f'{self.periode} not converged in {iteration} iterations')
else:
print(f'{self.periode} Solved in {iteration} iterations')
if ldumpvar:
self.dumpdf= pd.DataFrame(self.dumplist)
del self.dumplist
self.dumpdf.columns= ['fair','per','iteration']+self.dump
if fairantal<=2 : self.dumpdf.drop('fair',axis=1,inplace=True)
outdf = pd.DataFrame(values,index=databank.index,columns=databank.columns)
if stats:
numberfloats = self.calculate_freq[-1][1]*ittotal
endtime = time.time()
self.simtime = endtime-starttime
self.setuptime = endtimesetup - starttimesetup
print(f'Setup time (seconds) :{self.setuptime:>15,.2f}')
print(f'Floating point operations :{self.calculate_freq[-1][1]:>15,}')
print(f'Total iterations :{ittotal:>15,}')
print(f'Total floating point operations :{numberfloats:>15,}')
print(f'Simulation time (seconds) :{self.simtime:>15,.2f}')
if self.simtime > 0.0:
print(f'Floating point operations per second : {numberfloats/self.simtime:>15,.1f}')
if not silent : print (self.name + ' solved ')
return outdf
@staticmethod
def grouper(iterable, n, fillvalue=''):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def outsolve2dcunk(self,databank, debug=1,chunk=None,ljit=False,type='gauss',cache=False):
''' takes a list of terms and translates it to an evaluator function called los
The model accesses the data through Dataframe.values[rowindex+lag, columnindex], which is very efficient
'''
short,long,longer = 4*' ',8*' ',12 *' '
columnsnr=self.get_columnsnr(databank)
if ljit:
thisdebug = False
else:
thisdebug = debug
#print(f'Generating source for {self.name} using ljit = {ljit} ')
def make_gaussline2(vx,nodamp=False):
''' takes a list of terms and translates it to a line in a Gauss-Seidel solver for
simultaneous models of the variables.
New version to handle several left-hand-side variables. Damping is not allowed for
these, but can easily be implemented by making a function to multiply tuples.
nodamp is for pre and epilog solutions, which should not be dampened.
'''
termer=self.allvar[vx]['terms']
assigpos = self.allvar[vx]['assigpos']
if nodamp:
ldamp=False
else:
if 'Z' in self.allvar[vx]['frmlname']: # convention for damping equations
assert assigpos == 1 , 'You can not dampen equations with several left hand sides:'+vx
endovar=[t.op if t.op else ('values[row,'+str(columnsnr[t.var])+']') for j,t in enumerate(termer) if j <= assigpos-1 ]
damp='(1-alfa)*('+''.join(endovar)+')+alfa*(' # to implement damping of solution
ldamp = True
else:
ldamp = False
out=[]
for i,t in enumerate(termer[:-1]): # drop the trailing $
if t.op:
out.append(t.op.lower())
if i == assigpos and ldamp:
out.append(damp)
if t.number:
out.append(t.number)
elif t.var:
if i > assigpos:
out.append('values[row'+t.lag+','+str(columnsnr[t.var])+']' )
else:
out.append('values[row'+t.lag+','+str(columnsnr[t.var])+']' )
if ldamp: out.append(')') # the last ) in the dampening
res = ''.join(out)
return res+'\n'
def make_resline2(vx,nodamp):
''' takes a list of terms and translates it to a line calculating the equation
'''
termer=self.allvar[vx]['terms']
assigpos = self.allvar[vx]['assigpos']
out=[]
for i,t in enumerate(termer[:-1]): # drop the trailing $
if t.op:
out.append(t.op.lower())
if t.number:
out.append(t.number)
elif t.var:
lag=int(t.lag) if t.lag else 0
if i < assigpos:
out.append('outvalues[row'+t.lag+','+str(columnsnr[t.var])+']' )
else:
out.append('values[row'+t.lag+','+str(columnsnr[t.var])+']' )
res = ''.join(out)
return res+'\n'
def makeafunk(name,order,linemake,chunknumber,debug=False,overhead = 0 ,oldeqs=0,nodamp=False,ljit=False,totalchunk=1):
''' creates the source of an evaluation function
keeps track of how many equations and lines are in the functions above.
This allows the error function to retrieve the variable for which a math error is thrown
'''
fib1=[]
fib2=[]
if ljit:
fib1.append((short+'print("'+f"Compiling chunk {chunknumber+1}/{totalchunk} "+'",time.strftime("%H:%M:%S")) \n') if ljit else '')
fib1.append(short+'@jit("(f8[:,:],f8[:,:],i8,f8)",fastmath=True)\n')
fib1.append(short + 'def '+name+'(values,outvalues,row,alfa=1.0):\n')
# fib1.append(long + 'outvalues = | |
# Source repository: vnguyen7/watchmaker
# -*- coding: utf-8 -*-
"""Watchmaker salt worker."""
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import ast
import codecs
import json
import os
import shutil
import yaml
import watchmaker.utils
from watchmaker import static
from watchmaker.exceptions import InvalidValue, WatchmakerException
from watchmaker.managers.platform import (LinuxPlatformManager,
PlatformManagerBase,
WindowsPlatformManager)
from watchmaker.workers.base import WorkerBase
class SaltBase(WorkerBase, PlatformManagerBase):
r"""
Cross-platform worker for running salt.
Args:
salt_debug_log: (:obj:`list`)
Filesystem path to a file where the salt debug output should be
saved. When unset, the salt debug log is saved to the Watchmaker
log directory.
(*Default*: ``''``)
salt_content: (:obj:`str`)
URL to a salt content archive (zip file) that will be uncompressed
in the watchmaker salt "srv" directory. This typically is used to
create a top.sls file and to populate salt's file_roots.
(*Default*: ``''``)
- *Linux*: ``/srv/watchmaker/salt``
- *Windows*: ``C:\Watchmaker\Salt\srv``
salt_states: (:obj:`str`)
Comma-separated string of salt states to execute. Accepts two
special keywords (case-insensitive).
(*Default*: ``''``)
- ``none``: Do not apply any salt states.
- ``highstate``: Apply the salt "highstate".
exclude_states: (:obj:`str`)
Comma-separated string of states to exclude from execution.
(*Default*: ``''``)
user_formulas: (:obj:`dict`)
Map of formula names and URLs to zip archives of salt formulas.
These formulas will be downloaded, extracted, and added to the salt
file roots. The zip archive must contain a top-level directory
that, itself, contains the actual salt formula. To "overwrite"
bundled submodule formulas, make sure the formula name matches the
submodule name.
(*Default*: ``{}``)
admin_groups: (:obj:`str`)
Sets a salt grain that specifies the domain groups that should have
root privileges on Linux or admin privileges on Windows. Value must
be a colon-separated string. E.g. ``"group1:group2"``
(*Default*: ``''``)
admin_users: (:obj:`str`)
Sets a salt grain that specifies the domain users that should have
root privileges on Linux or admin privileges on Windows. Value must
be a colon-separated string. E.g. ``"user1:user2"``
(*Default*: ``''``)
environment: (:obj:`str`)
Sets a salt grain that specifies the environment in which the
system is being built. E.g. ``dev``, ``test``, ``prod``, etc.
(*Default*: ``''``)
ou_path: (:obj:`str`)
Sets a salt grain that specifies the full DN of the OU where the
computer account will be created when joining a domain.
E.g. ``"OU=SuperCoolApp,DC=example,DC=com"``
(*Default*: ``''``)
"""
def __init__(self, *args, **kwargs):
# Init inherited classes
super(SaltBase, self).__init__(*args, **kwargs)
# Pop arguments used by SaltBase
self.user_formulas = kwargs.pop('user_formulas', None) or {}
self.computer_name = kwargs.pop('computer_name', None) or ''
self.ent_env = kwargs.pop('environment', None) or ''
self.valid_envs = kwargs.pop('valid_environments', []) or []
self.salt_debug_log = kwargs.pop('salt_debug_log', None) or ''
self.salt_content = kwargs.pop('salt_content', None) or ''
self.ou_path = kwargs.pop('ou_path', None) or ''
self.admin_groups = kwargs.pop('admin_groups', None) or ''
self.admin_users = kwargs.pop('admin_users', None) or ''
self.salt_states = kwargs.pop('salt_states', None) or ''
self.exclude_states = kwargs.pop('exclude_states', None) or ''
# Init attributes used by SaltBase, overridden by inheriting classes
self.salt_working_dir = None
self.salt_working_dir_prefix = None
self.salt_log_dir = None
self.salt_conf_path = None
self.salt_conf = None
self.salt_call = None
self.salt_base_env = None
self.salt_formula_root = None
self.salt_file_roots = None
self.salt_state_args = None
self.salt_debug_logfile = None
def before_install(self):
"""Validate configuration before starting install."""
# Convert environment to lowercase
env = str(self.ent_env).lower()
# Convert all valid environment to lowercase
valid_envs = [str(x).lower() for x in self.valid_envs]
if valid_envs and env not in valid_envs:
msg = (
'Selected environment ({0}) is not one of the valid'
' environment types: {1}'.format(env, valid_envs)
)
self.log.critical(msg)
raise InvalidValue(msg)
def install(self):
"""Install Salt."""
pass
@staticmethod
def _get_salt_dirs(srv):
salt_base_env = os.sep.join((srv, 'states'))
salt_formula_root = os.sep.join((srv, 'formulas'))
salt_pillar_root = os.sep.join((srv, 'pillar'))
return (
salt_base_env, salt_formula_root, salt_pillar_root
)
def _prepare_for_install(self):
self.working_dir = self.create_working_dir(
self.salt_working_dir,
self.salt_working_dir_prefix
)
if (
self.salt_debug_log and
self.salt_debug_log != 'None'
):
self.salt_debug_logfile = self.salt_debug_log
else:
self.salt_debug_logfile = os.sep.join(
(self.salt_log_dir, 'salt_call.debug.log')
)
self.salt_state_args = [
'--log-file', self.salt_debug_logfile,
'--log-file-level', 'debug',
'--log-level', 'error',
'--out', 'quiet',
'--return', 'local'
]
for salt_dir in [
self.salt_base_env,
self.salt_formula_root,
self.salt_conf_path
]:
try:
os.makedirs(salt_dir)
except OSError:
if not os.path.isdir(salt_dir):
msg = ('Unable to create directory - {0}'.format(salt_dir))
self.log.error(msg)
raise SystemError(msg)
with codecs.open(
os.path.join(self.salt_conf_path, 'minion'),
'w',
encoding="utf-8"
) as fh_:
yaml.safe_dump(self.salt_conf, fh_, default_flow_style=False)
def _get_formulas_conf(self):
# Append Salt formulas bundled with Watchmaker package.
formulas_path = os.sep.join((static.__path__[0], 'salt', 'formulas'))
for formula in os.listdir(formulas_path):
formula_path = os.path.join(self.salt_formula_root, '', formula)
if os.path.exists(formula_path):
shutil.rmtree(formula_path)
shutil.copytree(
os.sep.join((formulas_path, formula)),
formula_path)
# Obtain & extract any Salt formulas specified in user_formulas.
for formula_name, formula_url in self.user_formulas.items():
filename = os.path.basename(formula_url)
file_loc = os.sep.join((self.working_dir, filename))
# Download the formula
self.retrieve_file(formula_url, file_loc)
# Extract the formula
formula_working_dir = self.create_working_dir(
self.working_dir,
'{0}-'.format(filename)
)
self.extract_contents(
filepath=file_loc,
to_directory=formula_working_dir
)
# Get the first directory within the extracted directory
formula_inner_dir = os.path.join(
formula_working_dir,
next(os.walk(formula_working_dir))[1][0]
)
# Move the formula to the formula root
formula_loc = os.sep.join((self.salt_formula_root, formula_name))
self.log.debug(
'Placing user formula in salt file roots. formula_url=%s, '
'formula_loc=%s',
formula_url, formula_loc
)
if os.path.exists(formula_loc):
shutil.rmtree(formula_loc)
shutil.move(formula_inner_dir, formula_loc)
return [
os.path.join(self.salt_formula_root, x) for x in next(os.walk(
self.salt_formula_root))[1]
]
def _build_salt_formula(self, extract_dir):
if self.salt_content:
salt_content_filename = watchmaker.utils.basename_from_uri(
self.salt_content
)
salt_content_file = os.sep.join((
self.working_dir,
salt_content_filename
))
self.retrieve_file(self.salt_content, salt_content_file)
self.extract_contents(
filepath=salt_content_file,
to_directory=extract_dir
)
with codecs.open(
os.path.join(self.salt_conf_path, 'minion'),
'r+',
encoding="utf-8"
) as fh_:
salt_conf = yaml.safe_load(fh_)
salt_conf.update(self.salt_file_roots)
fh_.seek(0)
yaml.safe_dump(salt_conf, fh_, default_flow_style=False)
def _set_grain(self, grain, value):
cmd = [
'grains.setval',
grain,
str(json.dumps(value))
]
self.run_salt(cmd)
def _get_failed_states(self, state_ret):
failed_states = {}
try:
# parse state return
salt_id_delim = '_|-'
salt_id_pos = 1
for state, data in state_ret['return'].items():
if data['result'] is False:
state_id = state.split(salt_id_delim)[salt_id_pos]
failed_states[state_id] = data
except AttributeError:
# some error other than a failed state, msg is in the 'return' key
self.log.debug('Salt return (AttributeError): %s', state_ret)
failed_states = state_ret['return']
except KeyError:
# not sure what failed, just return everything
self.log.debug('Salt return (KeyError): %s', state_ret)
failed_states = state_ret
return failed_states
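# Illustrative example of the state-key parsing above (hypothetical salt return data,
# not part of the original module): a key such as
#     'file_|-my_state_id_|-/etc/foo.conf_|-managed'
# split on '_|-' yields ['file', 'my_state_id', '/etc/foo.conf', 'managed'], and
# index 1 (salt_id_pos) is the state id reported for the failure.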
def run_salt(self, command, **kwargs):
"""
Execute salt command.
Args:
command: (:obj:`str` or :obj:`list`)
Salt options and a salt module to be executed by salt-call.
Watchmaker will always begin the command with the options
``--local``, ``--retcode-passthrough``, and ``--no-color``, so
do not specify those options in the command.
"""
cmd = [
self.salt_call,
'--local',
'--retcode-passthrough',
'--no-color',
'--config-dir',
self.salt_conf_path
]
if isinstance(command, list):
cmd.extend(command)
else:
cmd.append(command)
return self.call_process(cmd, **kwargs)
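# Illustrative (commented) usage of run_salt(), assuming `worker` is an initialized
# platform-specific subclass instance; the salt modules named are just examples:
#     ret = worker.run_salt('test.ping')
#     ret = worker.run_salt(['state.apply', 'mystate'])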
def service_status(self, service):
"""
Get the service status using salt.
Args:
service: (obj:`str`)
Name of the service to query.
Returns:
:obj:`tuple`: ``('running', 'enabled')``
First element is the service running status. Second element is
the service enabled status. Each element is a :obj:`bool`
representing whether the service is running or enabled.
"""
cmd_status = [
'service.status', service,
'--out', 'newline_values_only'
]
cmd_enabled = [
'service.enabled', service,
'--out', 'newline_values_only'
]
return (
self.run_salt(cmd_status)['stdout'].strip().lower() == b'true',
self.run_salt(cmd_enabled)['stdout'].strip().lower() == b'true'
)
def service_stop(self, service):
"""
Stop a service using salt.
Args:
service: (:obj:`str`)
Name of the service to stop.
Returns:
:obj:`bool`:
``True`` if the service was stopped. ``False`` if the service
could not be stopped.
"""
cmd = [
'service.stop', service,
'--out', 'newline_values_only'
]
return self.run_salt(cmd)['stdout'].strip().lower() == b'true'
def service_start(self, service):
"""
Start a service using salt.
Args:
service: (:obj:`str`)
Name of the service to start.
Returns:
:obj:`bool`:
``True`` if the service was started. ``False`` if the service
could not be started.
"""
cmd = [
'service.start', service,
'--out', 'newline_values_only'
]
return self.run_salt(cmd)['stdout'].strip().lower() == b'true'
def service_disable(self, service):
"""
Disable a service using salt.
Args:
service: (:obj:`str`)
Name of the service to disable.
Returns:
:obj:`bool`:
``True`` if the service was disabled. ``False`` if the service
could not be disabled.
"""
cmd = [
'service.disable', service,
'--out', 'newline_values_only'
]
return self.run_salt(cmd)['stdout'].strip().lower() == b'true'
def service_enable(self, service):
"""
Enable a service using salt.
Args:
service: (:obj:`str`)
Name of the service to enable.
Returns:
:obj:`bool`:
``True`` if the service was enabled. ``False`` if the service
could not be enabled.
"""
cmd = [
'service.enable', service,
'--out', 'newline_values_only'
]
return self.run_salt(cmd)['stdout'].strip().lower() == b'true'
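# Illustrative (commented) pattern combining the service helpers above to restart a
# service, assuming `worker` is an initialized subclass instance; the service name is
# hypothetical:
#     running, enabled = worker.service_status('salt-minion')
#     if running:
#         worker.service_stop('salt-minion')
#     worker.service_start('salt-minion')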
def process_grains(self):
"""Set salt grains."""
ent_env = {'enterprise_environment': str(self.ent_env)}
self._set_grain('systemprep', ent_env)
self._set_grain('watchmaker', ent_env)
grain = {}
if self.ou_path and self.ou_path != 'None':
grain['oupath'] = self.ou_path
if self.admin_groups and self.admin_groups != 'None':
grain['admin_groups'] = self.admin_groups.split(':')
if self.admin_users and self.admin_users != 'None':
grain['admin_users'] = self.admin_users.split(':')
if grain:
self._set_grain('join-domain', grain)
if self.computer_name and self.computer_name != 'None':
name = {'computername': str(self.computer_name)}
self._set_grain('name-computer', name)
self.log.info('Syncing custom salt modules...')
self.run_salt('saltutil.sync_all')
def process_states(self, states, exclude):
"""
| |
# Source repository: hunterluepke/Learn-Python-for-Stats-and-Econ
import copy
import random
import numpy as np
from scipy.stats.mstats import gmean
from Patch import *
#Agent.py
class Agent():
# **inheritance are the inherited
def __init__(self, model, row, col, ID, parent = None):
def selectParameters(mutate = False, reservation_demand = True,
reproduction_criteria= True,
**mutate_kwargs):
# at first, you are the agent does not know any one else
# give all agents these variables to avoid error when deleted from
# inheritance dict
def setReservationDemand():#price_change = True, quantity_change = True):
### don't mutate reservation quantity and price
### these are set in live time
init_vals = self.model.max_init_demand_vals
min_res_q = init_vals["quantity"]["min"]
max_res_q = init_vals["quantity"]["max"]
min_res_p = init_vals["price"]["min"]
max_res_p = init_vals["price"]["max"]
self.reservation_demand = {good:{
"quantity": min_res_q + random.random()
* (max_res_q - min_res_q)}
for good in self.model.goods}
self.reservation_demand["sugar"]["price"] = np.e ** (
np.log(min_res_p) + random.random() * (np.log(max_res_p) - np.log(min_res_p)))
self.reservation_demand["water"]["price"] = 1 / self.reservation_demand["sugar"]["price"]
### set rates of adjustment
# change price (WTP//WTA) by at most 10% per period
# if price_change:
## price_change defined in kwargs if mutate
min_price_change = 1.01 if not mutate else\
self.parent.price_change / (1 + self.mutate_rate)
max_price_change = 1.1 if not mutate else\
self.parent.price_change * (1 + self.mutate_rate)
self.price_change = min_price_change + random.random() * (max_price_change - min_price_change)
# change reservation demand (quantity) by at most 1% per period
# if quantity_change:
min_quantity_change = 1.001 if not mutate else\
parent.quantity_change / (1 + self.mutate_rate)
max_quantity_change = 1.01 if not mutate else\
self.parent.quantity_change * (1 + self.mutate_rate)
self.quantity_change = min_quantity_change + random.random() * (max_quantity_change - min_quantity_change)
def setReproductionLevel():
min_reproduction_criteria, max_reproduction_criteria = {}, {}
for good in self.model.goods:
min_reproduction_criteria[good] = self.model.goods_params[good]["max"] * 2 if not mutate else\
self.parent.reproduction_criteria[good] / (1 + self.mutate_rate)
max_reproduction_criteria[good] = 2 * min_reproduction_criteria[good] if not mutate else\
self.parent.reproduction_criteria[good] * (1 + self.mutate_rate)
self.reproduction_criteria = {
good :min_reproduction_criteria[good] +random.random() * (
max_reproduction_criteria[good] - min_reproduction_criteria[good])
for good in self.model.goods}
def selectBreed():
if self.parent:
# place herder first in list
shuffle_breeds = copy.copy(self.model.primary_breeds)
random.shuffle(shuffle_breeds)
for breed_ in ["herder"] + shuffle_breeds:
if random.random() < self.mutate_rate:
# if mutation occurs, switch breed boolean
select_breed = False if getattr(self, breed_) else True
setattr(self, breed_, select_breed)
if select_breed == True and breed_ in shuffle_breeds:
shuffle_breeds.remove(breed_)
for not_my_breed in shuffle_breeds:
setattr(self, not_my_breed, False)
break
# set breed basic if all breeds are turned to False
if True not in (getattr(self, brd)
for brd in self.model.primary_breeds):
self.setBreedBasic(herder = self.herder)
# select breed randomly if agent has no parent
else:
# for breed_, prob in self.model.breed_probabilities.items():
# if random.random() <= prob :
# setattr(self, breed_, True)
# else:
# setattr(self, breed_, False)
# since switcher and basic are mutually exclusive,
# All initial agents are basic, other breeds only
# appear through mutation
self.setBreedBasic(herder = False)
self.selectBreedParameters(mutate, self.parent,
herding = False)
def setMutateRate():
if self.model.mutate:
min_rate = 0 if not mutate else\
self.parent.mutate_rate / (1 + self.parent.mutate_rate)
max_rate = self.model.max_mutate_rate if not mutate else\
self.parent.mutate_rate * (1 + self.parent.mutate_rate)
# keep a hard upper limit on the mutation rate
self.mutate_rate = min_rate + random.random() * (max_rate - min_rate)
if self.mutate_rate >= self.model.max_mutate_rate:
self.mutate_rate = self.model.max_mutate_rate
###################################################################
# define mutate rate first so that it effects mutation of all
# other attributes
setMutateRate()
# set value of commodity holdings, if agent has parents,
# these values will be replaced by the max values
setStocks()
if reservation_demand:
setReservationDemand()
if reproduction_criteria:
setReproductionLevel()
setTargets()
self.vision = random.randint(1, self.model.max_vision)
selectBreed()
#######################################################################
def setStocks():
if self.parent == None:
for good, vals in self.model.goods_params.items():
val = random.randint(vals["min"], vals["max"])
setattr(self, good, val)
else:
for good in self.model.goods:
setattr(self, good, self.model.goods_params[good]["max"])
setattr(self.parent, good,
getattr(self.parent,good) - self.model.goods_params[good]["max"])
# wealth is the number of periods worth of food owned by the agent
# assumes that one good is instantly convertible to another
self.wealth = sum(getattr(self, good) / self.model.consumption_rate[good]
for good in self.model.goods)
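# Worked example of the wealth measure above (hypothetical numbers): with sugar = 10,
# water = 20 and consumption_rate = {'sugar': 2, 'water': 4}, wealth = 10/2 + 20/4 = 10,
# i.e. ten periods of consumption can be covered.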
def setTargets():
# set exchange target randomly at first
goods = list(self.model.goods)
random.shuffle(goods)
self.target = goods.pop()
self.not_target = goods[0]
def mutate():
# select which parameters will be mutated
mutate_dict = {key: val if random.random() < self.mutate_rate else False for key, val in inheritance.items()}
# mutate select parameters
selectParameters(mutate = True, **mutate_dict)
if parent != None: inheritance = parent.defineInheritance()
self.parent = parent
self.model = model
if self.parent:
####### parameters already inherited if agent has parent ########
for attr, val in inheritance.items():
setattr(self, attr, val)
setStocks()
# randomly set target, will be redefined according to breed
# parameters in the following period
setTargets()
# inherited values are mutated vals in dictionary if mutation is on
if self.model.mutate:
mutate()
else:
self.selectBreedParameters(mutate = False,
parent = self.parent,
herding = False)
else:
selectParameters()
# allocate each .good to agent within quantity in range specified by
# randomly choose initial target good
self.col = col
self.row = row
self.dx = 0
self.dy = 0
self.id = ID
self.reproduced = False
###############################################################################
def setBreedBasic(self, herder):
self.basic = True
self.switcher = False
self.arbitrageur = False
self.herder = herder
def selectBreedParameters(self, mutate, parent, herding = False,
partner = None):
def generateBreedParameters():
if breed == "basic":
self.target = "sugar"
self.not_target = "water"
# if breed == "switcher":
# switch_min = 5 if not mutate or"switch_rate" not in inheritance else\
# int(inheritance["switch_rate"] / (1 + self.mutate_rate))
# switch_max = 50 if not mutate or "switch_rate" not in inheritance else\
# int(inheritance["switch_rate"] * (1 + self.mutate_rate))
# self.switch_rate = random.randint(switch_min, switch_max)
# self.periods_to_switch = self.switch_rate
# start switcher with random target
if breed == "arbitrageur":
# track past exchange prices
# if average prices is below price agent believes is correct,
min_denominator = 10 if not mutate or "present_price_weight" not in inheritance else\
int(inheritance["present_price_weight"] / (1 + self.mutate_rate))
max_denominator = 100 if not mutate or "present_price_weight" not in inheritance else\
int(inheritance["present_price_weight"] * (1 + self.mutate_rate))
self.present_price_weight = random.randint(
min_denominator, max_denominator)
self.expected_price = self.reservation_demand["sugar"]["price"]
if breed == "herder":
self.wealthiest = parent if inheritance else self
self.top_wealth = parent.wealth if inheritance else self.wealth
# print("set attributes new:", breed)
def copyPartnerParameters():
# if copied breed and missing parameter value, draw from partner
if getattr(self, breed):
# if breed == "switcher":
# if not hasattr(self, 'switch_rate'):
# self.switch_rate = partner.switch_rate
# self.periods_to_switch = self.switch_rate
# self.basic = False
if breed == "herder":
if not hasattr(self, "top_wealth"):
self.top_wealth = partner.wealth
self.wealthiest = partner
if breed == "arbitrageur":
if not hasattr(self, "expected_price"):
self.expected_price = partner.expected_price
if not hasattr(self, "present_price_weight"):
self.present_price_weight = partner.present_price_weight
# if not
# self.target = partner.target
# self.not_target = partner.not_target
for breed in self.model.breeds:
if getattr(self, breed):
inheritance = parent.defineInheritance() if parent else ""
# those who change breed due to herding need only need to fill missing
# parameter values
if herding:
copyPartnerParameters()
else:
generateBreedParameters()
def defineInheritance(self):
# use attributes to define inheritance
copy_attributes = copy.copy(vars(self))
# redefine "good" or else values are drawn from parent for children
for key in self.model.drop_attr:
try:
del copy_attributes[key]
except:
continue
return copy_attributes
def updateParams(self):
def setTargetGood():
self.wealth = sum((getattr(self,good) / self.model.consumption_rate[good] for good in self.model.goods))
if self.herder:
if self.wealth > self.top_wealth:
self.wealthiest = self
if self.wealthiest != self:
self.top_wealth *= .999
# let exchange target be determined by reservation demand
# if shortage of both goods, choose randomly
good1 = random.choice(self.model.goods)
good2 = "water" if good1 == "sugar" else "sugar"
# if self.basic and not self.arbitrageur:
if self.switcher:
if getattr(self,good1) < self.reservation_demand[good1]["quantity"]\
and getattr(self,good2) < self.reservation_demand[good2]["quantity"]:
self.target, self.not_target = good1, good2
# in case the level of wealth falls, as it does when the population
# grows, allow top_wealth to decay
elif getattr(self,good1) < self.reservation_demand[good1]["quantity"]\
and getattr(self,good2) > self.reservation_demand[good2]["quantity"]:
self.target, self.not_target = good1, good2
elif getattr(self,good2) < self.reservation_demand[good2]["quantity"]\
and getattr(self,good1) > self.reservation_demand[good1]["quantity"]:
self.target, self.not_target | |
else:
rec_bet = 'Avoid'
# Test Recommended bet
if total_diff >= 0:
percentage_chance = total_diff
if 60 > percentage_chance >= 0:
test_rec_bet = '1x'
else:
test_rec_bet = '1'
else:
percentage_chance = abs(total_diff)
if 60 > percentage_chance > 0:
test_rec_bet = '2x'
else:
test_rec_bet = '2'
main_cur.execute("UPDATE match_analysis SET home_points_total = ? WHERE fid = ?", (home_total, fid))
main_cur.execute("UPDATE match_analysis SET away_points_total = ? WHERE fid = ?", (away_total, fid))
main_cur.execute("UPDATE match_analysis SET rec_bet = ? WHERE fid = ?", (rec_bet, fid))
main_cur.execute("UPDATE match_analysis SET percentage_chance = ? WHERE fid = ?", (percentage_chance, fid))
main_cur.execute("UPDATE match_analysis SET TEST_rec_bet = ? WHERE fid = ?", (test_rec_bet, fid))
main_conn.commit()
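# Worked example of the threshold logic above (hypothetical numbers):
#     total_diff = -72  -> percentage_chance = 72 >= 60, so test_rec_bet = '2'
#     total_diff = 25   -> 0 <= 25 < 60,               so test_rec_bet = '1x'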
# ------------------------------------Bet Tickets-------------------------------------------
def percentage_rec_calc():
"""Calculate Percentage recommendation for each match"""
main_cur.execute("SELECT match_analysis.fid, percentage_chance, league_winperc FROM match_analysis "
"INNER JOIN leagues ON match_analysis.country_league_name = leagues.country_league_name "
"WHERE bet_result isnull")
data = main_cur.fetchall()
for fid, percentage_chance, league_winperc in data:
percentage_rec = (percentage_chance + league_winperc) / 2
main_cur.execute("UPDATE match_analysis SET percentage_rec = ? WHERE fid = ?", (percentage_rec, fid))
main_conn.commit()
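# Worked example of the blend above (hypothetical numbers): percentage_chance = 70 and
# league_winperc = 60 give percentage_rec = (70 + 60) / 2 = 65.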
def view_all_recommended():
"""View all recommended bets"""
main_cur.execute("SELECT match_datetime, match_analysis.country_league_name, home_team_name, "
"away_team_name, rec_bet, percentage_rec "
"FROM match_analysis where bet_result isnull AND rec_bet <> 'Avoid' "
"AND match_datetime > datetime('now') ORDER BY percentage_rec DESC")
data = main_cur.fetchall()
for match_datetime, country_league_name, home_tn, away_tn, rec_bet, percentage_rec in data:
print(match_datetime + " | " + country_league_name + " | " + home_tn + " vs " + away_tn + " | " + rec_bet
+ " | " + str(round(percentage_rec, 2)))
def odds_list():
"""Returns the recommended odds for each match"""
# draw matches from database
main_cur.execute("SELECT fid, rec_bet, home_win, home_draw, away_draw, away_win "
"FROM match_analysis where bet_result isnull AND rec_bet <> 'Avoid' "
"AND match_datetime >= datetime('now', 'localtime') ORDER BY percentage_rec DESC")
data = main_cur.fetchall()
odds_data_list = []
# determine odds and place in dictionary
for fid, rec_bet, home_win, home_draw, away_draw, away_win in data:
bet_odds = ' '
if rec_bet == '1':
bet_odds = float(home_win)
elif rec_bet == '1x':
bet_odds = float(home_draw)
elif rec_bet == '2x':
bet_odds = float(away_draw)
elif rec_bet == '2':
bet_odds = float(away_win)
if bet_odds > 1.14:
odds_data = (fid, bet_odds)
odds_data_list.append(odds_data)
else:
continue
return odds_data_list
def ticket_generation(acca_limit, odds_listing):
"""Create match tickets automatically"""
acca = 1 # Default multiplier
ticket_number = 1
print("--------------------------- Ticket", ticket_number, "----------------------------------")
for odds in odds_listing:
acca = acca * odds[1]
if acca <= acca_limit:
main_cur.execute(
"SELECT match_datetime, match_analysis.country_league_name, home_team_name, "
"away_team_name, rec_bet, percentage_rec FROM match_analysis where fid = ? ", (odds[0],))
data = main_cur.fetchall()
for match_datetime, country_league_name, home_tn, away_tn, rec_bet, percentage_rec in data:
print(match_datetime + " | " + country_league_name + " | " + home_tn + " vs " + away_tn + " | " +
rec_bet + " | " + str(odds[1]) + " | " + str(round(percentage_rec, 2)))
else:
print('Tickets Odds:', round((acca / odds[1]), 2))
acca = 1
ticket_number += 1
print(' ')
print("--------------------------- Ticket", ticket_number, "----------------------------------")
acca = acca * odds[1]
main_cur.execute(
"SELECT match_datetime, match_analysis.country_league_name, home_team_name, "
"away_team_name, rec_bet, percentage_rec "
"FROM match_analysis where fid = ? ", (odds[0],))
data = main_cur.fetchall()
for match_datetime, country_league_name, home_tn, away_tn, rec_bet, percentage_rec in data:
print(match_datetime + " | " + country_league_name + " | " + home_tn + " vs " + away_tn + " | " +
rec_bet + " | " + str(odds[1]) + " | " + str(round(percentage_rec, 2)))
print('Tickets Odds:', round(acca, 2))
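# Illustrative (commented) usage chaining the helpers above; acca_limit caps the
# accumulated odds per ticket:
#     ticket_generation(acca_limit=10.0, odds_listing=odds_list())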
# ------------------------------------Results-----------------------------------------------
def match_results():
"""Gather match results"""
print('Collecting match results...')
main_cur.execute('SELECT fid, match_url FROM match_analysis WHERE match_result ISNULL')
data = main_cur.fetchall()
match_count = 0
for fid, match_url in data:
driver.get(match_url)
match_status = driver.find_element_by_xpath('//*[@id="flashscore"]/div[1]/div[2]/div[2]'
).get_attribute('textContent')
if match_status == 'Finished':
home_score = driver.find_element_by_xpath('//*[@id="event_detail_current_result"]/span[1]'
).get_attribute('textContent')
away_score = driver.find_element_by_xpath('//*[@id="event_detail_current_result"]/span[2]/span[2]'
).get_attribute('textContent')
match_result = int(home_score) - int(away_score)
if match_result > 0:
main_cur.execute("UPDATE match_analysis SET match_result = 'Home Win' WHERE fid = ?", (fid,))
main_conn.commit()
match_count += 1
elif match_result < 0:
main_cur.execute("UPDATE match_analysis SET match_result = 'Away Win' WHERE fid = ?", (fid,))
main_conn.commit()
match_count += 1
elif match_result == 0:
main_cur.execute("UPDATE match_analysis SET match_result = 'Draw' WHERE fid = ?", (fid,))
main_conn.commit()
match_count += 1
else:
print("There was an error retrieving a match result", match_url)
continue
print("Number of match results retrieved:", match_count)
elif match_status == 'Postponed':
main_cur.execute('DELETE FROM match_analysis WHERE fid = ?', (fid,))
main_conn.commit()
def bet_result():
"""Calculate the bet result"""
main_cur.execute("SELECT fid, rec_bet, match_result FROM match_analysis WHERE bet_result ISNULL AND match_result IS"
" NOT NULL")
data = main_cur.fetchall()
for fid, rec_bet, match_result in data:
if match_result == 'Home Win' and (rec_bet == '1' or rec_bet == '1x'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Away Win' and (rec_bet == '2' or rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Draw' and (rec_bet == '1x' or rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Home Win' and (rec_bet == '2' or rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Away Win' and (rec_bet == '1' or rec_bet == '1x'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Draw' and (rec_bet == '1' or rec_bet == '2'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Home Win' and rec_bet == 'Avoid':
main_cur.execute("UPDATE match_analysis SET bet_result = 'Avoided' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Away Win' and rec_bet == 'Avoid':
main_cur.execute("UPDATE match_analysis SET bet_result = 'Avoided' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Draw' and rec_bet == 'Avoid':
main_cur.execute("UPDATE match_analysis SET bet_result = 'Avoided' WHERE fid = ?", (fid,))
main_conn.commit()
else:
print('There was an error processing the bet result', fid, rec_bet, match_result)
continue
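# Summary of the mapping implemented by the elif chain above (illustration only):
# '1'/'2' are straight home/away win bets and '1x'/'2x' are double-chance bets.
#   Home Win : '1' -> Won,  '1x' -> Won, '2' -> Lost, '2x' -> Lost, 'Avoid' -> Avoided
#   Away Win : '2' -> Won,  '2x' -> Won, '1' -> Lost, '1x' -> Lost, 'Avoid' -> Avoided
#   Draw     : '1x' -> Won, '2x' -> Won, '1' -> Lost, '2' -> Lost,  'Avoid' -> Avoided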
def test_bet_result():
"""Calculate the result of the test bet"""
main_cur.execute("SELECT fid, TEST_rec_bet, match_result FROM match_analysis WHERE TEST_bet_result "
"ISNULL AND match_result IS NOT NULL")
data = main_cur.fetchall()
for fid, Test_rec_bet, match_result in data:
if match_result == 'Home Win' and (Test_rec_bet == '1' or Test_rec_bet == '1x'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Away Win' and (Test_rec_bet == '2' or Test_rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Draw' and (Test_rec_bet == '1x' or Test_rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Home Win' and (Test_rec_bet == '2' or Test_rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Away Win' and (Test_rec_bet == '1' or Test_rec_bet == '1x'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Draw' and (Test_rec_bet == '1' or Test_rec_bet == '2'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
else:
            print('There was an error processing the test bet result', fid, Test_rec_bet, match_result)
continue
def view_match_results_5days():
"""View match results for the last 5 days"""
main_cur.execute("select match_datetime, match_analysis.country_league_name, home_team_name, "
"away_team_name, rec_bet, percentage_rec, match_result, bet_result from match_analysis "
"where match_datetime between datetime('now', '-5 days') and datetime('now') "
"and bet_result is not null ORDER BY match_datetime DESC")
data = main_cur.fetchall()
for match_datetime, country_league_name, home_tn, away_tn, rec_bet, percentage_rec, mr, br in data:
print(match_datetime + " | " + country_league_name + " | " + home_tn + " vs " + away_tn + " | " + rec_bet
+ " | " + str(round(percentage_rec, 2)) + " | " + mr + " | " + br)
# ------------------------------------Match Archive-----------------------------------------
def match_archive_insert():
"""Insert completed matches into the match archive"""
main_cur.execute('INSERT OR IGNORE INTO match_archive(match_datetime, country_name, league_name, '
'country_league_name, home_team_name, home_team_id, away_team_name, away_team_ID, home_win, '
'home_draw, away_draw, away_win, home_position, away_position, total_clubs, home_matches_played, '
'away_matches_played, home_matches_won, away_matches_won, home_matches_draw, away_matches_draw, '
'home_matches_loss, away_matches_loss, home_goal_diff, away_goal_diff, home_team_form, '
'away_team_form, home_team_name_rank, away_team_name_rank, home_position_rank, away_position_rank,'
' home_form_rank, away_form_rank, home_gd_rank, away_gd_rank, home_points_total, '
'away_points_total, rec_bet, percentage_chance, percentage_rec, match_result, bet_result, '
'TEST_rec_bet, TEST_bet_result, match_url, league_url, league_winperc, single_chnc_margin, '
types=frozenset(['length']),
const=empty_list),
'title': SVGAttribute('title', anim=False,
types=frozenset(['string']),
const=empty_list),
'to': SVGAttribute('to', anim=False,
types=frozenset(['string']),
const=empty_list),
'transform': SVGAttribute('transform', anim=True,
types=frozenset(['transform-list']),
const=empty_list),
'type': SVGMultiAttribute({
'* feColorMatrix': SVGAttribute(
'type', anim=True,
types=empty_list,
const=frozenset(['matrix', 'saturate', 'hueRotate', 'luminanceToAlpha'])),
'feTurbulence': SVGAttribute(
'type', anim=True,
types=empty_list,
const=frozenset(['fractalNoise', 'turbulence'])),
'feFuncR feFuncG feFuncB feFuncA': SVGAttribute(
'type', anim=True,
types=empty_list,
const=frozenset(['identity', 'table', 'discrete', 'linear', 'gamma'])),
'script style': SVGAttribute(
'type', anim=False,
types=frozenset(['content-type']),
const=empty_list),
'animateTransform': SVGAttribute(
'type', anim=False,
types=empty_list,
const=frozenset(['translate', 'scale', 'rotate', 'skewX', 'skewY'])),
}),
'u1': SVGAttribute('u1', anim=False,
types=frozenset(['string']),
const=empty_list),
'u2': SVGAttribute('u2', anim=False,
types=frozenset(['string']),
const=empty_list),
'underline-position': SVGAttribute('underline-position', anim=False,
types=frozenset(['number']),
const=empty_list),
'underline-thickness': SVGAttribute('underline-thickness', anim=False,
types=frozenset(['number']),
const=empty_list),
'unicode': SVGAttribute('unicode', anim=False,
types=frozenset(['string']),
const=empty_list),
'unicode-bidi': SVGAttribute('unicode-bidi', anim=False,
types=empty_list,
const=frozenset(['embed', 'inherit', 'bidi-override', 'normal'])),
'unicode-range': SVGAttribute('unicode-range', anim=False,
types=frozenset(['string']),
const=empty_list),
'units-per-em': SVGAttribute('units-per-em', anim=False,
types=frozenset(['number']),
const=empty_list),
'v-alphabetic': SVGAttribute('v-alphabetic', anim=False,
types=frozenset(['number']),
const=empty_list),
'v-hanging': SVGAttribute('v-hanging', anim=False,
types=frozenset(['number']),
const=empty_list),
'v-ideographic': SVGAttribute('v-ideographic', anim=False,
types=frozenset(['number']),
const=empty_list),
'v-mathematical': SVGAttribute('v-mathematical', anim=False,
types=frozenset(['number']),
const=empty_list),
'values': SVGMultiAttribute({
'*': SVGAttribute(
'values', anim=False,
types=frozenset(['semicolon-list']),
const=empty_list),
'feColorMatrix': SVGAttribute(
'values', anim=True,
types=frozenset(['list-of-number']),
const=empty_list),
}),
'version': SVGAttribute('version', anim=False,
types=empty_list,
const=frozenset(['1.1', '1.2'])),
'vert-adv-y': SVGAttribute('vert-adv-y', anim=False,
types=frozenset(['number']),
const=empty_list),
'vert-origin-x': SVGAttribute('vert-origin-x', anim=False,
types=frozenset(['number']),
const=empty_list),
'vert-origin-y': SVGAttribute('vert-origin-y', anim=False,
types=frozenset(['number']),
const=empty_list),
'viewBox': SVGAttribute('viewBox', anim=True,
types=frozenset(['four-numbers']),
const=empty_list),
'viewTarget': SVGAttribute('viewTarget', anim=False,
types=frozenset(['list-of-XML-Name']),
const=empty_list),
'visibility': SVGAttribute('visibility', anim=True,
types=empty_list,
const=frozenset(['visible', 'hidden', 'collapse', 'inherit'])),
'width': SVGAttribute('width', anim=True,
types=frozenset(['length']),
const=empty_list),
'widths': SVGAttribute('widths', anim=False,
types=frozenset(['string']),
const=empty_list),
'word-spacing': SVGAttribute('word-spacing', anim=True,
types=frozenset(['length']),
const=frozenset(['inherit', 'normal'])),
'writing-mode': SVGAttribute('writing-mode', anim=False,
types=empty_list,
const=frozenset(['rl-tb', 'lr', 'rl', 'tb-rl', 'lr-tb', 'inherit', 'tb'])),
'x': SVGMultiAttribute({
'*': SVGAttribute(
'x', anim=True,
types=frozenset(['coordinate']),
const=empty_list),
'altGlyph text tref tspan': SVGAttribute(
'x', anim=True,
types=frozenset(['list-of-coordinate']),
const=empty_list),
'fePointLight feSpotLight glyphRef': SVGAttribute(
'x', anim=True,
types=frozenset(['number']),
const=empty_list),
}),
'x-height': SVGAttribute('x-height', anim=False,
types=frozenset(['number']),
const=empty_list),
'x1': SVGAttribute('x1', anim=True,
types=frozenset(['list-of-coordinate']),
const=empty_list),
'x2': SVGAttribute('x2', anim=True,
types=frozenset(['list-of-coordinate']),
const=empty_list),
'xChannelSelector': SVGAttribute('xChannelSelector', anim=True,
types=empty_list,
const=frozenset(['A', 'B', 'R', 'G'])),
'xlink:actuate': SVGMultiAttribute({
'*': SVGAttribute(
'xlink:actuate', anim=False,
types=empty_list,
const=frozenset(['onLoad'])),
'a': SVGAttribute(
'xlink:actuate', anim=False,
types=empty_list,
const=frozenset(['onRequest'])),
}),
'xlink:arcrole': SVGAttribute('xlink:arcrole', anim=False,
types=frozenset(['IRI']),
const=empty_list),
'xlink:href': SVGAttribute('xlink:href', anim=False,
types=frozenset(['IRI']),
const=empty_list),
'xlink:role': SVGAttribute('xlink:role', anim=False,
types=frozenset(['IRI']),
const=empty_list),
'xlink:show': SVGMultiAttribute({
'*': SVGAttribute(
'xlink:show', anim=False,
types=empty_list,
const=frozenset(['other', 'new', 'replace', 'none', 'embed'])),
'a': SVGAttribute(
'xlink:show', anim=False,
types=empty_list,
const=frozenset(['new', 'replace'])),
}),
'xlink:title': SVGAttribute('xlink:title', anim=False,
types=frozenset(['string']),
const=empty_list),
'xlink:type': SVGAttribute('xlink:type', anim=False,
types=empty_list,
const=frozenset(['simple'])),
'xmlns': SVGAttribute('xmlns', anim=False,
types=frozenset(['IRI']),
const=empty_list),
'xmlns:xlink': SVGAttribute('xmlns:xlink', anim=False,
types=frozenset(['IRI']),
const=empty_list),
'xmlns:ev': SVGAttribute('xmlns:ev', anim=False,
types=frozenset(['IRI']),
const=empty_list),
'xml:base': SVGAttribute('xml:base', anim=False,
types=frozenset(['IRI']),
const=empty_list),
'xml:lang': SVGAttribute('xml:lang', anim=False,
types=frozenset(['name']),
const=empty_list),
'xml:space': SVGAttribute('xml:space', anim=False,
types=empty_list,
const=frozenset(['default', 'preserve'])),
'y': SVGMultiAttribute({
'*': SVGAttribute(
'y', anim=True,
types=frozenset(['coordinate']),
const=empty_list),
'altGlyph text tref tspan': SVGAttribute(
'y', anim=True,
types=frozenset(['list-of-coordinate']),
const=empty_list),
'fePointLight feSpotLight glyphRef': SVGAttribute(
'y', anim=True,
types=frozenset(['number']),
const=empty_list),
}),
'y1': SVGAttribute('y1', anim=True,
types=frozenset(['list-of-coordinate']),
const=empty_list),
'y2': SVGAttribute('y2', anim=True,
types=frozenset(['list-of-coordinate']),
const=empty_list),
'yChannelSelector': SVGAttribute('yChannelSelector', anim=True,
types=empty_list,
const=frozenset(['A', 'B', 'R', 'G'])),
'z': SVGAttribute('z', anim=True,
types=frozenset(['number']),
const=empty_list),
'zoomAndPan': SVGAttribute('zoomAndPan', anim=False,
types=empty_list,
const=frozenset(['disable', 'magnify'])),
}
presentation_attributes = frozenset([ "alignment-baseline", "baseline-shift",
"clip", "clip-path", "clip-rule", "color", "color-interpolation",
"color-interpolation-filters", "color-profile", "color-rendering", "cursor",
"direction", "display", "dominant-baseline", "enable-background",
"fill", "fill-opacity", "fill-rule", "filter", "flood-color",
"flood-opacity", "font-family", "font-size", "font-size-adjust",
"font-stretch", "font-style", "font-variant", "font-weight",
"glyph-orientation-horizontal", "glyph-orientation-vertical",
"image-rendering", "kerning", "letter-spacing", "lighting-color", "marker",
"marker-end", "marker-mid", "marker-start", "mask", "opacity",
"overflow", "pointer-events", "shape-rendering", "stop-color",
"stop-opacity", "stroke", "stroke-dasharray", "stroke-dashoffset",
"stroke-linecap", "stroke-linejoin", "stroke-miterlimit",
"stroke-opacity", "stroke-width", "text-anchor", "text-decoration",
"text-rendering", "unicode-bidi", "visibility", "word-spacing",
"writing-mode"])
elements = {
'a': SVGElement('a',
attributes=frozenset(['xlink:title', 'xml:base', 'onmouseup', 'onmouseout', 'requiredExtensions', 'onfocusout', 'xml:space', 'xlink:href', 'systemLanguage', 'onmouseover', 'xlink:type', 'externalResourcesRequired', 'id', 'xlink:actuate', 'onload', 'style', 'xlink:show', 'target', 'onactivate', 'onmousedown', 'transform', 'class', 'xlink:role', 'requiredFeatures', 'xml:lang', 'onmousemove', 'xmlns:xlink', 'onclick', 'xlink:arcrole', 'onfocusin']),
properties=presentation_attributes,
children=frozenset(['set', 'text', 'image', 'font-face', 'polyline', 'marker', 'animate', 'font', 'color-profile', 'ellipse', 'cursor', 'style', 'polygon', 'title', 'pattern', 'circle', 'radialGradient', 'metadata', 'defs', 'symbol', 'use', 'animateMotion', 'animateColor', 'path', 'line', 'rect', 'desc', 'a', 'g', 'svg', 'script', 'mask', 'altGlyphDef', 'filter', 'switch', 'animateTransform', 'linearGradient', 'clipPath', 'foreignObject', 'view'])),
'altGlyph': SVGElement('altGlyph',
attributes=frozenset(['requiredExtensions', 'onfocusout', 'xml:space', 'xlink:href', 'id', 'onload', 'style', 'onmousedown', 'onmousemove', 'onclick', 'xlink:arcrole', 'onfocusin', 'xml:base', 'onmouseup', 'onmouseout', 'format', 'xlink:title', 'systemLanguage', 'onmouseover', 'dx', 'dy', 'xlink:type', 'externalResourcesRequired', 'class', 'xlink:actuate', 'xlink:show', 'onactivate', 'glyphRef', 'xlink:role', 'requiredFeatures', 'xml:lang', 'y', 'x', 'rotate']),
properties=presentation_attributes,
children=empty_list),
'altGlyphDef': SVGElement('altGlyphDef',
attributes=frozenset(['xml:space', 'xml:lang', 'xml:base', 'id']),
properties=empty_list,
children=frozenset(['*'])),
'altGlyphItem': SVGElement('altGlyphItem',
attributes=frozenset(['xml:space', 'xml:lang', 'xml:base', 'id']),
properties=empty_list,
children=frozenset(['*'])),
'animate': SVGElement('animate',
attributes=frozenset(['requiredExtensions', 'from', 'repeatCount', 'xml:space', 'xlink:href', 'xlink:type', 'attributeType', 'repeatDur', 'id', 'fill', 'onload', 'additive', 'calcMode', 'min', 'keySplines', 'to', 'dur', 'xlink:arcrole', 'onend', 'begin', 'xml:base', 'max', 'xlink:title', 'attributeName', 'onbegin', 'systemLanguage', 'accumulate', 'end', 'externalResourcesRequired', 'by', 'restart', 'xlink:actuate', 'xlink:show', 'xlink:role', 'requiredFeatures', 'xml:lang', 'values', 'keyTimes', 'onrepeat']),
properties=empty_list,
children=frozenset(['desc', 'metadata', 'title'])),
'animateColor': SVGElement('animateColor',
attributes=frozenset(['requiredExtensions', 'from', 'repeatCount', 'xml:space', 'xlink:href', 'xlink:type', 'attributeType', 'repeatDur', 'id', 'fill', 'onload', 'additive', 'calcMode', 'min', 'keySplines', 'to', 'dur', 'xlink:arcrole', 'onend', 'begin', 'xml:base', 'max', 'xlink:title', 'attributeName', 'onbegin', 'systemLanguage', 'accumulate', 'end', 'externalResourcesRequired', 'by', 'restart', 'xlink:actuate', 'xlink:show', 'xlink:role', 'requiredFeatures', 'xml:lang', 'values', 'keyTimes', 'onrepeat']),
properties=empty_list,
children=frozenset(['desc', 'metadata', 'title'])),
'animateMotion': SVGElement('animateMotion',
attributes=frozenset(['origin', 'requiredExtensions', 'from', 'repeatCount', 'xml:space', 'xlink:href', 'xlink:type', 'repeatDur', 'id', 'fill', 'onload', 'additive', 'calcMode', 'min', 'keySplines', 'to', 'dur', 'xlink:arcrole', 'onend', 'begin', 'xlink:title', 'xml:base', 'max', 'end', 'keyPoints', 'onbegin', 'systemLanguage', 'accumulate', 'path', 'externalResourcesRequired', 'by', 'restart', 'xlink:actuate', 'xlink:show', 'xlink:role', 'requiredFeatures', 'xml:lang', 'values', 'keyTimes', 'onrepeat', 'rotate']),
properties=empty_list,
children=frozenset(['desc', 'metadata', 'mpath', 'title'])),
'animateTransform': SVGElement('animateTransform',
attributes=frozenset(['requiredExtensions', 'from', 'repeatCount', 'xml:space', 'xlink:href', 'xlink:type', 'attributeType', 'repeatDur', 'id', 'fill', 'onload', 'additive', 'calcMode', 'min', 'keySplines', 'to', 'dur', 'xlink:arcrole', 'type', 'onend', 'begin', 'xml:base', 'max', 'xlink:title', 'attributeName', 'onbegin', 'systemLanguage', 'accumulate', 'end', 'externalResourcesRequired', 'by', 'restart', 'xlink:actuate', 'xlink:show', 'xlink:role', 'requiredFeatures', 'xml:lang', 'values', 'keyTimes', 'onrepeat']),
properties=empty_list,
children=frozenset(['desc', 'metadata', 'title'])),
'circle': SVGElement('circle',
attributes=frozenset(['xml:base', 'onmouseup', 'onmouseout', 'requiredExtensions', 'onfocusout', 'xml:space', 'cy', 'cx', 'onmouseover', 'externalResourcesRequired', 'id', 'onload', 'style', 'onactivate', 'onmousedown', 'transform', 'class', 'requiredFeatures', 'r', 'onmousemove', 'onclick', 'xml:lang', 'onfocusin', 'systemLanguage']),
properties=presentation_attributes,
children=frozenset(['animateMotion', 'set', 'title', 'animateColor', 'animateTransform', 'animate', 'metadata', 'desc'])),
'clipPath': SVGElement('clipPath',
attributes=frozenset(['clipPathUnits', 'style', 'xml:base', 'requiredExtensions', 'xml:space', 'transform', 'id', 'requiredFeatures', 'xml:lang', 'externalResourcesRequired', 'class', 'systemLanguage']),
properties=presentation_attributes,
children=frozenset(['set', 'animate', 'text', 'use', 'animateColor', 'polyline', 'path', 'line', 'ellipse', 'rect', 'desc', 'animateMotion', 'polygon', 'title', 'animateTransform', 'circle', 'metadata'])),
'color-profile': SVGElement('color-profile',
attributes=frozenset(['xlink:actuate', 'xlink:show', 'xml:base', 'name', 'rendering-intent', 'xml:space', 'xlink:href', 'xlink:role', 'xml:lang', 'xlink:type', 'xlink:title', 'xlink:arcrole', 'local', 'id']),
properties=empty_list,
children=frozenset(['desc', 'metadata', 'title'])),
'cursor': SVGElement('cursor',
attributes=frozenset(['xlink:title', 'xml:base', 'requiredExtensions', 'xml:space', 'xlink:href', 'systemLanguage', 'xlink:type', 'externalResourcesRequired', 'id', 'xlink:actuate', 'xlink:show', 'xlink:role', 'requiredFeatures', 'xml:lang', 'y', 'x', 'xlink:arcrole']),
properties=empty_list,
children=frozenset(['desc', 'metadata', 'title'])),
'defs': SVGElement('defs',
attributes=frozenset(['xml:base', 'onmouseup', 'onmouseout', 'requiredExtensions', 'onfocusout', 'xml:space', 'systemLanguage', 'onmouseover', 'externalResourcesRequired', 'class', 'onload', 'style', 'onactivate', 'onmousedown', 'transform', 'id', 'requiredFeatures', 'xml:lang', 'onmousemove', 'onclick', 'onfocusin']),
properties=presentation_attributes,
children=frozenset(['set', 'text', 'image', 'font-face', 'polyline', 'marker', 'animate', 'font', 'color-profile', 'ellipse', 'cursor', 'style', 'polygon', 'title', 'pattern', 'circle', 'radialGradient', 'metadata', 'defs', 'symbol', 'use', 'animateMotion', 'animateColor', 'path', 'line', 'rect', 'desc', 'a', 'g', 'svg', 'script', 'mask', 'altGlyphDef', 'filter', 'switch', 'animateTransform', 'linearGradient', 'clipPath', 'foreignObject', 'view'])),
'desc': SVGElement('desc',
attributes=frozenset(['style', 'xml:lang', 'xml:base', 'xml:space', 'class', 'id']),
properties=empty_list,
children=frozenset(['*'])),
'ellipse': SVGElement('ellipse',
attributes=frozenset(['xml:base', 'onmouseup', 'onmouseout', 'requiredExtensions', 'onfocusout', 'xml:space', 'ry', 'cy', 'cx', 'onmouseover', 'externalResourcesRequired', 'id', 'onload', 'style', 'onactivate', 'onmousedown', 'rx', 'transform', 'class', 'requiredFeatures', 'systemLanguage', 'onmousemove', 'onclick', 'xml:lang', 'onfocusin']),
properties=presentation_attributes,
children=frozenset(['animateMotion', 'set', 'title', 'animateColor', 'animateTransform', 'animate', 'desc', 'metadata'])),
'feBlend': SVGElement('feBlend',
attributes=frozenset(['style', 'xml:base', 'xml:space', 'in2', 'height', 'width', 'xml:lang', 'id', 'result', 'in', 'y', 'x', 'class', 'mode']),
properties=presentation_attributes,
children=frozenset(['animate', 'set'])),
'feColorMatrix': SVGElement('feColorMatrix',
attributes=frozenset(['style', 'xml:base', 'xml:space', 'id', 'height', 'width', 'xml:lang', 'values', 'result', 'in', 'y', 'x', 'type', 'class']),
properties=presentation_attributes,
children=frozenset(['animate', 'set'])),
'feComponentTransfer': SVGElement('feComponentTransfer',
attributes=frozenset(['style', 'xml:base', 'xml:space', 'height', 'width', 'xml:lang', 'id', 'result', 'in', 'y', 'x', 'class']),
properties=presentation_attributes,
children=frozenset(['feFuncA', 'feFuncR', 'feFuncB', 'feFuncG'])),
'feComposite': SVGElement('feComposite',
attributes=frozenset(['xml:base', 'xml:space', 'in2', 'height', 'result', 'in', 'operator', 'class', 'style', 'width', 'id', 'k3', 'k2', 'k1', 'xml:lang', 'k4', 'y', 'x']),
properties=presentation_attributes,
children=frozenset(['animate', 'set'])),
'feConvolveMatrix': SVGElement('feConvolveMatrix',
                         attributes=frozenset(['xml:base', 'xml:space', 'kernelUnitLength', 'edgeMode', 'height', 'bias', 'result', 'in', 'preserveAlpha', 'id', 'style', 'divisor', 'kernelMatrix', 'width', 'xml:lang', 'targetX', 'targetY', 'y', 'x', 'class', 'order']),
properties=presentation_attributes,
children=frozenset(['animate', 'set'])),
'feDiffuseLighting': SVGElement('feDiffuseLighting',
attributes=frozenset(['style', 'xml:base', 'xml:space', 'diffuseConstant', 'height', 'kernelUnitLength', 'width', 'xml:lang', 'id', 'result', 'in', 'y', 'x', 'class', 'surfaceScale']),
properties=presentation_attributes,
children=frozenset(['fePointLight', 'feSpotLight', 'title', 'metadata', 'feDistantLight', 'desc'])),
'feDisplacementMap': SVGElement('feDisplacementMap',
attributes=frozenset(['xml:base', 'xml:space', 'yChannelSelector', 'in2', 'height', 'result', 'in', 'class', 'style', 'scale', 'id', 'width', 'xml:lang', 'xChannelSelector', 'y', 'x']),
properties=presentation_attributes,
children=frozenset(['animate', 'set'])),
'feDistantLight': SVGElement('feDistantLight',
attributes=frozenset(['xml:lang', 'elevation', 'azimuth', 'xml:base', 'xml:space', 'id']),
properties=empty_list,
children=frozenset(['animate', 'set'])),
'feFlood': SVGElement('feFlood',
attributes=frozenset(['style', 'xml:base', 'xml:space', 'height', 'width', 'xml:lang', 'id', 'result', 'y', 'x', 'class']),
properties=presentation_attributes,
children=frozenset(['animate', 'set', 'animateColor'])),
'feFuncA': SVGElement('feFuncA',
attributes=frozenset(['slope', 'xml:base', 'tableValues', 'xml:space', 'xml:lang', 'intercept', 'amplitude', 'offset', 'type', 'id', 'exponent']),
properties=empty_list,
children=frozenset(['animate', 'set'])),
'feFuncB': SVGElement('feFuncB',
attributes=frozenset(['slope', 'xml:base', 'tableValues', 'xml:space', 'xml:lang', 'intercept', 'amplitude', 'offset', 'type', 'id', 'exponent']),
properties=empty_list,
children=frozenset(['animate', 'set'])),
'feFuncG': SVGElement('feFuncG',
attributes=frozenset(['slope', 'xml:base', 'tableValues', 'xml:space', 'xml:lang', 'intercept', 'amplitude', 'offset', 'type', 'id', 'exponent']),
properties=empty_list,
children=frozenset(['animate', 'set'])),
'feFuncR': SVGElement('feFuncR',
attributes=frozenset(['slope', 'xml:base', 'tableValues', 'xml:space', 'xml:lang', 'intercept', 'amplitude', 'offset', 'type', 'id', 'exponent']),
properties=empty_list,
children=frozenset(['animate', 'set'])),
'feGaussianBlur': SVGElement('feGaussianBlur',
attributes=frozenset(['style', 'xml:base', 'xml:space', 'height', 'width', 'xml:lang', 'id', 'result', 'in', 'y', 'x', 'stdDeviation', 'class']),
properties=presentation_attributes,
children=frozenset(['animate', 'set'])),
'feImage': SVGElement('feImage',
                         attributes=frozenset(['xlink:title', 'xml:base', 'xml:space', 'xlink:href', 'height', 'result', 'xlink:type', 'externalResourcesRequired', 'preserveAspectRatio', 'class', 'xlink:actuate', 'style', 'xlink:show', 'id', 'xlink:role', 'width', 'xml:lang', 'y', 'x', 'xlink:arcrole']),
properties=presentation_attributes,
children=frozenset(['animate', 'set', 'animateColor'])),
'feMerge': SVGElement('feMerge',
attributes=frozenset(['style', 'xml:base', 'xml:space', 'height', 'width', 'xml:lang', 'id', 'result', 'y', 'x', 'class']),
properties=presentation_attributes,
children=frozenset(['animate', 'set', 'feMergeNode'])),
'feMergeNode': SVGElement('feMergeNode',
attributes=frozenset(['xml:space', 'xml:lang', 'xml:base', 'id', 'in']),
properties=empty_list,
children=frozenset(['animate', 'set'])),
'feMorphology': SVGElement('feMorphology',
attributes=frozenset(['style', 'xml:base', 'y', 'xml:space', 'id', 'height', 'width', 'xml:lang', 'radius', 'result', 'in', 'operator', 'x', 'class']),
properties=presentation_attributes,
#!/usr/bin/env python3
import pickle
import os
import numpy as np
def get_sweep_parameters(parameters, env_config, index):
"""
Gets the parameters for the hyperparameter sweep defined by the index.
Each hyperparameter setting has a specific index number, and this function
will get the appropriate parameters for the argument index. In addition,
    the indices will wrap around, so if there are a total of 10 different
hyperparameter settings, then the indices 0 and 10 will return the same
hyperparameter settings. This is useful for performing loops.
For example, if you had 10 hyperparameter settings and you wanted to do
    10 runs, then you could just call this for indices in range(0, 10*10). If
    you only wanted to do runs for hyperparameter setting i, then you would
    use indices in range(i, 10*10, 10)
Parameters
----------
parameters : dict
The dictionary of parameters, as found in the agent's json
configuration file
env_config : dict
The environment configuration dictionary, as found in the environment's
json configuration file
index : int
The index of the hyperparameters configuration to return
Returns
-------
dict, int
The dictionary of hyperparameters to use for the agent and the total
number of combinations of hyperparameters (highest possible unique
index)
"""
out_params = {}
out_params["gamma"] = env_config["gamma"]
accum = 1
for key in parameters:
num = len(parameters[key])
out_params[key] = parameters[key][(index // accum) % num]
accum *= num
return (out_params, accum)
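# A small worked example of the index-to-setting mapping described in the
# docstring above; the function below is illustrative only, uses hypothetical
# parameter values, and is not used by the rest of this module.
def _sweep_example():
    parameters = {"alpha": [0.1, 0.01], "beta": [1, 2, 3]}
    env_config = {"gamma": 0.99}
    # Two values of "alpha" and three of "beta" give 2 * 3 = 6 distinct
    # settings; the earlier key varies fastest and indices wrap around modulo 6.
    first, total = get_sweep_parameters(parameters, env_config, 0)
    # first == {"gamma": 0.99, "alpha": 0.1, "beta": 1} and total == 6
    second, _ = get_sweep_parameters(parameters, env_config, 1)
    # second == {"gamma": 0.99, "alpha": 0.01, "beta": 1} ("alpha" changes first)
    wrapped, _ = get_sweep_parameters(parameters, env_config, 6)
    # wrapped == first because index 6 wraps around to index 0
    return first, second, wrapped, total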
def get_sweep_num(parameters):
"""
Similar to get_sweep_parameters but only returns the total number of
hyperparameter combinations. This number is the total number of distinct
hyperparameter settings. If this function returns k, then there are k
distinct hyperparameter settings, and indices 0 and k refer to the same
distinct hyperparameter setting.
Parameters
----------
parameters : dict
The dictionary of parameters, as found in the agent's json
configuration file
Returns
-------
int
The number of distinct hyperparameter settings
"""
accum = 1
for key in parameters:
num = len(parameters[key])
accum *= num
return accum
def get_hyperparam_indices(data, hp_name, hp_value):
"""
Gets all hyperparameter indices that have the hyperparameter hp_name
having the value hp_value.
Parameters
----------
data : dict
The data dictionary generated from running main.py
hp_name : str
The name of the hyperparameter to check the value of
hp_value : object
The value that the hyperparameter should have in each hyperparameter
settings index
Returns
-------
list of int
The hyperparameter settings that have the argument hyperparameter
hp_name having the argument value hp_value
"""
agent_param = data["experiment"]["agent"]["parameters"]
env_config = data["experiment"]["environment"]
hp_indices = []
for i in range(get_sweep_num(agent_param)):
# Get the hyperparameters for each hyperparameter setting
hp_setting = get_sweep_parameters(agent_param, env_config, i)[0]
if hp_setting[hp_name] == hp_value:
hp_indices.append(i)
return hp_indices
def get_varying_single_hyperparam(data, hp_name):
"""
Gets the hyperparameter indices where only a single hyperparameter is
varying and all other hyperparameters remain constant.
Parameters
----------
data : dict
The data dictionary generated from running main.py
hp_name : str
The name of the hyperparameter to vary
Returns
-------
n-tuple of m-tuple of int
Gets and returns the hyperparameter indices where only a single
hyperparameter is varying and all others remain constant. The
total number of values that the varying hyperparameter can take on
is m; n is the total number of hyperparameter combinations // m.
For example, if the hyperparameter is the decay rate and it can take
on values in [0.0, 0.1, 0.5] and there are a total of 81 hyperparameter
settings combinations, then m = 3 and n = 81 // 3 = 27
"""
agent_param = data["experiment"]["agent"]["parameters"]
hps = [] # set(range(exp.get_sweep_num(agent_param)))
for hp_value in agent_param[hp_name]:
hps.append(get_hyperparam_indices(data, hp_name, hp_value))
return tuple(zip(*hps))
def get_best_hp(data, type_, after=0):
"""
Gets and returns a list of the hyperparameter settings, sorted by average
return.
Parameters
----------
data : dict
        The Python data dictionary generated from running main.py
type_ : str
The type of return by which to compare hyperparameter settings, one of
"train" or "eval"
after : int, optional
Hyperparameters will only be compared by their performance after
training for this many episodes (in continuing tasks, this is the
number of times the task is restarted). For example, if after = -10,
then only the last 10 returns from training/evaluation are taken
into account when comparing the hyperparameters. As usual, positive
values index from the front, and negative values index from the back.
Returns
-------
n-tuple of 2-tuple(int, float)
A tuple with the number of elements equal to the total number of
hyperparameter combinations. Each sub-tuple is a tuple of (hyperparameter
setting number, mean return over all runs and episodes)
"""
if type_ not in ("train", "eval"):
raise ValueError("type_ should be one of 'train', 'eval'")
return_type = "train_episode_rewards" if type_ == "train" \
else "eval_episode_rewards"
mean_returns = []
hp_settings = sorted(list(data["experiment_data"].keys()))
for hp_setting in hp_settings:
hp_returns = []
for run in data["experiment_data"][hp_setting]["runs"]:
hp_returns.append(run[return_type])
hp_returns = np.stack(hp_returns)
# If evaluating, use the mean return over all episodes for each
        # evaluation interval. That is, if there are 10 eval episodes for each
        # evaluation, then take the average return over all these eval episodes.
if type_ == "eval":
hp_returns = hp_returns.mean(axis=-1)
        # Keep only the returns from index `after` onward (the episode axis,
        # as described in the docstring), then average over all runs
        hp_returns = hp_returns[:, after:].mean(axis=0)
# Calculate the average return over all "episodes"
hp_returns = hp_returns.mean(axis=0)
# Save mean return
mean_returns.append(hp_returns)
# Return the best hyperparam settings in order with the
    # mean returns sorted by hyperparameter setting performance
best_hp_settings = np.argsort(mean_returns)
mean_returns = np.array(mean_returns)[best_hp_settings]
return tuple(zip(best_hp_settings, mean_returns))
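# Usage sketch for get_best_hp. Because np.argsort sorts in ascending order,
# the last entry of the returned tuple corresponds to the hyperparameter
# setting with the highest mean return; data is assumed to be an unpickled
# dictionary produced by main.py, and the helper below is illustrative only.
def _best_hp_example(data):
    hp_order = get_best_hp(data, "train", after=-10)
    best_setting, best_mean = hp_order[-1]
    return best_setting, best_mean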
def combine_runs(data1, data2):
"""
Adds the runs for each hyperparameter setting in data2 to the runs for the
corresponding hyperparameter setting in data1.
Given two data dictionaries, this function will get each hyperparameter
setting and extend the runs done on this hyperparameter setting and saved
in data1 by the runs of this hyperparameter setting and saved in data2.
In short, this function extends the lists
data1["experiment_data"][i]["runs"] by the lists
data2["experiment_data"][i]["runs"] for all i. This is useful if
multiple runs are done at different times, and the two data files need
to be combined.
Parameters
----------
data1 : dict
A data dictionary as generated by main.py
data2 : dict
A data dictionary as generated by main.py
Raises
------
KeyError
If a hyperparameter setting exists in data2 but not in data1. This
signals that the hyperparameter settings indices are most likely
different, so the hyperparameter index i in data1 does not correspond
to the same hyperparameter index in data2. In addition, all other
functions expect the number of runs to be consistent for each
hyperparameter setting, which would be violated in this case.
"""
for hp_setting in data1["experiment_data"]:
        if hp_setting not in data2["experiment_data"]:
# Ensure consistent hyperparam settings indices
raise KeyError("hyperparameter settings are different " +
"between the two experiments")
extra_runs = data2["experiment_data"][hp_setting]["runs"]
data1["experiment_data"][hp_setting]["runs"].extend(extra_runs)
def get_returns(data, type_, ind):
"""
Gets the returns seen by an agent
Gets the online or offline returns seen by an agent trained with
hyperparameter settings index ind.
Parameters
----------
data : dict
The Python data dictionary generated from running main.py
type_ : str
Whether to get the training or evaluation returns, one of 'train',
'eval'
ind : int
Gets the returns of the agent trained with this hyperparameter
settings index
Returns
-------
array_like
The array of returns of the form (N, R, C) where N is the number of
runs, R is the number of times a performance was measured, and C is the
number of returns generated each time performance was measured
(offline >= 1; online = 1). For the online setting, N is the number of
runs, and R is the number of episodes and C = 1. For the offline
setting, N is the number of runs, R is the number of times offline
evaluation was performed, and C is the number of episodes run each
time performance was evaluated offline.
"""
returns = []
if type_ == "eval":
# Get the offline evaluation episode returns per run
for run in data["experiment_data"][ind]["runs"]:
returns.append(run["eval_episode_rewards"])
returns = np.stack(returns)
return pulumi.get(self, "deploy_stage_id")
@deploy_stage_id.setter
def deploy_stage_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deploy_stage_id", value)
@property
@pulumi.getter(name="deploymentArguments")
def deployment_arguments(self) -> Optional[pulumi.Input['DeploymentDeploymentArgumentsArgs']]:
"""
Specifies list of arguments passed along with the deployment.
"""
return pulumi.get(self, "deployment_arguments")
@deployment_arguments.setter
def deployment_arguments(self, value: Optional[pulumi.Input['DeploymentDeploymentArgumentsArgs']]):
pulumi.set(self, "deployment_arguments", value)
@property
@pulumi.getter(name="deploymentExecutionProgress")
def deployment_execution_progress(self) -> Optional[pulumi.Input['DeploymentDeploymentExecutionProgressArgs']]:
"""
The execution progress details of a deployment.
"""
return pulumi.get(self, "deployment_execution_progress")
@deployment_execution_progress.setter
def deployment_execution_progress(self, value: Optional[pulumi.Input['DeploymentDeploymentExecutionProgressArgs']]):
pulumi.set(self, "deployment_execution_progress", value)
@property
@pulumi.getter(name="deploymentType")
def deployment_type(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Specifies type for this deployment.
"""
return pulumi.get(self, "deployment_type")
@deployment_type.setter
def deployment_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deployment_type", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Deployment display name. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> Optional[pulumi.Input[str]]:
"""
A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
"""
return pulumi.get(self, "lifecycle_details")
@lifecycle_details.setter
def lifecycle_details(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lifecycle_details", value)
@property
@pulumi.getter(name="previousDeploymentId")
def previous_deployment_id(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the OCID of the previous deployment to be redeployed.
"""
return pulumi.get(self, "previous_deployment_id")
@previous_deployment_id.setter
def previous_deployment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "previous_deployment_id", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of a project.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The current state of the deployment.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="systemTags")
def system_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Usage of system tag keys. These predefined keys are scoped to namespaces. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"orcl-cloud.free-tier-retained": "true"}`
"""
return pulumi.get(self, "system_tags")
@system_tags.setter
def system_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "system_tags", value)
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> Optional[pulumi.Input[str]]:
"""
Time the deployment was created. Format defined by [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339).
"""
return pulumi.get(self, "time_created")
@time_created.setter
def time_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_created", value)
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> Optional[pulumi.Input[str]]:
"""
Time the deployment was updated. Format defined by [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339).
"""
return pulumi.get(self, "time_updated")
@time_updated.setter
def time_updated(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_updated", value)
class Deployment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
deploy_artifact_override_arguments: Optional[pulumi.Input[pulumi.InputType['DeploymentDeployArtifactOverrideArgumentsArgs']]] = None,
deploy_pipeline_id: Optional[pulumi.Input[str]] = None,
deploy_stage_id: Optional[pulumi.Input[str]] = None,
deployment_arguments: Optional[pulumi.Input[pulumi.InputType['DeploymentDeploymentArgumentsArgs']]] = None,
deployment_type: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
previous_deployment_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
This resource provides the Deployment resource in Oracle Cloud Infrastructure Devops service.
Creates a new deployment.
## Import
Deployments can be imported using the `id`, e.g.
```sh
$ pulumi import oci:devops/deployment:Deployment test_deployment "id"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[pulumi.InputType['DeploymentDeployArtifactOverrideArgumentsArgs']] deploy_artifact_override_arguments: Specifies the list of artifact override arguments at the time of deployment.
:param pulumi.Input[str] deploy_pipeline_id: The OCID of a pipeline.
:param pulumi.Input[str] deploy_stage_id: Specifies the OCID of the stage to be redeployed.
:param pulumi.Input[pulumi.InputType['DeploymentDeploymentArgumentsArgs']] deployment_arguments: Specifies list of arguments passed along with the deployment.
:param pulumi.Input[str] deployment_type: (Updatable) Specifies type for this deployment.
:param pulumi.Input[str] display_name: (Updatable) Deployment display name. Avoid entering confidential information.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"bar-key": "value"}`
:param pulumi.Input[str] previous_deployment_id: Specifies the OCID of the previous deployment to be redeployed.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DeploymentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
This resource provides the Deployment resource in Oracle Cloud Infrastructure Devops service.
Creates a new deployment.
## Import
Deployments can be imported using the `id`, e.g.
```sh
$ pulumi import oci:devops/deployment:Deployment test_deployment "id"
```
:param str resource_name: The name of the resource.
:param DeploymentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DeploymentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
deploy_artifact_override_arguments: Optional[pulumi.Input[pulumi.InputType['DeploymentDeployArtifactOverrideArgumentsArgs']]] = None,
deploy_pipeline_id: Optional[pulumi.Input[str]] = None,
deploy_stage_id: Optional[pulumi.Input[str]] = None,
deployment_arguments: Optional[pulumi.Input[pulumi.InputType['DeploymentDeploymentArgumentsArgs']]] = None,
deployment_type: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
previous_deployment_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DeploymentArgs.__new__(DeploymentArgs)
__props__.__dict__["defined_tags"] = defined_tags
__props__.__dict__["deploy_artifact_override_arguments"] = deploy_artifact_override_arguments
if deploy_pipeline_id is None and not opts.urn:
raise TypeError("Missing required property 'deploy_pipeline_id'")
__props__.__dict__["deploy_pipeline_id"] = deploy_pipeline_id
__props__.__dict__["deploy_stage_id"] = deploy_stage_id
__props__.__dict__["deployment_arguments"] = deployment_arguments
if deployment_type is None and not opts.urn:
raise TypeError("Missing required property 'deployment_type'")
__props__.__dict__["deployment_type"] = deployment_type
__props__.__dict__["display_name"] = display_name
__props__.__dict__["freeform_tags"] = freeform_tags
__props__.__dict__["previous_deployment_id"] = previous_deployment_id
__props__.__dict__["compartment_id"] = None
__props__.__dict__["deploy_pipeline_artifacts"] = None
__props__.__dict__["deploy_pipeline_environments"] = None
__props__.__dict__["deployment_execution_progress"] = None
__props__.__dict__["lifecycle_details"] = None
__props__.__dict__["project_id"] = None
__props__.__dict__["state"] = None
__props__.__dict__["system_tags"] = None
__props__.__dict__["time_created"] = None
__props__.__dict__["time_updated"] = None
super(Deployment, __self__).__init__(
'oci:devops/deployment:Deployment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
deploy_artifact_override_arguments: Optional[pulumi.Input[pulumi.InputType['DeploymentDeployArtifactOverrideArgumentsArgs']]] = None,
deploy_pipeline_artifacts: Optional[pulumi.Input[pulumi.InputType['DeploymentDeployPipelineArtifactsArgs']]] = None,
deploy_pipeline_environments: Optional[pulumi.Input[pulumi.InputType['DeploymentDeployPipelineEnvironmentsArgs']]] = None,
deploy_pipeline_id: Optional[pulumi.Input[str]] = None,
deploy_stage_id: Optional[pulumi.Input[str]] = None,
deployment_arguments: Optional[pulumi.Input[pulumi.InputType['DeploymentDeploymentArgumentsArgs']]] = None,
deployment_execution_progress: Optional[pulumi.Input[pulumi.InputType['DeploymentDeploymentExecutionProgressArgs']]] = None,
deployment_type: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
lifecycle_details: Optional[pulumi.Input[str]] = None,
previous_deployment_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
system_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None) -> 'Deployment':
"""
Get an existing Deployment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compartment_id: The OCID of a compartment.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[pulumi.InputType['DeploymentDeployArtifactOverrideArgumentsArgs']] deploy_artifact_override_arguments: Specifies the list of artifact override arguments at the time of deployment.
:param pulumi.Input[pulumi.InputType['DeploymentDeployPipelineArtifactsArgs']] deploy_pipeline_artifacts: List of all artifacts used in the pipeline.
:param pulumi.Input[pulumi.InputType['DeploymentDeployPipelineEnvironmentsArgs']] deploy_pipeline_environments: List of all environments used in the pipeline.
:param pulumi.Input[str] deploy_pipeline_id: The OCID of a pipeline.
:param pulumi.Input[str] deploy_stage_id: Specifies the OCID of the stage to be redeployed.
:param pulumi.Input[pulumi.InputType['DeploymentDeploymentArgumentsArgs']] deployment_arguments: Specifies list of arguments passed along with the deployment.
:param pulumi.Input[pulumi.InputType['DeploymentDeploymentExecutionProgressArgs']] deployment_execution_progress: The execution progress details of a deployment.
:param pulumi.Input[str] deployment_type: (Updatable) Specifies type for this deployment.
:param pulumi.Input[str] display_name: (Updatable) Deployment display name. Avoid entering confidential information.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"bar-key": "value"}`
:param pulumi.Input[str] lifecycle_details: A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
:param pulumi.Input[str] previous_deployment_id: Specifies the OCID of the previous deployment to be redeployed.
:param pulumi.Input[str] project_id: The OCID of a project.
:param pulumi.Input[str] state: The current state of the deployment.
        :param pulumi.Input[Mapping[str, Any]] system_tags: Usage of system tag keys. These predefined keys are scoped to namespaces. See [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
# Repository: City-of-Helsinki/hauki
import datetime
from operator import itemgetter
from typing import Tuple
import pytz
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.db import transaction
from django.db.models import Exists, OuterRef, Q
from django.http import Http404
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django_filters.rest_framework import DjangoFilterBackend
from django_orghierarchy.models import Organization
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import (
OpenApiParameter,
OpenApiResponse,
extend_schema,
extend_schema_view,
inline_serializer,
)
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import (
APIException,
NotFound,
PermissionDenied,
ValidationError,
)
from rest_framework.fields import BooleanField, CharField, ListField
from rest_framework.filters import BaseFilterBackend, OrderingFilter
from rest_framework.generics import get_object_or_404
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import clone_request
from rest_framework.response import Response
from .authentication import HaukiSignedAuthData
from .enums import State
from .filters import DatePeriodFilter, TimeSpanFilter, parse_maybe_relative_date_string
from .models import DatePeriod, Resource, Rule, TimeElement, TimeSpan
from .permissions import (
IsMemberOrAdminOfOrganization,
ReadOnlyPublic,
filter_queryset_by_permission,
)
from .serializers import (
DailyOpeningHoursSerializer,
DatePeriodSerializer,
IsOpenNowSerializer,
OrganizationSerializer,
ResourceDailyOpeningHoursSerializer,
ResourceSerializer,
RuleCreateSerializer,
RuleSerializer,
TimeSpanCreateSerializer,
TimeSpanSerializer,
)
from .signals import DeferUpdatingDenormalizedDatePeriodData
from .utils import get_resource_pk_filter
class OnCreateOrgMembershipCheck:
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
if not request.user.is_superuser:
organization = serializer.validated_data.get("organization")
ancestry_organizations = set()
resource = None
if "resource" in serializer.validated_data.keys():
resource = serializer.validated_data.get("resource")
elif isinstance(
serializer, (RuleCreateSerializer, TimeSpanCreateSerializer)
):
time_span_group = serializer.validated_data.get("group")
resource = time_span_group.period.resource
if resource:
# We are creating object related to resource.
if not organization:
organization = resource.organization
if resource.ancestry_organization:
ancestry_organizations = set(resource.ancestry_organization)
else:
# We are creating a new resource.
if not organization:
organization = None
parents = serializer.validated_data.get("parents")
if parents:
resource = parents[0]
organization = resource.organization
for parent in parents:
ancestry_organizations.add(parent.organization.id)
if parent.ancestry_organization:
ancestry_organizations.update(parent.ancestry_organization)
if not organization and not ancestry_organizations:
raise ValidationError(
detail=_(
"Cannot create or edit resources that "
"are not part of an organization "
)
)
else:
users_organizations = request.user.get_all_organizations()
auth = request.auth
if (
isinstance(auth, HaukiSignedAuthData)
and auth.resource
and auth.resource == resource
):
# A special case for users signed in using the
# HaukiSignedAuthentication
pass
elif (
not ancestry_organizations
and organization not in users_organizations
) or (
ancestry_organizations
and set(ancestry_organizations).difference(
[uo.id for uo in users_organizations]
)
):
raise PermissionDenied(
detail=_(
"Cannot add data to organizations the user "
"is not a member of"
)
)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(
serializer.data, status=status.HTTP_201_CREATED, headers=headers
)
class PermissionCheckAction:
@extend_schema(
summary="Check method permission for object",
request=inline_serializer("", {}),
responses=inline_serializer(
"permission_check", {"has_permission": BooleanField()}
),
)
@action(
detail=True,
methods=["get", "post", "put", "patch", "delete"],
permission_classes=[],
)
def permission_check(self, request, pk=None):
"""Runs check_object_permission for the object with the used method and returns
the result"""
obj = self.get_object()
# This action should be callable without any permissions, but the
# check_object_permissions call should use the original permissions
# from the viewset.
old_permission_classes = self.permission_classes
self.permission_classes = self.__class__.permission_classes
try:
self.check_object_permissions(request, obj)
has_permission = True
except APIException:
has_permission = False
self.permission_classes = old_permission_classes
return Response({"has_permission": has_permission})
class PageSizePageNumberPagination(PageNumberPagination):
page_size = 100
max_page_size = 1000
page_size_query_param = "page_size"
def get_start_and_end_from_params(request) -> Tuple[datetime.date, datetime.date]:
if not request.query_params.get("start_date") or not request.query_params.get(
"end_date"
):
raise ValidationError(
detail=_("start_date and end_date GET parameters are required")
)
try:
start_date = parse_maybe_relative_date_string(
request.query_params.get("start_date", "")
)
except ValueError:
raise ValidationError(detail=_("Invalid start_date"))
try:
end_date = parse_maybe_relative_date_string(
request.query_params.get("end_date", ""), end_date=True
)
except ValueError:
raise ValidationError(detail=_("Invalid end_date"))
if start_date > end_date:
raise ValidationError(detail=_("start_date must be before end_date"))
return start_date, end_date
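# Usage sketch for the helper above, using DRF's test request factory. The URL
# path is hypothetical; the helper only looks at the start_date and end_date
# query parameters, which may also use the relative forms accepted by
# parse_maybe_relative_date_string. This function is illustrative only.
def _start_and_end_params_example():
    from rest_framework.request import Request
    from rest_framework.test import APIRequestFactory
    request = Request(
        APIRequestFactory().get(
            "/v1/resource/1/opening_hours/",
            {"start_date": "2020-01-01", "end_date": "2020-01-31"},
        )
    )
    # Expected to return (datetime.date(2020, 1, 1), datetime.date(2020, 1, 31))
    return get_start_and_end_from_params(request)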
class ResourceFilterBackend(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
resource_ids = request.query_params.get("resource_ids", None)
data_source = request.query_params.get("data_source", None)
origin_id_exists = request.query_params.get("origin_id_exists", None)
parent = request.query_params.get("parent", None)
child = request.query_params.get("child", None)
date_periods_hash = request.query_params.get("date_periods_hash", None)
filter_q = Q()
if resource_ids is not None:
resource_id_parts = resource_ids.split(",")
primary_keys = []
origin_id_filters = Q()
for part in resource_id_parts:
part = part.strip()
try:
primary_keys.append(int(part))
except ValueError:
if ":" in part:
origin_id_filters |= Q(**get_resource_pk_filter(part))
filter_q &= Q(pk__in=primary_keys) | origin_id_filters
if data_source is not None:
filter_q &= Q(origins__data_source=data_source) | Q(
ancestry_data_source__contains=[data_source]
)
if parent is not None:
filter_q &= Q(parents__id=parent)
if child is not None:
filter_q &= Q(children__id=child)
if origin_id_exists is not None and origin_id_exists:
origin_id_exists = origin_id_exists.lower() == "true"
if origin_id_exists or not data_source:
                # Keep resources that have an origin id
                # (when origin_id_exists=True),
                # or resources that have no origin id in any data source
                # (when data_source=None and origin_id_exists=False).
filter_q &= Q(origins__origin_id__isnull=not origin_id_exists)
else:
# Exclude resources that have origin id in the provided
# data source
return queryset.filter(filter_q).exclude(
Q(origins__data_source=data_source)
& Q(origins__origin_id__isnull=False)
)
if date_periods_hash is not None:
filter_q &= Q(date_periods_hash=date_periods_hash)
return queryset.filter(filter_q)
@extend_schema_view(
list=extend_schema(
summary="List Resources",
parameters=[
OpenApiParameter(
"resource_ids",
location=OpenApiParameter.QUERY,
description="Filter by one or more resource ids (comma separated list)."
" Supports [data_source_id]:[origin_id] style ids.",
style="form",
explode=False,
),
OpenApiParameter(
"data_source",
OpenApiTypes.UUID,
OpenApiParameter.QUERY,
description="Filter by data source",
),
OpenApiParameter(
"origin_id_exists",
OpenApiTypes.BOOL,
OpenApiParameter.QUERY,
description="Filter by existing/missing origin_id",
),
OpenApiParameter(
"parent",
OpenApiTypes.UUID,
OpenApiParameter.QUERY,
description="Filter by parent id",
),
OpenApiParameter(
"child",
OpenApiTypes.UUID,
OpenApiParameter.QUERY,
description="Filter by child id",
),
],
),
create=extend_schema(summary="Create a Resource"),
retrieve=extend_schema(summary="Find Resource by ID"),
update=extend_schema(summary="Update existing Resource"),
partial_update=extend_schema(summary="Update existing Resource partially"),
destroy=extend_schema(summary="Delete existing Resource"),
opening_hours=extend_schema(
summary="Get opening hours for Resource",
parameters=[
OpenApiParameter(
"start_date",
OpenApiTypes.DATE,
OpenApiParameter.QUERY,
description="First date to return hours for",
),
OpenApiParameter(
"end_date",
OpenApiTypes.DATE,
OpenApiParameter.QUERY,
description="Last date to return hours for",
),
],
responses=DailyOpeningHoursSerializer,
),
is_open_now=extend_schema(
summary="Is Resource open now?", responses=IsOpenNowSerializer
),
copy_date_periods=extend_schema(
summary="Copy all the periods from this resource to other resources",
request=OpenApiTypes.NONE,
parameters=[
OpenApiParameter(
"target_resources",
OpenApiTypes.STR,
OpenApiParameter.QUERY,
description="Comma separated list of target resource ids",
),
OpenApiParameter(
"replace",
OpenApiTypes.BOOL,
OpenApiParameter.QUERY,
description="Replace all the periods in the target resource",
default=False,
),
],
responses={
200: OpenApiResponse(
description="Copy succeeded",
),
400: OpenApiResponse(description="Bad request"),
403: OpenApiResponse(
description="No permission read source resource or no permission"
" to modify one or more of the target resources"
),
404: OpenApiResponse(description="One or more target resources not found"),
},
),
)
class ResourceViewSet(
OnCreateOrgMembershipCheck, PermissionCheckAction, viewsets.ModelViewSet
):
serializer_class = ResourceSerializer
permission_classes = [ReadOnlyPublic | IsMemberOrAdminOfOrganization]
pagination_class = PageSizePageNumberPagination
filter_backends = (DjangoFilterBackend, ResourceFilterBackend, OrderingFilter)
def get_queryset(self):
queryset = (
Resource.objects.prefetch_related(
"origins", "children", "parents", "origins__data_source"
)
.distinct()
.order_by("id")
)
# Filter the queryset according to read permissions
queryset = filter_queryset_by_permission(
self.request.user, queryset, auth=self.request.auth
)
return queryset
def get_object(self, check_permission=True):
queryset = self.filter_queryset(self.get_queryset())
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
pk = self.kwargs.get(lookup_url_kwarg, None)
if not pk:
raise Http404
obj = get_object_or_404(queryset, **get_resource_pk_filter(pk))
if check_permission:
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
@action(detail=True)
def opening_hours(self, request, pk=None):
resource = self.get_object()
(start_date, end_date) = get_start_and_end_from_params(request)
opening_hours = resource.get_daily_opening_hours(start_date, end_date)
opening_hours_list = []
for the_date, time_elements in opening_hours.items():
opening_hours_list.append(
{
"date": the_date,
"times": time_elements,
}
)
opening_hours_list.sort(key=itemgetter("date"))
serializer = DailyOpeningHoursSerializer(opening_hours_list, many=True)
return Response(serializer.data)
@action(detail=True)
def is_open_now(self, request, pk=None):
resource = self.get_object()
open_states = State.open_states()
time_now = timezone.now()
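        # Timezone resolution order used below: the resource's own timezone, then
        # settings.RESOURCE_DEFAULT_TIMEZONE, then "Europe/Helsinki" as a final fallback.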
tz = resource.timezone
if not tz:
tz = pytz.timezone(settings.RESOURCE_DEFAULT_TIMEZONE)
if not tz:
tz = pytz.timezone("Europe/Helsinki")
resource_time_now = time_now.astimezone(tz)
other_tz = None
if request.query_params.get("timezone"):
try:
other_tz = pytz.timezone(request.query_params.get("timezone"))
except pytz.exceptions.UnknownTimeZoneError:
raise APIException("Unknown timezone")
opening_hours = resource.get_daily_opening_hours(
resource_time_now.date(), resource_time_now.date()
).get(
datetime.date(
year=resource_time_now.year,
month=resource_time_now.month,
day=resource_time_now.day,
),
[],
)
matching_opening_hours = []
matching_opening_hours_other_tz = []
for opening_hour in opening_hours:
start_date = resource_time_now.date()
end_date = resource_time_now.date()
if opening_hour.end_time_on_next_day:
end_date = resource_time_now.date() + relativedelta(days=1)
start_time = (
opening_hour.start_time
if opening_hour.start_time
else datetime.time.min
)
end_time = (
opening_hour.end_time if opening_hour.end_time else datetime.time.max
)
start_datetime = tz.localize(
datetime.datetime(
year=start_date.year,
month=start_date.month,
day=start_date.day,
hour=start_time.hour,
minute=start_time.minute,
second=start_time.second,
)
)
end_datetime = tz.localize(
datetime.datetime(
year=end_date.year,
month=end_date.month,
day=end_date.day,
hour=end_time.hour,
minute=end_time.minute,
second=end_time.second,
)
)
if (
start_datetime <= resource_time_now <= end_datetime
and opening_hour.resource_state in open_states
):
matching_opening_hours.append(opening_hour)
if not other_tz:
continue
other_timezone_start_datetime = start_datetime.astimezone(other_tz)
other_timezone_end_datetime = end_datetime.astimezone(other_tz)
matching_opening_hours_other_tz.append(
TimeElement(
start_time=other_timezone_start_datetime.time(),
end_time=other_timezone_end_datetime.time(),
end_time_on_next_day=other_timezone_start_datetime.date()
!= other_timezone_end_datetime.date(),
resource_state=opening_hour.resource_state,
override=opening_hour.override,
full_day=opening_hour.full_day,
name=opening_hour.name,
description=opening_hour.description,
periods=opening_hour.periods,
)
)
other_timezone_time_now = resource_time_now.astimezone(other_tz)
data = {
"is_open": bool(matching_opening_hours),
"resource_timezone": tz,
"resource_time_now": resource_time_now,
"matching_opening_hours": matching_opening_hours,
"resource": resource,
}
if other_tz:
data = {
**data,
"other_timezone": other_tz,
"other_timezone_time_now": other_timezone_time_now,
"matching_opening_hours_in_other_tz": matching_opening_hours_other_tz,
}
serializer = IsOpenNowSerializer(data)
return Response(serializer.data)
@action(detail=True, methods=["post"])
def copy_date_periods(self, request, pk=None):
resource = self.get_object(check_permission=False)
# The user only needs read permission to the source resource
self.check_object_permissions(clone_request(self.request, "GET"), resource)
if not request.query_params.get("target_resources"):
raise ValidationError(detail=_("target_resources parameter is required"))
replace = False
if request.query_params.get("replace"):
replace_value = request.query_params.get("replace").lower().strip()
if replace_value in ["1", "true", "yes"]:
replace = True
target_resource_ids = [
resource_id.strip()
for resource_id in request.query_params.get("target_resources", "").split(
","
)
if resource_id.strip()
]
target_resources = []
no_permission_resource_ids = []
for target_resource_id in target_resource_ids:
try:
target_resource = Resource.objects.get(
**get_resource_pk_filter(target_resource_id)
)
except Resource.DoesNotExist:
                detail = _('Resource
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_creation_with_cors_access_control_allow_methods(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'Access-Control-Allow-Methods': 'GET, POST, OPTIONS'}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
method = 'object creation with cors access control allow' \
' methods header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_creation_with_cors_allow_origin_header(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'Access-Control-Allow-Origin': 'http://foobar.org'}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
method = 'object creation with cors access control allow' \
' origin header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_creation_with_cors_expose_headers_header(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'Access-Control-Expose-Headers': 'X-Foo-Header'}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
method = 'object creation with cors expose headers header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_creation_with_cors_max_age_header(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'Access-Control-Max-Age': '5'}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
method = 'object creation with cors max age header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_creation_with_cors_request_headers_header(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'Access-Control-Request-Headers': 'x-requested-with'}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
method = 'object creation with cors request headers header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_creation_with_cors_request_method_header(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'Access-Control-Request-Method': 'GET'}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
method = 'object creation with cors request method header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_creation_with_cors_origin_header(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'Origin': 'http://foobar.org'}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
method = 'object creation with cors origin header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_retrieval_with_file_compression(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Uncompressed test file data'
compressed_object_data = zlib.compress(object_data)
content_length = str(len(compressed_object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'Content-Encoding': 'gzip'}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=compressed_object_data)
        method = 'object creation with content encoding header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_retrieval_with_browser_bypass(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'Content-Disposition': 'attachment; filename=testdata.txt'}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
        method = 'object creation with content disposition header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_creation_with_x_delete_at_header(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
start_time = calendar.timegm(time.gmtime())
future_time = str(int(start_time + 60))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'X-Delete-At': future_time}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
method = 'object creation x delete at header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_creation_with_delete_after_header(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT,
'X-Delete-After': '60'}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
method = 'object creation x delete after header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
container_name,
object_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
@unittest.skipUnless(
get_value('versioned_containers') == 'true', NOT_CONFIGURED_MSG)
def test_versioned_container_creation_with_valid_data(self):
        # Create a container for 'non-current' object storage
non_current_version_container_name = self.setup_container(
self.base_container_name)
        # Create a container for 'current' object storage
current_version_container_headers = \
{'X-Versions-Location': non_current_version_container_name}
current_version_container_name = self.setup_container(
self.base_container_name,
headers=current_version_container_headers)
        # Create an object (version 1)
object_name = self.base_object_name
object_data = 'Version 1'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
response = self.client.create_object(
current_version_container_name,
object_name,
headers=headers,
data=object_data)
method = 'object version one creation'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.list_objects(
non_current_version_container_name)
method = 'list on empty versioned container'
expected = 204
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
        # Update an object (version 2)
object_data = 'Version 2'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
response = self.client.create_object(
current_version_container_name,
object_name,
headers=headers,
data=object_data)
method = 'update version one object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.list_objects(
non_current_version_container_name)
method = 'list on versioned container'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_put_copy_object(self):
src_container_name = self.setup_container(self.base_container_name)
dest_container_name = self.setup_container(self.base_container_name)
src_obj_name = '{0}_source'.format(self.base_object_name)
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
src_container_name,
src_obj_name,
headers=headers,
data=object_data)
dest_obj_name = '{0}_destination'.format(self.base_object_name)
source = '/{0}/{1}'.format(src_container_name, src_obj_name)
hdrs = {'X-Copy-From': source, 'Content-Length': '0'}
response = self.client.copy_object(
dest_container_name,
dest_obj_name,
headers=hdrs)
method = 'put copy object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
dest_container_name,
dest_obj_name)
method = 'copied object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_copy_object(self):
src_container_name = self.setup_container(self.base_container_name)
dest_container_name = self.setup_container(self.base_container_name)
src_obj_name = '{0}_source'.format(self.base_object_name)
object_data = 'Test file data'
content_length = str(len(object_data))
dest_obj_name = '{0}_destination'.format(self.base_object_name)
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
self.client.create_object(
src_container_name,
src_obj_name,
headers=headers,
data=object_data)
dest = '/{0}/{1}'.format(dest_container_name, dest_obj_name)
hdrs = {'Destination': dest}
response = self.client.copy_object(
src_container_name,
src_obj_name,
headers=hdrs)
method = 'copy object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
response = self.client.get_object(
dest_container_name,
dest_obj_name)
method = 'copied object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
def test_object_deletion_with_valid_object(self):
container_name = self.setup_container(self.base_container_name)
object_name = self.base_object_name
object_data = 'Test file data'
content_length = str(len(object_data))
headers = {'Content-Length': content_length,
'Content-Type': CONTENT_TYPE_TEXT}
response = self.client.create_object(
container_name,
object_name,
headers=headers,
data=object_data)
self.assertEqual(response.status_code, 201, 'should be created')
response = self.client.delete_object(
container_name,
object_name)
        method = 'delete
obj_attr_list["cloud_vv_type"], \
availability_zone = None, \
imageRef = _imageid)
self.annotate_time_breakdown(obj_attr_list, "create_volume_time", _mark_a)
sleep(int(obj_attr_list["update_frequency"]))
obj_attr_list["cloud_vv_uuid"] = '{0}'.format(_instance.id)
_mark_a = time()
_wait_for_volume = 180
for i in range(1, _wait_for_volume) :
_vol_status = self.oskconnstorage[obj_attr_list["name"]].volumes.get(_instance.id).status
if _vol_status == "available" :
cbdebug("Volume " + obj_attr_list["cloud_vv_name"] + " took " + str(i) + " second(s) to become available",True)
break
elif _vol_status == "error" :
_fmsg = "Volume " + obj_attr_list["cloud_vv_name"] + " reported error after " + str(i) + " second(s)"
break
else :
sleep(1)
self.annotate_time_breakdown(obj_attr_list, "volume_available_time", _mark_a)
if str(obj_attr_list["boot_from_volume"]).lower() == "true" :
obj_attr_list["boot_volume_imageid1"] = None
obj_attr_list['cloud_vv'] = self.oskconnstorage[obj_attr_list["name"]].volumes.get(_instance.id).size
obj_attr_list["block_device_mapping"] = {'vda':'%s' % obj_attr_list["cloud_vv_uuid"]}
if _vol_status == "error" :
_status = 17262
else :
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except KeyboardInterrupt :
_status = 42
_fmsg = "CTRL-C interrupt"
cbdebug("VM create keyboard interrupt...", True)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("VV", obj_attr_list, "created", _status, _fmsg)
return _status, _msg
@trace
def vvdestroy(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
if str(obj_attr_list["cloud_vv_uuid"]).lower() != "none" :
_instance = self.get_instances(obj_attr_list, "vv", obj_attr_list["cloud_vm_name"])
if _instance :
self.common_messages("VV", obj_attr_list, "destroying", 0, '')
if len(_instance.attachments) :
_server_id = _instance.attachments[0]["server_id"]
_attachment_id = _instance.attachments[0]["id"]
                        # There is a weird bug in the python novaclient code. Don't change the
                        # following line: it is supposed to be "oskconncompute", even though
                        # it is dealing with volumes. Will explain later.
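                        # (In python-novaclient the compute client's volumes manager wraps the
                        # os-volume_attachments server API, which is presumably why the compute
                        # connection is used here for what looks like a storage operation.)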
self.oskconncompute[obj_attr_list["name"]].volumes.delete_server_volume(_server_id, _attachment_id)
self.oskconnstorage[obj_attr_list["name"]].volumes.delete(_instance)
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("VV", obj_attr_list, "destroyed", _status, _fmsg)
return _status, _msg
@trace
def vmcreate(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_instance = False
self.determine_instance_name(obj_attr_list)
self.determine_key_name(obj_attr_list)
obj_attr_list["last_known_state"] = "about to connect to " + self.get_description() + " manager"
self.take_action_if_requested("VM", obj_attr_list, "provision_originated")
# KEEP IT HERE TOO, NEEDS TO BE DUPLICATED, DO NOT REMOVE
self.determine_key_name(obj_attr_list)
if obj_attr_list["tenant"] != "default" :
if "ssh_key_injected" not in obj_attr_list :
self.check_ssh_key(obj_attr_list["vmc_name"], \
obj_attr_list["key_name"], \
obj_attr_list, True)
if "user" not in obj_attr_list :
obj_attr_list["user"] = obj_attr_list["tenant"]
obj_attr_list["admin_credentials"] = obj_attr_list["credentials"]
obj_attr_list["credentials"] = self.parse_authentication_data(obj_attr_list["credentials"], \
obj_attr_list["tenant"], \
obj_attr_list["user"], \
True)
if obj_attr_list["name"] in self.oskconncompute :
del self.oskconncompute[obj_attr_list["name"]]
_mark_a = time()
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], \
{"use_cinderclient" : obj_attr_list["use_cinderclient"]}, \
False, \
False, \
obj_attr_list["name"])
self.annotate_time_breakdown(obj_attr_list, "authenticate_time", _mark_a)
_mark_a = time()
if self.is_vm_running(obj_attr_list) :
_msg = "An instance named \"" + obj_attr_list["cloud_vm_name"]
_msg += "\" is already running. It needs to be destroyed first."
_status = 187
cberr(_msg)
raise CldOpsException(_msg, _status)
self.annotate_time_breakdown(obj_attr_list, "check_existing_instance_time", _mark_a)
obj_attr_list["last_known_state"] = "about to get flavor and image list"
if str(obj_attr_list["security_groups"]).lower() == "false" :
_security_groups = None
else :
# "Security groups" must be a list
_security_groups = []
_security_groups.append(obj_attr_list["security_groups"])
self.vm_placement(obj_attr_list)
obj_attr_list["last_known_state"] = "about to send create request"
_mark_a = time()
self.get_flavors(obj_attr_list)
self.annotate_time_breakdown(obj_attr_list, "get_flavor_time", _mark_a)
_mark_a = time()
self.get_images(obj_attr_list)
self.annotate_time_breakdown(obj_attr_list, "get_imageid_time", _mark_a)
obj_attr_list["userdata"] = self.populate_cloudconfig(obj_attr_list)
if obj_attr_list["userdata"] :
obj_attr_list["config_drive"] = True
else :
obj_attr_list["config_drive"] = None
_mark_a = time()
_netnames, _netids = self.get_networks(obj_attr_list)
self.annotate_time_breakdown(obj_attr_list, "get_netid_time", _mark_a)
_meta = {}
if "meta_tags" in obj_attr_list :
if obj_attr_list["meta_tags"] != "empty" and \
obj_attr_list["meta_tags"].count(':') and \
obj_attr_list["meta_tags"].count(',') :
_meta = str2dic(obj_attr_list["meta_tags"])
_fip = None
if str(obj_attr_list["use_floating_ip"]).lower() == "true" :
_msg = " Attempting to create a floating IP to " + obj_attr_list["name"] + "..."
cbdebug(_msg, True)
obj_attr_list["last_known_state"] = "about to create floating IP"
_fip = self.floating_ip_allocate(obj_attr_list)
_meta["experiment_id"] = obj_attr_list["experiment_id"]
if "cloud_floating_ip_uuid" in obj_attr_list :
_meta["cloud_floating_ip_uuid"] = obj_attr_list["cloud_floating_ip_uuid"]
_time_mark_prs = int(time())
obj_attr_list["mgt_002_provisioning_request_sent"] = \
_time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])
self.vvcreate(obj_attr_list)
self.common_messages("VM", obj_attr_list, "creating", 0, '')
self.pre_vmcreate_process(obj_attr_list)
_mark_a = time()
_instance = self.oskconncompute[obj_attr_list["name"]].servers.create(name = obj_attr_list["cloud_vm_name"], \
block_device_mapping = obj_attr_list["block_device_mapping"], \
image = obj_attr_list["boot_volume_imageid1_instance"], \
flavor = obj_attr_list["flavor_instance"], \
security_groups = _security_groups, \
key_name = obj_attr_list["key_name"], \
scheduler_hints = None, \
availability_zone = obj_attr_list["availability_zone"], \
meta = _meta, \
config_drive = obj_attr_list["config_drive"], \
userdata = obj_attr_list["userdata"], \
nics = _netids, \
disk_config = "AUTO")
if _instance :
self.annotate_time_breakdown(obj_attr_list, "instance_creation_time", _mark_a)
sleep(int(obj_attr_list["update_frequency"]))
obj_attr_list["cloud_vm_uuid"] = '{0}'.format(_instance.id)
self.take_action_if_requested("VM", obj_attr_list, "provision_started")
while not self.floating_ip_attach(obj_attr_list, _instance) :
True
_time_mark_prc = self.wait_for_instance_ready(obj_attr_list, _time_mark_prs)
_mark_a = time()
self.annotate_time_breakdown(obj_attr_list, "instance_scheduling_time", _mark_a)
_mark_a = time()
self.annotate_time_breakdown(obj_attr_list, "port_creation_time", _mark_a)
if obj_attr_list["last_known_state"].count("ERROR") :
_fmsg = obj_attr_list["last_known_state"]
_status = 189
else :
if not len(obj_attr_list["block_device_mapping"]) and \
str(obj_attr_list["cloud_vv_uuid"]).lower() != "none" :
self.common_messages("VV", obj_attr_list, "attaching", _status, _fmsg)
# There is a weird bug on the python novaclient code. Don't change the
# following line, it is supposed to be "oskconncompute", even though
# is dealing with volumes. Will explain later.
_mark_a = time()
self.oskconncompute[obj_attr_list["name"]].volumes.create_server_volume(obj_attr_list["cloud_vm_uuid"], \
obj_attr_list["cloud_vv_uuid"], \
"/dev/vdd")
self.annotate_time_breakdown(obj_attr_list, "attach_volume_time", _mark_a)
if obj_attr_list["volume_creation_status"] :
_status = obj_attr_list["volume_creation_status"]
else :
_status = 0
if "admin_credentials" in obj_attr_list :
self.connect(obj_attr_list["access"], \
obj_attr_list["admin_credentials"], \
obj_attr_list["vmc_name"], \
{},
False, \
False, \
obj_attr_list["name"])
self.get_mac_address(obj_attr_list, _instance)
self.wait_for_instance_boot(obj_attr_list, _time_mark_prc)
self.get_host_and_instance_name(obj_attr_list)
if obj_attr_list["tenant"] != "default" :
del self.oskconncompute[obj_attr_list["name"]]
if "resource_limits" in obj_attr_list :
_status, _fmsg = self.set_cgroup(obj_attr_list)
else :
_status = 0
if str(obj_attr_list["force_failure"]).lower() == "true" :
_fmsg = "Forced failure (option FORCE_FAILURE set \"true\")"
_status = 916
else :
_fmsg = "Failed to obtain instance's (cloud assigned) uuid. The "
_fmsg += "instance creation failed for some unknown reason."
cberr(_fmsg)
_status = 100
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except KeyboardInterrupt :
_status = 42
_fmsg = "CTRL-C interrupt"
cbdebug("VM create keyboard interrupt...", True)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
if "mgt_003_provisioning_request_completed" in obj_attr_list :
self.annotate_time_breakdown(obj_attr_list, "instance_active_time", obj_attr_list["mgt_003_provisioning_request_completed"], False)
if "mgt_004_network_acessible" in obj_attr_list :
self.annotate_time_breakdown(obj_attr_list, "instance_reachable_time", obj_attr_list["mgt_004_network_acessible"], False)
if "flavor_instance" in obj_attr_list :
del obj_attr_list["flavor_instance"]
if "boot_volume_imageid1_instance" in obj_attr_list :
del obj_attr_list["boot_volume_imageid1_instance"]
if "availability_zone" in obj_attr_list :
obj_attr_list["availability_zone"] = str(obj_attr_list["availability_zone"])
if "block_device_mapping" in obj_attr_list :
obj_attr_list["block_device_mapping"] = str(obj_attr_list["block_device_mapping"])
if "cloud_vv_type" in obj_attr_list :
obj_attr_list["cloud_vv_type"] = str(obj_attr_list["cloud_vv_type"])
_status, _msg = self.common_messages("VM", obj_attr_list, "created", _status, _fmsg)
return _status, _msg
@trace
def vmdestroy(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
if int(obj_attr_list["instance_creation_status"]) :
_status, _fmsg = self.instance_cleanup_on_failure(obj_attr_list)
else :
_time_mark_drs = int(time())
if "mgt_901_deprovisioning_request_originated" not in obj_attr_list :
obj_attr_list["mgt_901_deprovisioning_request_originated"] = _time_mark_drs
obj_attr_list["mgt_902_deprovisioning_request_sent"] = \
_time_mark_drs - int(obj_attr_list["mgt_901_deprovisioning_request_originated"])
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"],
{}, \
False, \
False, \
obj_attr_list["name"])
_wait = int(obj_attr_list["update_frequency"])
_max_tries = int(obj_attr_list["update_attempts"])
_curr_tries = 0
_instance = self.get_instances(obj_attr_list, "vm", obj_attr_list["cloud_vm_name"])
if _instance :
self.common_messages("VM", obj_attr_list, "destroying", 0, '')
self.floating_ip_delete(obj_attr_list)
self.retriable_instance_delete(obj_attr_list, _instance)
while _instance and _curr_tries < _max_tries :
_instance = self.get_instances(obj_attr_list, "vm", \
obj_attr_list["cloud_vm_name"])
if _instance :
if _instance.status != "ACTIVE" :
break
sleep(_wait)
_curr_tries += 1
else :
True
_status, _fmsg = self.vvdestroy(obj_attr_list)
_time_mark_drc = int(time())
obj_attr_list["mgt_903_deprovisioning_request_completed"] = \
_time_mark_drc - _time_mark_drs
self.take_action_if_requested("VM", obj_attr_list, "deprovision_finished")
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("VM", obj_attr_list, "destroyed", _status, _fmsg)
return _status, _msg
@trace
def vmcapture(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], \
{}, \
False, \
False, \
obj_attr_list["name"])
_wait = int(obj_attr_list["update_frequency"])
_curr_tries = 0
_max_tries = int(obj_attr_list["update_attempts"])
_instance = self.get_instances(obj_attr_list, "vm", obj_attr_list["cloud_vm_name"])
if _instance :
_time_mark_crs = int(time())
# Just in case the instance does not exist, make crc = crs
_time_mark_crc = _time_mark_crs
obj_attr_list["mgt_102_capture_request_sent"] | |
"VALUES (%s, %s);", (name, owner))
conn.commit()
cursor.close()
conn.close()
return flask.redirect(flask.url_for('album'))
# retrieve album name from id
def getAlbumName(id):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT name FROM albums WHERE id=%s;", id)
name = cursor.fetchone()[0]
cursor.close()
conn.close()
return name
# retrieve all photos in album
def getPhotosFromAlbum(album):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT caption, dir, id FROM photos WHERE album=%s;", album)
rows = cursor.fetchall()
photos = []
for row in rows:
caption = row[0]
dir = str('images' + row[1])
photoId = row[2]
userId = row[1].split("/")[1]
photoNo = row[1].split("/")[2].split(".")[0]
comments = getComments(photoId)
cursor.execute("SELECT tagId FROM phototags WHERE photoId=%s;", photoId)
rows = cursor.fetchall()
tags = []
for tagId in rows:
tags.append(getTag(tagId))
photo = (caption, dir, userId, photoNo, comments, photoId, tags)
photos.append(photo)
cursor.close()
conn.close()
return photos
# view page
@app.route('/view', methods=['GET', 'POST'])
@flask_login.login_required
def makeEdit():
if request.method == 'GET':
try:
id = request.args.get('album')
if id == '' or id is None:
return flask.redirect(flask.url_for('album'))
except:
return flask.redirect(flask.url_for('album'))
album = getAlbumName(id)
photos = getPhotosFromAlbum(id)
return render_template('view.html', album_name=album, album_id=id, photos=photos,
name=flask_login.current_user.id,
login=flask_login.current_user.is_authenticated)
try:
id = request.form.get('id')
name = request.form.get('name')
except:
return flask.redirect(flask.url_for('album'))
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("UPDATE albums SET name=%s WHERE id=%s;", (name, id))
conn.commit()
cursor.close()
conn.close()
return flask.redirect(flask.url_for('album'))
# delete album page
@app.route('/deletealbum', methods=['GET', 'POST'])
@flask_login.login_required
def deleteAlbum():
if request.method == 'GET':
return flask.redirect(flask.url_for('view'))
try:
id = request.form.get('id')
except:
return flask.redirect(flask.url_for('album'))
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("DELETE FROM albums WHERE id=%s;", id)
conn.commit()
cursor.close()
conn.close()
return flask.redirect(flask.url_for('album'))
# retrieve all photos in tag for that user
def getPhotosFromTag(tag, email):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
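    # Note: the tag value is interpolated straight into the SQL text below rather than being
    # passed as a query parameter, so it is vulnerable to SQL injection. A safer (hypothetical)
    # variant would parameterize the pattern, e.g.
    #   cursor.execute("... WHERE word LIKE %s ...", ('%' + tag + '%',))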
sql = "SELECT caption, dir FROM photos WHERE id IN " \
"(SELECT photoId FROM phototags WHERE tagId IN " \
"(SELECT id FROM tags WHERE word LIKE '%%%s%%')) " \
"AND album IN " \
"(SELECT id FROM albums WHERE owner=%s);" % (tag, getUserIdFromEmail(email))
cursor.execute(sql)
rows = cursor.fetchall()
photos = []
for row in rows:
userId = row[1].split("/")[1]
photoNo = row[1].split("/")[2].split(".")[0]
photos.append((row[0], str('images' + row[1]), userId, photoNo))
cursor.close()
conn.close()
return photos
# retrieve all photos in tag
def getAllPhotosFromTag(tag):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT caption, dir FROM photos WHERE id IN " +
"(SELECT photoId FROM phototags WHERE tagId IN "
"(SELECT id FROM tags WHERE word LIKE '%%%s%%'));" % tag)
rows = cursor.fetchall()
photos = []
for row in rows:
userId = row[1].split("/")[1]
photoNo = row[1].split("/")[2].split(".")[0]
photos.append((row[0], str('images' + row[1]), userId, photoNo))
cursor.close()
conn.close()
return photos
# tag page
@app.route('/tag', methods=['GET'])
def viewTag():
try:
tag = request.args.get('tag')
type = request.args.get('type')
if tag == '' or tag is None:
return flask.redirect(flask.url_for('album'))
except:
return flask.redirect(flask.url_for('album'))
if type == 'tagme':
if flask_login.current_user.is_authenticated:
photos = getPhotosFromTag(tag, flask_login.current_user.id)
return render_template('tag.html', tag_name=tag, photos=photos, viewall=False,
name=flask_login.current_user.id,
login=flask_login.current_user.is_authenticated)
else:
url = '/tag?type=tagall&tag=' + str(tag)
return flask.redirect(url)
else:
photos = getAllPhotosFromTag(tag)
if flask_login.current_user.is_authenticated:
return render_template('tag.html', tag_name=tag, photos=photos, viewall=True,
name=flask_login.current_user.id,
login=flask_login.current_user.is_authenticated)
else:
return render_template('tag.html', tag_name=tag, photos=photos, viewall=True,
login=flask_login.current_user.is_authenticated)
# retrieve all comments for a photo id
def getComments(photoId):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT text FROM comments WHERE photoId=%s;", photoId)
rows = cursor.fetchall()
comments = []
for row in rows:
comments.append(row[0])
cursor.close()
conn.close()
return comments
# retrieve photo from directory
def getPhoto(dir):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT caption, dir, id FROM photos WHERE dir LIKE '%%%s%%';" % str(dir))
row = cursor.fetchone()
caption = row[0]
dir = str('images' + row[1])
photoId = row[2]
userId = row[1].split("/")[1]
photoNo = row[1].split("/")[2].split(".")[0]
comments = getComments(photoId)
cursor.execute("SELECT tagId FROM phototags WHERE photoId=%s;", photoId)
rows = cursor.fetchall()
tags = []
for tagId in rows:
tags.append(getTag(tagId))
photo = (caption, dir, userId, photoNo, comments, photoId, tags)
cursor.close()
conn.close()
return photo
# get number of likes from photo id
def getLikeNumber(photoId):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT count(*) FROM likes WHERE photo=%s;", photoId)
count = cursor.fetchone()[0]
cursor.close()
conn.close()
return count
# check if user likes the photo
def checkLike(photoId, userId):
    # check whether this user has already liked this photo
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
if cursor.execute("SELECT * FROM likes WHERE photo=%s AND user=%s;", (photoId, userId)):
cursor.close()
conn.close()
return True
else:
cursor.close()
conn.close()
return False
# view photo page
@app.route('/view/<int:userId>/<int:photoNo>', methods=['GET', 'POST'])
def editPhoto(userId, photoNo):
dir = '/' + str(userId) + '/' + str(photoNo)
photo = getPhoto(dir)
noOfLikes = getLikeNumber(photo[5])
if request.method == 'GET':
if flask_login.current_user.is_authenticated:
likes = checkLike(photo[5], getUserIdFromEmail(flask_login.current_user.id))
return render_template('photo.html', photo=photo, likes=likes, noOfLikes=noOfLikes,
name=flask_login.current_user.id,
login=flask_login.current_user.is_authenticated)
else:
return render_template('photo.html', photo=photo, noOfLikes=noOfLikes,
login=flask_login.current_user.is_authenticated)
try:
comment = request.form.get('comment')
photoId = request.form.get('photoId')
except:
return flask.redirect(request.path)
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
if flask_login.current_user.is_authenticated:
id = getUserIdFromEmail(flask_login.current_user.id)
else:
id = 0
cursor.execute("INSERT INTO comments " +
"(user, photoId, text) " +
"VALUES (%s, %s, %s);", (id, photoId, comment))
conn.commit()
cursor.close()
conn.close()
return flask.redirect(request.path)
# retrieve tag word from tag id
def getTag(tagId):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT word FROM tags WHERE id=%s;", tagId)
word = cursor.fetchone()[0]
conn.commit()
cursor.close()
return word
# find top tags
def findTopTags():
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT photoId, tagId, count(*) AS ct FROM phototags " +
"GROUP BY tagId ORDER BY ct DESC;")
rows = cursor.fetchall()
cursor.close()
conn.close()
tags = []
for photoId, tagId, ct in rows:
tags.append(getTag(tagId))
tags = tags[:10]
return tags
# retrieve email from user id
def getEmailFromUserId(id):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT email FROM users WHERE id=%s;", id)
email = cursor.fetchone()[0]
cursor.close()
conn.close()
return email
# recommend friends: friends-of-friends the user is not yet connected to,
# ranked by the number of mutual friends
def findFriends(email):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
id = getUserIdFromEmail(email)
cursor.execute("SELECT friendId AS frid, count(friendId) AS ct FROM friends " +
"WHERE friendId NOT IN (SELECT friendId FROM friends WHERE userId=%s) " +
"AND userId IN (SELECT friendId FROM friends WHERE userId=%s) " +
"AND NOT friendId=%s " +
"GROUP BY friendId " +
"ORDER BY ct DESC;", (id, id, id))
rows = cursor.fetchall()
cursor.close()
conn.close()
friends = []
for friendId, ct in rows:
friends.append(getEmailFromUserId(friendId))
return friends
# retrieve photo from id
def getPhotoFromId(photoId):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT caption, dir, id FROM photos WHERE id=%s;", photoId)
row = cursor.fetchone()
caption = row[0]
dir = str('images' + row[1])
userId = row[1].split("/")[1]
photoNo = row[1].split("/")[2].split(".")[0]
comments = getComments(photoId)
cursor.execute("SELECT tagId FROM phototags WHERE photoId=%s;", photoId)
rows = cursor.fetchall()
tags = []
for tagId in rows:
tags.append(getTag(tagId))
photo = (caption, dir, userId, photoNo, comments, photoId, tags)
cursor.close()
conn.close()
return photo
# 'you may also like' feature
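# Summary of the recommendation logic implemented below: take the five tags used most often
# on the current user's own photos, then rank every photo by how many of those tags it has.
# When two adjacent photos tie on that count, the one with fewer total tags wins the tie
# (its match is considered more specific); if their total tag counts are also equal, both
# are kept.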
def getRecommendedPhotos(email):
db = getMysqlConnection()
conn = db['conn']
cursor = db['cursor']
cursor.execute("SELECT photoid, count(photoId) AS ctr FROM phototags " +
"JOIN " +
"(SELECT tagId FROM phototags WHERE photoId IN " +
"(SELECT id FROM photos WHERE album IN " +
"(SELECT id FROM albums WHERE owner=%s)) "
"GROUP BY tagId ORDER BY count(tagId) DESC limit 5) t1 " +
"ON " +
"phototags.tagId = t1.tagId " +
"GROUP BY photoId ORDER BY ctr DESC;", getUserIdFromEmail(email))
rows = cursor.fetchall()
photoIdList = []
skipnext = False
for i in range(len(rows)):
if not skipnext:
if i < (len(rows) - 1):
if rows[i][1] == rows[i + 1][1]:
cursor.execute("SELECT photoId, count(photoId) FROM phototags WHERE " +
"photoId=%s or photoId=%s " +
"GROUP BY photoId " +
"ORDER BY count(photoId) ASC;", (rows[i][0], rows[i + 1][0]))
intermediateRows = cursor.fetchall()
if intermediateRows[0][1] == intermediateRows[1][1]:
photoIdList.append(intermediateRows[0][0])
photoIdList.append(intermediateRows[1][0])
else:
photoIdList.append(intermediateRows[0][0])
skipnext = True
else:
photoIdList.append(rows[i][0])
else:
photoIdList.append(rows[i][0])
else:
skipnext = False
cursor.close()
conn.close()
photos = []
for id in photoIdList:
photos.append(getPhotoFromId(id))
return photos
# explore page, shows popular tags, recommended friends and
# 'you may also like' photos
@app.route('/explore', methods=['GET'])
@flask_login.login_required
def explore():
tags = findTopTags()
users = findFriends(flask_login.current_user.id)
photos = getRecommendedPhotos(flask_login.current_user.id)
return render_template('explore.html', tags=tags, users=users, photos=photos,
name=flask_login.current_user.id,
login=flask_login.current_user.is_authenticated)
def getFriendResult(userEmail, friendEmail):
    userId = getUserIdFromEmail(userEmail)
    friendId = getUserIdFromEmail(friendEmail)
    db = getMysqlConnection()
    conn = db['conn']
    cursor = db['cursor']
    # A friendship may be stored in either direction, so check both orderings.
    isFriend = bool(cursor.execute("SELECT friendId FROM friends WHERE " +
                                   "userId=%s AND friendId=%s;", (userId, friendId)))
    if not isFriend:
        isFriend = bool(cursor.execute("SELECT friendId FROM friends WHERE " +
                                       "friendId=%s AND userId=%s;", (userId, friendId)))
    cursor.close()
    conn.close()
    return isFriend
def getActivityFromId(id):
db | |
is not None:
for k, v in metrics.items():
self.tb_writer.add_scalar('metrics/{}'.format(k), v, step)
if heavy_logging:
max_B = 16
self.tb_writer.add_video('obs/input',
self.unnormalize(batch['obs_image']).transpose(0, 1)[:max_B], step)
if self.observation_model.decoder is not None:
self.tb_writer.add_video('obs/recon',
self.unnormalize(outputs['obs_recon']).transpose(0, 1)[:max_B], step)
self.tb_writer.add_video('obs/recon_post',
self.unnormalize(outputs['obs_recon_post']).transpose(0, 1)[:max_B], step)
self.tb_writer.add_video('obs/recon_prior',
self.unnormalize(outputs['obs_recon_prior']).transpose(0, 1)[:max_B], step)
return metrics, outputs
def log_video(self, video_tag, frames, step):
"""
Log a video to disk.
Args:
frames : List of (B, T, C, H, W)
step: training step.
video_tag: tag used for logging into tensorboard and as dir name for disk.
"""
self.tb_writer.add_video(video_tag, frames, step)
B, T, C, H, W = list(frames.shape)
frames = frames.permute(1, 2, 3, 0, 4).contiguous().view(T, C, H, B*W) # Stack batch along width.
video_dir = os.path.join(self.log_dir, video_tag)
os.makedirs(video_dir, exist_ok=True)
filename = os.path.join(video_dir, 'video_%08d.mp4' % step)
write_video_mp4(filename, frames)
def validate(self, step):
self.observation_model.eval()
if self.model is not None:
self.model.eval()
self.actor.eval()
self.critic.eval()
tic = time.time()
metrics = {}
# Collect data. One episode in each val environment.
replay_buffer = SequenceReplayBuffer()
num_episodes_per_val_env_for_reward = self.config.get('num_episodes_per_val_env_for_reward', 10)
sample_policy = self.config.get('val_stochastic_policy', False)
if sample_policy:
print('Using stochastic policy for val')
episode_reward = self.collect_data_from_actor(replay_buffer,
num_episodes_per_env=num_episodes_per_val_env_for_reward,
train=False, sample_policy=sample_policy)
metrics['episode_reward'] = episode_reward
# Take the first few episodes for computing the rest of the metrics. They are expensive to compute.
num_episodes_for_model = self.config.get('num_episodes_val_for_model', 5)
batch = replay_buffer.sample(num_episodes_for_model)
batch = self.prep_batch(batch, random_crop=False)
steps_per_episode = self.config['episode_steps'] // self.action_repeat
if not self.exclude_wm_loss:
with torch.no_grad():
loss, model_metrics, outputs = self.compute_loss(batch, train=False, decoding_for_viz=True)
metrics.update(model_metrics)
# Generate rollout from prior.
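            # Open-loop imagination check: starting from the posterior state at init_t - 1, the
            # prior is rolled forward using only the logged actions and then decoded, to visualise
            # how well the learned dynamics predict without seeing further observations.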
if self.observation_model.decoder is not None:
init_t = self.config['rollout_prior_init_t']
assert 0 < init_t < steps_per_episode - 1
init_state = dict([(k, v[init_t-1]) for k, v in outputs['posterior'].items()])
prior = self.model.dynamics.rollout_prior(init_state, batch['action'][init_t:, ...], deterministic=False)
# Decode to images.
latent = self.model.dynamics.get_state(prior, deterministic=False)
obs_recon_imagined = self.observation_model.decode(self.model.decoder(latent))
                # Prepend the first init_t frames, reconstructed from the one-step prior, to the imagined rollout.
obs_recon_imagined = torch.cat([outputs['obs_recon_prior'][:init_t, :], obs_recon_imagined], dim=0)
elif self.observation_model.use_gating_network: # Even if model is None, we want outputs to have gating.
with torch.no_grad():
outputs = self.observation_model(batch) # (T, B, dims) # Used to visualize gating.
toc = time.time()
metrics.update({
'timing': toc - tic,
})
loss_str = ' '.join(['{}: {:.2f}'.format(k, v) for k, v in sorted(metrics.items())])
print('Val Iter {} {}'.format(step, loss_str))
if not self.debug and self.tb_writer is not None:
for k, v in metrics.items():
self.tb_writer.add_scalar('val_metrics/{}'.format(k), v, step)
obs = self.unnormalize(batch['obs_image']).transpose(0, 1) # (B, T, C, H, W)
if self.observation_model.use_gating_network:
obs_gating = outputs['obs_gating'].transpose(0, 1) # (B, T, F, 1, H, W)
obs_gating = obs_gating[:, :, -1, :, :, :] # The gating for the last frame.
obs_gating = (obs_gating * 255).to(torch.uint8)
obs_gating = obs_gating.expand_as(obs).contiguous() # replicate along RGB.
obs = torch.cat([obs, obs_gating], dim=3)
if self.model is not None and self.observation_model.decoder is not None:
obs_recon = self.unnormalize(outputs['obs_recon']).transpose(0, 1)
obs_recon_post = self.unnormalize(outputs['obs_recon_post']).transpose(0, 1)
obs_recon_prior = self.unnormalize(outputs['obs_recon_prior']).transpose(0, 1)
obs_recon_imagined = self.unnormalize(obs_recon_imagined).transpose(0, 1)
obs = torch.cat([obs, obs_recon, obs_recon_post, obs_recon_prior, obs_recon_imagined], dim=3)
self.log_video('obs/val', obs, step)
return -episode_reward
def collect_data_random_policy(self, replay_buffer, num_episodes_per_env=1, train=True):
steps_per_episode = self.config['episode_steps'] // self.action_repeat
env_containers = self.train_env_containers if train else self.val_env_containers
total_reward = 0
for env_container in env_containers:
action_low, action_high = env_container.get_action_limits()
action_dims = env_container.get_action_dims()
for _ in range(num_episodes_per_env):
obs = env_container.reset()
seq = []
for _ in range(steps_per_episode):
action = np.random.uniform(action_low, action_high, action_dims)
next_obs, reward, _, _ = env_container.step(action)
seq.append(dict(obs=obs, action=action, reward=reward))
obs = next_obs
total_reward += reward
replay_buffer.add(seq)
avg_reward = total_reward / (num_episodes_per_env * len(env_containers))
return avg_reward
def prep_batch(self, batch, random_crop=False):
""" Prepare batch of data for input to the model.
Inputs:
batch : Dict containing 'obs', etc.
Returns:
batch: Same dict, but with images randomly cropped, moved to GPU, normalized.
"""
for key in batch.keys():
batch[key] = batch[key].to(self.device)
obs_image_cropped = crop_image_tensor(batch['obs_image'], self.crop_height, self.crop_width,
random_crop=random_crop,
same_crop_across_time=self.same_crop_across_time,
padding=self.random_crop_padding)
if self.has_momentum_encoder:
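            # Second, independently cropped view of the same observations; presumably the augmented
            # "key" view for a momentum/contrastive encoder (CURL-style). This is an interpretation,
            # not something stated elsewhere in this file.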
batch['obs_image_2'] = crop_image_tensor(batch['obs_image'], self.crop_height, self.crop_width,
random_crop=random_crop,
same_crop_across_time=self.same_crop_across_time,
padding=self.random_crop_padding)
if 'obs_image_clean' in batch: # When we have paired distraction-free and distracting obs.
batch['obs_image_clean'] = crop_image_tensor(batch['obs_image_clean'], self.crop_height, self.crop_width, random_crop=False, same_crop_across_time=True, padding=0)
else:
batch['obs_image_clean'] = crop_image_tensor(batch['obs_image'], self.crop_height, self.crop_width, random_crop=False, same_crop_across_time=True, padding=0)
batch['obs_image'] = obs_image_cropped
if len(batch['obs_image'].shape) == 5: # (B, T, C, H, W) -> (T, B, C, H, W)
swap_first_two_dims = True
else: # (B, C, H, W) -> no change.
swap_first_two_dims = False
for key in batch.keys():
if swap_first_two_dims:
batch[key] = batch[key].transpose(0, 1)
batch[key] = batch[key].contiguous().float().detach()
batch['obs_image'] = self.normalize(batch['obs_image'])
if 'obs_image_clean' in batch:
batch['obs_image_clean'] = self.normalize(batch['obs_image_clean'])
        if 'obs_image_2' in batch:
batch['obs_image_2'] = self.normalize(batch['obs_image_2'])
return batch
def collect_data_from_actor(self, replay_buffer, num_episodes_per_env=1, train=True, sample_policy=True):
steps_per_episode = self.config['episode_steps'] // self.action_repeat
self.observation_model.eval()
if self.model is not None:
self.model.eval()
self.actor.eval()
reward_total = 0
env_containers = self.train_env_containers if train else self.val_env_containers
num_env = len(env_containers)
for _ in range(num_episodes_per_env):
seq_list = []
obs_list = []
for env_container in env_containers:
obs = env_container.reset()
seq_list.append(list())
obs_list.append(dict(obs=obs))
posterior = None
action = None
for _ in range(steps_per_episode):
# Find the action to take for a batch of environments.
batch = torchify(obs_list) # Dict of (B, ...)
batch = self.prep_batch(batch, random_crop=False)
outputs = self.observation_model(batch)
obs_features = outputs['obs_features']
if self.model is not None: # If using a dynamics model.
latent, posterior = self.model.forward_one_step(obs_features, posterior, action,
deterministic_latent=self.sac_deterministic_state)
else:
latent = obs_features
action, _, _ = self.actor(latent, sample=sample_policy)
action_npy = action.detach().cpu().numpy() # (B, a_dims)
# Step each environment with the computed action.
for i, env_container in enumerate(env_containers):
current_action = action_npy[i]
obs, reward, _, _ = env_container.step(current_action)
seq_list[i].append(dict(obs=obs_list[i]['obs'], action=current_action, reward=reward))
obs_list[i]['obs'] = obs
reward_total += reward
for seq in seq_list:
replay_buffer.add(seq)
episode_reward = reward_total / (num_env * num_episodes_per_env)
return episode_reward
def update_target(self, target, critic, tau):
target_params_dict = dict(target.named_parameters())
for n, p in critic.named_parameters():
target_params_dict[n].data.copy_(
(1 - tau) * target_params_dict[n] + tau * p
)
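# Added note (illustrative, not part of the original module): update_target is
# a standard Polyak/soft update, target <- (1 - tau) * target + tau * critic,
# so the default tau of 1 used for 'update_target_critic_tau' reduces to a hard
# copy of the critic weights. A minimal sanity check, assuming `agent` is an
# instance of this trainer class (name assumed purely for illustration):
#
#   >>> agent.update_target(agent.target_critic, agent.critic, tau=1.0)
#   >>> all(torch.equal(p, q) for (_, p), (_, q) in
#   ...     zip(agent.target_critic.named_parameters(), agent.critic.named_parameters()))
#   True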
def update_actor_critic_sac(self, batch, step, heavy_logging=False):
"""
Inputs:
batch : Dict containing keys ('action', 'obs', 'reward')
'action' : (T, B, action_dims)
'obs' : (T, B, C, H, W)
'reward': (T, B)
"""
metrics = {}
outputs = self.observation_model(batch) # (T, B, dims)
obs_features = outputs['obs_features']
batch['obs_features'] = obs_features
if self.model is not None:
outputs = self.model(batch) # Dict containing prior (stoch, logits), posterior(..)
states = self.model.dynamics.get_state(outputs['posterior'],
deterministic=self.sac_deterministic_state)
else:
states = obs_features
# Update critic (potentially including the encoder).
current_states = states[:-1]
if self.sac_detach_states:
current_states = current_states.detach()
current_actions = batch['action'][:-1]
reward = batch['reward'][:-1] # (T-1, B)
next_states = states[1:].detach()
alpha = torch.exp(self.log_alpha).detach()
gamma = self.config['gamma']
with torch.no_grad():
if torch.isnan(next_states).any():
raise Exception('Next states contains nan')
next_actions, next_action_log_probs, _ = self.actor(next_states)
target_q1, target_q2 = self.target_critic(next_states, next_actions)
target_v = torch.min(target_q1, target_q2) - alpha * next_action_log_probs
target_q = reward + gamma * target_v
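# Added note (illustrative): the lines above form the standard SAC soft Bellman
# target, target_q = r + gamma * (min(Q1', Q2') - alpha * log pi(a'|s')),
# computed under torch.no_grad() so no gradient flows into the target critic;
# no done-mask is applied, presumably because episodes here have a fixed length.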
q1, q2 = self.critic(current_states, current_actions) # (T-1, B)
critic_loss = F.mse_loss(q1, target_q) + F.mse_loss(q2, target_q)
self.optimizer_critic.zero_grad()
critic_loss.backward()
if 'max_grad_norm_critic' in self.config:
grad_norm = torch.nn.utils.clip_grad_norm_(self.critic.parameters(), self.config['max_grad_norm_critic'])
metrics['grad_norm_critic'] = grad_norm.item()
self.optimizer_critic.step()
# Update actor.
current_states_detached = current_states.detach() # Actor loss does not backpropagate into encoder or dynamics.
policy_actions, policy_action_log_probs, policy_action_std = self.actor(current_states_detached) # (T-1, B, action_dims)
q1, q2 = self.critic(current_states_detached, policy_actions)
q = torch.min(q1, q2)
q_loss = -q.mean()
entropy_loss = policy_action_log_probs.mean()
entropy_loss_wt = torch.exp(self.log_alpha).detach()
actor_loss = q_loss + entropy_loss_wt * entropy_loss
self.optimizer_actor.zero_grad()
actor_loss.backward()
if 'max_grad_norm_actor' in self.config:
grad_norm = torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.config['max_grad_norm_actor'])
metrics['grad_norm_actor'] = grad_norm.item()
self.optimizer_actor.step()
# Update alpha (adaptive entropy loss wt)
alpha_loss = -(torch.exp(self.log_alpha) * (self.target_entropy + entropy_loss.detach()))
self.optimizer_alpha.zero_grad()
alpha_loss.backward()
if 'max_grad_norm_log_alpha' in self.config:
grad_norm = torch.nn.utils.clip_grad_norm_([self.log_alpha], self.config['max_grad_norm_log_alpha'])
metrics['grad_norm_log_alpha'] = grad_norm.item()
self.optimizer_alpha.step()
if 'max_log_alpha' in self.config:
with torch.no_grad():
self.log_alpha.clamp_(max=self.config['max_log_alpha'])
if step % self.config['update_target_critic_after'] == 0:
tau = self.config.get('update_target_critic_tau', 1)
self.update_target(self.target_critic, self.critic, tau)
metrics.update({
'critic_loss': critic_loss.item(),
'actor_loss': actor_loss.item(),
'q_loss': q_loss.item(),
'entropy_loss': entropy_loss.item(),
'log_alpha': self.log_alpha.item(),
})
if not self.debug and self.tb_writer is not None:
for k, v in metrics.items():
self.tb_writer.add_scalar('rl_metrics/{}'.format(k), v, step)
if heavy_logging:
self.tb_writer.add_histogram('rl_metrics/reward', reward.view(-1), step)
self.tb_writer.add_histogram('rl_metrics/q_targets', target_q.view(-1), step)
self.tb_writer.add_histogram('rl_metrics/critic_scores', q.view(-1), step)
self.tb_writer.add_histogram('rl_metrics/action', policy_actions.view(-1), step)
self.tb_writer.add_histogram('rl_metrics/action_log_probs', policy_action_log_probs.view(-1), step)
self.tb_writer.add_histogram('rl_metrics/action_std', policy_action_std.view(-1), step)
return metrics
def train(self):
""" Train the model."""
# Setup replay buffer.
steps_per_episode = self.config['episode_steps'] // self.action_repeat
replay_buffer_size = self.config['replay_buffer_size']
num_episodes_in_replay_buffer = replay_buffer_size // steps_per_episode
replay_buffer = SequenceReplayBuffer(size=num_episodes_in_replay_buffer)
# Find out how many data collection iterations to use.
max_steps = self.config['max_steps'] // self.action_repeat
num_iters = max_steps // (self.num_envs * steps_per_episode)
# How many gradients updates per iteration.
num_updates_per_iter = int(steps_per_episode * self.config.get('update_frequency_factor', 1.0))
random_crop = self.config.get('random_crop', False)
B = self.config['batch_size']
T = self.config['dynamics_seq_len']
train_step = 0
# Initial data collection.
initial_episodes_per_env = self.config['initial_data_steps'] // | |
<reponame>danmichaeljones/blendz
try:
from mpi4py import MPI
MPI_RANK = MPI.COMM_WORLD.Get_rank()
except:
MPI_RANK = 0
from builtins import *
import sys
import os
import warnings
from math import ceil
from multiprocessing import cpu_count
import itertools as itr
import numpy as np
from scipy.integrate import simps
import nestle
import emcee
from tqdm import tqdm
import dill
import blendz
from blendz import Configuration
from blendz.fluxes import Responses
from blendz.photometry import Photometry, SimulatedPhotometry
from blendz.utilities import incrementCount, Silence
try:
import pymultinest
PYMULTINEST_AVAILABLE = True
except ImportError:
PYMULTINEST_AVAILABLE = False
warnings.warn('PyMultinest not installed, so falling back to (slower) python implementation.'
+ ' See http://johannesbuchner.github.com/PyMultiNest/install.html for installation help.')
except (SystemExit, OSError):
PYMULTINEST_AVAILABLE = False
warnings.warn('PyMultinest failed to load, so falling back to (slower) python implementation.'
+ ' See http://johannesbuchner.github.com/PyMultiNest/install.html for installation help.')
class Photoz(object):
def __init__(self, model=None, photometry=None, config=None,\
load_state_path=None, **kwargs):
if load_state_path is not None:
self.loadState(load_state_path)
else:
#Warn user if a config is given along with a model and/or photometry object, as that config will be ignored
if ((model is not None and config is not None) or
(photometry is not None and config is not None)):
warnings.warn('A configuration object was provided to Photoz object '
+ 'as well as a Model/Photometry object, though these '
+ 'should be mutually exclusive. The configuration '
+ 'provided will be ignored.')
#Responses and photometry given, merge their configs
if (model is not None) and (photometry is not None):
self.config = Configuration(**kwargs)
self.config.mergeFromOther(model.config)
self.config.mergeFromOther(photometry.config)
self.model = model
self.responses = self.model.responses
self.photometry = photometry
#Only responses given, use its config to load photometry
elif (model is not None) and (photometry is None):
self.config = Configuration(**kwargs)
self.config.mergeFromOther(model.config)
self.model = model
self.responses = self.model.responses
self.photometry = Photometry(config=self.config)
#Only photometry given, use its config to load responses
elif (model is None) and (photometry is not None):
self.config = Configuration(**kwargs)
self.config.mergeFromOther(photometry.config)
self.photometry = photometry
self.model = blendz.model.BPZ(config=self.config)
self.responses = self.model.responses
#Neither given, load both from provided (or default, if None) config
else:
self.config = Configuration(**kwargs)
if config is not None:
self.config.mergeFromOther(config)
self.photometry = Photometry(config=self.config)
self.model = blendz.model.BPZ(config=self.config)
self.responses = self.model.responses
# Get max_ref_mag_hi from photometry, which already deals with sigma vs fixed
self.max_ref_mag_hi = np.max([g.ref_mag_hi for g in self.photometry])
self.model.max_ref_mag_hi = self.max_ref_mag_hi
self.num_templates = self.responses.templates.num_templates
self.num_measurements = self.responses.filters.num_filters
self.num_galaxies = self.photometry.num_galaxies
self.tmp_ind_to_type_ind = self.responses.templates.tmp_ind_to_type_ind
self.possible_types = self.responses.templates.possible_types
self.num_types = len(self.possible_types)
#TODO: Check this
#This was defaulted to assume a single component, present in all measurements
#not sure why it needs to be set up here though, so comment out for now
#self.model._setMeasurementComponentMapping(None, 1)
#Set up empty dictionaries to put results into
self._samples = {}
self._logevd = {}
self._logevd_error = {}
for g in range(self.num_galaxies):
#Each value is a dictionary which will be filled by sample function
#The keys of this inner dictionary will be the number of blends for run
self._samples[g] = {}
self._logevd[g] = {}
self._logevd_error[g] = {}
def saveState(self, filepath):
"""Save this entire Photoz instance to file.
This saves the exact state of the current object, including all data and any
results from sampling.
Args:
filepath (str): Path to file to save to.
"""
if isinstance(self.photometry, SimulatedPhotometry):
try:
current_seed = self.photometry.sim_seed.next()
self.photometry.sim_seed = current_seed
except:
warnings.warn('SimulatedPhotometry seed not saved.')
with open(filepath, 'wb') as f:
state = {key: val for key, val in self.__dict__.items() if key not in ['pbar', 'breakSilence']}
dill.dump(state, f)
#Put the random seed back how it was after the saving is done
if isinstance(self.photometry, SimulatedPhotometry):
try:
self.photometry.sim_seed = incrementCount(current_seed)
except:
pass
def loadState(self, filepath):
with open(filepath, 'rb') as f:
self.__dict__.update(dill.load(f))
#If the photometry is simulated, replace the seed currently saved as
#a number with the generator it was before saving
if isinstance(self.photometry, SimulatedPhotometry):
try:
current_seed = self.photometry.sim_seed
self.photometry.sim_seed = incrementCount(current_seed)
except:
warnings.warn('SimulatedPhotometry seed not loaded.')
def _lnLikelihood_flux(self, model_flux):
chi_sq = -1. * np.sum((self.photometry.current_galaxy.flux_data_noRef - model_flux)**2 / self.photometry.current_galaxy.flux_sigma_noRef**2)
return chi_sq
def _lnLikelihood_mag(self, total_ref_flux):
#Depending on the measurement-component mapping, the galaxy ref_flux/sigma
#can be an array of length either 1 or num_components.
#total_ref_flux should be of the same length
#TODO: No calls should be made where this isn't the case, so remove the assert
tmp_a = len(self.photometry.current_galaxy.ref_flux_data)
try:
tmp_b = len(total_ref_flux)
except TypeError:
total_ref_flux = np.array([total_ref_flux])
tmp_b = len(total_ref_flux)
assert(tmp_a == tmp_b)
chi_sq = -1. * np.sum((self.photometry.current_galaxy.ref_flux_data - total_ref_flux)**2 / self.photometry.current_galaxy.ref_flux_sigma**2)
return chi_sq
def _lnPosterior(self, params):
num_components = int(len(params) // 2)
redshifts = params[:num_components]
magnitudes = params[num_components:]
if not self.model._obeyPriorConditions(redshifts, magnitudes, self.photometry.current_galaxy.ref_mag_hi):
return -np.inf
else:
#Precalculate all quantities we'll need in the template loop
#Single interp call -> Shape = (N_template, N_band, N_component)
model_fluxes = self.responses.interp(redshifts)
priors = np.zeros((num_components, self.num_types))
for nb in range(num_components):
priors[nb, :] = self.model.lnPrior(redshifts[nb], magnitudes[nb])
redshift_correlation = np.log(1. + self.model.correlationFunction(redshifts))
#Get total flux in reference band = transform to flux & sum
# total_ref_flux should be either len 1 or len==num_components
# This should match the number of reference bands, so check
# If 1, add components together (blend). If not, treat separately
if len(self.config.ref_band)==1:
total_ref_flux = np.sum(10.**(-0.4 * magnitudes))
else:
total_ref_flux = 10.**(-0.4 * magnitudes) #Array with len==len(magnitudes)
#Check whether the selection band is ref or not
#Only need this if it is
if np.all(self.config.ref_band == self.config.select_band):
selection_effect_ref = self.model.lnSelection(total_ref_flux,
self.photometry.current_galaxy)
#Loop over all templates - discrete marginalisation
#All log probabilities so (multiply -> add) and (add -> logaddexp)
lnProb = -np.inf
#At each iteration template_combo is a tuple of (T_1, T_2... T_num_components)
for template_combo in itr.product(*itr.repeat(range(self.num_templates), num_components)):
#One redshift/template/magnitude prior and model flux for each blend component
tmp = 0.
blend_flux = np.zeros(self.num_measurements)
component_scaling_norm = 0.
for nb in range(num_components):
T = template_combo[nb]
#TODO: Check this!
if len(self.config.ref_band)==1:
#Only one reference band but it's still an array, so get element
component_scaling = 10.**(-0.4*magnitudes[nb]) / model_fluxes[T, self.config.ref_band[0], nb]
else:
component_scaling = 10.**(-0.4*magnitudes[nb]) / model_fluxes[T, self.config.ref_band[nb], nb]
blend_flux += model_fluxes[T, :, nb] * component_scaling * self.model.mc_map_matrix[nb, :]
type_ind = self.tmp_ind_to_type_ind[T]
tmp += priors[nb, type_ind]
#Check whether the selection band is ref or not
#If not, we need to use the template fluxes
if np.all(self.config.ref_band == self.config.select_band):
tmp += selection_effect_ref
else:
select_flux = blend_flux[self.config.select_band]
tmp += self.model.lnSelection(select_flux,
self.photometry.current_galaxy)
#Remove ref_band from blend_fluxes, as that goes into the ref-mag
#likelihood, not the flux likelihood
blend_flux = blend_flux[self.config.non_ref_bands]
#Other terms only appear once per summation-step
tmp += redshift_correlation
tmp += self._lnLikelihood_flux(blend_flux)
tmp += self._lnLikelihood_mag(total_ref_flux)
#logaddexp contribution from this template to marginalise
lnProb = np.logaddexp(lnProb, tmp)
return lnProb - self.prior_norm
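# Added note (illustrative, not part of the original class): the loop above is
# a discrete marginalisation over templates,
#   ln p(D | z, m) = logsumexp over (T_1..T_n) of [ sum_b ln prior(T_b, z_b, m_b)
#                     + ln selection + ln correlation + ln L_flux + ln L_mag ],
# carried out in log space with np.logaddexp for numerical stability before the
# normalisation constant prior_norm is subtracted.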
def _priorTransform(self, params):
'''
Transform params from [0, 1] uniform random to [min, max] uniform random,
where the min/max redshifts are set in the configuration, and the min/max
magnitudes (numerically, not brightness) come from the configuration and the
current galaxy's ref_mag_hi.
'''
num_components = int(len(params) // 2)
trans = np.ones(len(params))
trans[:num_components] = self.config.z_hi - self.config.z_lo
trans[num_components:] = self.photometry.current_galaxy.ref_mag_hi - self.config.ref_mag_lo
shift = np.zeros(len(params))
shift[:num_components] = self.config.z_lo
shift[num_components:] = self.config.ref_mag_lo
return (params * trans) + shift
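# Added sketch (illustrative, not part of the original class): for a
# two-component blend the first half of `params` holds redshifts and the second
# half reference magnitudes. Assuming z_lo=0, z_hi=2, ref_mag_lo=20 and a
# current galaxy with ref_mag_hi=26 (values chosen purely for illustration,
# `photoz` being a hypothetical Photoz instance):
#
#   >>> photoz._priorTransform(np.array([0.5, 0.25, 0.0, 1.0]))
#   array([ 1. ,  0.5, 20. , 26. ])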
def _priorTransform_multinest(self, cube, ndim, nparams):
'''
Transform params from [0, 1] uniform random to [min, max] uniform random,
with the same bounds as _priorTransform, modifying the MultiNest cube
in place.
'''
num_components = ndim // 2
trans = np.zeros(ndim)
trans[:num_components] = self.config.z_hi
trans[num_components:] = self.photometry.current_galaxy.ref_mag_hi - self.config.ref_mag_lo
shift = np.zeros(ndim)
shift[:num_components] = self.config.z_lo
shift[num_components:] = self.config.ref_mag_lo
for i in range(ndim):
cube[i] = (cube[i] * trans[i]) + shift[i]
def _lnPosterior_multinest(self, cube, ndim, nparams):
self.num_posterior_evals += 1
params = np.array([cube[i] for i in range(ndim)])
with self.breakSilence():
if (self.num_posterior_evals%self.num_between_print==0) and MPI_RANK==0:
self.pbar.set_description('[Cmp: {}/{}, Itr: {}] '.format(self.blend_count,
self.num_components_sampling,
self.num_posterior_evals))
self.pbar.refresh()
return self._lnPosterior(params)
def _sampleProgressUpdate(self, info):
if (info['it']%self.num_between_print==0) and MPI_RANK==0:
self.pbar.set_description('[Cmp: {}/{}, Itr: {}] '.format(self.blend_count,
self.num_components_sampling,
info['it']))
self.pbar.refresh()
def _full_lnPrior(self, params):
num_components = int(len(params) // 2)
redshifts = params[:num_components]
magnitudes = params[num_components:]
if not self.model._obeyPriorConditions(redshifts, magnitudes, self.photometry.current_galaxy.ref_mag_hi):
return -np.inf
else:
cmp_priors = np.zeros((num_components, self.num_types))
for nb in range(num_components):
cmp_priors[nb, :] = self.model.lnPrior(redshifts[nb], magnitudes[nb])
redshift_correlation = np.log(1. + self.model.correlationFunction(redshifts))
if len(self.config.ref_band)==1:
total_ref_flux = np.sum(10.**(-0.4 * magnitudes))
else:
total_ref_flux = 10.**(-0.4 * magnitudes) #Array with len==len(magnitudes)
#Check whether the selection band is ref or not
#If it is, we can use a single selection effect for every template
if np.all(self.config.ref_band == self.config.select_band):
selection_effect_ref = self.model.lnSelection(total_ref_flux,
self.photometry.current_galaxy)
#If not, we need to use the model fluxes, | |
<filename>testing/databaseTest.py
"""
This scripts runs tests on the database
"""
import os.path
import logging
from external.wip import work_in_progress
from rmgpy import settings
from rmgpy.data.rmg import RMGDatabase
from copy import copy, deepcopy
from rmgpy.data.base import LogicOr
from rmgpy.molecule import Group
from rmgpy.molecule.atomtype import atomTypes
import nose
import nose.tools
class TestDatabase(): # cannot inherit from unittest.TestCase if we want to use nose test generators
"""
Contains unit tests for the database for rigorous error checking.
"""
@classmethod
def setUpClass(cls):
"""
Load the database before running the tests.
"""
databaseDirectory = settings['database.directory']
cls.database = RMGDatabase()
cls.database.load(databaseDirectory, kineticsFamilies='all')
# These are generators, that call the methods below.
def test_kinetics(self):
for family_name, family in self.database.kinetics.families.iteritems():
test = lambda x: self.kinetics_checkCorrectNumberofNodesInRules(family_name)
test_name = "Kinetics family {0}: rules have correct number of nodes?".format(family_name)
test.description = test_name
self.compat_func_name = test_name
yield test, None
test = lambda x: self.kinetics_checkNodesInRulesFoundInGroups(family_name)
test_name = "Kinetics family {0}: rules' nodes exist in the groups?".format(family_name)
test.description = test_name
self.compat_func_name = test_name
yield test, None
test = lambda x: self.kinetics_checkGroupsFoundInTree(family_name)
test_name = "Kinetics family {0}: groups are in the tree with proper parents?".format(family_name)
test.description = test_name
self.compat_func_name = test_name
yield test, None
test = lambda x: self.kinetics_checkGroupsNonidentical(family_name)
test_name = "Kinetics family {0}: groups are not identical?".format(family_name)
test.description = test_name
self.compat_func_name = test_name
yield test, family_name
test = lambda x: self.kinetics_checkChildParentRelationships(family_name)
test_name = "Kinetics family {0}: parent-child relationships are correct?".format(family_name)
test.description = test_name
self.compat_func_name = test_name
yield test, family_name
test = lambda x: self.kinetics_checkSiblingsForParents(family_name)
test_name = "Kinetics family {0}: sibling relationships are correct?".format(family_name)
test.description = test_name
self.compat_func_name = test_name
yield test, family_name
test = lambda x: self.kinetics_checkCdAtomType(family_name)
test_name = "Kinetics family {0}: Cd, CS, CO, and Cdd atomtype used correctly?".format(family_name)
test.description = test_name
self.compat_func_name = test_name
yield test, family_name
test = lambda x: self.kinetics_checkReactantAndProductTemplate(family_name)
test_name = "Kinetics family {0}: reactant and product templates correctly defined?".format(family_name)
test.description = test_name
self.compat_func_name = test_name
yield test, family_name
for depository in family.depositories:
test = lambda x: self.kinetics_checkAdjlistsNonidentical(depository)
test_name = "Kinetics {1} Depository: check adjacency lists are nonidentical?".format(family_name, depository.label)
test.description = test_name
self.compat_func_name = test_name
yield test, depository.label
for library_name, library in self.database.kinetics.libraries.iteritems():
test = lambda x: self.kinetics_checkAdjlistsNonidentical(library)
test_name = "Kinetics library {0}: check adjacency lists are nonidentical?".format(library_name)
test.description = test_name
self.compat_func_name = test_name
yield test, library_name
def test_thermo(self):
for group_name, group in self.database.thermo.groups.iteritems():
test = lambda x: self.general_checkNodesFoundInTree(group_name, group)
test_name = "Thermo groups {0}: nodes are in the tree with proper parents?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkGroupsNonidentical(group_name, group)
test_name = "Thermo groups {0}: nodes are nonidentical?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkChildParentRelationships(group_name, group)
test_name = "Thermo groups {0}: parent-child relationships are correct?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkSiblingsForParents(group_name, group)
test_name = "Thermo groups {0}: sibling relationships are correct?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkCdAtomType(group_name, group)
test_name = "Thermo groups {0}: Cd atomtype used correctly?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
def test_solvation(self):
for group_name, group in self.database.solvation.groups.iteritems():
test = lambda x: self.general_checkNodesFoundInTree(group_name, group)
test_name = "Solvation groups {0}: nodes are in the tree with proper parents?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkGroupsNonidentical(group_name, group)
test_name = "Solvation groups {0}: nodes are nonidentical?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkChildParentRelationships(group_name, group)
test_name = "Solvation groups {0}: parent-child relationships are correct?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkSiblingsForParents(group_name, group)
test_name = "Solvation groups {0}: sibling relationships are correct?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkCdAtomType(group_name, group)
test_name = "Solvation groups {0}: Cd atomtype used correctly?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
def test_statmech(self):
for group_name, group in self.database.statmech.groups.iteritems():
test = lambda x: self.general_checkNodesFoundInTree(group_name, group)
test_name = "Statmech groups {0}: nodes are in the tree with proper parents?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkGroupsNonidentical(group_name, group)
test_name = "Statmech groups {0}: nodes are nonidentical?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkChildParentRelationships(group_name, group)
test_name = "Statmech groups {0}: parent-child relationships are correct?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkSiblingsForParents(group_name, group)
test_name = "Statmech groups {0}: sibling relationships are correct?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkCdAtomType(group_name, group)
test_name = "Statmech groups {0}: Cd atomtype used correctly?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
def test_transport(self):
for group_name, group in self.database.transport.groups.iteritems():
test = lambda x: self.general_checkNodesFoundInTree(group_name, group)
test_name = "Transport groups {0}: nodes are in the tree with proper parents?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkGroupsNonidentical(group_name, group)
test_name = "Transport groups {0}: nodes are nonidentical?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkChildParentRelationships(group_name, group)
test_name = "Transport groups {0}: parent-child relationships are correct?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkSiblingsForParents(group_name, group)
test_name = "Transport groups {0}: sibling relationships are correct?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
test = lambda x: self.general_checkCdAtomType(group_name, group)
test_name = "Transport groups {0}: Cd, CS, CO, and Cdd atomtype used correctly?".format(group_name)
test.description = test_name
self.compat_func_name = test_name
yield test, group_name
# These are the actual tests, that don't start with a "test_" name:
def kinetics_checkCorrectNumberofNodesInRules(self, family_name):
"""
This test ensures that each rate rule contains the proper number of nodes according to the family it originates from.
"""
family = self.database.kinetics.families[family_name]
expectedNumberNodes = len(family.getRootTemplate())
for label, entries in family.rules.entries.iteritems():
for entry in entries:
nodes = label.split(';')
nose.tools.assert_equal(len(nodes), expectedNumberNodes, "Wrong number of groups or semicolons in family {family} rule {entry}. Should be {num_nodes}".format(family=family_name, entry=entry, num_nodes=expectedNumberNodes))
def kinetics_checkNodesInRulesFoundInGroups(self, family_name):
"""
This test ensures that each rate rule contains nodes that exist in the groups and that they match the order of the forwardTemplate.
"""
family = self.database.kinetics.families[family_name]
# List of the each top node's descendants (including the top node)
topDescendants = []
for topNode in family.getRootTemplate():
nodes = [topNode]
nodes.extend(family.groups.descendants(topNode))
topDescendants.append(nodes)
topGroupOrder = ';'.join(topNode.label for topNode in family.getRootTemplate())
for label, entries in family.rules.entries.iteritems():
for entry in entries:
nodes = label.split(';')
for i, node in enumerate(nodes):
nose.tools.assert_true(node in family.groups.entries, "In {family} family, no group definition found for label {label} in rule {entry}".format(family=family_name, label=node, entry=entry))
nose.tools.assert_true(family.groups.entries[node] in topDescendants[i], "In {family} family, rule {entry} was found with groups out of order. The correct order for a rule should be subgroups of {top}.".format(family=family_name, entry=entry, top=topGroupOrder))
def kinetics_checkGroupsFoundInTree(self, family_name):
"""
This test checks whether groups are found in the tree, with proper parents.
"""
family = self.database.kinetics.families[family_name]
for nodeName, nodeGroup in family.groups.entries.iteritems():
nose.tools.assert_false('[' in nodeName or ']' in nodeName, "Group {group} in {family} family contains square brackets [ ] in the label, which are not allowed.".format(group=nodeName, family=family_name))
ascendParent = nodeGroup
# Check whether the node has proper parents unless it is the top reactant or product node
while ascendParent not in family.groups.top and ascendParent not in family.forwardTemplate.products:
child = ascendParent
ascendParent = ascendParent.parent
nose.tools.assert_true(ascendParent is not None, "Group {group} in {family} family was found in the tree without a proper parent.".format(group=child, family=family_name))
nose.tools.assert_true(child in ascendParent.children, "Group {group} in {family} family was found in the tree without a proper parent.".format(group=nodeName, family=family_name))
def kinetics_checkGroupsNonidentical(self, family_name):
"""
This test checks that the groups are non-identical.
"""
from rmgpy.data.base import Database
originalFamily = self.database.kinetics.families[family_name]
family = Database()
family.entries = originalFamily.groups.entries
entriesCopy = copy(family.entries)
for nodeName, nodeGroup in family.entries.iteritems():
del entriesCopy[nodeName]
for nodeNameOther, nodeGroupOther in entriesCopy.iteritems():
nose.tools.assert_false(family.matchNodeToNode(nodeGroup, nodeGroupOther), "Group {group} in {family} family was found to be identical to group {groupOther}".format(group=nodeName, family=family_name, groupOther=nodeNameOther))
def kinetics_checkChildParentRelationships(self, family_name):
"""
This test checks that groups' parent-child relationships are | |
<reponame>syrte/halotools
""" Common functions used to rotate three dimensional vectors
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
__all__ = ('elementwise_dot', 'elementwise_norm', 'normalized_vectors',
'angles_between_list_of_vectors', 'vectors_normal_to_planes',
'rotate_vector_collection', 'rotation_matrices_from_angles',
'rotation_matrices_from_vectors', 'vectors_between_list_of_vectors')
def elementwise_dot(x, y):
r""" Calculate the dot product between
each pair of elements in two input lists of 3d points.
Parameters
----------
x : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d points
y : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d points
Returns
-------
result : ndarray
Numpy array of shape (npts, ) storing the dot product between each
pair of corresponding points in x and y.
Examples
--------
>>> npts = int(1e3)
>>> x = np.random.random((npts, 3))
>>> y = np.random.random((npts, 3))
>>> dots = elementwise_dot(x, y)
"""
x = np.atleast_2d(x)
y = np.atleast_2d(y)
return np.sum(x*y, axis=1)
def elementwise_norm(x):
r""" Calculate the normalization of each element in a list of 3d points.
Parameters
----------
x : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d points
Returns
-------
result : ndarray
Numpy array of shape (npts, ) storing the norm of each 3d point in x.
Examples
--------
>>> npts = int(1e3)
>>> x = np.random.random((npts, 3))
>>> norms = elementwise_norm(x)
"""
x = np.atleast_2d(x)
return np.sqrt(np.sum(x**2, axis=1))
def normalized_vectors(vectors):
r""" Return a unit-vector for each 3d vector in the input list of 3d points.
Parameters
----------
vectors : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d points
Returns
-------
normed_x : ndarray
Numpy array of shape (npts, 3)
Examples
--------
>>> npts = int(1e3)
>>> x = np.random.random((npts, 3))
>>> normed_x = normalized_vectors(x)
"""
vectors = np.atleast_2d(vectors)
npts = vectors.shape[0]
return vectors/elementwise_norm(vectors).reshape((npts, -1))
def angles_between_list_of_vectors(v0, v1, tol=1e-3):
r""" Calculate the angle between a collection of 3d vectors
Parameters
----------
v0 : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d vectors
Note that the normalization of `v0` will be ignored.
v1 : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d vectors
Note that the normalization of `v1` will be ignored.
tol : float, optional
Acceptable numerical error for errors in angle.
This variable is only used to round off numerical noise that otherwise
causes exceptions to be raised by the inverse cosine function.
Default is 0.001.
Returns
-------
angles : ndarray
Numpy array of shape (npts, ) storing the angles between each pair of
corresponding points in v0 and v1.
Returned values are in units of radians spanning [0, pi].
Examples
--------
>>> npts = int(1e4)
>>> v0 = np.random.random((npts, 3))
>>> v1 = np.random.random((npts, 3))
>>> angles = angles_between_list_of_vectors(v0, v1)
"""
dot = elementwise_dot(normalized_vectors(v0), normalized_vectors(v1))
# Protect against tiny numerical excesses beyond the range [-1, 1]
mask1 = (dot > 1) & (dot < 1 + tol)
dot = np.where(mask1, 1., dot)
mask2 = (dot < -1) & (dot > -1 - tol)
dot = np.where(mask2, -1., dot)
return np.arccos(dot)
def vectors_normal_to_planes(x, y):
r""" Given a collection of 3d vectors x and y,
return a collection of 3d unit-vectors that are orthogonal to x and y.
Parameters
----------
x : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d vectors
Note that the normalization of `x` will be ignored.
y : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d vectors
Note that the normalization of `y` will be ignored.
Returns
-------
z : ndarray
Numpy array of shape (npts, 3). Each 3d vector in z will be orthogonal
to the corresponding vector in x and y.
Examples
--------
>>> npts = int(1e4)
>>> x = np.random.random((npts, 3))
>>> y = np.random.random((npts, 3))
>>> normed_z = vectors_normal_to_planes(x, y)
"""
return normalized_vectors(np.cross(x, y))
def vectors_between_list_of_vectors(x, y, p):
r"""
Starting from two input lists of vectors, return a list of unit-vectors
that lie in the same plane as the corresponding input vectors,
and where the input `p` controls the angle between
the returned vs. input vectors.
Parameters
----------
x : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d vectors
Note that the normalization of `x` will be ignored.
y : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d vectors
Note that the normalization of `y` will be ignored.
p : ndarray
Numpy array of shape (npts, ) storing values in the closed interval [0, 1].
For values of `p` equal to zero, the returned vectors will be
exactly aligned with the input `x`; when `p` equals unity, the returned
vectors will be aligned with `y`.
Returns
-------
v : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d unit-vectors
lying in the plane spanned by `x` and `y`. The angle between `v` and `x`
will be equal to :math:`p*\theta_{\rm xy}`.
Examples
--------
>>> npts = int(1e4)
>>> x = np.random.random((npts, 3))
>>> y = np.random.random((npts, 3))
>>> p = np.random.uniform(0, 1, npts)
>>> v = vectors_between_list_of_vectors(x, y, p)
>>> angles_xy = angles_between_list_of_vectors(x, y)
>>> angles_xp = angles_between_list_of_vectors(x, v)
>>> assert np.allclose(angles_xy*p, angles_xp)
"""
assert np.all(p >= 0), "All values of p must be non-negative"
assert np.all(p <= 1), "No value of p can exceed unity"
z = vectors_normal_to_planes(x, y)
theta = angles_between_list_of_vectors(x, y)
angles = p*theta
rotation_matrices = rotation_matrices_from_angles(angles, z)
return normalized_vectors(rotate_vector_collection(rotation_matrices, x))
def rotation_matrices_from_angles(angles, directions):
r""" Calculate a collection of rotation matrices defined by
an input collection of rotation angles and rotation axes.
Parameters
----------
angles : ndarray
Numpy array of shape (npts, ) storing a collection of rotation angles
directions : ndarray
Numpy array of shape (npts, 3) storing a collection of rotation axes in 3d
Returns
-------
matrices : ndarray
Numpy array of shape (npts, 3, 3) storing a collection of rotation matrices
Examples
--------
>>> npts = int(1e4)
>>> angles = np.random.uniform(-np.pi/2., np.pi/2., npts)
>>> directions = np.random.random((npts, 3))
>>> rotation_matrices = rotation_matrices_from_angles(angles, directions)
Notes
-----
The function `rotate_vector_collection` can be used to efficiently
apply the returned collection of matrices to a collection of 3d vectors
"""
directions = normalized_vectors(directions)
angles = np.atleast_1d(angles)
npts = directions.shape[0]
sina = np.sin(angles)
cosa = np.cos(angles)
R1 = np.zeros((npts, 3, 3))
R1[:, 0, 0] = cosa
R1[:, 1, 1] = cosa
R1[:, 2, 2] = cosa
R2 = directions[..., None] * directions[:, None, :]
R2 = R2*np.repeat(1.-cosa, 9).reshape((npts, 3, 3))
directions *= sina.reshape((npts, 1))
R3 = np.zeros((npts, 3, 3))
R3[:, [1, 2, 0], [2, 0, 1]] -= directions
R3[:, [2, 0, 1], [1, 2, 0]] += directions
return R1 + R2 + R3
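# Added note (illustrative): R1 + R2 + R3 assembles the Rodrigues rotation
# formula, R = cos(a) * I + (1 - cos(a)) * d d^T + sin(a) * [d]_x, where [d]_x
# is the skew-symmetric cross-product matrix built into R3 above.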
def rotation_matrices_from_vectors(v0, v1):
r""" Calculate a collection of rotation matrices defined by the unique
transformation rotating v0 into v1 about the mutually perpendicular axis.
Parameters
----------
v0 : ndarray
Numpy array of shape (npts, 3) storing a collection of initial vector orientations.
Note that the normalization of `v0` will be ignored.
v1 : ndarray
Numpy array of shape (npts, 3) storing a collection of final vectors.
Note that the normalization of `v1` will be ignored.
Returns
-------
matrices : ndarray
Numpy array of shape (npts, 3, 3) rotating each v0 into the corresponding v1
Examples
--------
>>> npts = int(1e4)
>>> v0 = np.random.random((npts, 3))
>>> v1 = np.random.random((npts, 3))
>>> rotation_matrices = rotation_matrices_from_vectors(v0, v1)
Notes
-----
The function `rotate_vector_collection` can be used to efficiently
apply the returned collection of matrices to a collection of 3d vectors
"""
v0 = normalized_vectors(v0)
v1 = normalized_vectors(v1)
directions = vectors_normal_to_planes(v0, v1)
angles = angles_between_list_of_vectors(v0, v1)
return rotation_matrices_from_angles(angles, directions)
def rotate_vector_collection(rotation_matrices, vectors, optimize=False):
r""" Given a collection of rotation matrices and a collection of 3d vectors,
apply each matrix to rotate the corresponding vector.
Parameters
----------
rotation_matrices : ndarray
Numpy array of shape (npts, 3, 3) storing a collection of rotation matrices
vectors : ndarray
Numpy array of shape (npts, 3) storing a collection of 3d vectors
Returns
-------
rotated_vectors | |
constant node types
__slots__ = ("value",)
class Num(Constant):
# inherited class for all numeric constant node types
__slots__ = ()
_translated_fields = {"n": "value"}
@property
def n(self):
# TODO phase out use of Num.n and remove this
return self.value
def validate(self):
if self.value < SizeLimits.MIN_INT256:
raise OverflowException("Value is below lower bound for all numeric types", self)
if self.value > SizeLimits.MAX_UINT256:
raise OverflowException("Value exceeds upper bound for all numeric types", self)
class Int(Num):
"""
An integer.
Attributes
----------
value : int
Value of the node, represented as an integer.
"""
__slots__ = ()
class Decimal(Num):
"""
A decimal.
Attributes
----------
value : decimal.Decimal
Value of the node, represented as a Decimal object.
"""
__slots__ = ()
def __init__(self, parent: Optional["VyperNode"] = None, **kwargs: dict):
super().__init__(parent, **kwargs)
if not isinstance(self.value, decimal.Decimal):
self.value = decimal.Decimal(self.value)
def to_dict(self):
ast_dict = super().to_dict()
ast_dict["value"] = self.node_source_code
return ast_dict
def validate(self):
if self.value.as_tuple().exponent < -MAX_DECIMAL_PLACES:
raise InvalidLiteral("Vyper supports a maximum of ten decimal points", self)
if self.value < SizeLimits.MIN_AST_DECIMAL:
raise OverflowException("Value is below lower bound for decimal types", self)
if self.value > SizeLimits.MAX_AST_DECIMAL:
raise OverflowException("Value exceeds upper bound for decimal types", self)
class Hex(Constant):
"""
A hexadecimal value, e.g. `0xFF`
Attributes
----------
value : str
Value of the node, represented as a string taken directly from the contract source.
"""
__slots__ = ()
_translated_fields = {"n": "value"}
def validate(self):
if "_" in self.value:
raise InvalidLiteral("Underscores not allowed in hex literals", self)
if len(self.value) % 2:
raise InvalidLiteral("Hex notation requires an even number of digits", self)
class Str(Constant):
__slots__ = ()
_translated_fields = {"s": "value"}
def validate(self):
for c in self.value:
if ord(c) >= 256:
raise InvalidLiteral(f"'{c}' is not an allowed string literal character", self)
@property
def s(self):
# TODO phase out use of Str.s and remove this
return self.value
class Bytes(Constant):
__slots__ = ()
_translated_fields = {"s": "value"}
def __init__(self, parent: Optional["VyperNode"] = None, **kwargs: dict):
super().__init__(parent, **kwargs)
if isinstance(self.value, str):
# convert hex string to bytes
length = len(self.value) // 2 - 1
self.value = int(self.value, 16).to_bytes(length, "big")
def to_dict(self):
ast_dict = super().to_dict()
ast_dict["value"] = f"0x{self.value.hex()}"
return ast_dict
@property
def s(self):
return self.value
class List(VyperNode):
__slots__ = ("elements",)
_translated_fields = {"elts": "elements"}
class Tuple(VyperNode):
__slots__ = ("elements",)
_translated_fields = {"elts": "elements"}
def validate(self):
if not self.elements:
raise InvalidLiteral("Cannot have an empty tuple", self)
class Dict(VyperNode):
__slots__ = ("keys", "values")
class NameConstant(Constant):
__slots__ = ("value",)
class Name(VyperNode):
__slots__ = ("id",)
class Expr(VyperNode):
__slots__ = ("value",)
class UnaryOp(VyperNode):
__slots__ = ("op", "operand")
def evaluate(self) -> VyperNode:
"""
Attempt to evaluate the unary operation.
Returns
-------
Int | Decimal
Node representing the result of the evaluation.
"""
if isinstance(self.op, Not) and not isinstance(self.operand, NameConstant):
raise UnfoldableNode("Node contains invalid field(s) for evaluation")
if isinstance(self.op, USub) and not isinstance(self.operand, (Int, Decimal)):
raise UnfoldableNode("Node contains invalid field(s) for evaluation")
value = self.op._op(self.operand.value)
_validate_numeric_bounds(self, value)
return type(self.operand).from_node(self, value=value)
class USub(VyperNode):
__slots__ = ()
_description = "negation"
_op = operator.neg
class Not(VyperNode):
__slots__ = ()
_op = operator.not_
class BinOp(VyperNode):
__slots__ = ("left", "op", "right")
def evaluate(self) -> VyperNode:
"""
Attempt to evaluate the arithmetic operation.
Returns
-------
Int | Decimal
Node representing the result of the evaluation.
"""
left, right = self.left, self.right
if type(left) is not type(right):
raise UnfoldableNode("Node contains invalid field(s) for evaluation")
if not isinstance(left, (Int, Decimal)):
raise UnfoldableNode("Node contains invalid field(s) for evaluation")
value = self.op._op(left.value, right.value)
_validate_numeric_bounds(self, value)
return type(left).from_node(self, value=value)
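# Added note (illustrative): evaluate() performs constant folding on literal
# arithmetic, e.g. the source expression `2 + 3` parses to
# BinOp(left=Int(2), op=Add(), right=Int(3)) and folds to Int(value=5), while a
# mixed-type expression such as `2 + 1.5` raises UnfoldableNode and is left to
# later compiler stages.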
class Add(VyperNode):
__slots__ = ()
_description = "addition"
_pretty = "+"
_op = operator.add
class Sub(VyperNode):
__slots__ = ()
_description = "subtraction"
_pretty = "-"
_op = operator.sub
class Mult(VyperNode):
__slots__ = ()
_description = "multiplication"
_pretty = "*"
def _op(self, left, right):
assert type(left) is type(right)
value = left * right
if isinstance(left, decimal.Decimal):
# ensure that the result is truncated to MAX_DECIMAL_PLACES
return value.quantize(
decimal.Decimal(f"{1:0.{MAX_DECIMAL_PLACES}f}"), decimal.ROUND_DOWN
)
else:
return value
class Div(VyperNode):
__slots__ = ()
_description = "division"
_pretty = "/"
def _op(self, left, right):
# evaluate the operation using true division or floor division
assert type(left) is type(right)
if not right:
raise ZeroDivisionException("Division by zero")
if isinstance(left, decimal.Decimal):
value = left / right
if value < 0:
# the EVM always truncates toward zero
value = -(-left / right)
# ensure that the result is truncated to MAX_DECIMAL_PLACES
return value.quantize(
decimal.Decimal(f"{1:0.{MAX_DECIMAL_PLACES}f}"), decimal.ROUND_DOWN
)
else:
value = left // right
if value < 0:
return -(-left // right)
return value
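# Added note (illustrative): both branches above truncate toward zero to match
# EVM division rather than Python's floor division, e.g. _op(-7, 2) evaluates
# -(-left // right) = -(7 // 2) = -3, whereas plain Python gives -7 // 2 == -4.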
class Mod(VyperNode):
__slots__ = ()
_description = "modulus"
_pretty = "%"
def _op(self, left, right):
if not right:
raise ZeroDivisionException("Modulo by zero")
value = abs(left) % abs(right)
if left < 0:
value = -value
return value
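# Added note (illustrative): the sign handling above mirrors EVM signed modulo
# (SMOD), where the result takes the sign of the left operand, unlike Python's %:
#   _op(7, 3) -> 1,   _op(-7, 3) -> -1  (Python: -7 % 3 == 2),   _op(7, -3) -> 1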
class Pow(VyperNode):
__slots__ = ()
_description = "exponentiation"
_pretty = "**"
def _op(self, left, right):
if isinstance(left, decimal.Decimal):
raise TypeMismatch("Cannot perform exponentiation on decimal values.", self._parent)
if right < 0:
raise InvalidOperation("Cannot calculate a negative power", self._parent)
return int(left ** right)
class BitAnd(VyperNode):
__slots__ = ()
_description = "bitwise and"
_pretty = "&"
_op = operator.and_
class BitOr(VyperNode):
__slots__ = ()
_description = "bitwise or"
_pretty = "|"
_op = operator.or_
class BoolOp(VyperNode):
__slots__ = ("op", "values")
def evaluate(self) -> VyperNode:
"""
Attempt to evaluate the boolean operation.
Returns
-------
NameConstant
Node representing the result of the evaluation.
"""
if next((i for i in self.values if not isinstance(i, NameConstant)), None):
raise UnfoldableNode("Node contains invalid field(s) for evaluation")
values = [i.value for i in self.values]
if None in values:
raise UnfoldableNode("Node contains invalid field(s) for evaluation")
value = self.op._op(values)
return NameConstant.from_node(self, value=value)
class And(VyperNode):
__slots__ = ()
_description = "logical and"
_op = all
class Or(VyperNode):
__slots__ = ()
_description = "logical or"
_op = any
class Compare(VyperNode):
"""
A comparison of two values.
Attributes
----------
left : VyperNode
The left-hand value in the comparison.
op : VyperNode
The comparison operator.
right : VyperNode
The right-hand value in the comparison.
"""
__slots__ = ("left", "op", "right")
def __init__(self, *args, **kwargs):
if len(kwargs["ops"]) > 1 or len(kwargs["comparators"]) > 1:
_raise_syntax_exc("Cannot have a comparison with more than two elements", kwargs)
kwargs["op"] = kwargs.pop("ops")[0]
kwargs["right"] = kwargs.pop("comparators")[0]
super().__init__(*args, **kwargs)
def evaluate(self) -> VyperNode:
"""
Attempt to evaluate the comparison.
Returns
-------
NameConstant
Node representing the result of the evaluation.
"""
left, right = self.left, self.right
if not isinstance(left, Constant):
raise UnfoldableNode("Node contains invalid field(s) for evaluation")
if isinstance(self.op, (In, NotIn)):
if not isinstance(right, List):
raise UnfoldableNode("Node contains invalid field(s) for evaluation")
if next((i for i in right.elements if not isinstance(i, Constant)), None):
raise UnfoldableNode("Node contains invalid field(s) for evaluation")
if len(set([type(i) for i in right.elements])) > 1:
raise UnfoldableNode("List contains multiple literal types")
value = self.op._op(left.value, [i.value for i in right.elements])
return NameConstant.from_node(self, value=value)
if not isinstance(left, type(right)):
raise UnfoldableNode("Cannot compare different literal types")
if not isinstance(self.op, (Eq, NotEq)) and not isinstance(left, (Int, Decimal)):
raise TypeMismatch(f"Invalid literal types for {self.op.description} comparison", self)
value = self.op._op(left.value, right.value)
return NameConstant.from_node(self, value=value)
class Eq(VyperNode):
__slots__ = ()
_description = "equality"
_op = operator.eq
class NotEq(VyperNode):
__slots__ = ()
_description = "non-equality"
_op = operator.ne
class Lt(VyperNode):
__slots__ = ()
_description = "less than"
_op = operator.lt
class LtE(VyperNode):
__slots__ = ()
_description = "less-or-equal"
_op = operator.le
class Gt(VyperNode):
__slots__ = ()
_description = "greater than"
_op = operator.gt
class GtE(VyperNode):
__slots__ = ()
_description = "greater-or-equal"
_op = operator.ge
class In(VyperNode):
__slots__ = ()
_description = "membership"
def _op(self, left, right):
return left in right
class NotIn(VyperNode):
__slots__ = ()
_description = "exclusion"
def _op(self, left, right):
return left not in right
class Call(VyperNode):
__slots__ = ("func", "args", "keywords", "keyword")
class keyword(VyperNode):
__slots__ = ("arg", "value")
class Attribute(VyperNode):
__slots__ = ("attr", "value")
class Subscript(VyperNode):
__slots__ = ("slice", "value")
def evaluate(self) -> VyperNode:
"""
Attempt to evaluate the subscript.
This method reduces an indexed reference to a literal array into the value
within the array, e.g. `["foo", "bar"][1]` becomes `"bar"`
Returns
-------
VyperNode
Node representing the result of the evaluation.
"""
if not isinstance(self.value, List):
raise UnfoldableNode("Subscript object is not a literal list")
elements = self.value.elements
if len(set([type(i) for i in elements])) > 1:
raise UnfoldableNode("List contains multiple node types")
idx = self.slice.get("value.value")
if not isinstance(idx, | |
import sys
import cPickle
import warnings
import numpy as np
from scipy.stats import pearsonr
from matplotlib import pyplot as plt
import emcee
from emcee.utils import MPIPool
from pysb.integrate import Solver
from bax_insertion.models import multiconf
def posterior(position, gf):
"""A generic log posterior function."""
post_val = prior(position, gf) + likelihood(position, gf)
return post_val
def negative_posterior(position, gf):
"""A generic negative log posterior function.
Use the negative log posterior when using a minimization (rather than
probability maximization) algorithm.
"""
post_val = prior(position, gf) + likelihood(position, gf)
print("posterior: %s" % post_val)
return -post_val
def prior(position, gf):
"""A generic prior function."""
prior_prob = 0
for i, prior in enumerate(gf.priors):
prior_prob += prior.pdf(position[i])
return -prior_prob
def likelihood(position, gf):
# A generic objective function
# The cumulative error over all timecourses
err = 0
obs_func = getattr(gf.builder, 'obs_func', None)
# Iterate over each condition (1st dimension of the data matrix)
for cond_ix in range(gf.data.shape[0]):
# Create a placeholder for a time offset, if there is one
timeoffset = None
# Set the parameters appropriately for the simulation:
# Iterate over the globally fit parameters
for g_ix, p in enumerate(gf.builder.global_params):
p.value = 10 ** position[g_ix]
if p.name == 'timeoffset':
timeoffset = 10 ** position[g_ix]
# Iterate over the locally fit parameters
for l_ix, p in enumerate(gf.builder.local_params):
ix_offset = len(gf.builder.global_params) + \
cond_ix * len(gf.builder.local_params)
p.value = 10 ** position[l_ix + ix_offset]
# Now fill in the initial condition parameters
if gf.params is not None:
for p_name, values in gf.params.iteritems():
p = gf.builder.model.parameters[p_name]
p.value = values[cond_ix]
# Reset the timespan by adding one additional pt at the beginning
if timeoffset:
tspan = np.insert(gf.time, 0, -timeoffset)
else:
tspan = gf.time
# Now run the simulation
if callable(obs_func):
ydict = obs_func(tspan)
else:
gf.solver.tspan = tspan
gf.solver.run()
if gf.use_expr:
ydict = gf.solver.yexpr
else:
ydict = gf.solver.yobs
# Calculate the squared error over all the observables
for obs_ix, obs_name in enumerate(gf.obs_name):
# Get the observable timecourse from the dict/recarray
ysim = ydict[obs_name]
# If we're using a time offset, skip the first point (the offset)
# for the purposes of comparing to data
if timeoffset:
ysim = ysim[1:]
# If integrator fails to converge, the results will contain NaN
if np.any(np.isnan(ysim)):
err = -np.inf
continue
# Get the data slice we want
data = gf.data[cond_ix, obs_ix, :]
# Get the appropriate SD for this data slice
sigma = gf.data_sigma[cond_ix, obs_ix]
# Calculate the log-likelihood
loglkl = ((data - ysim) ** 2) / (2. * sigma ** 2)
# Filter out the NaNs...
filt_loglkl = loglkl[~np.isnan(loglkl)]
# Take the sum
err += -np.sum(filt_loglkl)
return err
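# Added note (illustrative, not part of the original module): the terms summed
# above are the Gaussian log-likelihood up to an additive constant, i.e. for
# each condition and observable
#   log L = -sum_t (data_t - ysim_t)**2 / (2 * sigma**2),
# so maximizing the returned value is equivalent to a chi-square fit with the
# per-timecourse error bars supplied in data_sigma.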
class GlobalFit(object):
"""Fit of PySB model to a set of multiple timecourses, with a
mix of globally and locally fit parameters.
Parameters
----------
builder : pysb.builder.Builder
Builder containing the model to fit. Should contain an attribute
builder.global_params for the parameters that are to be fit globally.
time : np.array
The time vector.
data : Three-dimensional np.array
The experimental timecourses to fit. The first dimension corresponds
to the number of experimental conditions; the second dimension
corresponds to the number of observables (in a given timecourse set);
the third dimension corresponds to the timepoints.
data_sigma : np.array
Array of values with dimension corresponding to data indicating the
standard deviation of the data.
params : dict of lists, or None
The keys to the dict should be names of parameters in the PySB model
(e.g., initial conditions); each value should be a list containing
values for the parameter for each of the entries in the data list. The
length of each value in the dict should match the length of the data
list. If None, indicates that there are no local initial conditions.
obs_name : string
The name of the model observable to compare against the data.
obs_type : string, "Expression" or "Observable"
Indicates whether the named expression/observable specified by
obs_name is to be found in the model's set of Expression objects
or Observable objects.
Attributes
----------
result : None or scipy.optimize.minimize fit result object
The result field is initialized to None and is assigned the results
of fitting after the :py:meth:`fit` method completes successfully.
use_expr : boolean
Based on the obs_type argument. True if the named observable is an
Expression, False if it is an Observable.
priors : list of priors
solver : pysb.integrate.Solver
A solver object used to run the model.
"""
def __init__(self, builder, time, data, data_sigma, params, obs_name,
obs_type='Expression'):
# Check that the dimensions of everything that has been provided match
# Check that the time vector matches the 3rd dimension of the data
# vector
if len(time) != data.shape[2]:
raise ValueError("Length of time vector must match the length "
"of each data vector.")
# Check that we don't have more than one condition but only set of
# initial conditions
if params is None and data.shape[0] != 1:
raise ValueError("There are no initial condition parameters but "
"there is more than one condition in the data "
"matrix.")
# Check that the number of initial conditions specified in the params
# dict matches the first dimension of the data matrix
if params is not None:
for p, vals in params.iteritems():
if not len(vals) == data.shape[0]:
raise ValueError("Each parameter in the params dict must "
"have an entry for each entry in the "
"data list.")
# Check that the number of observables matches the 2nd dimension of
# the data matrix
if len(obs_name) != data.shape[1]:
raise ValueError("The number of observables (%s) must match the "
"second dimension of the data matrix (%s)" %
(len(obs_name), data.shape[1]))
# Check that there is a sigma in the data_sigma matrix for every
# timecourse in data
if data_sigma.shape[0] != data.shape[0] or \
data_sigma.shape[1] != data.shape[1]:
raise ValueError("data_sigma must specify an error SD for every "
"timecourse in the data matrix.")
self.builder = builder
self.time = time
self.data = data
self.data_sigma = data_sigma
self.params = params
self.obs_name = obs_name
self.result = None
if obs_type == 'Expression':
self.use_expr = True
elif obs_type == 'Observable':
self.use_expr = False
else:
raise ValueError('obs_type must be Expression or Observable.')
if self.builder.model.parameters.get('timeoffset'):
use_time_offset = True
else:
use_time_offset = False
self.init_solver(use_time_offset=use_time_offset)
# Used to keep track of the number of steps run
self.nstep = 0
# Build up a list of priors corresponding to the global and local
# parameters
self.priors = []
# Iterate over the globally fit parameters
for g_ix, p in enumerate(self.builder.global_params):
try:
prior_index = self.builder.estimate_params.index(p)
self.priors.append(self.builder.priors[prior_index])
except ValueError:
raise ValueError(
'The parameter %s, in global_params, must also be '
'present in estimate_params.' % p.name)
# Iterate over the locally fit parameters
for data_ix, data in enumerate(self.data):
for l_ix, p in enumerate(self.builder.local_params):
try:
prior_index = self.builder.estimate_params.index(p)
self.priors.append(self.builder.priors[prior_index])
except ValueError:
                raise ValueError(
                    'The parameter %s, in local_params, must also be '
                    'present in estimate_params.' % p.name)
def __getstate__(self):
# Clear solver since it causes problems with pickling
state = self.__dict__.copy()
if 'solver' in state:
del state['solver']
return state
def __setstate__(self, state):
# Re-init the solver which we didn't pickle
self.__dict__.update(state)
if self.builder.model.parameters.get('timeoffset'):
use_time_offset = True
else:
use_time_offset = False
self.init_solver(use_time_offset=use_time_offset)
def init_solver(self, use_time_offset=False):
"""Initialize solver from model and tspan."""
Solver._use_inline = True
# If we're using a time offset, note that it doesn't matter what value
# goes in here, since it will be filled in by the fitting.
if use_time_offset:
tspan = np.insert(self.time, 0, 0)
else:
tspan = self.time
self.solver = Solver(self.builder.model, tspan)
def plot_func_single(self, x, data_ix, ax=None, alpha=1.0):
x = 10 ** x
s = Solver(self.builder.model, self.time)
# Set the parameters appropriately for the simulation:
# Iterate over the globally fit parameters
for g_ix, p in enumerate(self.builder.global_params):
p.value = x[g_ix]
# Iterate over the locally fit parameters
for l_ix, p in enumerate(self.builder.local_params):
ix_offset = len(self.builder.global_params) + \
data_ix * len(self.builder.local_params)
p.value = x[l_ix + ix_offset]
        # Now fill in the initial condition parameters (if any)
        if self.params is not None:
            for p_name, values in self.params.iteritems():
                p = self.builder.model.parameters[p_name]
                p.value = values[data_ix]
# Now run the simulation
s.run()
        # Plot
for reserved instances,
and at least equal to a given value for on-demand instances.
This is used mainly in phase II to ensure that reserved instances
    are fixed, or to allow keeping at least some number of on-demand
    instances running from previous timeslots, when using "guided"
    strategies."""
if self.fixed_vms is None: # No fixed instances, we are in PhaseI
return
for ins, value in self.fixed_vms.items():
if ins.is_reserved:
self.pulp_problem += (
lpSum(self.cooked.map_res[app, ins] for app in self.system.apps)
== value,
"Reserved instance class {} " "is fixed to {}".format(ins, value),
)
else:
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
self.cooked.map_dem[app, ins, load]
for app in self.system.apps
)
>= value,
"On-demand instance class {} is at least {} "
"when workload is {}".format(ins, value, load),
)
def limit_instances_per_limiting_set_restriction(
self
) -> None: # pylint: disable=invalid-name
"""Adds ``max_vms`` per limiting set restriction.
If the limiting set provides a max_vms > 0, then the sum of all
        instances which are members of that limiting set should be limited
to that maximum."""
for cloud in self.cooked.limiting_sets:
if cloud.max_vms == 0:
continue # No restriction for this limiting set
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
[
self.cooked.map_res[app, ic]
for app in self.system.apps
for ic in self.cooked.instances_res
if cloud in ic.limiting_sets
]
+ [
self.cooked.map_dem[app, ic, load]
for app in self.system.apps
for ic in self.cooked.instances_dem
if cloud in ic.limiting_sets
]
)
<= cloud.max_vms,
"Max instances for limiting set {} "
"when workload is {}".format(cloud, load),
)
def limit_cores_per_limiting_set_restriction(
self
) -> None: # pylint: disable=invalid-name
"""Adds ``max_cores`` per limiting set restriction.
If the limiting set provides a max_cores > 0, then the sum of all
        instance cores among all instance classes which are members of that
limiting set should be limited to that maximum."""
for cloud in self.cooked.limiting_sets:
if cloud.max_cores == 0:
continue # No restriction for this limiting set
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
[
self.cooked.map_res[app, ic] * ic.cores
for app in self.system.apps
for ic in self.cooked.instances_res
if cloud in ic.limiting_sets
]
+ [
self.cooked.map_dem[app, ic, load] * ic.cores
for app in self.system.apps
for ic in self.cooked.instances_dem
if cloud in ic.limiting_sets
]
)
<= cloud.max_cores,
"Max cores for limiting set {} "
"when workload is {}".format(cloud, load),
)
def solve(self, *args, **kwargs):
"""Calls PuLP solver.
Args:
*args: positional args passed to ``LpProblem.solve()``
\\**kwargs: keyword args passed to ``LpProblem.solve()``.
Returns:
the value returned by ``LpProblem.solve()``.
"""
self.solver_called = True
return self.pulp_problem.solve(*args, **kwargs)
def get_status(self) -> Status:
"""Returns the status of the problem"""
if not self.solver_called:
return Status.unsolved
return pulp_to_malloovia_status(self.pulp_problem.status)
def get_cost(self) -> float:
"""Gets the cost of the problem, obtained after solving it.
Returns:
The cost of the optimal solution found by PuLP.
Raises:
ValueError: when the problem is yet unsolved.
"""
if self.pulp_problem.status != pulp.LpStatusOptimal:
raise ValueError("Cannot get the cost when the status is not optimal")
return pulp.value(self.pulp_problem.objective)
def get_allocation(self) -> AllocationInfo:
"""Retrieves the allocation given by the solution of the LP problem.
Returns:
The allocation given by the solution.
Raises:
ValueError: if no solution is available (unsolved or infeasible problem)
"""
if self.pulp_problem.status != pulp.LpStatusOptimal:
raise ValueError("Cannot get the cost when the status is not optimal")
workload_tuples = []
repeats = []
allocation = []
for load, repeat in self.load_hist.items():
workload_tuples.append(load)
repeats.append(repeat)
workload_allocation = []
for app in self.system.apps:
row = list(
self.cooked.map_res[app, i].varValue
for i in self.cooked.instances_res
)
row.extend(
self.cooked.map_dem[app, i, load].varValue
for i in self.cooked.instances_dem
)
workload_allocation.append(tuple(row))
allocation.append(tuple(workload_allocation))
return AllocationInfo(
apps=tuple(self.system.apps),
instance_classes=tuple(
self.cooked.instances_res + self.cooked.instances_dem
),
workload_tuples=workload_tuples,
repeats=repeats,
values=tuple(allocation),
units="vms",
)
def get_reserved_allocation(self) -> ReservedAllocation:
"""Retrieves the allocation of reserved instances from the solution of the LP problem.
Returns:
The total number of reserved instance classes of each
type to be purchased for the whole reservation period.
Raises:
ValueError: if no solution is available (unsolved or infeasible problem)
"""
# Returns the solution as a list of numbers, each one
# representing the required number of vms of each reserved type, stored
# in the field "vms_number" of the object.
# This number is valid for any workload tuple, and for every timeslot
# in the reservation period. Also, it does not depend on the applications
# because it is the total number of reserved instances for all apps.
# The returned class also stores the list "instance_classes" which provides
# the instance class associated with each index in the above table.
# So, if r is the value returned, the value of r.vms_number[i]
# (being i an integer) is the number of VMs to be allocated
# from reserved instance class r.instance_classes[i], for every
# timeslot and for the set of all apps.
# This is all the information required for PhaseII.
if self.pulp_problem.status != pulp.LpStatusOptimal:
raise ValueError("Cannot get the cost when the status is not optimal")
allocation: List[float] = []
for _ in self.load_hist: # Loop over all possible workloads
workload_allocation: List[float] = []
for iclass in self.cooked.instances_res:
i_allocation = sum(
self.cooked.map_res[app, iclass].varValue
for app in self.system.apps
)
workload_allocation.append(i_allocation)
# The obtained allocation MUST be the same for any workload
assert allocation == [] or allocation == workload_allocation
allocation = workload_allocation
return ReservedAllocation(
instance_classes=tuple(self.cooked.instances_res),
vms_number=tuple(allocation),
)
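# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): as the comments in
# get_reserved_allocation() above explain, the returned object pairs
# instance_classes[i] with vms_number[i]. The helper below just walks that
# pairing; ``reserved_alloc`` is a hypothetical name for whatever
# get_reserved_allocation() returned.
def _example_print_reserved_allocation(reserved_alloc):
    for iclass, n_vms in zip(reserved_alloc.instance_classes,
                             reserved_alloc.vms_number):
        print("Reserve {} VMs of instance class {} for the whole "
              "reservation period".format(n_vms, iclass))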
class ShortReprTuple(tuple):
"""This class implements a tuple whose repr is not standard
but uses instead the hash of the tuple, to ensure a constant
length of the repr.
This is required to store keys in the histogram, because they
are used to name LP variables which otherwise would have
a name too long for the solver if the number of apps is large.
"""
def __repr__(self):
return str(hash(self))
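# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): ShortReprTuple only
# changes how the tuple is printed, so it can be embedded in LP variable names
# without the name growing with the number of apps. The workload values below
# are arbitrary.
def _example_short_repr_tuple():
    load = ShortReprTuple((120, 30, 75))
    # Behaves as a normal tuple for indexing, hashing and equality...
    assert load[1] == 30 and load == (120, 30, 75)
    # ...but its repr is the hash string, whose length does not grow with
    # the number of elements.
    return repr(load)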
def get_load_hist_from_load(workloads: Sequence[Workload]) -> MallooviaHistogram:
"""Computes the histogram of the workloads.
Args:
workloads: a sequence of :class:`Workload` objects, each one
containing the fields ``app`` (which identifies the app producing this
workload) and ``values`` (which stores a sequence of numbers representing
the workload for each timeslot for that app).
Returns:
A dictionary where the key is the workload for one timeslot,
expressed as a tuple with one element for each application, and the value
is the number of timeslots in which that workload was found.
"""
hist = MallooviaHistogram()
hist.apps = tuple(w.app for w in workloads)
timeslots = len(workloads[0].values)
# Ensure that all workloads have the same length and units
assert all(
len(w.values) == timeslots for w in workloads
), "All workloads should have the same length"
# Iterate over tuples of loads, one tuple per timeslot
workload_tuples = zip(*(w.values for w in workloads))
for load in workload_tuples:
hist[ShortReprTuple(load)] += 1
return hist
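# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal use of
# get_load_hist_from_load(). The namedtuple below is only a stand-in for the
# real Workload class (the function relies solely on the ``app`` and
# ``values`` attributes); the app names and load values are made up.
def _example_load_histogram():
    from collections import namedtuple
    FakeWorkload = namedtuple("FakeWorkload", ["app", "values"])
    wl_a = FakeWorkload(app="app0", values=(10, 10, 20, 10))
    wl_b = FakeWorkload(app="app1", values=(5, 5, 7, 5))
    hist = get_load_hist_from_load([wl_a, wl_b])
    # The workload tuple (10, 5) appears in three timeslots, (20, 7) in one
    assert hist[ShortReprTuple((10, 5))] == 3
    assert hist[ShortReprTuple((20, 7))] == 1
    return hist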
def reorder_workloads(
workloads: Sequence[Workload], apps: Sequence[App]
) -> Sequence[Workload]:
"""Returns the a new workload list ordered as the list of apps.
Args:
workloads: Sequence of workloads to reorder
apps: Sequence of apps which dictate the new ordering
Returns:
A new sequence of workloads, ordered by app in the order given by apps argument.
"""
map_apps_workloads = {workload.app: workload for workload in workloads}
ordered_workloads = []
for app in apps:
ordered_workloads.append(map_apps_workloads[app])
return tuple(ordered_workloads)
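# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): reorder_workloads()
# simply re-sorts the workload sequence so that it matches the order of the
# apps sequence. The string app identifiers below are stand-ins for real App
# objects; any hashable identifier behaves the same way.
def _example_reorder_workloads():
    from collections import namedtuple
    FakeWorkload = namedtuple("FakeWorkload", ["app", "values"])
    wl_b = FakeWorkload(app="app_b", values=(1, 2))
    wl_a = FakeWorkload(app="app_a", values=(3, 4))
    ordered = reorder_workloads([wl_b, wl_a], apps=["app_a", "app_b"])
    assert [w.app for w in ordered] == ["app_a", "app_b"]
    return ordered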
class MallooviaLpMaximizeTimeslotPerformance(MallooviaLp):
"""Find the allocation which maximizes performance for a single timeslot.
This problem is the dual of MallooviaLp. Instead of minimizing the cost
while providing the minimum performances, the problem to solve now is
to maximize the performance without breaking the limits.
The class inherits from Malloovia the initialization methods as well as
the ones to get the cost and allocation of the solution, but overrides
the function to be optimized and some of the constraints.
"""
def _cost_function(self) -> None:
"""Adds to the LP problem the function to optimize (maximize in this case).
The function to optimize is the performance of the deployment. However, since
        the system is composed of several applications, no single "performance" exists.
The solution is to maximize the "fraction of performance fulfilled", i.e., the
sum of `X(_a,_ic,_l)*_ic.performance/_l[a]` among all `_a` and `_ic`.
"""
workloads = {wl.app: wl.values[0] for wl in self.workloads}
self.pulp_problem += (
lpSum(
| |
short_domain = short_domain[:-1]
short_domain = short_domain + "_"
ctr = 0
found = 0
for t in port_types:
if t.startswith(short_domain):
found = ctr
iter = self.network_port_type_combolist.append()
self.network_port_type_combolist.set_value(iter, 0, t)
ctr += 1
self.network_port_type_combobox.set_active(found)
except AttributeError:
pass
self.network_tcp_button.set_active(True)
self.network_mls_entry.set_text("s0")
def login_seuser_combobox_change(self, combo, *args):
seuser = self.combo_get_active_text(combo)
if self.login_mls_entry.get_text() == "":
for u in sepolicy.get_selinux_users():
if seuser == u['name']:
self.login_mls_entry.set_text(u.get('range', ''))
def user_roles_combobox_change(self, combo, *args):
serole = self.combo_get_active_text(combo)
if self.user_mls_entry.get_text() == "":
for u in sepolicy.get_all_roles():
if serole == u['name']:
self.user_mls_entry.set_text(u.get('range', ''))
def get_selected_iter(self):
iter = None
if not self.treeview:
return None
row = self.treeview.get_selection()
if not row:
return None
treesort, iter = row.get_selected()
if iter:
iter = treesort.convert_iter_to_child_iter(iter)
if iter:
iter = self.treefilter.convert_iter_to_child_iter(iter)
return iter
def cursor_changed(self, *args):
self.modify_button.set_sensitive(False)
iter = self.get_selected_iter()
        if iter is None:
self.modify_button.set_sensitive(False)
return
if not self.liststore[iter] or not self.liststore[iter][-1]:
return
self.modify_button.set_sensitive(self.liststore[iter][-1])
def login_init_dialog(self, *args):
self.show_popup(self.login_popup_window)
self.login_seuser_combolist.clear()
users = sepolicy.get_all_users()
users.sort()
for u in users:
iter = self.login_seuser_combolist.append()
self.login_seuser_combolist.set_value(iter, 0, str(u))
self.login_name_entry.set_text("")
self.login_mls_entry.set_text("")
def user_init_dialog(self, *args):
self.show_popup(self.user_popup_window)
self.user_roles_combolist.clear()
roles = sepolicy.get_all_roles()
roles.sort()
for r in roles:
iter = self.user_roles_combolist.append()
self.user_roles_combolist.set_value(iter, 0, str(r))
self.user_name_entry.set_text("")
self.user_mls_entry.set_text("")
def on_disable_ptrace(self, checkbutton):
if self.finish_init:
update_buffer = "boolean -m -%d deny_ptrace" % checkbutton.get_active()
self.wait_mouse()
try:
self.dbus.semanage(update_buffer)
except dbus.exceptions.DBusException as e:
self.error(e)
self.ready_mouse()
def on_show_modified_only(self, checkbutton):
length = self.liststore.get_n_columns()
def dup_row(row):
l = []
for i in range(0, length):
l.append(row[i])
return l
append_list = []
if self.opage == BOOLEANS_PAGE:
if not checkbutton.get_active():
return self.boolean_initialize(self.application)
for row in self.liststore:
if row[2] in self.cust_dict["boolean"]:
append_list.append(dup_row(row))
if self.opage == FILES_PAGE:
ipage = self.inner_notebook_files.get_current_page()
if not checkbutton.get_active():
if ipage == EXE_PAGE:
return self.executable_files_initialize(self.application)
if ipage == WRITABLE_PAGE:
return self.writable_files_initialize(self.application)
if ipage == APP_PAGE:
return self.application_files_initialize(self.application)
for row in self.liststore:
if (row[0], row[2]) in self.cust_dict["fcontext"]:
                    append_list.append(dup_row(row))
if self.opage == NETWORK_PAGE:
if not checkbutton.get_active():
return self.network_initialize(self.application)
for row in self.liststore:
if (row[0], row[1]) in self.cust_dict["port"]:
append_list.append(dup_row(row))
if self.opage == FILE_EQUIV_PAGE:
            if not checkbutton.get_active():
return self.file_equiv_initialize()
for row in self.liststore:
if row[0] in self.cust_dict["fcontext-equiv"]:
append_list.append(dup_row(row))
if self.opage == USER_PAGE:
if not checkbutton.get_active():
return self.user_initialize()
for row in self.liststore:
if row[0] in self.cust_dict["user"]:
append_list.append(dup_row(row))
if self.opage == LOGIN_PAGE:
            if not checkbutton.get_active():
return self.login_initialize()
for row in self.liststore:
if row[0] in self.cust_dict["login"]:
append_list.append(dup_row(row))
self.liststore.clear()
for row in append_list:
iter = self.liststore.append()
for i in range(0, length):
self.liststore.set_value(iter, i, row[i])
def init_modified_files_liststore(self, tree, app, ipage, operation, path, fclass, ftype):
iter = tree.append(None)
tree.set_value(iter, 0, path)
tree.set_value(iter, 1, ftype)
tree.set_value(iter, 2, fclass)
def restore_to_default(self, *args):
print("restore to default clicked...")
def invalid_entry_retry(self, *args):
self.closewindow(self.error_check_window)
self.files_popup_window.set_sensitive(True)
self.network_popup_window.set_sensitive(True)
def error_check_files(self, insert_txt):
if len(insert_txt) == 0 or insert_txt[0] != '/':
self.error_check_window.show()
self.files_popup_window.set_sensitive(False)
self.network_popup_window.set_sensitive(False)
self.error_check_label.set_text((_("The entry '%s' is not a valid path. Paths must begin with a '/'.")) % insert_txt)
return True
return False
def error_check_network(self, port):
try:
pnum = int(port)
if pnum < 1 or pnum > 65536:
raise ValueError
except ValueError:
self.error_check_window.show()
self.files_popup_window.set_sensitive(False)
self.network_popup_window.set_sensitive(False)
self.error_check_label.set_text((_("Port number must be between 1 and 65536")))
return True
return False
def show_more_types(self, *args):
if self.finish_init:
if self.combo_get_active_text(self.files_type_combobox) == _('More...'):
self.files_popup_window.hide()
self.moreTypes_window_files.show()
def update_to_login(self, *args):
self.close_popup()
seuser = self.combo_get_active_text(self.login_seuser_combobox)
mls_range = self.login_mls_entry.get_text()
name = self.login_name_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
oldname = self.login_liststore.get_value(iter, 0)
oldseuser = self.login_liststore.get_value(iter, 1)
oldrange = self.login_liststore.get_value(iter, 2)
self.liststore.set_value(iter, 0, oldname)
self.liststore.set_value(iter, 1, oldseuser)
self.liststore.set_value(iter, 2, oldrange)
self.cur_dict["login"][name] = {"action": "-m", "range": mls_range, "seuser": seuser, "oldrange": oldrange, "oldseuser": oldseuser, "oldname": oldname}
else:
iter = self.liststore.append(None)
self.cur_dict["login"][name] = {"action": "-a", "range": mls_range, "seuser": seuser}
self.liststore.set_value(iter, 0, name)
self.liststore.set_value(iter, 1, seuser)
self.liststore.set_value(iter, 2, mls_range)
self.new_updates()
def update_to_user(self, *args):
self.close_popup()
roles = self.combo_get_active_text(self.user_roles_combobox)
level = self.user_mls_level_entry.get_text()
mls_range = self.user_mls_entry.get_text()
name = self.user_name_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
oldname = self.user_liststore.get_value(iter, 0)
oldroles = self.user_liststore.get_value(iter, 1)
            oldlevel = self.user_liststore.get_value(iter, 2)
oldrange = self.user_liststore.get_value(iter, 3)
self.liststore.set_value(iter, 0, oldname)
self.liststore.set_value(iter, 1, oldroles)
self.liststore.set_value(iter, 2, oldlevel)
self.liststore.set_value(iter, 3, oldrange)
self.cur_dict["user"][name] = {"action": "-m", "range": mls_range, "level": level, "role": roles, "oldrange": oldrange, "oldlevel": oldlevel, "oldroles": oldroles, "oldname": oldname}
else:
iter = self.liststore.append(None)
if mls_range or level:
self.cur_dict["user"][name] = {"action": "-a", "range": mls_range, "level": level, "role": roles}
else:
self.cur_dict["user"][name] = {"action": "-a", "role": roles}
self.liststore.set_value(iter, 0, name)
self.liststore.set_value(iter, 1, roles)
self.liststore.set_value(iter, 2, level)
self.liststore.set_value(iter, 3, mls_range)
self.new_updates()
def update_to_file_equiv(self, *args):
self.close_popup()
dest = self.file_equiv_dest_entry.get_text()
src = self.file_equiv_source_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
            olddest = self.unmarkup(self.liststore.get_value(iter, 0))
            oldsrc = self.unmarkup(self.liststore.get_value(iter, 1))
self.cur_dict["fcontext-equiv"][dest] = {"action": "-m", "src": src, "oldsrc": oldsrc, "olddest": olddest}
else:
iter = self.liststore.append(None)
self.cur_dict["fcontext-equiv"][dest] = {"action": "-a", "src": src}
self.liststore.set_value(iter, 0, self.markup(dest))
self.liststore.set_value(iter, 1, self.markup(src))
def update_to_files(self, *args):
self.close_popup()
self.files_add = True
# Insert Function will be used in the future
path = self.files_path_entry.get_text()
if self.error_check_files(path):
return
setype = self.combo_get_active_text(self.files_type_combobox)
mls = self.files_mls_entry.get_text()
tclass = self.combo_get_active_text(self.files_class_combobox)
if self.modify:
iter = self.get_selected_iter()
            oldpath = self.unmarkup(self.liststore.get_value(iter, 0))
            oldsetype = self.unmarkup(self.liststore.get_value(iter, 1))
oldtclass = self.liststore.get_value(iter, 2)
self.cur_dict["fcontext"][(path, tclass)] = {"action": "-m", "type": setype, "oldtype": oldsetype, "oldpath": oldpath, "oldclass": oldtclass}
else:
iter = self.liststore.append(None)
self.cur_dict["fcontext"][(path, tclass)] = {"action": "-a", "type": setype}
self.liststore.set_value(iter, 0, self.markup(path))
self.liststore.set_value(iter, 1, self.markup(setype))
self.liststore.set_value(iter, 2, self.markup(tclass))
self.files_add = False
self.recursive_path_toggle.set_active(False)
self.new_updates()
def update_to_network(self, *args):
self.network_add = True
ports = self.network_ports_entry.get_text()
if self.error_check_network(ports):
return
if self.network_tcp_button.get_active():
protocol = "tcp"
else:
protocol = "udp"
setype = self.combo_get_active_text(self.network_port_type_combobox)
mls = self.network_mls_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
            oldports = self.unmarkup(self.liststore.get_value(iter, 0))
            oldprotocol = self.unmarkup(self.liststore.get_value(iter, 1))
            oldsetype = self.unmarkup(self.liststore.get_value(iter, 2))
self.cur_dict["port"][(ports, protocol)] = {"action": "-m", "type": setype, "mls": mls, "oldtype": oldsetype, "oldprotocol": oldprotocol, "oldports": oldports}
else:
iter = self.liststore.append(None)
self.cur_dict["port"][(ports, protocol)] = {"action": "-a", "type": setype, "mls": mls}
self.liststore.set_value(iter, 0, ports)
self.liststore.set_value(iter, 1, protocol)
self.liststore.set_value(iter, 2, setype)
self.network_add = False
self.network_popup_window.hide()
self.window.set_sensitive(True)
self.new_updates()
def delete_button_clicked(self, *args):
operation = "Add"
self.window.set_sensitive(False)
if self.opage == NETWORK_PAGE:
self.network_delete_liststore.clear()
port_dict = self.cust_dict["port"]
for ports, protocol in port_dict:
setype = port_dict[(ports, protocol)]["type"]
iter = self.network_delete_liststore.append()
self.network_delete_liststore.set_value(iter, 1, ports)
self.network_delete_liststore.set_value(iter, 2, protocol)
self.network_delete_liststore.set_value(iter, 3, setype)
self.show_popup(self.network_delete_window)
return
if self.opage == FILES_PAGE:
self.files_delete_liststore.clear()
fcontext_dict = self.cust_dict["fcontext"]
for path, tclass in fcontext_dict:
setype = fcontext_dict[(path, tclass)]["type"]
iter = self.files_delete_liststore.append()
self.files_delete_liststore.set_value(iter, 1, path)
self.files_delete_liststore.set_value(iter, 2, setype)
self.files_delete_liststore.set_value(iter, 3, sepolicy.file_type_str[tclass])
self.show_popup(self.files_delete_window)
return
if self.opage == USER_PAGE:
self.user_delete_liststore.clear()
user_dict = self.cust_dict["user"]
for user in user_dict:
roles = user_dict[user]["role"]
mls = user_dict[user].get("range", "")
level = user_dict[user].get("level", "")
iter = self.user_delete_liststore.append()
self.user_delete_liststore.set_value(iter, 1, user)
self.user_delete_liststore.set_value(iter, 2, roles)
self.user_delete_liststore.set_value(iter, 3, level)
self.user_delete_liststore.set_value(iter, 4, mls)
self.show_popup(self.user_delete_window)
return
if self.opage == LOGIN_PAGE:
self.login_delete_liststore.clear()
login_dict = self.cust_dict["login"]
for login in login_dict:
seuser = login_dict[login]["seuser"]
mls = login_dict[login].get("range", "")
iter = self.login_delete_liststore.append()
self.login_delete_liststore.set_value(iter, 1, seuser)
self.login_delete_liststore.set_value(iter, 2, login)
self.login_delete_liststore.set_value(iter, 3, mls)
self.show_popup(self.login_delete_window)
return
if self.opage == FILE_EQUIV_PAGE:
self.file_equiv_delete_liststore.clear()
for items in self.file_equiv_liststore:
if items[2]:
iter = self.file_equiv_delete_liststore.append()
self.file_equiv_delete_liststore.set_value(iter, 1, self.unmarkup(items[0]))
self.file_equiv_delete_liststore.set_value(iter, 2, self.unmarkup(items[1]))
self.show_popup(self.file_equiv_delete_window)
return
def on_save_delete_clicked(self, *args):
self.close_popup()
if self.opage == NETWORK_PAGE:
for delete in self.network_delete_liststore:
if delete[0]:
self.cur_dict["port"][(delete[1], delete[2])] = {"action": "-d", "type": delete[3]}
if self.opage == FILES_PAGE:
for delete in self.files_delete_liststore:
if delete[0]:
self.cur_dict["fcontext"][(delete[1], reverse_file_type_str[delete[3]])] = {"action": "-d", "type": delete[2]}
if self.opage == USER_PAGE:
for delete in self.user_delete_liststore:
if delete[0]:
self.cur_dict["user"][delete[1]] = {"action": "-d", "role": delete[2], "range": delete[4]}
if self.opage == LOGIN_PAGE:
for delete in self.login_delete_liststore:
if delete[0]:
self.cur_dict["login"][delete[2]] = {"action": "-d", "login": delete[2], "seuser": delete[1], "range": delete[3]}
if self.opage == FILE_EQUIV_PAGE:
for delete in self.file_equiv_delete_liststore:
if delete[0]:
self.cur_dict["fcontext-equiv"][delete[1]] = {"action": "-d", "src": delete[2]}
self.new_updates()
def on_save_delete_file_equiv_clicked(self, *args):
for delete in self.files_delete_liststore:
print(delete[0], delete[1], delete[2],)
def on_toggle_update(self, cell, path, model):
model[path][0] = not model[path][0]
def ipage_delete(self, liststore, key):
ctr = 0
for items in liststore:
if items[0] == key[0] and items[2] == key[1]:
iter = liststore.get_iter(ctr)
liststore.remove(iter)
return
ctr += 1
def on_toggle(self, cell, path, model):
if not path:
return
iter = self.boolean_filter.get_iter(path)
iter = self.boolean_filter.convert_iter_to_child_iter(iter)
name = model.get_value(iter, 2)
model.set_value(iter, 0, not model.get_value(iter, 0))
active = model.get_value(iter, 0)
if name in self.cur_dict["boolean"]:
del(self.cur_dict["boolean"][name])
else:
self.cur_dict["boolean"][name] = {"active": active}
self.new_updates()
def get_advanced_filter_data(self, entry, *args):
self.filter_txt = entry.get_text()
self.advanced_search_filter.refilter()
def get_filter_data(self, windows, *args):
        # Search for the desired item.
        # The text that the user inputs into the filter is stored in filter_txt.
self.filter_txt = windows.get_text()
self.treefilter.refilter()
def update_gui(self, *args):
self.update = True
self.update_treestore.clear()
for bools in self.cur_dict["boolean"]:
operation = self.cur_dict["boolean"][bools]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6375, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AbsoluteDeleteOption
from ._models_py3 import AdHocBackupRuleOptions
from ._models_py3 import AdhocBackupTriggerOption
from ._models_py3 import AdhocBasedTaggingCriteria
from ._models_py3 import AdhocBasedTriggerContext
from ._models_py3 import AzureBackupDiscreteRecoveryPoint
from ._models_py3 import AzureBackupFindRestorableTimeRangesRequest
from ._models_py3 import AzureBackupFindRestorableTimeRangesRequestResource
from ._models_py3 import AzureBackupFindRestorableTimeRangesResponse
from ._models_py3 import AzureBackupFindRestorableTimeRangesResponseResource
from ._models_py3 import AzureBackupJob
from ._models_py3 import AzureBackupJobResource
from ._models_py3 import AzureBackupJobResourceList
from ._models_py3 import AzureBackupParams
from ._models_py3 import AzureBackupRecoveryPoint
from ._models_py3 import AzureBackupRecoveryPointBasedRestoreRequest
from ._models_py3 import AzureBackupRecoveryPointResource
from ._models_py3 import AzureBackupRecoveryPointResourceList
from ._models_py3 import AzureBackupRecoveryTimeBasedRestoreRequest
from ._models_py3 import AzureBackupRehydrationRequest
from ._models_py3 import AzureBackupRestoreRequest
from ._models_py3 import AzureBackupRestoreWithRehydrationRequest
from ._models_py3 import AzureBackupRule
from ._models_py3 import AzureOperationalStoreParameters
from ._models_py3 import AzureRetentionRule
from ._models_py3 import BackupCriteria
from ._models_py3 import BackupInstance
from ._models_py3 import BackupInstanceResource
from ._models_py3 import BackupInstanceResourceList
from ._models_py3 import BackupParameters
from ._models_py3 import BackupPolicy
from ._models_py3 import BackupSchedule
from ._models_py3 import BackupVault
from ._models_py3 import BackupVaultResource
from ._models_py3 import BackupVaultResourceList
from ._models_py3 import BaseBackupPolicy
from ._models_py3 import BaseBackupPolicyResource
from ._models_py3 import BaseBackupPolicyResourceList
from ._models_py3 import BasePolicyRule
from ._models_py3 import CheckNameAvailabilityRequest
from ._models_py3 import CheckNameAvailabilityResult
from ._models_py3 import ClientDiscoveryDisplay
from ._models_py3 import ClientDiscoveryForLogSpecification
from ._models_py3 import ClientDiscoveryForProperties
from ._models_py3 import ClientDiscoveryForServiceSpecification
from ._models_py3 import ClientDiscoveryResponse
from ._models_py3 import ClientDiscoveryValueForSingleApi
from ._models_py3 import CopyOnExpiryOption
from ._models_py3 import CopyOption
from ._models_py3 import CustomCopyOption
from ._models_py3 import DataStoreInfoBase
from ._models_py3 import DataStoreParameters
from ._models_py3 import Datasource
from ._models_py3 import DatasourceSet
from ._models_py3 import Day
from ._models_py3 import DeleteOption
from ._models_py3 import DppIdentityDetails
from ._models_py3 import DppResource
from ._models_py3 import DppResourceList
from ._models_py3 import DppTrackedResource
from ._models_py3 import DppTrackedResourceList
from ._models_py3 import DppWorkerRequest
from ._models_py3 import Error
from ._models_py3 import ErrorAdditionalInfo
from ._models_py3 import ExportJobsResult
from ._models_py3 import FeatureValidationRequest
from ._models_py3 import FeatureValidationRequestBase
from ._models_py3 import FeatureValidationResponse
from ._models_py3 import FeatureValidationResponseBase
from ._models_py3 import ImmediateCopyOption
from ._models_py3 import InnerError
from ._models_py3 import ItemLevelRestoreCriteria
from ._models_py3 import ItemLevelRestoreTargetInfo
from ._models_py3 import JobExtendedInfo
from ._models_py3 import JobSubTask
from ._models_py3 import OperationExtendedInfo
from ._models_py3 import OperationJobExtendedInfo
from ._models_py3 import OperationResource
from ._models_py3 import PatchResourceRequestInput
from ._models_py3 import PolicyInfo
from ._models_py3 import PolicyParameters
from ._models_py3 import ProtectionStatusDetails
from ._models_py3 import RangeBasedItemLevelRestoreCriteria
from ._models_py3 import RecoveryPointDataStoreDetails
from ._models_py3 import RecoveryPointsFilters
from ._models_py3 import RestorableTimeRange
from ._models_py3 import RestoreFilesTargetInfo
from ._models_py3 import RestoreJobRecoveryPointDetails
from ._models_py3 import RestoreTargetInfo
from ._models_py3 import RestoreTargetInfoBase
from ._models_py3 import RetentionTag
from ._models_py3 import ScheduleBasedBackupCriteria
from ._models_py3 import ScheduleBasedTriggerContext
from ._models_py3 import SourceLifeCycle
from ._models_py3 import StorageSetting
from ._models_py3 import SupportedFeature
from ._models_py3 import SystemData
from ._models_py3 import TaggingCriteria
from ._models_py3 import TargetCopySetting
from ._models_py3 import TargetDetails
from ._models_py3 import TriggerBackupRequest
from ._models_py3 import TriggerContext
from ._models_py3 import UserFacingError
from ._models_py3 import ValidateForBackupRequest
from ._models_py3 import ValidateRestoreRequestObject
except (SyntaxError, ImportError):
from ._models import AbsoluteDeleteOption # type: ignore
from ._models import AdHocBackupRuleOptions # type: ignore
from ._models import AdhocBackupTriggerOption # type: ignore
from ._models import AdhocBasedTaggingCriteria # type: ignore
from ._models import AdhocBasedTriggerContext # type: ignore
from ._models import AzureBackupDiscreteRecoveryPoint # type: ignore
from ._models import AzureBackupFindRestorableTimeRangesRequest # type: ignore
from ._models import AzureBackupFindRestorableTimeRangesRequestResource # type: ignore
from ._models import AzureBackupFindRestorableTimeRangesResponse # type: ignore
from ._models import AzureBackupFindRestorableTimeRangesResponseResource # type: ignore
from ._models import AzureBackupJob # type: ignore
from ._models import AzureBackupJobResource # type: ignore
from ._models import AzureBackupJobResourceList # type: ignore
from ._models import AzureBackupParams # type: ignore
from ._models import AzureBackupRecoveryPoint # type: ignore
from ._models import AzureBackupRecoveryPointBasedRestoreRequest # type: ignore
from ._models import AzureBackupRecoveryPointResource # type: ignore
from ._models import AzureBackupRecoveryPointResourceList # type: ignore
from ._models import AzureBackupRecoveryTimeBasedRestoreRequest # type: ignore
from ._models import AzureBackupRehydrationRequest # type: ignore
from ._models import AzureBackupRestoreRequest # type: ignore
from ._models import AzureBackupRestoreWithRehydrationRequest # type: ignore
from ._models import AzureBackupRule # type: ignore
from ._models import AzureOperationalStoreParameters # type: ignore
from ._models import AzureRetentionRule # type: ignore
from ._models import BackupCriteria # type: ignore
from ._models import BackupInstance # type: ignore
from ._models import BackupInstanceResource # type: ignore
from ._models import BackupInstanceResourceList # type: ignore
from ._models import BackupParameters # type: ignore
from ._models import BackupPolicy # type: ignore
from ._models import BackupSchedule # type: ignore
from ._models import BackupVault # type: ignore
from ._models import BackupVaultResource # type: ignore
from ._models import BackupVaultResourceList # type: ignore
from ._models import BaseBackupPolicy # type: ignore
from ._models import BaseBackupPolicyResource # type: ignore
from ._models import BaseBackupPolicyResourceList # type: ignore
from ._models import BasePolicyRule # type: ignore
from ._models import CheckNameAvailabilityRequest # type: ignore
from ._models import CheckNameAvailabilityResult # type: ignore
from ._models import ClientDiscoveryDisplay # type: ignore
from ._models import ClientDiscoveryForLogSpecification # type: ignore
from ._models import ClientDiscoveryForProperties # type: ignore
from ._models import ClientDiscoveryForServiceSpecification # type: ignore
from ._models import ClientDiscoveryResponse # type: ignore
from ._models import ClientDiscoveryValueForSingleApi # type: ignore
from ._models import CopyOnExpiryOption # type: ignore
from ._models import CopyOption # type: ignore
from ._models import CustomCopyOption # type: ignore
from ._models import DataStoreInfoBase # type: ignore
from ._models import DataStoreParameters # type: ignore
from ._models import Datasource # type: ignore
from ._models import DatasourceSet # type: ignore
from ._models import Day # type: ignore
from ._models import DeleteOption # type: ignore
from ._models import DppIdentityDetails # type: ignore
from ._models import DppResource # type: ignore
from ._models import DppResourceList # type: ignore
from ._models import DppTrackedResource # type: ignore
from ._models import DppTrackedResourceList # type: ignore
from ._models import DppWorkerRequest # type: ignore
from ._models import Error # type: ignore
from ._models import ErrorAdditionalInfo # type: ignore
from ._models import ExportJobsResult # type: ignore
from ._models import FeatureValidationRequest # type: ignore
from ._models import FeatureValidationRequestBase # type: ignore
from ._models import FeatureValidationResponse # type: ignore
from ._models import FeatureValidationResponseBase # type: ignore
from ._models import ImmediateCopyOption # type: ignore
from ._models import InnerError # type: ignore
from ._models import ItemLevelRestoreCriteria # type: ignore
from ._models import ItemLevelRestoreTargetInfo # type: ignore
from ._models import JobExtendedInfo # type: ignore
from ._models import JobSubTask # type: ignore
from ._models import OperationExtendedInfo # type: ignore
from ._models import OperationJobExtendedInfo # type: ignore
from ._models import OperationResource # type: ignore
from ._models import PatchResourceRequestInput # type: ignore
from ._models import PolicyInfo # type: ignore
from ._models import PolicyParameters # type: ignore
from ._models import ProtectionStatusDetails # type: ignore
from ._models import RangeBasedItemLevelRestoreCriteria # type: ignore
from ._models import RecoveryPointDataStoreDetails # type: ignore
from ._models import RecoveryPointsFilters # type: ignore
from ._models import RestorableTimeRange # type: ignore
from ._models import RestoreFilesTargetInfo # type: ignore
from ._models import RestoreJobRecoveryPointDetails # type: ignore
from ._models import RestoreTargetInfo # type: ignore
from ._models import RestoreTargetInfoBase # type: ignore
from ._models import RetentionTag # type: ignore
from ._models import ScheduleBasedBackupCriteria # type: ignore
from ._models import ScheduleBasedTriggerContext # type: ignore
from ._models import SourceLifeCycle # type: ignore
from ._models import StorageSetting # type: ignore
from ._models import SupportedFeature # type: ignore
from ._models import SystemData # type: ignore
from ._models import TaggingCriteria # type: ignore
from ._models import TargetCopySetting # type: ignore
from ._models import TargetDetails # type: ignore
from ._models import TriggerBackupRequest # type: ignore
from ._models import TriggerContext # type: ignore
from ._models import UserFacingError # type: ignore
from ._models import ValidateForBackupRequest # type: ignore
from ._models import ValidateRestoreRequestObject # type: ignore
from ._data_protection_client_enums import (
AbsoluteMarker,
CreatedByType,
CurrentProtectionState,
DataStoreTypes,
DayOfWeek,
FeatureSupportStatus,
FeatureType,
Month,
ProvisioningState,
RecoveryOption,
RehydrationPriority,
RehydrationStatus,
RestoreSourceDataStoreType,
RestoreTargetLocationType,
SourceDataStoreType,
Status,
StorageSettingStoreTypes,
StorageSettingTypes,
WeekNumber,
)
__all__ = [
'AbsoluteDeleteOption',
'AdHocBackupRuleOptions',
'AdhocBackupTriggerOption',
'AdhocBasedTaggingCriteria',
'AdhocBasedTriggerContext',
'AzureBackupDiscreteRecoveryPoint',
'AzureBackupFindRestorableTimeRangesRequest',
'AzureBackupFindRestorableTimeRangesRequestResource',
'AzureBackupFindRestorableTimeRangesResponse',
'AzureBackupFindRestorableTimeRangesResponseResource',
'AzureBackupJob',
'AzureBackupJobResource',
'AzureBackupJobResourceList',
'AzureBackupParams',
'AzureBackupRecoveryPoint',
'AzureBackupRecoveryPointBasedRestoreRequest',
'AzureBackupRecoveryPointResource',
'AzureBackupRecoveryPointResourceList',
'AzureBackupRecoveryTimeBasedRestoreRequest',
'AzureBackupRehydrationRequest',
'AzureBackupRestoreRequest',
'AzureBackupRestoreWithRehydrationRequest',
'AzureBackupRule',
'AzureOperationalStoreParameters',
'AzureRetentionRule',
'BackupCriteria',
'BackupInstance',
'BackupInstanceResource',
'BackupInstanceResourceList',
'BackupParameters',
'BackupPolicy',
'BackupSchedule',
'BackupVault',
'BackupVaultResource',
'BackupVaultResourceList',
'BaseBackupPolicy',
'BaseBackupPolicyResource',
'BaseBackupPolicyResourceList',
'BasePolicyRule',
'CheckNameAvailabilityRequest',
'CheckNameAvailabilityResult',
'ClientDiscoveryDisplay',
'ClientDiscoveryForLogSpecification',
'ClientDiscoveryForProperties',
'ClientDiscoveryForServiceSpecification',
'ClientDiscoveryResponse',
'ClientDiscoveryValueForSingleApi',
'CopyOnExpiryOption',
| |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.192298,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.87659,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0179935,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.216821,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.126694,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.13945,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.224928,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.113536,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.477915,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.140067,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.3032,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0239352,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00584917,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.047833,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0432582,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0717682,
'Execution Unit/Register Files/Runtime Dynamic': 0.0491074,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.10527,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.282031,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.43125,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00110283,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00110283,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00100859,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000416708,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000621407,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00383566,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00885802,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0415852,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.64518,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.102457,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.141242,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.99207,
'Instruction Fetch Unit/Runtime Dynamic': 0.297978,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0355386,
'L2/Runtime Dynamic': 0.0161514,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.44317,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.614337,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0390186,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0390186,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.62742,
'Load Store Unit/Runtime Dynamic': 0.845782,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0962131,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.192426,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0341463,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0346774,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.164467,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0168044,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.379234,
'Memory Management Unit/Runtime Dynamic': 0.0514818,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.9269,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0629632,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00705786,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0705958,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
# Source repository: yuxiang-zhou/menpo
from warnings import warn
import numpy as np
from menpo.base import Vectorizable, MenpoDeprecationWarning
from menpo.transform.base import (Alignment, ComposableTransform,
VComposable, VInvertible)
class HomogFamilyAlignment(Alignment):
r"""
Simple subclass of Alignment that adds the ability to create a copy of an
alignment class without the alignment behavior.
Note that subclasses should inherit from :map:`HomogFamilyAlignment` first
to have the correct copy behavior.
"""
def as_non_alignment(self):
r"""
Returns a copy of this transform without its alignment nature.
Returns
-------
transform : :map:`Homogeneous` but not :map:`Alignment` subclass
A version of this transform with the same transform behavior but
without the alignment logic.
"""
raise NotImplementedError()
def copy(self):
r"""
Generate an efficient copy of this :map:`HomogFamilyAlignment`.
Returns
-------
new_transform : ``type(self)``
A copy of this object
"""
new = self.__class__.__new__(self.__class__)
# Shallow copy everything except the h_matrix
new.__dict__ = self.__dict__.copy()
new._h_matrix = new._h_matrix.copy()
return new
def pseudoinverse(self):
r"""
The pseudoinverse of the transform - that is, the transform that
results from swapping source and target, or more formally, negating
the transform's parameters. If the transform has a true inverse this
is returned instead.
Returns
-------
transform : ``type(self)``
The inverse of this transform.
"""
selfcopy = self.copy()
selfcopy._h_matrix = self._h_matrix_pseudoinverse()
selfcopy._source, selfcopy._target = selfcopy._target, selfcopy._source
return selfcopy
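# Illustrative sketch (not part of menpo): the `Homogeneous` class defined below
# applies an (n_dims + 1, n_dims + 1) matrix by appending a unit homogeneous
# coordinate, taking the dot product and re-normalizing. The helper mirrors that
# behaviour with plain numpy so the maths in `_apply` is easy to verify, e.g.
# _example_apply_homogeneous(np.eye(3), np.zeros((4, 2))) returns the points unchanged.
def _example_apply_homogeneous(h_matrix, points):
    """Apply a homogeneous matrix to an (n_points, n_dims) array of points."""
    ones = np.ones([points.shape[0], 1])
    h_points = np.hstack([points, ones])          # lift to homogeneous coordinates
    h_result = h_points.dot(h_matrix.T)           # apply the transform
    return (h_result / h_result[:, -1][:, None])[:, :-1]  # re-normalize, drop w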
class Homogeneous(ComposableTransform, Vectorizable, VComposable, VInvertible):
r"""
A simple ``n``-dimensional homogeneous transformation.
Adds a unit homogeneous coordinate to points, performs the dot
product, re-normalizes by division by the homogeneous coordinate,
and returns the result.
Can be composed with another :map:`Homogeneous`, so long as the
dimensionality matches.
Parameters
----------
h_matrix : ``(n_dims + 1, n_dims + 1)`` `ndarray`
The homogeneous matrix defining this transform.
copy : `bool`, optional
If ``False``, avoid copying ``h_matrix``. Useful for performance.
skip_checks : `bool`, optional
If ``True``, avoid sanity checks on the ``h_matrix``. Useful for
performance.
"""
def __init__(self, h_matrix, copy=True, skip_checks=False):
self._h_matrix = None
# Delegate setting to the most specialized setter method possible
self._set_h_matrix(h_matrix, copy=copy, skip_checks=skip_checks)
@classmethod
def init_identity(cls, n_dims):
r"""
Creates an identity matrix Homogeneous transform.
Parameters
----------
n_dims : `int`
The number of dimensions.
Returns
-------
identity : :class:`Homogeneous`
The identity matrix transform.
"""
return Homogeneous(np.eye(n_dims + 1))
@property
def h_matrix_is_mutable(self):
r"""Deprecated
``True`` iff :meth:`set_h_matrix` is permitted on this type of
transform.
If this returns ``False`` calls to :meth:`set_h_matrix` will raise
a ``NotImplementedError``.
:type: `bool`
"""
warn('the public API for mutable operations is deprecated '
'and will be removed in a future version of Menpo. '
'Create a new transform instead.', MenpoDeprecationWarning)
return False
def from_vector(self, vector):
"""
Build a new instance of the object from its vectorized state.
``self`` is used to fill out the missing state required to rebuild a
full object from its standardized flattened state. This is the default
implementation, which is a ``deepcopy`` of the object followed by a call
to :meth:`from_vector_inplace()`. This method can be overridden for a
performance benefit if desired.
Parameters
----------
vector : ``(n_parameters,)`` `ndarray`
Flattened representation of the object.
Returns
-------
transform : :class:`Homogeneous`
A new instance of this class.
"""
# avoid the deepcopy with an efficient copy
self_copy = self.copy()
self_copy._from_vector_inplace(vector)
return self_copy
def __str__(self):
rep = self._transform_str() + '\n'
rep += str(self.h_matrix)
return rep
def _transform_str(self):
r"""
A string representation explaining what this homogeneous transform
does. Has to be implemented by subclasses.
Returns
-------
string : `str`
String representation of transform.
"""
return 'Homogeneous'
@property
def h_matrix(self):
r"""
The homogeneous matrix defining this transform.
:type: ``(n_dims + 1, n_dims + 1)`` `ndarray`
"""
return self._h_matrix
def set_h_matrix(self, value, copy=True, skip_checks=False):
r"""Deprecated
Deprecated - do not use this method - you are better off just creating
a new transform!
Updates ``h_matrix``, optionally performing sanity checks.
Note that it won't always be possible to manually specify the
``h_matrix`` through this method, specifically if changing the
``h_matrix`` could change the nature of the transform. See
:attr:`h_matrix_is_mutable` for how you can discover if the
``h_matrix`` is allowed to be set for a given class.
Parameters
----------
value : `ndarray`
The new homogeneous matrix to set.
copy : `bool`, optional
If ``False``, do not copy the h_matrix. Useful for performance.
skip_checks : `bool`, optional
If ``True``, skip checking. Useful for performance.
Raises
------
NotImplementedError
If :attr:`h_matrix_is_mutable` returns ``False``.
"""
warn('the public API for mutable operations is deprecated '
'and will be removed in a future version of Menpo. '
'Create a new transform instead.', MenpoDeprecationWarning)
if self.h_matrix_is_mutable:
self._set_h_matrix(value, copy=copy, skip_checks=skip_checks)
else:
raise NotImplementedError(
"h_matrix cannot be set on {}".format(self._transform_str()))
def _set_h_matrix(self, value, copy=True, skip_checks=False):
r"""
Actually updates the ``h_matrix``, optionally performing sanity checks.
Called by :meth:`set_h_matrix` on classes that have
:attr:`h_matrix_is_mutable` as ``True``.
Every subclass should invoke this method internally when the
``h_matrix`` needs to be set in order to get the most sanity checking
possible.
Parameters
----------
value : `ndarray`
The new homogeneous matrix to set
copy : `bool`, optional
If ``False``, do not copy the h_matrix. Useful for performance.
skip_checks : `bool`, optional
If ``True``, skip checking. Useful for performance.
"""
if copy:
value = value.copy()
self._h_matrix = value
@property
def n_dims(self):
r"""
The dimensionality of the data the transform operates on.
:type: `int`
"""
return self.h_matrix.shape[1] - 1
@property
def n_dims_output(self):
r"""
The dimensionality of the output of the transform.
:type: `int`
"""
# doesn't have to be a square homogeneous matrix...
return self.h_matrix.shape[0] - 1
def _apply(self, x, **kwargs):
# convert to homogeneous
h_x = np.hstack([x, np.ones([x.shape[0], 1])])
# apply the transform
h_y = h_x.dot(self.h_matrix.T)
# normalize and return
return (h_y / h_y[:, -1][:, None])[:, :-1]
def _as_vector(self):
return self.h_matrix.ravel()
def _from_vector_inplace(self, vector):
"""
Update the state of this object from a vector form.
Parameters
----------
vector : ``(n_parameters,)`` `ndarray`
Flattened representation of this object
"""
self._set_h_matrix(vector.reshape(self.h_matrix.shape),
copy=True, skip_checks=True)
@property
def composes_inplace_with(self):
r"""
:class:`Homogeneous` can swallow composition with any other
:class:`Homogeneous`, subclasses will have to override and be more
specific.
"""
return Homogeneous
def compose_after_from_vector_inplace(self, vector):
self.compose_after_inplace(self.from_vector(vector))
@property
def composes_with(self):
r"""
Any Homogeneous can compose with any other Homogeneous.
"""
return Homogeneous
# noinspection PyProtectedMember
def _compose_before(self, t):
r"""
Chains a Homogeneous family transform with another transform of the
same family, producing a new transform that is the composition of
the two.
.. note::
The type of the returned transform is always the first common
ancestor between self and transform.
Any Alignment will be lost.
Parameters
----------
t : :class:`Homogeneous`
Transform to be applied **after** self
Returns
-------
transform : :class:`Homogeneous`
The resulting homogeneous transform.
"""
# note that this overload of the basic _compose_before is just to
# deal with the complexities of maintaining the correct class of
# transform upon composition
if isinstance(t, type(self)):
# He is a subclass of me - I can swallow him.
# What if I'm an Alignment though? Rules of composition state we
# have to produce a non-Alignment result. Nasty, but we check
# here to save a lot of repetition.
if isinstance(self, HomogFamilyAlignment):
new_self = self.as_non_alignment()
else:
new_self = self.copy()
new_self._compose_before_inplace(t)
elif isinstance(self, type(t)):
# I am a subclass of him - he can swallow me
new_self = t._compose_after(self)
elif isinstance(self, Similarity) and isinstance(t, Similarity):
# we're both in the Similarity family
new_self = Similarity(self.h_matrix)
new_self._compose_before_inplace(t)
elif isinstance(self, Affine) and isinstance(t, Affine):
# we're both in the Affine family
new_self = Affine(self.h_matrix)
new_self._compose_before_inplace(t)
else:
# at least one of us is Homogeneous
new_self = Homogeneous(self.h_matrix)
new_self._compose_before_inplace(t)
return new_self
# noinspection PyProtectedMember
def _compose_after(self, t):
r"""
Chains a Homogeneous family transform with another transform of the
same family, producing a new transform that is the composition of
the two.
.. note::
The type of the returned transform is always the first common
ancestor between self and transform.
Any Alignment will be lost.
Parameters
----------
t : :class:`Homogeneous`
Transform to be applied **before** self
Returns
-------
transform : :class:`Homogeneous`
The resulting homogeneous transform.
"""
# note that this overload of the basic _compose_after is just to
# deal with the complexities of maintaining the correct class of
# transform upon composition
# Source repository: johnathanlouie/PyAeonDB
from typing import List, Set, Dict, Tuple
import csv
import os
import json
import time
import datetime
Table = List[str]
Index = Dict[str, List[int]]
Fuzzy = Dict[str, List[str]]
ROOT_PATH = "C:/Arcology/AeonDB"
TABLE_DIR = "C:/Arcology/AeonDB/%s"
TABLE_PATH = "C:/Arcology/AeonDB/%s/table.txt"
INDEX_PATH = "C:/Arcology/AeonDB/%s/index.txt"
FUZZY_PATH = "C:/Arcology/AeonDB/%s/fuzzy.txt"
FUZZY2_PATH = "C:/Arcology/AeonDB/%s/fuzzy2.txt"
g_tables: Dict[str, Table] = dict()
g_indices: Dict[str, Index] = dict()
g_fuzzyDict: Dict[str, Fuzzy] = dict()
g_fuzzyDict2: Dict[str, Fuzzy] = dict()
def readTable(tableName: str) -> Table:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
return json.load(open(TABLE_PATH % tableName))
def writeTable(tableName: str, table: Table) -> None:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
json.dump(table, open(TABLE_PATH % tableName, 'w+'))
return None
def readIndex(tableName: str) -> Index:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
return json.load(open(INDEX_PATH % tableName))
def writeIndex(tableName: str, index: Index) -> None:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
json.dump(index, open(INDEX_PATH % tableName, 'w+'))
return None
def readFuzzy(tableName: str) -> Fuzzy:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
return json.load(open(FUZZY_PATH % tableName))
def writeFuzzy(tableName: str, fuzzy: Fuzzy) -> None:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
json.dump(fuzzy, open(FUZZY_PATH % tableName, 'w+'))
return None
def readFuzzy2(tableName: str) -> Fuzzy:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
return json.load(open(FUZZY2_PATH % tableName))
def writeFuzzy2(tableName: str, fuzzy: Fuzzy) -> None:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
json.dump(fuzzy, open(FUZZY2_PATH % tableName, 'w+'))
return None
def listTables() -> List[str]:
os.makedirs(ROOT_PATH, exist_ok=True)
return os.listdir(ROOT_PATH)
def timestamp() -> str:
return datetime.datetime.fromtimestamp(time.time()).strftime("%m/%d/%Y %H:%M:%S")
g_cmdHelpMap = {
"createtable" : "createTable {tableDesc}",
"getrows" : "getRows {tableName} {key} {count}",
"importtable" : "importTable {tableName} {CSV filespec}",
"listtables" : "listTables",
"indextable" : "indexTable {tableName}",
"find" : "find {tableName} {term1 term2 term3...}",
"fuzzysearch" : "fuzzySearch {tableName} {term1 term2 term3...}",
"quit" : "quit"
}
def printHelp() -> None:
for help in g_cmdHelpMap.values():
print(help)
return
def toBigrams(s: str) -> Set[str]:
ngrams = set()
if len(s) < 2:
ngrams.add(s)
return ngrams
for i in range(len(s) - 1):
ngrams.add(s[i:i+2])
return ngrams
def dicesCoefficient(a: Set[str], b: Set[str]) -> float:
return float(2 * len(a.intersection(b))) / float(len(a) + len(b))
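# Illustrative sketch (not part of the original AeonDB code): toBigrams() and
# dicesCoefficient() together implement the classic Dice/Sorensen string
# similarity used by the fuzzy index below. For example, "night" and "nacht"
# share only the bigram "ht", giving 2*1 / (4 + 4) = 0.25, well under the 0.6
# threshold used elsewhere in this module.
def exampleDiceSimilarity(a: str, b: str) -> float:
    """Return the bigram Dice similarity of two lowercase terms."""
    return dicesCoefficient(toBigrams(a.lower()), toBigrams(b.lower()))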
def preprocess(s: str) -> str:
s = s.replace("~", " ")
s = s.replace("`", " ")
s = s.replace("!", " ")
s = s.replace("@", " ")
s = s.replace("#", " ")
s = s.replace("$", " ")
s = s.replace("%", " ")
s = s.replace("^", " ")
s = s.replace("&", " ")
s = s.replace("*", " ")
s = s.replace("(", " ")
s = s.replace(")", " ")
s = s.replace("-", " ")
s = s.replace("_", " ")
s = s.replace("+", " ")
s = s.replace("=", " ")
s = s.replace("{", " ")
s = s.replace("}", " ")
s = s.replace("[", " ")
s = s.replace("]", " ")
s = s.replace("|", " ")
s = s.replace("\\", " ")
s = s.replace(";", " ")
s = s.replace(":", " ")
s = s.replace('"', " ")
s = s.replace("'", " ")
s = s.replace("<", " ")
s = s.replace(">", " ")
s = s.replace(",", " ")
s = s.replace(".", " ")
s = s.replace("/", " ")
s = s.replace("?", " ")
s = s.replace("1", " ")
s = s.replace("2", " ")
s = s.replace("3", " ")
s = s.replace("4", " ")
s = s.replace("5", " ")
s = s.replace("6", " ")
s = s.replace("7", " ")
s = s.replace("8", " ")
s = s.replace("9", " ")
s = s.replace("0", " ")
return s
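# Alternative sketch (an assumption, not part of the original code): the long
# chain of str.replace() calls in preprocess() can be expressed as a single
# translation table, which performs the same punctuation/digit stripping in one pass.
_STRIP_CHARS = "~`!@#$%^&*()-_+={}[]|\\;:\"'<>,./?1234567890"
_STRIP_TABLE = str.maketrans(_STRIP_CHARS, " " * len(_STRIP_CHARS))

def preprocessFast(s: str) -> str:
    """Equivalent of preprocess() using a precomputed translation table."""
    return s.translate(_STRIP_TABLE)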
def createIndex(table: Table) -> Tuple[Index, Fuzzy, Fuzzy]:
startTime = time.time()
index: Index = dict()
fuzzy1: Fuzzy = dict()
fuzzy2: Fuzzy = dict()
fuzzy3: Dict[str, Set[str]] = dict()
for rowId in range(len(table)):
row = table[rowId]
row = preprocess(row).lower()
terms = set(row.split())
if "" in terms:
terms.remove("")
for term in terms:
if term not in index:
index.update({term: list()})
rowIds = index.get(term)
if rowId not in rowIds:
rowIds.append(rowId)
if term not in fuzzy3:
atLeastOneBigram = set()
bigrams = toBigrams(term)
fuzzy3.update({term: bigrams})
for bigram in bigrams:
if bigram not in fuzzy2:
fuzzy2.update({bigram: list()})
bigramList = fuzzy2.get(bigram)
bigramList.append(term)
atLeastOneBigram.update(bigramList)
related = list()
for term2 in atLeastOneBigram:
if term == term2:
related.append(term2)
elif dicesCoefficient(fuzzy3.get(term), fuzzy3.get(term2)) > 0.6:
related.append(term2)
fuzzy1.get(term2).append(term)
fuzzy1.update({term: related})
print("Indexed row %d of %d." % (rowId, len(table)))
print("Indexing Time: " + str(time.time() - startTime))
return index, fuzzy1, fuzzy2
def importCsv(filename: str) -> Table:
table = [" ".join(row) for row in csv.reader(open(filename))]
table.pop(0)
return table
def expandQuery(term: str, index: Index, fuzzy: Fuzzy, fuzzy2: Fuzzy) -> Set[int]:
rowIds = set()
relateds = set()
if term not in fuzzy:
possiblyRelateds = set()
bigrams = toBigrams(term)
for bigram in bigrams:
if bigram in fuzzy2:
possiblyRelateds.update(fuzzy2.get(bigram))
for pRelated in possiblyRelateds:
if dicesCoefficient(toBigrams(pRelated), bigrams) > 0.6:
relateds.add(pRelated)
else:
relateds = fuzzy.get(term)
for related in relateds:
rowIds.update(index.get(related))
return rowIds
def find(keyTerms: Set[str], table: Table, index: Index, fuzzy: Fuzzy, fuzzy2: Fuzzy, isFuzzy: bool) -> Table:
lowKeyTerms = {term.lower() for term in keyTerms}
rowIds = set()
results = list()
first = lowKeyTerms.pop()
if isFuzzy:
rowIds.update(expandQuery(first, index, fuzzy, fuzzy2))
elif first in index:
rowIds.update(index.get(first))
else:
return results
for word in lowKeyTerms:
if isFuzzy:
rowIds.intersection_update(expandQuery(word, index, fuzzy, fuzzy2))
elif word in index:
rowIds.intersection_update(index.get(word))
else:
return results
for i in rowIds:
results.append(table[i])
return results
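# Illustrative sketch (not part of the original demo): end-to-end use of the
# inverted index. createIndex() returns (term -> rowIds), (term -> related terms)
# and (bigram -> terms); find() intersects the rowId sets of every query term.
def exampleSearch() -> Table:
    table: Table = ["red apple", "green apple", "red brick"]
    index, fuzzy1, fuzzy2 = createIndex(table)
    # Exact search: only row 0 contains both "red" and "apple".
    return find({"red", "apple"}, table, index, fuzzy1, fuzzy2, isFuzzy=False)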
def loadAllTables() -> None:
tableNames = listTables()
for tableName in tableNames:
print("%s Log.info: Table %s: Backup volume offline. Waiting for new volume." % (timestamp(), tableName))
try:
table = readTable(tableName)
g_tables.update({tableName: table})
print("%s Log.info: Table %s: Recovered %d rows." % (timestamp(), tableName, len(table)))
except OSError:
print("%s Log.info: Table %s: Could not read file." % (timestamp(), tableName))
except json.JSONDecodeError:
print("%s Log.info: Table %s: File is corrupted." % (timestamp(), tableName))
try:
index = readIndex(tableName)
g_indices.update({tableName: index})
print("%s Log.info: Index %s: Recovered %d terms." % (timestamp(), tableName, len(index)))
except OSError:
print("%s Log.info: Index %s: Could not read file." % (timestamp(), tableName))
except json.JSONDecodeError:
print("%s Log.info: Index %s: File is corrupted." % (timestamp(), tableName))
try:
fuzzy = readFuzzy(tableName)
g_fuzzyDict.update({tableName: fuzzy})
print("%s Log.info: Fuzzy %s: Recovered %d terms." % (timestamp(), tableName, len(fuzzy)))
except OSError:
print("%s Log.info: Fuzzy %s: Could not read file." % (timestamp(), tableName))
except json.JSONDecodeError:
print("%s Log.info: Fuzzy %s: File is corrupted." % (timestamp(), tableName))
try:
fuzzy2 = readFuzzy2(tableName)
g_fuzzyDict2.update({tableName: fuzzy2})
print("%s Log.info: Fuzzy2 %s: Recovered %d terms." % (timestamp(), tableName, len(fuzzy2)))
except OSError:
print("%s Log.info: Fuzzy2 %s: Could not read file." % (timestamp(), tableName))
except json.JSONDecodeError:
print("%s Log.info: Fuzzy2 %s: File is corrupted." % (timestamp(), tableName))
print("AeonDB ready. %d tables available." % len(tableNames))
return None
def prompt() -> List[str]:
args = input(" : ").split()
args[0] = args[0].lower()
return args
def main() -> None:
print("%s AeonDB 1.0 beta 65" % timestamp())
print(u"%s Copyright © 2011-2018 by Kronosaur Productions LLC. All Rights Reserved." % timestamp())
loadAllTables()
args = prompt()
while args[0] != "quit":
# createtable
if args[0] == "createtable":
if len(args) < 2:
print(g_cmdHelpMap.get(args[0]))
else:
print("Not implemented for demo.")
# getrows
elif args[0] == "getrows":
if len(args) < 4:
print(g_cmdHelpMap.get(args[0]))
else:
print("Not implemented for demo.")
# importtable
elif args[0] == "importtable":
if len(args) < 3:
print(g_cmdHelpMap.get(args[0]))
else:
csvName = args[2]
csvName = csvName.replace('"', "")
csvName = csvName.replace("'", "")
csvName = csvName.replace("/", "\\")
try:
tableObj = importCsv(csvName)
print("Imported %d rows to table %s." % (len(tableObj), args[1]))
g_tables.update({args[1] : tableObj})
print("Saving table %s to file." % args[1])
writeTable(args[1], tableObj)
except Exception:
print("Failed to import table. Check URI.")
# listtables
elif args[0] == "listtables":
if len(args) < 1:
print(g_cmdHelpMap.get(args[0]))
else:
for x in listTables():
print(x)
# indextable
elif args[0] == "indextable":
if len(args) < 2:
print(g_cmdHelpMap.get(args[0]))
else:
if args[1] in g_tables:
tableIndex, tableFuzzy1, tableFuzzy2 = createIndex(g_tables.get(args[1]))
g_indices.update({args[1] : tableIndex})
g_fuzzyDict.update({args[1] : tableFuzzy1})
g_fuzzyDict2.update({args[1] : tableFuzzy2})
try:
print("Saving index %s." % args[1])
writeIndex(args[1], tableIndex)
print("Saving fuzzy %s." % args[1])
writeFuzzy(args[1], tableFuzzy1)
print("Saving fuzzy2 %s." % args[1])
writeFuzzy2(args[1], tableFuzzy2)
except Exception:
print("Failed to write index to file.")
else:
print("Table %s does not exist." % args[1])
# find
elif args[0] == "find":
if len(args) < 3:
print(g_cmdHelpMap.get(args[0]))
else:
if args[1] not in g_tables:
print("Table %s does not exist." % args[1])
elif args[1] not in g_indices:
print("Index %s does not exist." % args[1])
elif args[1] not in g_fuzzyDict:
print("Fuzzy1 %s does not exist." % args[1])
elif args[1] not in g_fuzzyDict2:
print("Fuzzy2 %s does not exist." % args[1])
else:
results = find(set(args[2:]), g_tables.get(args[1]), g_indices.get(args[1]), g_fuzzyDict.get(args[1]), g_fuzzyDict2.get(args[1]), False)
for row in results:
print(row)
print("Found %d rows." % len(results))
# fuzzysearch
elif args[0] == "fuzzysearch":
if len(args) < 3:
print(g_cmdHelpMap.get(args[0]))
else:
if args[1] not in g_tables:
print("Table %s does not exist." % args[1])
elif
- m.b756 <= 0)
m.c805 = Constraint(expr= - m.b276 + m.b277 - m.b757 <= 0)
m.c806 = Constraint(expr= - m.b277 + m.b278 - m.b758 <= 0)
m.c807 = Constraint(expr= - m.b278 + m.b279 - m.b759 <= 0)
m.c808 = Constraint(expr= - m.b279 + m.b280 - m.b760 <= 0)
m.c809 = Constraint(expr= - m.b280 + m.b281 - m.b761 <= 0)
m.c810 = Constraint(expr= - m.b281 + m.b282 - m.b762 <= 0)
m.c811 = Constraint(expr= - m.b282 + m.b283 - m.b763 <= 0)
m.c812 = Constraint(expr= - m.b283 + m.b284 - m.b764 <= 0)
m.c813 = Constraint(expr= - m.b284 + m.b285 - m.b765 <= 0)
m.c814 = Constraint(expr= - m.b285 + m.b286 - m.b766 <= 0)
m.c815 = Constraint(expr= - m.b286 + m.b287 - m.b767 <= 0)
m.c816 = Constraint(expr= - m.b287 + m.b288 - m.b768 <= 0)
m.c817 = Constraint(expr= - m.b288 + m.b289 - m.b769 <= 0)
m.c818 = Constraint(expr= m.b290 - m.b770 <= 0)
m.c819 = Constraint(expr= - m.b290 + m.b291 - m.b771 <= 0)
m.c820 = Constraint(expr= - m.b291 + m.b292 - m.b772 <= 0)
m.c821 = Constraint(expr= - m.b292 + m.b293 - m.b773 <= 0)
m.c822 = Constraint(expr= - m.b293 + m.b294 - m.b774 <= 0)
m.c823 = Constraint(expr= - m.b294 + m.b295 - m.b775 <= 0)
m.c824 = Constraint(expr= - m.b295 + m.b296 - m.b776 <= 0)
m.c825 = Constraint(expr= - m.b296 + m.b297 - m.b777 <= 0)
m.c826 = Constraint(expr= - m.b297 + m.b298 - m.b778 <= 0)
m.c827 = Constraint(expr= - m.b298 + m.b299 - m.b779 <= 0)
m.c828 = Constraint(expr= - m.b299 + m.b300 - m.b780 <= 0)
m.c829 = Constraint(expr= - m.b300 + m.b301 - m.b781 <= 0)
m.c830 = Constraint(expr= - m.b301 + m.b302 - m.b782 <= 0)
m.c831 = Constraint(expr= - m.b302 + m.b303 - m.b783 <= 0)
m.c832 = Constraint(expr= - m.b303 + m.b304 - m.b784 <= 0)
m.c833 = Constraint(expr= - m.b304 + m.b305 - m.b785 <= 0)
m.c834 = Constraint(expr= - m.b305 + m.b306 - m.b786 <= 0)
m.c835 = Constraint(expr= - m.b306 + m.b307 - m.b787 <= 0)
m.c836 = Constraint(expr= - m.b307 + m.b308 - m.b788 <= 0)
m.c837 = Constraint(expr= - m.b308 + m.b309 - m.b789 <= 0)
m.c838 = Constraint(expr= - m.b309 + m.b310 - m.b790 <= 0)
m.c839 = Constraint(expr= - m.b310 + m.b311 - m.b791 <= 0)
m.c840 = Constraint(expr= - m.b311 + m.b312 - m.b792 <= 0)
m.c841 = Constraint(expr= - m.b312 + m.b313 - m.b793 <= 0)
m.c842 = Constraint(expr= m.b314 - m.b794 <= 0)
m.c843 = Constraint(expr= - m.b314 + m.b315 - m.b795 <= 0)
m.c844 = Constraint(expr= - m.b315 + m.b316 - m.b796 <= 0)
m.c845 = Constraint(expr= - m.b316 + m.b317 - m.b797 <= 0)
m.c846 = Constraint(expr= - m.b317 + m.b318 - m.b798 <= 0)
m.c847 = Constraint(expr= - m.b318 + m.b319 - m.b799 <= 0)
m.c848 = Constraint(expr= - m.b319 + m.b320 - m.b800 <= 0)
m.c849 = Constraint(expr= - m.b320 + m.b321 - m.b801 <= 0)
m.c850 = Constraint(expr= - m.b321 + m.b322 - m.b802 <= 0)
m.c851 = Constraint(expr= - m.b322 + m.b323 - m.b803 <= 0)
m.c852 = Constraint(expr= - m.b323 + m.b324 - m.b804 <= 0)
m.c853 = Constraint(expr= - m.b324 + m.b325 - m.b805 <= 0)
m.c854 = Constraint(expr= - m.b325 + m.b326 - m.b806 <= 0)
m.c855 = Constraint(expr= - m.b326 + m.b327 - m.b807 <= 0)
m.c856 = Constraint(expr= - m.b327 + m.b328 - m.b808 <= 0)
m.c857 = Constraint(expr= - m.b328 + m.b329 - m.b809 <= 0)
m.c858 = Constraint(expr= - m.b329 + m.b330 - m.b810 <= 0)
m.c859 = Constraint(expr= - m.b330 + m.b331 - m.b811 <= 0)
m.c860 = Constraint(expr= - m.b331 + m.b332 - m.b812 <= 0)
m.c861 = Constraint(expr= - m.b332 + m.b333 - m.b813 <= 0)
m.c862 = Constraint(expr= - m.b333 + m.b334 - m.b814 <= 0)
m.c863 = Constraint(expr= - m.b334 + m.b335 - m.b815 <= 0)
m.c864 = Constraint(expr= - m.b335 + m.b336 - m.b816 <= 0)
m.c865 = Constraint(expr= - m.b336 + m.b337 - m.b817 <= 0)
m.c866 = Constraint(expr= m.b338 - m.b818 <= 0)
m.c867 = Constraint(expr= - m.b338 + m.b339 - m.b819 <= 0)
m.c868 = Constraint(expr= - m.b339 + m.b340 - m.b820 <= 0)
m.c869 = Constraint(expr= - m.b340 + m.b341 - m.b821 <= 0)
m.c870 = Constraint(expr= - m.b341 + m.b342 - m.b822 <= 0)
m.c871 = Constraint(expr= - m.b342 + m.b343 - m.b823 <= 0)
m.c872 = Constraint(expr= - m.b343 + m.b344 - m.b824 <= 0)
m.c873 = Constraint(expr= - m.b344 + m.b345 - m.b825 <= 0)
m.c874 = Constraint(expr= - m.b345 + m.b346 - m.b826 <= 0)
m.c875 = Constraint(expr= - m.b346 + m.b347 - m.b827 <= 0)
m.c876 = Constraint(expr= - m.b347 + m.b348 - m.b828 <= 0)
m.c877 = Constraint(expr= - m.b348 + m.b349 - m.b829 <= 0)
m.c878 = Constraint(expr= - m.b349 + m.b350 - m.b830 <= 0)
m.c879 = Constraint(expr= - m.b350 + m.b351 - m.b831 <= 0)
m.c880 = Constraint(expr= - m.b351 + m.b352 - m.b832 <= 0)
m.c881 = Constraint(expr= - m.b352 + m.b353 - m.b833 <= 0)
m.c882 = Constraint(expr= - m.b353 + m.b354 - m.b834 <= 0)
m.c883 = Constraint(expr= - m.b354 + m.b355 - m.b835 <= 0)
m.c884 = Constraint(expr= - m.b355 + m.b356 - m.b836 <= 0)
m.c885 = Constraint(expr= - m.b356 + m.b357 - m.b837 <= 0)
m.c886 = Constraint(expr= - m.b357 + m.b358 - m.b838 <= 0)
m.c887 = Constraint(expr= - m.b358 + m.b359 - m.b839 <= 0)
m.c888 = Constraint(expr= - m.b359 + m.b360 - m.b840 <= 0)
m.c889 = Constraint(expr= - m.b360 + m.b361 - m.b841 <= 0)
m.c890 = Constraint(expr= m.b362 - m.b842 <= 0)
m.c891 = Constraint(expr= - m.b362 + m.b363 - m.b843 <= 0)
m.c892 = Constraint(expr= - m.b363 + m.b364 - m.b844 <= 0)
m.c893 = Constraint(expr= - m.b364 + m.b365 - m.b845 <= 0)
m.c894 = Constraint(expr= - m.b365 + m.b366 - m.b846 <= 0)
m.c895 = Constraint(expr= - m.b366 + m.b367 - m.b847 <= 0)
m.c896 = Constraint(expr= - m.b367 + m.b368 - m.b848 <= 0)
m.c897 = Constraint(expr= - m.b368 + m.b369 - m.b849 <= 0)
m.c898 = Constraint(expr= - m.b369 + m.b370 - m.b850 <= 0)
m.c899 = Constraint(expr= - m.b370 + m.b371 - m.b851 <= 0)
m.c900 = Constraint(expr= - m.b371 + m.b372 - m.b852 <= 0)
m.c901 = Constraint(expr= - m.b372 + m.b373 - m.b853 <= 0)
m.c902 = Constraint(expr= - m.b373 + m.b374 - m.b854 <= 0)
m.c903 = Constraint(expr= - m.b374 + m.b375 - m.b855 <= 0)
m.c904 = Constraint(expr= - m.b375 + m.b376 - m.b856 <= 0)
m.c905 = Constraint(expr= - m.b376 + m.b377 - m.b857 <= 0)
m.c906 = Constraint(expr= - m.b377 + m.b378 - m.b858 <= 0)
m.c907 = Constraint(expr= - m.b378 + m.b379 - m.b859 <= 0)
m.c908 = Constraint(expr= - m.b379 + m.b380 - m.b860 <= 0)
m.c909 = Constraint(expr= - m.b380 + m.b381 - m.b861 <= 0)
m.c910 = Constraint(expr= - m.b381 + m.b382 - m.b862 <= 0)
m.c911 = Constraint(expr= - m.b382 + m.b383 - m.b863 <= 0)
m.c912 = Constraint(expr= - m.b383 + m.b384 - m.b864 <= 0)
m.c913 = Constraint(expr= - m.b384 + m.b385 - m.b865 <= 0)
m.c914 = Constraint(expr= m.b386 - m.b866 <= 0)
m.c915 = Constraint(expr= - m.b386 + m.b387 - m.b867 <= 0)
m.c916 = Constraint(expr= - m.b387 + m.b388 - m.b868 <= 0)
m.c917 = Constraint(expr= - m.b388 + m.b389 - m.b869 <= 0)
m.c918 = Constraint(expr= - m.b389 + m.b390 - m.b870 <= 0)
m.c919 = Constraint(expr= - m.b390 + m.b391 - m.b871 <= 0)
m.c920 = Constraint(expr= - m.b391 + m.b392 - m.b872 <= 0)
m.c921 = Constraint(expr= - m.b392 + m.b393 - m.b873 <= 0)
m.c922 = Constraint(expr= - m.b393 + m.b394 - m.b874 <= 0)
m.c923 = Constraint(expr= - m.b394 + m.b395 - m.b875 <= 0)
m.c924 = Constraint(expr= - m.b395 + m.b396 - m.b876 <= 0)
m.c925 = Constraint(expr= - m.b396 + m.b397 - m.b877 <= 0)
m.c926 = Constraint(expr= - m.b397 + m.b398 - m.b878 <= 0)
m.c927 = Constraint(expr= - m.b398 + m.b399 - m.b879 <= 0)
m.c928 = Constraint(expr= - m.b399 + m.b400 - m.b880 <= 0)
m.c929 = Constraint(expr= - m.b400 + m.b401 - m.b881 <= 0)
m.c930 = Constraint(expr= - m.b401 + m.b402 - m.b882 <= 0)
m.c931 = Constraint(expr= - m.b402 + m.b403 - m.b883 <= 0)
m.c932 = Constraint(expr= - m.b403 + m.b404 - m.b884 <= 0)
m.c933 = Constraint(expr= - m.b404 + m.b405 - m.b885 <= 0)
m.c934 = | |
"""Plot Offline RL."""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import seaborn as sns # Daniel: not sure if needed
# https://stackoverflow.com/questions/43080259/
# no-outlines-on-bins-of-matplotlib-histograms-or-seaborn-distplots/43080772
plt.rcParams["patch.force_edgecolor"] = True
import pandas as pd
from copy import deepcopy
import os
import os.path as osp
import itertools
import numpy as np
import sys
import gym
import json
import time
import pickle
from spinup.user_config import DEFAULT_DATA_DIR as DDD
# Matplotlib stuff
titlesize = 32
xsize = 30
ysize = 30
ticksize = 28
legendsize = 23
er_alpha = 0.25
lw = 3
COLORS = ['red', 'blue', 'yellow', 'cyan', 'purple', 'black', 'brown', 'pink',
'silver', 'green', 'darkblue', 'orange']
COLORS_LINE = list(COLORS)
# Env Names
ENV_NAMES = ['ant', 'halfcheetah', 'hopper', 'walker2d']
def parse_to_get_data_type(pt_file):
"""Parse the .txt file to get the data type."""
base = os.path.basename(pt_file)
parsed = base.replace('-dtype-train.txt','')
parsed = parsed.split('-')
for idx,item in enumerate(parsed):
if item == 'noise':
return parsed[idx+1] # The NEXT item has noise type.
print(f'Something went wrong: {pt_file}')
sys.exit()
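# Illustrative sketch with a hypothetical filename (the real buffer names come
# from the offline RL runs): parse_to_get_data_type() splits the basename on '-'
# and returns the token that follows 'noise'.
def _example_parse():
    fake = '/tmp/buffer-noise-uniform_eps_0.1-dtype-train.txt'  # hypothetical path
    return parse_to_get_data_type(fake)  # -> 'uniform_eps_0.1'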
def smooth(data, window=1):
"""Try to smooth in a similar way as spinup's normal plotting code."""
if window > 1:
y = np.ones(window)
x = np.asarray(data)
z = np.ones(len(x))
smoothed_x = np.convolve(x,y,'same') / np.convolve(z,y,'same')
return smoothed_x
else:
return data
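# Quick sketch of what smooth() does: a same-length moving average whose edges
# are normalized by how many real samples fall inside the window (mirroring
# spinup's plotting). With window=3, a step sequence is softened at the jump.
def _example_smooth():
    return smooth([0.0, 0.0, 0.0, 3.0, 3.0, 3.0], window=3)
    # -> array([0., 0., 1., 2., 3., 3.])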
def sanity_checks(config, progress, teacher_path):
"""Some sanity checks on config.json and progress.txt, etc.
This will act as an extra layer of protection in case we have scp errors.
Remember that `config` was saved as we ran Offline RL. And that we have to
consider different top-level directories, so paths are compared starting from the
first instance of 'data' (even when the prefix is not literally /data). If this is
causing problems, just ignore it; it would be better to have these checks in the
original offline RL run anyway.
"""
assert config['buffer_path'][0] == '/', config['buffer_path']
assert teacher_path[0] == '/', teacher_path
# Remove everything up to the first instance of 'data'.
def remove_leading_paths(path):
path_split = path.split('/') # ['', 'data', ...]
for i in range(2, len(path_split)):
if path_split[i] == 'data':
return '/'.join(path_split[i:])
print(f'Something wrong happened: {path}')
sys.exit()
# The buffer path should be within the teacher path.
buffer_path = remove_leading_paths(config['buffer_path'])
teacher_path = remove_leading_paths(teacher_path)
assert teacher_path in buffer_path, f'{teacher_path} not in {buffer_path}'
def plot(args, teacher_seed_dir, student_dirs):
"""We fix the teacher, and plot performance of it, and its students.
To cycle through images, look at all teacher directories -- anything that
does NOT have 'offline' in its name; the 'offline' ones are students. I'm including sanity
checks on the student directories to catch any potential copy / paste errors.
For teachers, we first consider a directory which stores teacher information, with
subdirs corresponding to seeds. Example: '/data/spinup/data/halfcheetah_td3_act0-1'
means HalfCheetah TD3 teacher, trained with act_noise=0.1. We then consider a seed
inside, e.g., 'halfcheetah_td3_act0-1_s10' (for seed 10) and that's `teacher_seed_dir`.
There may be multiple ways we generated data buffers for that teacher, so plot
student performance together to compare.
Args:
teacher_seed_dir: (str) see description above.
student_dirs: (list) student directories that potentially should be in the plot.
These are of the form: <student_exp>/<teacher_base_with_seed>_<seed>/
This list contains all with env name matching that of the teacher, then
we filter to check (a) which teacher we used, then (b) which of these
students have data buffers from (a).
"""
window = args.window
nrows, ncols = 1, 2
fig, ax = plt.subplots(nrows, ncols, sharey=True, squeeze=False, figsize=(11*ncols, 8*nrows))
# Derive original Online DeepRL teacher results from 'progress.txt'.
prog_file = osp.join(teacher_seed_dir, 'progress.txt')
assert os.path.exists(prog_file), prog_file
teacher_data = pd.read_table(prog_file)
teacher_base = os.path.basename(teacher_seed_dir)
# Teacher performance. Test performance matches spinup's plots, so that's good.
ret_train = smooth(teacher_data['AverageEpRet'], window)
ret_test = smooth(teacher_data['AverageTestEpRet'], window)
label_train = f'{teacher_base} (Train)'
label_test = f'{teacher_base} (Test)'
ax[0,0].plot(ret_train, lw=lw, color=COLORS[0], label=label_train)
ax[0,0].plot(ret_test, lw=lw, color=COLORS[1], label=label_test)
# Next: consider other buffers we ran, these are from the last snapshot, and where
# we roll out the policies. Ignore any 'valid' files. These .txt files have only
# one row of statistics, so use `[...].iloc[0]` to get the row, then index by key.
# NOTE: on Jan 23, I switched to saving buffers in the same 'buffer' directory.
teacher_buf_dir1 = osp.join(teacher_seed_dir, 'buffer')
teacher_buf_dir2 = osp.join(teacher_seed_dir, 'rollout_buffer_txts')
if os.path.exists(teacher_buf_dir1):
teacher_buffers1 = sorted([osp.join(teacher_buf_dir1, x)
for x in os.listdir(teacher_buf_dir1) if 'valid' not in x and '.txt' in x])
else:
teacher_buffers1 = []
if os.path.exists(teacher_buf_dir2):
teacher_buffers2 = sorted([osp.join(teacher_buf_dir2, x)
for x in os.listdir(teacher_buf_dir2) if 'valid' not in x and '.txt' in x])
else:
teacher_buffers2 = []
teacher_buffers = sorted(teacher_buffers1 + teacher_buffers2)
if len(teacher_buffers) > 0:
# Filter teacher buffers so we only get most relevant one to the left subplot.
if len(args.list_distrs) > 0:
new_bufs_T = []
for d in teacher_buffers:
is_in_distr = False
for distr in args.list_distrs:
if distr in d:
is_in_distr = True
break
if is_in_distr:
new_bufs_T.append(d)
teacher_buffers = new_bufs_T # override
print('Buffers:')
# Horizontal dashed line for datasets. Will need to parse the noise description.
# This is the DATA-GENERATING POLICY performance and good to sanity check.
for tidx,tb in enumerate(teacher_buffers):
print(f'\t{tb}')
data_type = parse_to_get_data_type(tb)
tb_data = pd.read_table(tb)
buf_ret = tb_data.iloc[0]['AverageEpRet']
label = f'{data_type} [{buf_ret:0.0f}]'
if tidx < len(COLORS_LINE):
ax[0,0].axhline(buf_ret, ls='dashdot', lw=lw, color=COLORS_LINE[tidx], label=label)
else:
print(f'Skipping teacher {tb} due to too many buffers')
# NOW deal with students for this particular teacher, or any of its buffers [?].
# Student should be: <student_exp>/<teacher_base_with_seed>_<seed>/
# We MUST have `teacher_base` in the directory after <student_exp>.
sidx = 0
for sd in student_dirs:
student_subdirs = sorted([osp.join(sd,x) for x in os.listdir(sd) if teacher_base in x])
if len(student_subdirs) == 0:
continue
# Now `student_subdirs`, for THIS PARTICULAR, student, should only vary w/random seed.
# Combine all student runs together with random seeds. Note: smoothing applied BEFORE
# we append to `student_stats`, and before we take the mean / std for the plot.
print(f'\nStudent: {sd} has seeds:')
student_stats = []
print(student_subdirs)
for s_sub_dir in student_subdirs:
print(f'\t{s_sub_dir}')
prog_file = osp.join(s_sub_dir, 'progress.txt')
config_file = osp.join(s_sub_dir, 'config.json')
assert os.path.exists(prog_file), prog_file
assert os.path.exists(config_file), config_file
with open(config_file, 'rb') as fh:
config_data = json.load(fh)
student_data = pd.read_table(prog_file)
sanity_checks(config=config_data,
progress=student_data,
teacher_path=teacher_seed_dir)
student_result = smooth(student_data['AverageTestEpRet'], window)
student_stats.append(student_result)
# break
# Extract label which is the <student_exp> not the <teacher_base_with_seed> portion.
# However, we probably don't need the env name in `tail` as that's in the title.
head, tail = os.path.split(sd)
for name in ENV_NAMES:
if name in tail:
tail = tail.replace(f'{name}_', '')
s_label = f'{tail}\n(x{len(student_stats)})' # newline due to space
# Shape is (num_seeds, num_recordings=250), usually 250 due to (1M steps = 250 epochs).
student_stats = np.array(student_stats)
student_ret = np.mean(student_stats, axis=0)
student_std = np.std(student_stats, axis=0)
nb_seeds = student_stats.shape[0]
s_label += f' [{student_ret[-1]:0.1f}]'
s_label = s_label.replace('offline_', 'off_')
# Actually plot (with error regions if applicable). Standard error of mean.
x_vals = np.arange(len(student_ret))
ax[0,1].plot(x_vals, student_ret, lw=lw, color=COLORS[sidx], label=s_label)
if len(student_stats) > 1:
ax[0,1].fill_between(x_vals,
student_ret - (student_std / np.sqrt(nb_seeds)),
student_ret + (student_std / np.sqrt(nb_seeds)),
color=COLORS[sidx],
alpha=0.5)
sidx += 1
ax[0,0].set_title(f'Teacher {args.name}', size=titlesize)
ax[0,1].set_title(f'Students {args.name}', size=titlesize)
ax[0,0].set_xlabel('Train Epochs', size=xsize)
ax[0,1].set_xlabel('Train Epochs', size=xsize)
ax[0,0].set_ylabel('TestEpReturn', size=ysize)
ax[0,1].set_ylabel('TestEpReturn', size=ysize)
for r in range(nrows):
for c in range(ncols):
leg = ax[r,c].legend(loc="best", ncol=1, prop={'size':legendsize})
for legobj in leg.legendHandles:
legobj.set_linewidth(5.0)
ax[r,c].tick_params(axis='x', labelsize=ticksize)
ax[r,c].tick_params(axis='y', labelsize=ticksize)
plt.tight_layout()
fig_suffix = 'plot_offline_BCQ_rl.png'
if args.add_naive:
fig_suffix = fig_suffix.replace('.png', '_naive.png')
if args.add_np:
fig_suffix = fig_suffix.replace('.png', '_np.png')
if len(args.list_distrs) > 0:
fig_suffix = fig_suffix.replace('.png', f'_{args.list_distrs}.png')
figname = osp.join(teacher_seed_dir, fig_suffix)
plt.savefig(figname)
print(f'\nSAVED FIGURE: {figname}')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str)
parser.add_argument('--window', type=int, default=3)
parser.add_argument('--add_naive', action='store_true', help='Plot runs w/naive BCQ')
parser.add_argument('--add_np', action='store_true', help='Plot runs w/noise predictor')
# 0 or more values expected => creates a list to help filter by distribution.
parser.add_argument('--list_distrs', nargs="*", type=str, default=[])
args = parser.parse_args()
assert args.name in ENV_NAMES, f'Error, please use a name in: {ENV_NAMES}'
# Scan for relevant directories in the DEFAULT_DATA_DIR w/the env_name.
if not os.path.exists(DDD):
print(f'{DDD} does not exist! Please check.')
sys.exit()
directories = [osp.join(DDD,x) for x in os.listdir(DDD) if args.name in x]
# ------------------------------ FILTER TEACHER/STUDENT DIRECTORIES ------------------------------ #
# Key assumption: 'offline' is in the directory name for Offline RL!
# Key assumption: each directory within dirs_T is for a random seed.
dirs_T = sorted([d for d in directories if 'offline_' not in d])
dirs_S = sorted([d for d in directories if 'offline_' in d])
# Further filter the student directory. It's a bit clumsy.
new_dirs_S = []
for d
#!/usr/bin/env python
from io import StringIO
import unittest
import numpy as np
from weblogo import data
from weblogo.matrix import AlphabeticArray, Motif, SubMatrix
from weblogo.seq import protein_alphabet, Alphabet, unambiguous_protein_alphabet
from . import data_stream
class test_AlphabeticArray(unittest.TestCase):
def test_create(self):
matrix = AlphabeticArray((protein_alphabet, protein_alphabet))
matrix['A', 'C'] = 10
assert matrix[0, 1] == 10.0
class test_Motif(unittest.TestCase):
def test_read_transfac_alphabet_superset(self):
with data_stream("transfac_matrix.txt") as f:
Motif.read_transfac(f, alphabet='TCGA')
# Supplied alphabet can be superset of defacto alphabet.
# Reverts to defacto alphabet
with data_stream("transfac_matrix.txt") as f:
Motif.read_transfac(f, alphabet='TCGAXYZ')
def test_read_transfac(self):
f = data_stream("transfac_matrix.txt")
m = Motif.read_transfac(f)
f.close()
assert m[3, 'A'] == 0.0
assert m[0, 'G'] == 2.0
assert np.shape(m.array) == (12, 4)
f.close()
f = data_stream("transfac_matrix2.txt")
m = Motif.read_transfac(f)
f.close()
assert m[3, 'A'] == 3.0
assert m[0, 'G'] == 152.0
assert np.shape(m.array) == (15, 4)
# this one has extra Ps on start of each line
f = data_stream("transfac_matrix3.txt")
m = Motif.read_transfac(f)
f.close()
def test_reindex(self):
f = data_stream("transfac_matrix.txt")
m = Motif.read_transfac(f)
f.close()
m2 = m.reindex("TCGA")
assert (str(m2.alphabet) == "TCGA")
for k in range(0, 12):
for i, a in enumerate("AGCT"):
assert m[k, a] == m2[k, a]
def test_reverse(self):
f = data_stream("transfac_matrix.txt")
m = Motif.read_transfac(f)
f2 = data_stream("transfac_matrix.txt")
m2 = Motif.read_transfac(f2)
m2.reverse()
(K, N) = np.shape(m2)
for k in range(0, K):
for n in range(0, N):
assert (m[k, n] == m2[K - k - 1, n])
f.close()
f2.close()
def test_complement(self):
f = data_stream("transfac_matrix.txt")
m = Motif.read_transfac(f)
f2 = data_stream("transfac_matrix.txt")
m2 = Motif.read_transfac(f2)
m2.complement()
(K, N) = np.shape(m2)
for k in range(0, K):
assert (m[k, 'A'] == m2[k, 'T'])
assert (m[k, 'G'] == m2[k, 'C'])
assert (m[k, 'C'] == m2[k, 'G'])
assert (m[k, 'T'] == m2[k, 'A'])
f.close()
f2.close()
def test_reverse_complement(self):
f = data_stream("transfac_matrix.txt")
m = Motif.read_transfac(f)
f2 = data_stream("transfac_matrix.txt")
m2 = Motif.read_transfac(f2)
m.complement()
m.reverse()
m2.reverse_complement()
assert (m.array == m2.array).all()
f.close()
f2.close()
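# Illustrative sketch (not part of the weblogo test suite): the reverse /
# complement / reverse_complement tests above boil down to two numpy operations
# on a (positions x ACGT) count matrix: swap the A<->T and C<->G columns, then
# flip the position axis.
def _reverse_complement_counts(counts):
    """counts: (K, 4) ndarray with columns ordered A, C, G, T."""
    complemented = counts[:, ::-1]   # ACGT -> TGCA, i.e. swap A<->T and C<->G
    return complemented[::-1, :]     # then reverse the positions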
class test_SubMatrix(unittest.TestCase):
def test_create(self):
ab = 'ABCD'
ar = np.asarray([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
s = SubMatrix(ab, ar)
assert s[0, 0] == 1
assert s['A', 'A'] == 1
assert s['B', 'C'] == 7
s['B', 'C'] = -1
assert s['B', 'C'] == -1
def test_get(self):
ab = Alphabet('ABCD')
ar = np.asarray([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
s = SubMatrix(ab, ar)
s1 = 'DCCBBBAAA'
s2 = 'BA'
v = s.index((s1, s2))
# print v
for m, i in enumerate(s1):
for n, j in enumerate(s2):
assert s[i, j] == v[m, n]
def test_get_subMatrix(self):
ab = Alphabet('ABCD')
ar = np.asarray([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
mat = SubMatrix(ab, ar)
mat2 = mat.reindex('ABC')
assert np.all(mat2.array == np.asarray([[1, 2, 3], [5, 6, 7], [9, 10, 11]]))
mat2 = mat.reindex('BA')
assert np.all(mat2.array == np.asarray([[6, 5], [2, 1]]))
mat2 = mat.reindex(Alphabet('BA'))
assert np.all(mat2.array == np.asarray([[6, 5], [2, 1]]))
def test_fail_get(self):
ab = Alphabet('ABCD')
ar = np.asarray([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
s = SubMatrix(ab, ar)
self.assertRaises(IndexError, s.__getitem__, ('E', 'A'))
self.assertRaises(IndexError, s.__getitem__, ('5', '6'))
# FIXME
self.assertRaises(IndexError, s.index, ('E', 'A'))
def test_repr(self):
ab = Alphabet('ABCD')
ar = np.asarray([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
s = SubMatrix(ab, ar)
repr(s)
# print string
def test_read(self):
f = StringIO(test_matrix1)
mat = SubMatrix.read(f)
assert mat['a', 'a'] == 4
def test_read_asymmetric_fail(self):
f = StringIO(test_matrix4)
self.assertRaises(ValueError, SubMatrix.read, f)
def test_read_alphabets(self):
# incompatable alphabets
f = StringIO(test_matrix3)
self.assertRaises(ValueError,
SubMatrix.read, f)
f = StringIO(test_matrix3)
SubMatrix.read(f, alphabet=Alphabet('ARNDCQEGHILKMFPSTWYV'))
f2 = StringIO(test_matrix1)
self.assertRaises(ValueError,
SubMatrix.read, f2, unambiguous_protein_alphabet)
def test_read_corrupt(self):
f = StringIO(test_matrix2)
self.assertRaises(ValueError,
SubMatrix.read, f)
def test_read_pam(self):
f = data.data_stream("pam250")
mat = SubMatrix.read(f)
self.assertEqual(mat[0, 0], 2.0)
f.close()
f = data.data_stream("pam120")
mat = SubMatrix.read(f)
self.assertEqual(mat[4, 5], -7)
f.close()
def test_read_blosum(self):
f = data.data_stream("blosum80")
mat = SubMatrix.read(f)
self.assertEqual(mat[0, 10], -3)
f.close()
f = data.data_stream("blosum62")
mat = SubMatrix.read(f)
self.assertEqual(mat[4, 5], -4)
f.close()
def test_read_blast(self):
# New style blast matrices have letters at beginning of lines and a '*'
f = data_stream("blosum35.blast.new")
mat = SubMatrix.read(f)
self.assertEqual(mat[4, 5], -3)
f.close()
# Matrices formatted for old blast have a '*' (stop)
# column and no letters at the beginning of lines
f = data_stream("blosum35.blast")
mat = SubMatrix.read(f)
self.assertEqual(mat[0, 10], -2)
self.assertEqual(mat.array.shape, (23, 23))
f.close()
# For comparison, we'll also parse a matrix without '*'
f = data_stream("pam250.mat")
mat = SubMatrix.read(f)
self.assertEqual(mat[4, 5], -5)
f.close()
test_matrix1 = """# A Test Matrix
# More comments
# And blank line should be ignored
A 4 -2 -2 -2 0 -1 -1 -1 -2 -2 -2 -2 -1 -2 -1 0 -1 -3 -2 -1 -2 -1 -1
R -2 6 -1 -1 -4 1 0 -3 0 -3 -3 2 -2 -3 -2 -1 -1 -2 -2 -3 3 -1 -1
N -2 -1 7 1 -3 0 0 -1 0 -5 -4 0 -3 -4 -2 0 -1 -3 -2 -4 3 -1 -1
D -2 -1 1 7 -4 0 1 -1 -1 -6 -5 0 -4 -5 -1 0 -1 -4 -3 -5 0 -2 -2
C 0 -4 -3 -4 12 -3 -4 -3 -3 -1 -2 -4 -1 -2 -3 -2 -1 -2 -2 0 -3 5 -2
Q -1 1 0 0 -3 6 1 -2 0 -3 -3 1 -2 -3 -1 0 -1 -2 -2 -3 0 1 -1
E -1 0 0 1 -4 1 5 -2 -1 -4 -4 1 -3 -4 -1 -1 -1 -3 -3 -4 0 -1 -1
G -1 -3 -1 -1 -3 -2 -2 7 -2 -6 -5 -2 -4 -5 -2 -1 -2 -4 -4 -5 -2 -2 -2
H -2 0 0 -1 -3 0 -1 -2 9 -3 -3 -1 -2 -1 -2 -1 -1 0 0 -3 0 -1 -1
I -2 -3 -5 -6 -1 -3 -4 -6 -3 5 2 -4 1 0 -4 -4 -2 -1 -1 3 -4 -2 -2
L -2 -3 -4 -5 -2 -3 -4 -5 -3 2 5 -3 2 1 -3 -3 -2 -1 -1 1 -4 -2 -2
K -2 2 0 0 -4 1 1 -2 -1 -4 -3 5 -2 -4 -1 -1 -1 -3 -3 -3 1 -1 -1
M -1 -2 -3 -4 -1 -2 -3 -4 -2 1 2 -2 7 1 -3 -2 -1 0 0 1 -3 -2 -1
F -2 -3 -4 -5 -2 -3 -4 -5 -1 0 1 -4 1 7 -3 -3 -2 3 3 0 -3 -2 -1
P -1 -2 -2 -1 -3 -1 -1 -2 -2 -4 -3 -1 -3 -3 8 -1 -2 -3 -3 -3 -2 -2 -2
S 0 -1 0 0 -2 0 -1 -1 -1 -4 -3 -1 -2 -3 -1 4 1 -3 -2 -3 0 -1 -1
T -1 -1 -1 -1 -1 -1 -1 -2 -1 -2 -2 -1 -1 -2 -2 1 5 -2 -2 -1 -1 -1 -1
W -3 -2 -3 -4 -2 -2 -3 -4 0 -1 -1 -3 0 3 -3 -3 -2 12 3 -2 -3 -2 -1
Y -2 -2 -2 -3 -2 -2 -3 -4 0 -1 -1 -3 0 3 -3 -2 -2 3 8 -2 -2 -2 -1
V -1 -3 -4 -5 0 -3 -4 -5 -3 3 1 -3 1 0 -3 -3 -1 -2 -2 5 -4 -2 -2
B -2 3 3 0 -3 0 0 -2 0 -4 -4 1 -3 -3 -2 0 -1 -3 -2 -4 3 -1 -1
Z -1 -1 -1 -2 5 1 -1 -2 -1 -2 -2 -1 -2 -2 -2 -1 -1 -2 -2 -2 -1 3 -1
X -1 -1 -1 -2 -2 -1 -1 -2 -1 -2 -2 -1 -1 -1 -2 -1 -1 -1 -1 -2 -1 -1 -1
"""
test_matrix2 = """# An invalid Test Matrix
# Its got a non-numerical value in it. Is the correct exception raised?
# And blank line should be ignored
A 4 -2 -2 -2 0 -1 -1 -1 -2 -2 -2 -2 -1 -2 -1 0
NaN
image N string NaN
imagescale N boolstring NaN
orientation N double NaN
regular N bool NaN
samplepoints N int NaN
shape N shape NaN
shapefile N string NaN
sides N int NaN
skew N double NaN
width N double NaN
z N double NaN
"""
# TODO: make a gridsearchable config for layouts
@staticmethod
def get_param_info_list():
param_info_list = [
# GENERAL
ut.ParamInfo(
'splines',
'spline',
valid_values=[
'none',
'line',
'polyline',
'curved',
'ortho',
'spline',
],
),
ut.ParamInfo('pack', True),
ut.ParamInfo('packmode', 'cluster'),
# ut.ParamInfo('nodesep', ?),
# NOT DOT
ut.ParamInfo(
'overlap', 'prism', valid_values=['true', 'false', 'prism', 'ipsep']
),
ut.ParamInfo('sep', 1 / 8),
ut.ParamInfo('esep', 1 / 8), # stricly less than sep
# NEATO ONLY
ut.ParamInfo('mode', 'major', valid_values=['heir', 'KK', 'ipsep']),
# kwargs['diredgeconstraints'] = 'heir'
# kwargs['inputscale'] = kwargs.get('inputscale', 72)
# kwargs['Damping'] = kwargs.get('Damping', .1)
# DOT ONLY
ut.ParamInfo('rankdir', 'LR', valid_values=['LR', 'RL', 'TB', 'BT']),
ut.ParamInfo('ranksep', 2.5),
ut.ParamInfo('nodesep', 2.0),
ut.ParamInfo('clusterrank', 'local', valid_values=['local', 'global'])
# OUTPUT ONLY
# kwargs['dpi'] = kwargs.get('dpi', 1.0)
]
return param_info_list
except Exception:
pass
def get_explicit_graph(graph):
"""
Args:
graph (nx.Graph)
"""
import copy
def get_nx_base(graph):
import networkx as nx
if isinstance(graph, nx.MultiDiGraph):
base_class = nx.MultiDiGraph
elif isinstance(graph, nx.MultiGraph):
base_class = nx.MultiGraph
elif isinstance(graph, nx.DiGraph):
base_class = nx.DiGraph
elif isinstance(graph, nx.Graph):
base_class = nx.Graph
else:
assert False
return base_class
base_class = get_nx_base(graph)
explicit_graph = base_class()
explicit_graph.graph = copy.deepcopy(graph.graph)
explicit_nodes = graph.nodes(data=True)
explicit_edges = [
(n1, n2, data)
for (n1, n2, data) in graph.edges(data=True)
if data.get('implicit', False) is not True
]
explicit_graph.add_nodes_from(explicit_nodes)
explicit_graph.add_edges_from(explicit_edges)
return explicit_graph
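# Minimal usage sketch (assumes networkx is available, as elsewhere in this
# module): get_explicit_graph() keeps the graph/node data but drops every edge
# tagged with implicit=True, which is how layout-only edges are hidden.
def _example_explicit_graph():
    import networkx as nx
    g = nx.DiGraph()
    g.add_edge('a', 'b')
    g.add_edge('b', 'c', implicit=True)   # layout-only edge
    return list(get_explicit_graph(g).edges())  # -> [('a', 'b')]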
def get_nx_layout(graph, layout, layoutkw=None, verbose=None):
import networkx as nx
if layoutkw is None:
layoutkw = {}
layout_info = {}
if layout == 'custom':
edge_keys = list(
reduce(
set.union,
[set(edge[-1].keys()) for edge in graph.edges(data=True)],
set([]),
)
)
node_keys = list(
reduce(
set.union,
[set(node[-1].keys()) for node in graph.nodes(data=True)],
set([]),
)
)
graph_keys = list(graph.graph.keys())
layout_info = {
'graph': {k: graph.graph.get(k) for k in graph_keys},
'node': {k: nx.get_node_attributes(graph, k) for k in node_keys},
'edge': {k: nx.get_edge_attributes(graph, k) for k in edge_keys},
}
# Post checks
node_info = layout_info['node']
if 'size' not in node_info:
if 'width' in node_info and 'height' in node_info:
node_info['size'] = {
node: (node_info['width'][node], node_info['height'][node])
for node in graph.nodes()
}
# node_info['size'] = list(zip(node_info['width'],
# node_info['height']))
elif layout == 'agraph':
# PREFERED LAYOUT WITH MOST CONTROL
_, layout_info = nx_agraph_layout(graph, verbose=verbose, **layoutkw)
else:
raise ValueError('Undefined layout = %r' % (layout,))
return layout_info
def apply_graph_layout_attrs(graph, layout_info):
import networkx as nx
def noneish(v):
isNone = v is None
isNoneStr = isinstance(v, str) and v.lower() == 'none'
return isNone or isNoneStr
for key, vals in layout_info['node'].items():
vals = {n: v for n, v in vals.items() if not noneish(n)}
nx.set_node_attributes(graph, name=key, values=vals)
for key, vals in layout_info['edge'].items():
vals = {e: v for e, v in vals.items() if not noneish(e)}
nx.set_edge_attributes(graph, name=key, values=vals)
graph_attrs = {k: v for k, v in layout_info['graph'].items() if not noneish(k)}
graph.graph.update(graph_attrs)
def patch_pygraphviz():
"""
Hacks around a python3 problem in 1.3.1 of pygraphviz
"""
import pygraphviz
if pygraphviz.__version__ != '1.3.1':
return
if hasattr(pygraphviz.agraph.AGraph, '_run_prog_patch'):
return
def _run_prog(self, prog='nop', args=''):
"""Apply graphviz program to graph and return the result as a string.
>>> A = AGraph()
>>> s = A._run_prog() # doctest: +SKIP
>>> s = A._run_prog(prog='acyclic') # doctest: +SKIP
Use keyword args to add additional arguments to graphviz programs.
"""
from pygraphviz.agraph import shlex, subprocess, PipeReader, warnings
runprog = r'"%s"' % self._get_prog(prog)
cmd = ' '.join([runprog, args])
dotargs = shlex.split(cmd)
p = subprocess.Popen(
dotargs,
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=False,
)
(child_stdin, child_stdout, child_stderr) = (p.stdin, p.stdout, p.stderr)
# Use threading to avoid blocking
data = []
errors = []
threads = [PipeReader(data, child_stdout), PipeReader(errors, child_stderr)]
for t in threads:
t.start()
self.write(child_stdin)
child_stdin.close()
for t in threads:
t.join()
if not data:
raise IOError(b''.join(errors))
if len(errors) > 0:
warnings.warn(str(b''.join(errors)), RuntimeWarning)
return b''.join(data)
# Patch error in pygraphviz
pygraphviz.agraph.AGraph._run_prog_patch = _run_prog
pygraphviz.agraph.AGraph._run_prog_orig = pygraphviz.agraph.AGraph._run_prog
pygraphviz.agraph.AGraph._run_prog = _run_prog
def make_agraph(graph_):
# FIXME; make this not an inplace operation
import networkx as nx
import pygraphviz
patch_pygraphviz()
# Convert to agraph format
num_nodes = len(graph_)
is_large = num_nodes > LARGE_GRAPH
if is_large:
logger.info(
'Making agraph for large graph %d nodes. ' 'May take time' % (num_nodes)
)
ut.nx_ensure_agraph_color(graph_)
# Reduce size to be in inches not pixels
# FIXME: make robust to param settings
# Hack to make the w/h of the node take thae max instead of
# dot which takes the minimum
shaped_nodes = [n for n, d in graph_.nodes(data=True) if 'width' in d]
node_dict = ut.nx_node_dict(graph_)
node_attrs = ut.dict_take(node_dict, shaped_nodes)
width_px = np.array(ut.take_column(node_attrs, 'width'))
height_px = np.array(ut.take_column(node_attrs, 'height'))
scale = np.array(ut.dict_take_column(node_attrs, 'scale', default=1.0))
inputscale = 72.0
width_in = width_px / inputscale * scale
height_in = height_px / inputscale * scale
width_in_dict = dict(zip(shaped_nodes, width_in))
height_in_dict = dict(zip(shaped_nodes, height_in))
nx.set_node_attributes(graph_, name='width', values=width_in_dict)
nx.set_node_attributes(graph_, name='height', values=height_in_dict)
ut.nx_delete_node_attr(graph_, name='scale')
# Check for any nodes with groupids
node_to_groupid = nx.get_node_attributes(graph_, 'groupid')
if node_to_groupid:
groupid_to_nodes = ut.group_items(*zip(*node_to_groupid.items()))
else:
groupid_to_nodes = {}
# Initialize agraph format
# import utool
# utool.embed()
ut.nx_delete_None_edge_attr(graph_)
agraph = nx.nx_agraph.to_agraph(graph_)
# Add subgraphs labels
# TODO: subgraph attrs
group_attrs = graph_.graph.get('groupattrs', {})
for groupid, nodes in groupid_to_nodes.items():
# subgraph_attrs = {}
subgraph_attrs = group_attrs.get(groupid, {}).copy()
cluster_flag = True
# FIXME: make this more natural to specify
if 'cluster' in subgraph_attrs:
cluster_flag = subgraph_attrs['cluster']
del subgraph_attrs['cluster']
# subgraph_attrs = dict(rankdir='LR')
# subgraph_attrs = dict(rankdir='LR')
# subgraph_attrs['rank'] = 'min'
# subgraph_attrs['rank'] = 'source'
name = groupid
if cluster_flag:
# graphviz treats subgraphs labeled with cluster differently
name = 'cluster_' + groupid
else:
name = groupid
agraph.add_subgraph(nodes, name, **subgraph_attrs)
import re
for node in graph_.nodes():
anode = pygraphviz.Node(agraph, node)
# TODO: Generally fix node positions
ptstr_ = anode.attr['pos']
if ptstr_ is not None and len(ptstr_) > 0 and not ptstr_.endswith('!'):
ptstr = ptstr_.strip('[]').strip(' ').strip('()')
ptstr_list = [x.rstrip(',') for x in re.split(r'\s+', ptstr)]
pt_list = list(map(float, ptstr_list))
pt_arr = np.array(pt_list) / inputscale
new_ptstr_list = list(map(str, pt_arr))
new_ptstr_ = ','.join(new_ptstr_list)
if anode.attr['pin'] is True:
anode.attr['pin'] = 'true'
if anode.attr['pin'] == 'true':
new_ptstr = new_ptstr_ + '!'
else:
new_ptstr = new_ptstr_
anode.attr['pos'] = new_ptstr
if graph_.graph.get('ignore_labels', False):
for node in graph_.nodes():
anode = pygraphviz.Node(agraph, node)
if 'label' in anode.attr:
try:
del anode.attr['label']
except KeyError:
pass
return agraph
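# Small sketch of the unit handling above (illustrative only, not part of the
# public API): make_agraph() converts node sizes from pixels to the inches that
# graphviz expects, using inputscale=72 and an optional per-node 'scale' attr.
def _px_to_inches(width_px, height_px, scale=1.0, inputscale=72.0):
    return width_px / inputscale * scale, height_px / inputscale * scale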
def _groupby_prelayout(graph_, layoutkw, groupby):
"""
sets the `pin` attr of `graph_` in place in order to pin nodes according to
the specified layout.
"""
import networkx as nx
has_pins = any(
[v.lower() == 'true' for v in nx.get_node_attributes(graph_, 'pin').values()]
)
has_pins &= all('pos' in d for n, d in graph_.nodes(data=True))
if not has_pins:
# Layout groups separately
node_to_group = nx.get_node_attributes(graph_, groupby)
group_to_nodes = ut.invert_dict(node_to_group, unique_vals=False)
subgraph_list = []
def subgraph_grid(subgraphs, hpad=None, vpad=None):
n_cols = int(np.ceil(np.sqrt(len(subgraphs))))
columns = [
ut.stack_graphs(chunk, vert=False, pad=hpad)
for chunk in ut.ichunks(subgraphs, n_cols)
]
new_graph = ut.stack_graphs(columns, vert=True, pad=vpad)
return new_graph
group_grid = graph_.graph.get('group_grid', None)
for group, nodes in group_to_nodes.items():
if group_grid:
subnode_list = [graph_.subgraph([node]) for node in nodes]
for sub in subnode_list:
sub.graph.update(graph_.graph)
nx_agraph_layout(sub, inplace=True, groupby=None, **layoutkw)
subgraph = subgraph_grid(subnode_list)
# subgraph = graph_.subgraph(nodes)
else:
subgraph = graph_.subgraph(nodes)
subgraph.graph.update(graph_.graph)
nx_agraph_layout(subgraph, inplace=True, groupby=None, **layoutkw)
subgraph_list.append(subgraph)
hpad = graph_.graph.get('hpad', None)
vpad = graph_.graph.get('vpad', None)
graph_ = subgraph_grid(subgraph_list, hpad, vpad)
# graph_ = ut.stack_graphs(subgraph_list)
nx.set_node_attributes(graph_, name='pin', values='true')
return True, graph_
else:
return False, graph_
# logger.info('WARNING: GROUPING WOULD CLOBBER PINS. NOT GROUPING')
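# Layout-grid sketch (illustrative only): _groupby_prelayout() above arranges the
# per-group subgraphs into a roughly square grid -- ceil(sqrt(n)) columns, with
# the resulting column graphs stacked vertically. The chunking reduces to:
def _example_grid_shape(n_subgraphs):
    import numpy as np
    n_cols = int(np.ceil(np.sqrt(n_subgraphs)))
    n_rows = int(np.ceil(n_subgraphs / n_cols))
    return n_rows, n_cols  # e.g. 5 subgraphs -> (2, 3)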
def nx_agraph_layout(
orig_graph, inplace=False, verbose=None, return_agraph=False, groupby=None, **layoutkw
):
r"""
Uses graphviz and custom code to determine position attributes of nodes and
edges.
Args:
groupby (str): if not None then nodes will be grouped by this
attributes and groups will be layed out separately and then stacked
together in a grid
Ignore:
orig_graph = graph
graph = layout_graph
References:
http://www.graphviz.org/content/attrs
http://www.graphviz.org/doc/info/attrs.html
CommandLine:
python -m wbia.plottool.nx_helpers nx_agraph_layout --show
Doctest:
>>> # FIXME failing-test (22-Jul-2020) This test is failing and it's not clear how to fix it
>>> # xdoctest: +SKIP
>>> # xdoctest: +REQUIRES(module:pygraphviz)
>>> from wbia.plottool.nx_helpers import * # NOQA
>>> import wbia.plottool as pt
>>> import networkx as nx
>>> import utool as ut
>>> n, s = 9, 4
>>> offsets = list(range(0, (1 +
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import journal
class Mechanism(object):
from MechanismExceptions import DuplicateElement, DuplicateSpecies, DuplicateThermalProperties
# housekeeping
def name(self):
return self._name
def externalThermoDatabase(self, filename):
self._thermdb = filename
return
def externalTransDatabase(self, filename):
self._transdb = filename
return
def printStatistics(self):
print "Mechanism '%s'" % self._name
print " elements:", self._elements.size()
print " species:", self._species.size()
print " reactions:", self._reactions.size()
# elements
def newElement(self, symbol, weight=None, locator=None):
duplicate = self._elements.find(symbol)
element = self._elements.element(symbol, weight, locator)
if duplicate and element.locator:
raise self.DuplicateElement(symbol)
return element
def element(self, symbol=None):
return self._elements.find(symbol)
# species
def newSpecies(self, symbol, locator=None):
duplicate = self._species.find(symbol)
species = self._species.species(symbol, locator)
if duplicate:
raise self.DuplicateSpecies(symbol)
return species
def species(self, symbol=None):
return self._species.find(symbol)
# thermal properties are recorded directly in the species
def thermoAll(self, flag=None):
if not flag:
self._externalDb = self._readExternalThermoDb()
self._thermoRange = self._externalDb.thermoRange()
return self._externalDb
# trigger the ingestion of therm.dat
def thermoDone(self):
unresolvedSpecies = []
for species in self._species.find():
if not species.thermo:
if not self._externalDb:
self._externalDb = self._readExternalThermoDb()
resolution = self._externalDb.species(species.symbol)
if not resolution:
unresolvedSpecies.append(species)
else:
# copy the transport data onto the resolved species only after the lookup succeeds
resolution.trans = species.trans
self._info.log(
"resolving species '%s' against '%s'" % (species.symbol, self._thermdb))
self._species.replace(species.symbol, species, resolution)
if unresolvedSpecies:
warning = journal.warning("fuego")
warning.line("unresolved species in mechanism")
warning.line("species: %s" % [ x.symbol for x in unresolvedSpecies])
return 0
def thermoRange(self, range=None):
if range:
self._thermoRange = range
return self._thermoRange
# reactions
def newReaction(self, id, locator=None):
return self._reactions.reaction(id, locator)
def reaction(self, species=None, id=None):
if not self._sorted:
print '*** WARNING: reactions have not been sorted'
return self._reactions.find(species, id)
def _sort_reactions(self):
n = [0]
rs = []
rs_unsorted = self._reactions.find()
i = 0
# troe
for r in rs_unsorted:
if r not in rs:
if r.low and r.troe and not r.rev:
i+=1
r.orig_id = r.id
r.id = i
rs.append(r)
n.append(i)
# sri
for r in rs_unsorted:
if r not in rs:
if r.low and r.sri and not r.rev:
i+=1
r.orig_id = r.id
r.id = i
rs.append(r)
n.append(i)
# lindemann
for r in rs_unsorted:
if r not in rs:
if r.low and not r.rev:
i+=1
r.orig_id = r.id
r.id = i
rs.append(r)
n.append(i)
# three-body:
for r in rs_unsorted:
if r not in rs:
if r.thirdBody and not r.low and not r.rev:
i+=1
r.orig_id = r.id
r.id = i
rs.append(r)
n.append(i)
# simplest case
for r in rs_unsorted:
if r not in rs:
if not r.rev and not r.low and not r.thirdBody:
i+=1
r.orig_id = r.id
r.id = i
rs.append(r)
n.append(i)
# everything else
for r in rs_unsorted:
if r not in rs:
i+=1
r.orig_id = r.id
r.id = i
rs.append(r)
n.append(i)
for r in rs:
self._reactions.replace2(r,r.id-1,r)
self._sorted = True
return n
#HARI======================================
# These are routines to sort reactions
# and improve memory locality
#==========================================
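# Editorial sketch (hedged): the reordering helpers below are typically used
# after the reactions have been sorted by type, for example:
#
#   # n = mechanism._sort_reactions()               # returns the type boundaries
#   # mechanism._sort_reactions_within_type_tsp(n)  # reorder within each type
#
# The method names mirror those defined in this class; the driver code that
# invokes them is an assumption for illustration only.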
def _reorder_reaction_set_tsp(self,rset):
import numpy as npy
nReactions = len(rset)
nSpecies = len(self.species())
reactionmat=npy.zeros((nReactions,nSpecies))
for i,reaction in zip(range(nReactions),rset):
agents = list(set(reaction.reactants+reaction.products))
for a in agents:
symbol, coefficient = a
reactionmat[i][self.species(symbol).id]=coefficient
new_to_old_map=self._tsp_solve(reactionmat,0.001)
#new_to_old_map=self._cluster_solve(reactionmat)
print(new_to_old_map)
return(new_to_old_map)
def _sort_reactions_within_type_tsp(self,n):
#note--------------------------------------------------------
#Species ids, ie sp.id starts with 0
#while reaction ids, ie reaction.id starts with 1
#although when doing mechanism.reaction(id=i), i should be
#index starting with 0. this is because the "find()" function
#in reactionSet class queries within the entity array
#------------------------------------------------------------
#sort within each type
#=====================
rs = self._reactions.find()
itroe = n[0:2]
isri = n[1:3]
ilindemann = n[2:4]
i3body = n[3:5]
isimple = n[4:6]
ispecial = n[5:7]
ntroe = itroe[1] - itroe[0]
nsri = isri[1] - isri[0]
nlindemann = ilindemann[1] - ilindemann[0]
n3body = i3body[1] - i3body[0]
nsimple = isimple[1] - isimple[0]
nspecial = ispecial[1] - ispecial[0]
troe_order = self._reorder_reaction_set_tsp(rs[itroe[0]:itroe[1]])+itroe[0]
sri_order = self._reorder_reaction_set_tsp(rs[isri[0]:isri[1]])+isri[0]
lind_order = self._reorder_reaction_set_tsp(rs[ilindemann[0]:ilindemann[1]])+ilindemann[0]
thbody_order = self._reorder_reaction_set_tsp(rs[i3body[0]:i3body[1]])+i3body[0]
simple_order = self._reorder_reaction_set_tsp(rs[isimple[0]:isimple[1]])+isimple[0]
special_order = self._reorder_reaction_set_tsp(rs[ispecial[0]:ispecial[1]])+ispecial[0]
new_to_old_map = troe_order.tolist()+sri_order.tolist()+\
lind_order.tolist()+thbody_order.tolist()+simple_order.tolist()+special_order.tolist()
self._reorder_reactions_from_map(new_to_old_map)
def _sort_reactions_within_type_random(self,n):
import numpy as npy
#note--------------------------------------------------------
#Species ids, ie sp.id starts with 0
#while reaction ids, ie reaction.id starts with 1
#although when doing mechanism.reaction(id=i), i should be
#index starting with 0. this is because the "find()" function
#in reactionSet class queries within the entity array
#------------------------------------------------------------
#sort within each type
#=====================
rs = self._reactions.find()
itroe = n[0:2]
isri = n[1:3]
ilindemann = n[2:4]
i3body = n[3:5]
isimple = n[4:6]
ispecial = n[5:7]
ntroe = itroe[1] - itroe[0]
nsri = isri[1] - isri[0]
nlindemann = ilindemann[1] - ilindemann[0]
n3body = i3body[1] - i3body[0]
nsimple = isimple[1] - isimple[0]
nspecial = ispecial[1] - ispecial[0]
troe_order = npy.random.permutation(ntroe) + itroe[0]
sri_order = npy.random.permutation(nsri) + isri[0]
lind_order = npy.random.permutation(nlindemann) + ilindemann[0]
thbody_order = npy.random.permutation(n3body) + i3body[0]
simple_order = npy.random.permutation(nsimple) + isimple[0]
special_order = npy.random.permutation(nspecial) + ispecial[0]
new_to_old_map = troe_order.tolist()+sri_order.tolist()+\
lind_order.tolist()+thbody_order.tolist()+simple_order.tolist()+special_order.tolist()
self._reorder_reactions_from_map(new_to_old_map)
def _reorder_reactions_from_map(self,new_to_old_map):
rs = self._reactions.find()
rsnew = []
for i in range(len(new_to_old_map)):
r = rs[new_to_old_map[i]]
r.id = i+1 #id should start with 1
rsnew.append(r)
for r in rsnew:
self._reactions.replace2(r,r.id-1,r)
def _reorder_species_from_map(self,new_to_old_map):
from SpeciesSet import SpeciesSet
import copy
nSpecies = len(self.species())
spnew=SpeciesSet()
#reorder species
for i in range(nSpecies):
for sp in self.species():
if(sp.id == new_to_old_map[i]):
break
sp_temp=copy.deepcopy(sp)
sp_temp.id=i
spnew.insert(sp_temp.symbol, sp_temp)
self._species=spnew
def _get_reaction_matrix(self):
import numpy as npy
nSpecies = len(self.species())
nReactions = len(self.reaction())
reactionmat=npy.zeros((nReactions,nSpecies))
for i in range(nReactions):
reaction = self.reaction(id=i) #here id has to start from 0
agents = list(set(reaction.reactants+reaction.products))
efficiencies = reaction.efficiencies
for a in agents:
symbol, coefficient = a
reactionmat[i][self.species(symbol).id]=coefficient
for ii, eff in enumerate(efficiencies):
symbol, efficiency = eff
reactionmat[i][self.species(symbol).id]=1.0
return(reactionmat)
def _cluster_solve(self,mat):
from sklearn.cluster import AgglomerativeClustering
import numpy as npy
new_to_old_map=npy.array([])
if(mat.shape[0] > 1):
nclus=mat.shape[0]/4
#nclus=2
clustering = AgglomerativeClustering(n_clusters=nclus, compute_full_tree=True, affinity='l1', linkage='average')
y=clustering.fit_predict(mat)
for i in range(nclus):
for j in range(len(y)):
if(y[j]==i):
new_to_old_map = npy.append(new_to_old_map,j)
new_to_old_map=new_to_old_map.astype(int)
else:
new_to_old_map=npy.arange(mat.shape[0])
return(new_to_old_map)
def _tsp_solve(self,mat,improvement_threshold):
import numpy as npy
#===============================================================
# Calculate the total L1 (taxicab) distance of the route r traversing cities c, ending at the path start.
path_distance = lambda r,c: npy.sum([npy.linalg.norm(c[r[p]]-c[r[p-1]],1) for p in range(len(r))])
# Reverse the order of all elements from element i to element k in array r.
two_opt_swap = lambda r,i,k: npy.concatenate((r[0:i],r[k:-len(r)+i-1:-1],r[k+1:len(r)]))
def two_opt(cities,improvement_threshold): # 2-opt Algorithm adapted from https://en.wikipedia.org/wiki/2-opt
route = npy.arange(cities.shape[0]) # Make an array of row numbers corresponding to cities.
improvement_factor = 1 # Initialize the improvement factor.
best_distance = path_distance(route,cities) # Calculate the distance of the initial path.
while improvement_factor > improvement_threshold: # If the route is still improving, keep going!
distance_to_beat = best_distance # Record the distance at the beginning of the loop.
for swap_first in range(1,len(route)-2): # From each city except the first and last,
for swap_last in range(swap_first+1,len(route)): # to each of the cities following,
new_route = two_opt_swap(route,swap_first,swap_last) # try reversing the order of these cities
new_distance = path_distance(new_route,cities) # and check the total distance with this modification.
if new_distance < best_distance: # If the path distance is an improvement,
route = new_route # make this the accepted best route
best_distance = new_distance # and update the distance corresponding to this route.
improvement_factor = 1 - best_distance/distance_to_beat # Calculate how much the route has improved.
return route # When the route is no longer improving substantially, stop searching and return the route.
#===============================================================
if(len(mat) > 0):
nrows=mat.shape[0]
ncols=mat.shape[1]
newmat=npy.zeros((nrows+1,ncols))
newmat[1:(nrows+1),:]=mat
order=two_opt(newmat,improvement_threshold)
return(order[1:(nrows+1)]-1)
else:
return(npy.array([]))
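# Editorial sketch (hedged): _tsp_solve treats each row of `mat` as a "city" and
# uses 2-opt to find a visiting order that keeps similar rows adjacent. A tiny
# illustrative call (the values are made up):
#
#   # import numpy as npy
#   # mat = npy.array([[1., 0.], [0., 1.], [1., 0.1]])
#   # order = mechanism._tsp_solve(mat, 0.001)   # e.g. array([0, 2, 1]); similar rows end up neighbours
#
# The all-zero row prepended inside the routine acts as a fixed start city, so
# the returned order is a permutation of the original row indices.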
def _sort_species_ids_tsp(self):
import numpy as npy
rmat=self._get_reaction_matrix()
new_to_old_map=self._tsp_solve(npy.transpose(rmat),0.001)
#new_to_old_map=self._cluster_solve(npy.transpose(rmat))
self._reorder_species_from_map(new_to_old_map)
def _sort_species_ids_random(self):
import numpy as npy
nSpecies = len(self.species())
rmat=self._get_reaction_matrix()
new_to_old_map = npy.random.permutation(nSpecies)
self._reorder_species_from_map(new_to_old_map)
#===================================================================
# other methods
def __init__(self, name=""):
from ElementSet import ElementSet
from SpeciesSet import SpeciesSet
from ReactionSet import ReactionSet
self._name = name
self._thermdb = "therm.dat"
self._transdb = "tran.dat"
self._externalDb = None
self._elements = ElementSet()
self._species = SpeciesSet()
self._reactions = ReactionSet()
self._thermoRange = ()
self._info = journal.debug("fuego.serialization")
self._sorted = False
return
# swallow an external thermo database
def _readExternalThermoDb(self):
import fuego
filename = self._thermdb
db = fuego.serialization.loadThermoDatabase(filename, format="chemkin")
return db
def dump(self):
print
print "Statistics:"
print "-----------"
self.printStatistics()
print
print "Elements:"
print "---------"
for element in self.element():
print "%6s: %s" % (element.symbol, element)
print
print "Species:"
print "---------"
for species in self.species():
print "%10s: %s" % (species.symbol, species)
print
# tefla/core/special_layers.py
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
import pydensecrf.densecrf as dcrf
from .layers import conv2d, depthwise_conv2d, avg_pool_2d, \
max_pool, relu, crelu, batch_norm_tf as batch_norm
from ..utils import util
from . import initializers as initz
def spatialtransformer(U,
theta,
batch_size=64,
downsample_factor=1.0,
num_transform=1,
name='SpatialTransformer',
**kwargs):
"""Spatial Transformer Layer.
Implements a spatial transformer layer as described in [1]_.
It's based on lasagne implementation in [2]_, modified by <NAME>
Args:
U: float
The output of a convolutional net should have the
shape [batch_size, height, width, num_channels].
theta: float
The output of the localisation network should be [batch_size,
num_transform, 6] or [batch_size, 6] if num_transform=1
```python
`theta`` to :
identity = np.array([[1., 0., 0.],
[0., 1., 0.]])
identity = identity.flatten()
theta = tf.Variable(initial_value=identity)
```
downsample_factor: a float, determines output shape, downsample input
shape by downsample_factor
Returns:
spatial transformed output of the network
References
.. [1] "Spatial Transformer Networks", <NAME>, <NAME>,
<NAME>, <NAME>
.. [2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py
"""
with tf.variable_scope(name):
if num_transform > 1 and len(theta.get_shape().as_list()) == 3:
_, num_transforms = map(int, theta.get_shape().as_list()[:2])
indices = [[i] * num_transforms for i in range(batch_size)]
U = tf.gather(U, tf.reshape(indices, [-1]))
input_shape = U.get_shape().as_list()
num_channels = input_shape[3]
theta = tf.reshape(theta, (-1, 2, 3))
theta = tf.cast(theta, tf.float32)
if not isinstance(downsample_factor, float):
downsample_factor = tf.cast(downsample_factor, tf.float32)
# grid of (x_t, y_t, 1), eq (1) in ref [1]
out_height = tf.cast(input_shape[1] / downsample_factor, tf.int32)
out_width = tf.cast(input_shape[2] / downsample_factor, tf.int32)
grid = _meshgrid(out_height, out_width)
grid = tf.expand_dims(grid, 0)
grid = tf.reshape(grid, [-1])
grid = tf.tile(grid, tf.stack([batch_size]))
grid = tf.reshape(grid, tf.stack([batch_size, 3, -1]))
# Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
T_g = tf.matmul(theta, grid)
x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])
y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])
x_s_flat = tf.reshape(x_s, [-1])
y_s_flat = tf.reshape(y_s, [-1])
input_transformed = _interpolate(U, x_s_flat, y_s_flat, batch_size, downsample_factor)
output = tf.reshape(input_transformed,
tf.stack([batch_size, out_height, out_width, num_channels]))
return output
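# Editorial sketch (hedged): a minimal identity-transform call, assuming a batch
# of 64 RGB images of size 40x40. The tensor names below are illustrative only.
#
#   # identity = np.array([[1., 0., 0.], [0., 1., 0.]], dtype=np.float32).flatten()
#   # theta = tf.tile(tf.expand_dims(tf.constant(identity), 0), [64, 1])   # shape [64, 6]
#   # warped = spatialtransformer(U, theta, batch_size=64, downsample_factor=1.0)
#   # warped has shape [64, 40, 40, 3] and equals U up to bilinear resampling error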
def _repeat(x, n_repeats):
with tf.variable_scope('_repeat'):
rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([
n_repeats,
])), 1), [1, 0])
rep = tf.cast(rep, tf.int32)
x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
return tf.reshape(x, [-1])
def _interpolate(im, x, y, batch_size, downsample_factor):
with tf.variable_scope('_interpolate'):
input_shape = im.get_shape().as_list()
height = input_shape[1]
width = input_shape[2]
channels = input_shape[3]
x = tf.cast(x, tf.float32)
y = tf.cast(y, tf.float32)
height_f = tf.cast(height, tf.float32)
width_f = tf.cast(width, tf.float32)
out_height = tf.cast(height / downsample_factor, tf.int32)
out_width = tf.cast(width / downsample_factor, tf.int32)
zero = tf.zeros([], dtype=tf.int32)
max_y = tf.cast(height - 1, tf.int32)
max_x = tf.cast(width - 1, tf.int32)
# scale indices from [-1, 1] to [0, width/height]
x = (x + 1.0) * (width_f) / 2.0
y = (y + 1.0) * (height_f) / 2.0
# do sampling
x0 = tf.cast(tf.floor(x), tf.int32)
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), tf.int32)
y1 = y0 + 1
x0 = tf.clip_by_value(x0, zero, max_x)
x1 = tf.clip_by_value(x1, zero, max_x)
y0 = tf.clip_by_value(y0, zero, max_y)
y1 = tf.clip_by_value(y1, zero, max_y)
dim2 = width
dim1 = width * height
base = _repeat(tf.range(batch_size) * dim1, out_height * out_width)
base_y0 = base + y0 * dim2
base_y1 = base + y1 * dim2
idx_a = base_y0 + x0
idx_b = base_y1 + x0
idx_c = base_y0 + x1
idx_d = base_y1 + x1
# use indices to lookup pixels in the flat image and restore
# channels dim
im_flat = tf.reshape(im, tf.stack([-1, channels]))
im_flat = tf.cast(im_flat, tf.float32)
Ia = tf.gather(im_flat, idx_a)
Ib = tf.gather(im_flat, idx_b)
Ic = tf.gather(im_flat, idx_c)
Id = tf.gather(im_flat, idx_d)
# and finally calculate interpolated values
x0_f = tf.cast(x0, tf.float32)
x1_f = tf.cast(x1, tf.float32)
y0_f = tf.cast(y0, tf.float32)
y1_f = tf.cast(y1, tf.float32)
wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1)
wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1)
wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1)
output = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])
return output
def _meshgrid(height, width):
with tf.variable_scope('_meshgrid'):
x_t = tf.matmul(
tf.ones(shape=tf.stack([height, 1])),
tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
y_t = tf.matmul(
tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1), tf.ones(shape=tf.stack([1, width])))
x_t_flat = tf.reshape(x_t, (1, -1))
y_t_flat = tf.reshape(y_t, (1, -1))
ones = tf.ones_like(x_t_flat)
grid = tf.concat([x_t_flat, y_t_flat, ones], 0)
return grid
def subsample(inputs, factor, name=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
name: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return max_pool(inputs, filter_size=(1, 1), stride=(factor, factor), name=name)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, name=None, **kwargs):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')
net = subsample(net, factor=stride)
whereas
net = conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, the rate for atrous convolution (assumed default of 1).
name: name.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return conv2d(
inputs,
num_outputs,
filter_size=(kernel_size, kernel_size),
stride=(1, 1),
padding='SAME',
name=name,
**kwargs)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return conv2d(
inputs, num_outputs, kernel_size, stride=stride, padding='VALID', name=name, **kwargs)
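# Editorial sketch (hedged): the explicit-padding branch above reproduces 'SAME'
# semantics for strided convolutions. For example, with kernel_size=3, rate=1 and
# stride=2:
#   kernel_size_effective = 3 + (3 - 1) * (1 - 1) = 3
#   pad_total = 2, pad_beg = 1, pad_end = 1
# so one row/column of zeros is added on each side before the 'VALID' convolution,
# keeping the output aligned with subsample(conv2d(..., padding='SAME'), 2).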
def bottleneck_v1(inputs, depth, depth_bottleneck, stride, rate=1, name=None, **kwargs):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the unit's output compared to its input.
rate: An integer, rate for atrous convolution.
name: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
is_training = kwargs.get('is_training')
reuse = kwargs.get('reuse')
with tf.variable_scope(name, 'bottleneck_v1', [inputs]):
depth_in = util.last_dimension(inputs.get_shape(), min_rank=4)
preact = batch_norm(
inputs, activation_fn=tf.nn.relu, name='preact', is_training=is_training, reuse=reuse)
if depth == depth_in:
shortcut = subsample(inputs, stride, 'shortcut')
else:
shortcut = conv2d(
preact,
depth,
is_training,
reuse,
filter_size=(1, 1),
stride=(stride, stride),
batch_norm=None,
activation=None,
name='shortcut')
residual = conv2d(
preact, depth_bottleneck, filter_size=(1, 1), stride=(1, 1), name='conv1', **kwargs)
residual = conv2d_same(residual, depth_bottleneck, 3, stride, rate=rate, name='conv2', **kwargs)
residual = conv2d(
residual,
depth,
is_training,
reuse,
filter_size=(1, 1),
stride=(1, 1),
batch_norm=None,
activation=None,
name='conv3')
output = shortcut + residual
return output
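# Editorial sketch (hedged): the shortcut branch above depends on the channel
# counts. For instance, with depth_in == depth == 256 and stride == 2 the
# shortcut is a plain subsample of the input, while with depth_in == 64 and
# depth == 256 a 1x1 convolution with stride 2 projects the input to the
# residual's depth before the addition. The numbers are illustrative only.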
def bottleneck_v2(inputs, depth, depth_bottleneck, stride, rate=1, name=None, **kwargs):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the unit's output compared to its input.
rate: An integer, rate for
input_file_name = string_input_file.get()
string_cam_file.set(input_file_name[0:-4]+'.sbp')
cad.cam = 'sbp'
cam_pack_forget()
cam_file_frame.pack()
cam_vector_frame.pack()
cam_dia_frame.pack()
cam_contour_frame.pack()
jog_frame.pack()
speed_frame.pack()
string_tool_dia.set("0.125")
string_xy_speed.set("1.1")
string_z_speed.set("1.1")
string_jog_xy_speed.set("7")
string_jog_z_speed.set("7")
string_jog_z.set(".25")
root.update()
def select_oms():
input_file_name = string_input_file.get()
string_cam_file.set(input_file_name[0:-4]+'.oms')
cad.cam = 'oms'
cam_pack_forget()
cam_file_frame.pack()
cam_vector_frame.pack()
cam_dia_frame.pack()
cam_contour_frame.pack()
excimer_frame.pack()
string_pulse_period.set("10000")
string_tool_dia.set("0.001")
string_cut_vel.set("0.1")
string_cut_accel.set("5.0")
root.update()
def select_dxf():
input_file_name = string_input_file.get()
string_cam_file.set(input_file_name[0:-4]+'.dxf')
cad.cam = 'dxf'
cam_pack_forget()
cam_file_frame.pack()
cam_vector_frame.pack()
cam_dia_frame.pack()
cam_contour_frame.pack()
string_tool_dia.set("0.0")
root.update()
def select_uni():
input_file_name = string_input_file.get()
string_cam_file.set(input_file_name[0:-4]+'.uni')
cad.cam = 'uni'
cam_pack_forget()
cam_file_frame.pack()
cam_vector_frame.pack()
cam_dia_frame.pack()
cam_contour_frame.pack()
laser_frame1.pack()
if ((cad.nz > 1) | (cad.image_r.size > 1)):
laser_frame2.pack()
string_laser_rate.set("500")
string_laser_power.set("60")
string_laser_speed.set("15")
string_tool_dia.set("0.01")
string_laser_min_power.set("10")
string_laser_max_power.set("100")
string_vector_error.set('1.1')
root.update()
def select_jpg():
input_file_name = string_input_file.get()
string_cam_file.set(input_file_name[0:-4]+'.jpg')
cad.cam = 'jpg'
cam_pack_forget()
cam_file_frame.pack()
root.update()
def select_png():
input_file_name = string_input_file.get()
string_cam_file.set(input_file_name[0:-4]+'.png')
cad.cam = 'png'
cam_pack_forget()
cam_file_frame.pack()
root.update()
def select_stl():
input_file_name = string_input_file.get()
string_cam_file.set(input_file_name[0:-4]+'.stl')
cad.cam = 'stl'
cam_pack_forget()
cam_file_frame.pack()
STL_frame.pack()
root.update()
def select_gerber():
input_file_name = string_input_file.get()
string_cam_file.set(input_file_name[0:-4]+'.grb')
cad.cam = 'grb'
cam_pack_forget()
cam_file_frame.pack()
Gerber_frame.pack()
root.update()
def select_excellon():
input_file_name = string_input_file.get()
string_cam_file.set(input_file_name[0:-4]+'.drl')
cad.cam = 'drl'
cam_pack_forget()
cam_file_frame.pack()
Excellon_frame.pack()
root.update()
def select_ca():
input_file_name = string_input_file.get()
string_cam_file.set(input_file_name[0:-4]+'.ca')
cad.cam = 'ca'
cam_pack_forget()
cam_file_frame.pack()
root.update()
def cam_pack_forget():
cam_file_frame.pack_forget()
cam_vector_frame.pack_forget()
cam_dia_frame.pack_forget()
cam_contour_frame.pack_forget()
laser_frame1.pack_forget()
laser_frame2.pack_forget()
laser_frame3.pack_forget()
cut_frame.pack_forget()
speed_frame.pack_forget()
jog_frame.pack_forget()
rml_move_frame.pack_forget()
waterjet_frame.pack_forget()
excimer_frame.pack_forget()
STL_frame.pack_forget()
Gerber_frame.pack_forget()
Excellon_frame.pack_forget()
fill_frame.pack_forget()
g_frame.pack_forget()
send_to_frame.pack_forget()
def save_cam(event):
#
# write toolpath
#
if (cad.cam == "epi"):
write_epi()
elif (cad.cam == "camm"):
write_camm()
elif (cad.cam == "ps"):
write_ps()
elif (cad.cam == "ord"):
write_ord()
elif (cad.cam == "g"):
write_G()
elif (cad.cam == "rml"):
write_rml()
elif (cad.cam == "sbp"):
write_sbp()
elif (cad.cam == "oms"):
write_oms()
elif (cad.cam == "dxf"):
write_dxf()
elif (cad.cam == "uni"):
write_uni()
elif (cad.cam == "jpg"):
write_jpg()
elif (cad.cam == "png"):
write_png()
elif (cad.cam == "stl"):
write_stl()
elif (cad.cam == "grb"):
write_gerber()
elif (cad.cam == "drl"):
write_excellon()
elif (cad.cam == "ca"):
write_ca()
else:
string_msg.set("unsupported output file format")
root.update()
def write_epi():
#
# Epilog lasercutter output
# todo: try 1200 DPI
#
units = 600*cad.inches_per_unit
filename = string_cam_file.get()
file = open(filename, 'wb')
if (integer_laser_autofocus.get() == 0):
#
# init with autofocus off
#
file.write("%-12345X@PJL JOB NAME="+string_cam_file.get()+"\r\nE@PJL ENTER LANGUAGE=PCL\r\n&y0A&l0U&l0Z&u600D*p0X*p0Y*t600R*r0F&y50P&z50S*r6600T*r5100S*r1A*rC%1BIN;XR"+string_laser_rate.get()+";YP"+string_laser_power.get()+";ZS"+string_laser_speed.get()+";")
else:
#
# init with autofocus on
#
file.write("%-12345X@PJL JOB NAME="+string_cam_file.get()+"\r\nE@PJL ENTER LANGUAGE=PCL\r\n&y1A&l0U&l0Z&u600D*p0X*p0Y*t600R*r0F&y50P&z50S*r6600T*r5100S*r1A*rC%1BIN;XR"+string_laser_rate.get()+";YP"+string_laser_power.get()+";ZS"+string_laser_speed.get()+";")
power = float(string_laser_power.get())
min_power = float(string_laser_min_power.get())
max_power = float(string_laser_max_power.get())
for layer in range(len(cad.toolpaths)):
if ((len(cad.zwrite) > 1) & (len(cad.toolpaths[layer]) > 0)):
fraction = (cad.zwrite[layer]-cad.zwrite[0])/(cad.zwrite[-1]-cad.zwrite[0])
layer_power = min_power + fraction*(max_power-min_power)
file.write("YP%f;"%layer_power)
for segment in range(len(cad.toolpaths[layer])):
x = int(units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][0].x+0.5)/float(cad.nx)))
y = int(units*(-cad.ymin - ((cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][0].y)+0.5)/float(cad.ny))))
file.write("PU"+str(x)+","+str(y)+";")
for vertex in range(1,len(cad.toolpaths[layer][segment])):
x = int(units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][vertex].x+0.5)/float(cad.nx)))
y = int(units*(-cad.ymin - ((cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][vertex].y)+0.5)/float(cad.ny))))
file.write("PD"+str(x)+","+str(y)+";")
file.write("%0B%1BPUE%-12345X@PJL EOJ \r\n")
file.close()
draw_toolpath()
string_msg.set("wrote %s"%filename)
root.update()
def write_camm():
filename = string_cam_file.get()
file = open(filename, 'wb')
units = 1016*cad.inches_per_unit
file.write("PA;PA;!ST1;!FS"+string_cut_force.get()+";VS"+string_cut_velocity.get()+";")
for layer in range(len(cad.toolpaths)):
for segment in range(len(cad.toolpaths[layer])):
x = int(units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][0].x+0.5)/float(cad.nx)))
y = int(units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][0].y)+0.5)/float(cad.ny)))
file.write("PU"+str(x)+","+str(y)+";")
for vertex in range(1,len(cad.toolpaths[layer][segment])):
x = int(units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][vertex].x+0.5)/float(cad.nx)))
y = int(units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][vertex].y)+0.5)/float(cad.ny)))
file.write("PD"+str(x)+","+str(y)+";")
file.write("PU0,0;")
file.close()
draw_toolpath()
string_msg.set("wrote %s"%filename)
root.update()
def write_ps():
#
# Postscript output
#
units = cad.inches_per_unit
filename = string_cam_file.get()
file = open(filename, 'wb')
file.write("%! cad.py output\n")
file.write("%%%%BoundingBox: 0 0 %.3f %.3f\n"%
(72.0*(cad.xmax-cad.xmin),72.0*(cad.ymax-cad.ymin)))
file.write("/m {moveto} def\n")
file.write("/l {lineto} def\n")
file.write("72 72 scale\n")
file.write(".005 setlinewidth\n")
file.write("%f %f translate\n"%(0.5,0.5))
for layer in range(len(cad.toolpaths)):
for segment in range(len(cad.toolpaths[layer])):
x = units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][0].x+0.5)/float(cad.nx))
y = units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][0].y)+0.5)/float(cad.ny))
file.write("%f %f m\n"%(x,y))
for vertex in range(1,len(cad.toolpaths[layer][segment])):
x = units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][vertex].x+0.5)/float(cad.nx))
y = units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][vertex].y)+0.5)/float(cad.ny))
file.write("%f %f l\n"%(x,y))
if (integer_fill.get() == 0):
file.write("stroke\n")
else:
file.write("fill\n")
file.write("showpage\n")
file.close()
draw_toolpath()
string_msg.set("wrote %s"%filename)
root.update()
def write_ord():
#
# OMAX waterjet output
#
units = cad.inches_per_unit
lead_in = float(string_lead_in.get())
quality = int(string_quality.get())
filename = string_cam_file.get()
file = open(filename, 'wb')
xlead = []
ylead = []
for layer in range(len(cad.toolpaths)):
for segment in range(len(cad.toolpaths[layer])):
#
# calculate and write lead-in
#
x0 = units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][0].x+0.5)/float(cad.nx))
y0 = units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][0].y)+0.5)/float(cad.ny))
x1 = units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][1].x+0.5)/float(cad.nx))
y1 = units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][1].y)+0.5)/float(cad.ny))
dx = x1 - x0
dy = y1 - y0
norm_x = -dy
norm_y = dx
norm = sqrt(norm_x**2 + norm_y**2)
norm_x = norm_x/norm
norm_y = norm_y/norm
xlead.append(x0 + norm_x*lead_in)
ylead.append(y0 + norm_y*lead_in)
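#
# editorial sketch (hedged): for a first segment running in +x (dx=1, dy=0)
# the unit normal is (norm_x, norm_y) = (0, 1), so with lead_in = 0.1 the
# lead-in point sits 0.1 units above the segment start; the jet pierces
# there and then cuts onto the contour. The numbers are illustrative only.
#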
file.write("%f, %f, 0, %d\n"%(xlead[segment],ylead[segment],quality))
#
# loop over segment
#
for vertex in range(len(cad.toolpaths[layer][segment])):
x = units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][vertex].x+0.5)/float(cad.nx))
y = units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][vertex].y)+0.5)/float(cad.ny))
file.write("%f, %f, 0, %d\n"%(x,y,quality))
#
# write lead-out
#
file.write("%f, %f, 0, 0\n"%(x0,y0))
file.write("%f, %f, 0, 0\n"%(xlead[segment],ylead[segment]))
file.close()
#
# draw toolpath with lead-in/out
#
im.xy = Image.new("RGBX",(cad.nxplot(),cad.nyplot()),'white')
im.xy_draw = ImageDraw.Draw(im.xy)
for layer in range(len(cad.toolpaths)):
for segment in range(len(cad.toolpaths[layer])):
x = cad.nxplot()*(cad.toolpaths[layer][segment][0].x+0.5)/float(cad.nx)
y = cad.nyplot()*(cad.toolpaths[layer][segment][0].y+0.5)/float(cad.ny)
xl = cad.nxplot()*(xlead[segment]-cad.xmin)/(cad.xmax-cad.xmin)
yl = cad.nyplot()-cad.nyplot()*(ylead[segment]-cad.ymin)/(cad.ymax-cad.ymin)
im.xy_draw.line([xl,yl,x,y],fill="black")
for vertex in range(1,len(cad.toolpaths[layer][segment])):
xnew = cad.nxplot()*(cad.toolpaths[layer][segment][vertex].x+0.5)/float(cad.nx)
ynew = cad.nyplot()*(cad.toolpaths[layer][segment][vertex].y+0.5)/float(cad.ny)
im.xy_draw.line([x,y,xnew,ynew],fill="black")
x = xnew
y = ynew
images.xy = ImageTk.PhotoImage(im.xy)
canvas_xy.create_image(cad.nplot/2,cad.nplot/2,image=images.xy)
string_msg.set("wrote %s"%filename)
root.update()
def distance(x1, y1, x2, y2):
return sqrt((x1-x2)**2+(y1-y2)**2)
def write_G():
#
# G code output
#
units = cad.inches_per_unit
zup = units*cad.zmax
feed_rate = float(string_g_feed_rate.get())
spindle_speed = float(string_g_spindle_speed.get())
coolant = integer_g_cool.get()
tool = int(string_g_tool.get())
if (cad.nz == 1):
cad.zwrite = [cad.zmin]
filename = string_cam_file.get()
file = open(filename, 'wb')
file.write("""(---------------------------------------------------------------)
(---------------------------------------------------------------)
(Start of sheet header)
G21 (metric)
G92 X0 Y0 Z0 (zero all axes)
(End of sheet header)\n""")
dxy = 0
dz = 0
xold = 0
yold = 0
for layer in range(len(cad.zwrite)-1,-1,-1):
zdown = units*cad.zwrite[layer]
#
# follow toolpaths CCW, for CW tool motion
#
unsorted_segments = cad.toolpaths[layer]
sorted_segments = []
if len(unsorted_segments) > 0:
sorted_segments.append(unsorted_segments.pop(0)) #starts with the first path in the list
else:
print "empty path --- strange"
while len(unsorted_segments) > 0:
#find the start closest to the start of the last sorted segment
min_dist = 99999
min_dist_index = None
for i in range(len(unsorted_segments)):
dist = distance(sorted_segments[-1][0].x, sorted_segments[-1][0].y,
unsorted_segments[i][0].x, unsorted_segments[i][0].y)
if dist < min_dist:
min_dist = dist
min_dist_index = i
#print "min_dist: %d index: %d" % (min_dist, min_dist_index)
sorted_segments.append(unsorted_segments.pop(min_dist_index))
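# editorial sketch (hedged): the loop above is a greedy nearest-neighbour sort on
# segment start points. E.g. with starts at (0,0), (10,0) and (1,1), and the first
# segment fixed, the order becomes (0,0) -> (1,1) -> (10,0), which shortens pen-up
# travel compared to the original ordering. The coordinates are illustrative only.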
for segment in range(len(sorted_segments)):
x = units*(cad.xmin + (cad.xmax-cad.xmin)*(sorted_segments[segment][0].x+0.5)/float(cad.nx))
y = units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-sorted_segments[segment][0].y)+0.5)/float(cad.ny))
file.write("M106 S255 (Pen Up)\n")
file.write("G4 P120\n")
file.write("G1 X%0.4f "%x+"Y%0.4f "%y+" F2000.00\n") # rapid motion
file.write("M107 (Pen Down)\n") # linear motion
file.write("G4 P120\n")
dxy += sqrt((xold-x)**2+(yold-y)**2)
xold = x
yold = y
dz += zup-zdown
for vertex in range(1,len(sorted_segments[segment])):
x = units*(cad.xmin + (cad.xmax-cad.xmin)*(sorted_segments[segment][vertex].x+0.5)/float(cad.nx))
y = units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-sorted_segments[segment][vertex].y)+0.5)/float(cad.ny))
file.write("G1 X%0.4f "%x+"Y%0.4f"%y+" F2000.00\n")
dxy += sqrt((xold-x)**2+(yold-y)**2)
xold = x
yold = y
file.write("""(Start of sheet footer.)
M106 (Pen Up)
G4 P120 (wait 120ms)
G0 X0 Y0 Z15 F3500.00 (go to position for retrieving platform -- increase Z to Z25 or similar if you have trouble avoiding tool)
G4 P300 (wait 300ms)
G0 Z0 F3500.00 (return to start position of current sheet)
G4 P300 (wait 300ms)
M18 (disengage drives)
(End of sheet footer)
M01 (Printing on the next sheet?)
(yes, if dropping the default .1 mm to next sheet; no, if you will print again on same sheet)
G0 Z-0.10 F3500.00 (drop 0.1mm to next sheet)
M107 (Pen Down so as not to overheat solenoid)
(Paste in further sheets below)
(---------------------------------------------------------------)
(---------------------------------------------------------------)
""")
file.close()
print "Path length: %f" % dxy
time = (dxy/feed_rate + dz/feed_rate)
string_send_to_time.set(" estimated time: %.1f minutes"%time)
draw_toolpath()
string_msg.set("wrote %s"%filename)
root.update()
def write_rml():
#
# Roland Modela output
#
units = 1016*cad.inches_per_unit # 40/mm
filename = string_cam_file.get()
file = open(filename, 'wb')
file.write("PA;PA;VS"+string_xy_speed.get()+";!VZ"+string_z_speed.get()+";!MC1;")
zup = cad.zmax
izup = int(units*zup)
if (cad.nz == 1):
cad.zwrite = [cad.zmin]
xy_speed = float(string_xy_speed.get()) # mm/s
z_speed = float(string_z_speed.get()) # mm/s
dxy = 0
dz = 0
xold = 0
yold = 0
for layer in range(len(cad.zwrite)-1,-1,-1):
zdown = cad.zwrite[layer]
izdown = int(units*zdown)
file.write("!PZ"+str(izdown)+","+str(izup)+";")
#
# follow toolpaths CCW, for CW tool motion
#
for segment in range(len(cad.toolpaths[layer])):
x = int(units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][0].x+0.5)/float(cad.nx)))
y = int(units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][0].y)+0.5)/float(cad.ny)))
file.write("PU"+str(x)+","+str(y)+";")
dxy += sqrt((xold-x)**2+(yold-y)**2)
xold = x
yold = y
dz += izup-izdown
for vertex in range(1,len(cad.toolpaths[layer][segment])):
x = int(units*(cad.xmin + (cad.xmax-cad.xmin)*(cad.toolpaths[layer][segment][vertex].x+0.5)/float(cad.nx)))
y = int(units*(cad.ymin + (cad.ymax-cad.ymin)*((cad.ny-cad.toolpaths[layer][segment][vertex].y)+0.5)/float(cad.ny)))
file.write("PD"+str(x)+","+str(y)+";")
dxy += sqrt((xold-x)**2+(yold-y)**2)
xold = x
yold = y
file.write("PU"+str(x)+","+str(y)+";!MC0;")
#
# file padding hack for end-of-file buffering problems
#
for i in range(1000):
file.write("!MC0;")
file.close()
time = ((dxy/40.0)/xy_speed + (dz/40.0)/z_speed)/60.0
string_send_to_time.set(" estimated time: %.1f minutes"%time)
draw_toolpath()
string_msg.set("wrote %s"%filename)
root.update()
def rml_move(event):
#
# move Roland Modela
#
units = 1016*cad.inches_per_unit # 40/mm
x = float(string_rml_x_move.get())
y = float(string_rml_y_move.get())
ix = int(units*x)
iy = int(units*y)
filename = "move.rml"
file = open(filename, 'wb')
file.write("PA;PA;!PZ0,400;VS10;!VZ10;!MC0;PU%d,%d;!MC0;"%(ix,iy))
file.close()
send_to_file("move.rml")
os.remove("move.rml")
def write_sbp():
#
# ShopBot output
#
| |
#!/usr/bin/python
row_0 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49]
row_m1 = [24, 26, 31, 32, 38, 42]
row_m2 = [2, 6, 8, 16, 36, 48]
row_m3 = [6, 7, 9, 18, 27, 46]
row_m4 = [7, 9, 11, 18, 29, 46]
w_los4 = [1, 16, 20, 21, 45, 46]
w_los4p = [13, 19, 24, 31, 37, 46]
w_los3 = [9, 15, 26, 31, 47, 49]
w_los3p = [3, 10, 11, 22, 30, 39]
w_los2 = [13, 16, 22, 25, 29, 32]
w_los2p = [2, 8, 18, 20, 25, 37]
w_los1 = [4, 8, 11, 20, 41, 44]
w_los1p = [2, 16, 17, 29, 31, 47]
row_1 = row_m1[:]
row_2 = row_m2[:]
row_3 = row_m3[:]
row_4 = row_m4[:]
for x in [5, 4, 3, 2, 1, 0]:
zx1 = row_m1[x]
zx2 = row_m2[x]
zx3 = row_m3[x]
zx4 = row_m4[x]
if row_1.count(zx1+1) == 0: row_1.insert(x+1, zx1+1) # insertion order matters here
if row_1.count(zx1-1) == 0: row_1.insert(x, zx1-1)
if row_2.count(zx2+1) == 0: row_2.insert(x+1, zx2+1)
if row_2.count(zx2-1) == 0: row_2.insert(x, zx2-1)
if row_3.count(zx3+1) == 0: row_3.insert(x+1, zx3+1)
if row_3.count(zx3-1) == 0: row_3.insert(x, zx3-1)
if row_4.count(zx4+1) == 0: row_4.insert(x+1, zx4+1)
if row_4.count(zx4-1) == 0: row_4.insert(x, zx4-1)
print row_m1, row_1
print row_m2, row_2
print row_m3, row_3
print row_m4, row_4
row_b_s = 0
row_c_s = 0
row_d_s = 0
row_bcd = [0, 0, 0, 0, 0, 0]
row_abcd = [0, 0, 0, 0, 0, 0]
row_mm1 = [row_m1[0]%10, row_m1[1]%10, row_m1[2]%10, row_m1[3]%10, row_m1[4]%10, row_m1[5]%10]
row_mm2 = [row_m2[0]%10, row_m2[1]%10, row_m2[2]%10, row_m2[3]%10, row_m2[4]%10, row_m2[5]%10]
row_mm3 = [row_m3[0]%10, row_m3[1]%10, row_m3[2]%10, row_m3[3]%10, row_m3[4]%10, row_m3[5]%10]
row_mm4 = [row_m4[0]%10, row_m4[1]%10, row_m4[2]%10, row_m4[3]%10, row_m4[4]%10, row_m4[5]%10]
j_mm_pop = [0, 0, 0, 0, 0, 0]
j_m1_roz = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
j_m2_roz = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
j_m3_roz = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
j_m4_roz = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
def dziesiatki(z_row1):
l_dz = [0, 0, 0, 0, 0]
for z_r in z_row1:
if z_r <= 9:
l_dz[0] = l_dz[0] + 1
elif z_r <= 19:
l_dz[1] = l_dz[1] + 1
elif z_r <= 29:
l_dz[2] = l_dz[2] + 1
elif z_r <= 39:
l_dz[3] = l_dz[3] + 1
elif z_r <= 49:
l_dz[4] = l_dz[4] + 1
return l_dz
def jednostki(z_row2):
l_j = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for z_j in z_row2:
if z_j == 0:
l_j[0] = l_j[0] + 1
elif z_j == 1:
l_j[1] = l_j[1] + 1
elif z_j == 2:
l_j[2] = l_j[2] + 1
elif z_j == 3:
l_j[3] = l_j[3] + 1
elif z_j == 4:
l_j[4] = l_j[4] + 1
elif z_j == 5:
l_j[5] = l_j[5] + 1
elif z_j == 6:
l_j[6] = l_j[6] + 1
elif z_j == 7:
l_j[7] = l_j[7] + 1
elif z_j == 8:
l_j[8] = l_j[8] + 1
elif z_j == 9:
l_j[9] = l_j[9] + 1
return l_j
# e.g. dz_m1 = [0, 3, 2, 1, 0]
dz_m1 = dziesiatki(row_m1)
dz_m2 = dziesiatki(row_m2)
dz_m3 = dziesiatki(row_m3)
dz_m4 = dziesiatki(row_m4)
# e.g. j_m1 = [3, 2, 1, 0, 0, 0, 0, 0, 0, 0]
j_m1 = jednostki(row_mm1)
j_m2 = jednostki(row_mm2)
j_m3 = jednostki(row_mm3)
j_m4 = jednostki(row_mm4)
def dziesiatki_suma(lista5, lista6):
lista_56 = [0, 0, 0, 0, 0]
for x56 in [0, 1, 2, 3, 4]:
lista_56[x56] = min(lista5[x56], lista6[x56])
return lista_56
# differences
def jednostki_suma(lista1, lista2):
lista_wyn = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for x1 in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
lista_wyn[x1] = min(lista1[x1], lista2[x1])
return lista_wyn
def sortuj(lista_s):
lista_sort = [0] *len(lista_s)
listast = lista_s[:]
for xs in range(0,len(listast),1):
lista_sort[xs] = min(listast)
listast.remove(min(listast))
return lista_sort
# how many elements of j_lista appear in the list w_lista (j_lista contributes every element and must contain no repeats)
def trafionych(j_lista, w_lista):
wynik_j = []
for xj in range(0, len(j_lista)):
if w_lista.count(j_lista[xj]) > 0:
wynik_j.insert(len(wynik_j)+1, j_lista[xj])
return wynik_j
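# editorial sketch (hedged): trafionych returns the elements of j_lista that also
# occur in w_lista, preserving the order of j_lista, e.g.
#   # trafionych([3, 7, 11, 20], [7, 20, 41]) -> [7, 20]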
#<NAME>
plik = open("ppp-99.txt", "wb")
for par01 in [0]: #[0, 1]:
for par02 in [2]: #[2, 3, 4]:
for par03 in [1]:
for par04 in [6]: #[3, 5]:
for par05 in [3]: #[1, 2, 3]:
for par06 in [6]: #[0, 6]:
for par07 in [8]: #[4, 6, 8]:
for par08 in [2]: #[1, 2]:
for par09 in [0, 1]: #[0, 1]:
# plik = open("ppp-tr" + str(par01) + str(par02) + str(par03) + str(par04) + ".txt", "wb")
print par01, par02, par03, par04, par05, par06, par07, par08, par09
row_all = [row_1, row_2, row_3, row_4]
row_all_plus = [row_1, row_2, row_3, row_4, row_1, row_2, row_3]
all00 = 0
all01 = 0
all02 = 0
all03 = 0
all04 = 0
all05 = 0
all06 = 0
all07 = 0
all08 = 0
all09 = 0
for row_a in row_all:
row_b = row_all_plus[row_all_plus.index(row_a)+1]
row_c = row_all_plus[row_all_plus.index(row_a)+2]
row_d = row_all_plus[row_all_plus.index(row_a)+3]
# print row_all.index(row_a), "row-a", row_a
for a01 in row_a:
# print a01
for a02 in row_a:
if a02 > a01:
for a03 in row_a:
if a03 > a02:
for a04 in row_a:
if a04 > a03:
for a05 in row_0:
if (a05 <> a04) & (a05 <> a03) & (a05 <> a02) & (a05 <> a01):
for a06 in row_0:
if (a06 > a05) & (a06 <> a04) & (a06 <> a03) & (a06 <> a02) & (a06 <> a01):
dz_ax6 = dziesiatki([a01, a02, a03, a04, a05, a06])
all00 = all00 + 1
# | 3 | 2| 1 | 0 | 0|
if (dz_ax6.count(6) == 0) & (dz_ax6.count(5) == 0) & (dz_ax6.count(4) <= 1) & (dz_ax6.count(3) == 0) & (dz_ax6.count(2) <= 1) & (dz_ax6.count(1) <= 4) & (dz_ax6.count(0) <= 3):
all01 = all01 + 1
if ((row_b.count(a01) + row_b.count(a02) + row_b.count(a03) + row_b.count(a04) + row_b.count(a05) + row_b.count(a06)) <= 3) & ((row_c.count(a01) + row_c.count(a02) + row_c.count(a03) + row_c.count(a04) + row_c.count(a05) + row_c.count(a06)) <= 3) & ((row_d.count(a01) + row_d.count(a02) + row_d.count(a03) + row_d.count(a04) + row_d.count(a05) + row_d.count(a06)) <= 3):
# ^ the check above tests the three remaining lists b, c, d
row_bcd[0] = row_b.count(a01) + row_c.count(a01) + row_d.count(a01)
row_bcd[1] = row_b.count(a02) + row_c.count(a02) + row_d.count(a02)
row_bcd[2] = row_b.count(a03) + row_c.count(a03) + row_d.count(a03)
row_bcd[3] = row_b.count(a04) + row_c.count(a04) + row_d.count(a04)
row_bcd[4] = row_b.count(a05) + row_c.count(a05) + row_d.count(a05)
row_bcd[5] = row_b.count(a06) + row_c.count(a06) + row_d.count(a06)
# for row_bcd and row_abcd: [count of a01, a02, a03, a04, a05, a06]
if (min(row_bcd[0:3]) == 0) & (max(row_bcd) >= 2):
all02 = all02 + 1
row_abcd[0] = row_a.count(a01) + row_bcd[0]
row_abcd[1] = row_a.count(a02) + row_bcd[1]
row_abcd[2] = row_a.count(a03) + row_bcd[2]
row_abcd[3] = row_a.count(a04) + row_bcd[3]
row_abcd[4] = row_a.count(a05) + row_bcd[4]
row_abcd[5] = row_a.count(a06) + row_bcd[5]
if (max(row_abcd[0:3]) >= 2) & (min(row_abcd) == 0):
# plik.write("\n" + str([a01, a02, a03, a04, a05, a06]))
all03 = all03 + 1
dz_m1_roz = dziesiatki_suma(dz_ax6, dz_m1)
dz_m2_roz = dziesiatki_suma(dz_ax6, dz_m2)
dz_m3_roz = dziesiatki_suma(dz_ax6, dz_m3)
dz_m4_roz = dziesiatki_suma(dz_ax6, dz_m4)
dz_m1_roz_s = sum(dz_m1_roz)
dz_m2_roz_s = sum(dz_m2_roz)
dz_m3_roz_s = sum(dz_m3_roz)
dz_m4_roz_s = sum(dz_m4_roz)
dz_roz_sum = [dz_m1_roz_s, dz_m2_roz_s, dz_m3_roz_s, dz_m4_roz_s]
if (dz_roz_sum.count(6) <= 1) & (dz_roz_sum.count(5) <= 1) & (dz_roz_sum.count(4) <= 3) & (dz_roz_sum.count(3) <= 3) & (dz_roz_sum.count(2) <= 3) & (dz_roz_sum.count(1) <= 1) & (dz_roz_sum.count(0) <= 1) & (dz_roz_sum.count(0) <= par03) & (max(dz_roz_sum) <= par04):
all04 = all04 + 1
j_mm10 = jednostki([a01%10, a02%10, a03%10, a04%10, a05%10, a06%10])
j_m1_roz = jednostki_suma(j_mm10, j_m1)
j_m2_roz = jednostki_suma(j_mm10, j_m2)
j_m3_roz = jednostki_suma(j_mm10, j_m3)
j_m4_roz = jednostki_suma(j_mm10, j_m4)
j_m1_m2_roz = jednostki_suma(j_m1_roz,j_m2_roz)
j_m1_m3_roz = jednostki_suma(j_m1_roz,j_m3_roz)
j_m1_m4_roz = jednostki_suma(j_m1_roz,j_m4_roz)
j_m2_m3_roz = jednostki_suma(j_m2_roz,j_m3_roz)
j_m2_m4_roz = jednostki_suma(j_m2_roz,j_m4_roz)
j_m3_m4_roz = jednostki_suma(j_m3_roz,j_m4_roz)
j_m1234_roz = [sum(j_m1_m2_roz[:]),sum(j_m1_m3_roz[:]),sum(j_m1_m4_roz[:]),sum(j_m2_m3_roz[:]),sum(j_m2_m4_roz[:]),sum(j_m3_m4_roz[:])]
# .2.1.1.1.1.0.0.0.0.0.
# difference of differences...
#if ((j_mm10.count(2) <= par05) & (j_mm10.count(1) <= par06)) | (j_mm10.count(0) <= par07):
if sum(j_m1234_roz) <= 7:
j_m1_roz_s = sum(j_m1_roz)
j_m2_roz_s = sum(j_m2_roz)
j_m3_roz_s = sum(j_m3_roz)
j_m4_roz_s = sum(j_m4_roz)
j_roz_sum = [j_m1_roz_s, j_m2_roz_s, j_m3_roz_s, j_m4_roz_s]
all05 = all05 + 1
if j_roz_sum.count(6) == 0:
if j_roz_sum.count(5) == par01:
if j_roz_sum.count(4) <= par02:
if j_roz_sum.count(3) <= par02:
if j_roz_sum.count(2) <= par02:
if j_roz_sum.count(1) <= par08:
if j_roz_sum.count(0) == par09:
all06 = all06 + 1
j_mm = sortuj([a01, a02, a03, a04, a05, a06])
j_mm_roz = [j_mm[1] - j_mm[0], j_mm[2] - j_mm[1], j_mm[3] - j_mm[2], j_mm[4] -
"""
.. module: historical.tests.test_proxy
:platform: Unix
:copyright: (c) 2017 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. author:: <NAME> <<EMAIL>>
"""
import json
import math
import os
import sys
import time
from datetime import datetime
import boto3
import pytest # pylint: disable=E0401
from mock import MagicMock # pylint: disable=E0401
from historical.constants import EVENT_TOO_BIG_FLAG
from historical.models import TTL_EXPIRY
from historical.s3.models import VERSION
from historical.tests.factories import CloudwatchEventFactory, DetailFactory, DynamoDBDataFactory, \
DynamoDBRecordFactory, DynamoDBRecordsFactory, RecordsFactory, serialize, SnsDataFactory, SQSDataFactory
S3_BUCKET = {
"arn": "arn:aws:s3:::testbucket1",
"principalId": "<EMAIL>",
"userIdentity": {
"sessionContext": {
"userName": "oUEKDvMsBwpk",
"type": "Role",
"arn": "arn:aws:iam::123456789012:role/historical_poller",
"principalId": "AROAIKELBS2RNWG7KASDF",
"accountId": "123456789012"
},
"principalId": "AROAIKELBS2RNWG7KASDF:<EMAIL>"
},
"accountId": "123456789012",
"eventTime": "2017-09-08T00:34:34Z",
"eventSource": "aws.s3",
"BucketName": "testbucket1",
'version': VERSION,
"Region": "us-east-1",
"Tags": {},
"configuration": {
"Grants": {
"75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a": [
"FULL_CONTROL"
]
},
"Owner": {
"ID": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a"
},
"LifecycleRules": [
{
"Status": "Enabled",
"Prefix": '',
"Expiration": {
"Days": 7
},
"ID": "Some cleanup"
}
],
"Logging": {},
"Policy": None,
"Versioning": {},
"Website": None,
"Cors": [],
"Notifications": {},
"Acceleration": None,
"Replication": {},
"CreationDate": "2006-02-03T16:45:09Z",
"AnalyticsConfigurations": [],
"MetricsConfigurations": []
}
}
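# Editorial note (hedged): S3_BUCKET above mimics the shape of an item in the
# Historical "current" S3 table; the tests below copy and tweak it (for example
# by adding a ~256 KB 'VeryLargeConfigItem') to exercise the proxy's size-based
# shrinking of SNS/SQS payloads.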
def test_make_blob():
"""Tests that the shrinking SNS/SQS shrinking code works properly."""
from historical.common.proxy import shrink_blob
ttl = int(time.time() + TTL_EXPIRY)
new_bucket = S3_BUCKET.copy()
new_bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
new_bucket["ttl"] = ttl
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=new_bucket),
eventName='INSERT')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
shrunken_blob = shrink_blob(data, False)
assert shrunken_blob['userIdentity'] == data['userIdentity']
assert shrunken_blob[EVENT_TOO_BIG_FLAG]
assert shrunken_blob['eventName'] == data['eventName']
assert shrunken_blob['dynamodb']['Keys'] == data['dynamodb']['Keys']
assert not shrunken_blob['dynamodb']['NewImage'].get('configuration')
assert not shrunken_blob['dynamodb']['OldImage'].get('configuration')
def test_detect_global_table_updates():
"""Tests that Global DDB updates don't get proxied over."""
from historical.common.dynamodb import remove_global_dynamo_specific_fields
from historical.common.proxy import detect_global_table_updates
new_bucket = S3_BUCKET.copy()
new_bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
old_bucket = dict(new_bucket)
new_bucket['aws:rep:deleting'] = 'something'
new_bucket['aws:rep:updatetime'] = new_bucket['eventTime']
new_bucket['aws:rep:updateregion'] = 'us-east-1'
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=old_bucket),
eventName='MODIFY')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
assert detect_global_table_updates(data)
# If they are both equal:
old_bucket = new_bucket
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=old_bucket),
eventName='MODIFY')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
assert detect_global_table_updates(data)
# An actual tangible change:
old_bucket = dict(new_bucket)
old_bucket = remove_global_dynamo_specific_fields(old_bucket)
old_bucket['Region'] = 'us-west-2'
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=old_bucket),
eventName='MODIFY')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
assert not detect_global_table_updates(data)
# pylint: disable=W0212
def test_make_proper_dynamodb_record():
"""Tests that the Proxy can generate the proper DDB stream events."""
import historical.common.proxy
old_publish_message = historical.common.proxy._publish_sns_message
old_logger = historical.common.proxy.LOG
mock_logger = MagicMock()
historical.common.proxy.LOG = mock_logger
from historical.common.proxy import make_proper_dynamodb_record
# With a small item:
ttl = int(time.time() + TTL_EXPIRY)
new_bucket = S3_BUCKET.copy()
new_bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
new_bucket["ttl"] = ttl
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=new_bucket),
eventName='INSERT')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
test_blob = json.dumps(json.loads(make_proper_dynamodb_record(data)), sort_keys=True)
assert test_blob == json.dumps(data, sort_keys=True)
assert not json.loads(test_blob).get(EVENT_TOO_BIG_FLAG)
assert not mock_logger.debug.called
# With a big item...
new_bucket['configuration'] = new_bucket['configuration'].copy()
new_bucket['configuration']['VeryLargeConfigItem'] = 'a' * 262144
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=new_bucket),
eventName='INSERT')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
assert math.ceil(sys.getsizeof(json.dumps(data)) / 1024) >= 200
test_blob = json.dumps(json.loads(make_proper_dynamodb_record(data)), sort_keys=True)
assert test_blob != json.dumps(data, sort_keys=True)
assert json.loads(test_blob)[EVENT_TOO_BIG_FLAG]
assert not mock_logger.debug.called
# For a deletion event:
deleted_bucket = S3_BUCKET.copy()
deleted_bucket['Tags'] = {}
deleted_bucket['configuration'] = {}
new_bucket['Region'] = 'us-east-1'
ddb_deleted_item = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=deleted_bucket,
Keys={
'arn': deleted_bucket['arn']
},
OldImage=new_bucket),
eventName='INSERT')
deleted_item = DynamoDBRecordsFactory(records=[ddb_deleted_item])
data = json.loads(json.dumps(deleted_item, default=serialize))['Records'][0]
item = json.loads(make_proper_dynamodb_record(data))
assert not item['dynamodb']['OldImage'].get('configuration')
assert not item['dynamodb']['NewImage']['configuration']['M']
# Unmock:
historical.common.proxy._publish_sns_message = old_publish_message
historical.common.proxy.LOG = old_logger
# pylint: disable=R0915
def test_make_proper_simple_record():
"""Tests that the simple durable schema can be generated properly for all event types."""
import historical.common.proxy
old_tech = historical.common.proxy.HISTORICAL_TECHNOLOGY
historical.common.proxy.HISTORICAL_TECHNOLOGY = 's3'
from historical.common.proxy import make_proper_simple_record, _get_durable_pynamo_obj
from historical.s3.models import DurableS3Model
# With a small item:
new_bucket = S3_BUCKET.copy()
new_bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
del new_bucket['eventSource']
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=new_bucket),
eventName='INSERT')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
test_blob = json.loads(make_proper_simple_record(data))
assert test_blob['arn'] == new_bucket['arn']
assert test_blob['event_time'] == new_bucket['eventTime']
assert test_blob['tech'] == 's3'
assert not test_blob.get(EVENT_TOO_BIG_FLAG)
assert json.dumps(test_blob['item'], sort_keys=True) == \
json.dumps(dict(_get_durable_pynamo_obj(data['dynamodb']['NewImage'], DurableS3Model)), sort_keys=True)
# With a big item...
new_bucket['configuration'] = new_bucket['configuration'].copy()
new_bucket['configuration']['VeryLargeConfigItem'] = 'a' * 262144
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=new_bucket),
eventName='INSERT')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
assert math.ceil(sys.getsizeof(json.dumps(data)) / 1024) >= 200
test_blob = json.loads(make_proper_simple_record(data))
assert test_blob['arn'] == new_bucket['arn']
assert test_blob['event_time'] == new_bucket['eventTime']
assert test_blob['tech'] == 's3'
assert test_blob[EVENT_TOO_BIG_FLAG]
assert not test_blob.get('item')
# For a deletion event:
deleted_bucket = S3_BUCKET.copy()
del deleted_bucket['eventSource']
deleted_bucket['Tags'] = {}
deleted_bucket['configuration'] = {}
new_bucket['Region'] = 'us-east-1'
ddb_deleted_item = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=deleted_bucket,
Keys={
'arn': deleted_bucket['arn']
},
OldImage=new_bucket),
eventName='INSERT')
deleted_item = DynamoDBRecordsFactory(records=[ddb_deleted_item])
data = json.loads(json.dumps(deleted_item, default=serialize))['Records'][0]
test_blob = json.loads(make_proper_simple_record(data))
assert test_blob['arn'] == deleted_bucket['arn']
assert test_blob['event_time'] == deleted_bucket['eventTime']
assert test_blob['tech'] == 's3'
assert json.dumps(test_blob['item'], sort_keys=True) == \
json.dumps(dict(_get_durable_pynamo_obj(data['dynamodb']['NewImage'], DurableS3Model)), sort_keys=True)
# For a creation event:
new_bucket = S3_BUCKET.copy()
new_bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
del new_bucket['eventSource']
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
}),
eventName='INSERT')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
test_blob = json.loads(make_proper_simple_record(data))
assert test_blob['arn'] == new_bucket['arn']
assert test_blob['event_time'] == new_bucket['eventTime']
assert test_blob['tech'] == 's3'
assert not test_blob.get(EVENT_TOO_BIG_FLAG)
assert json.dumps(test_blob['item'], sort_keys=True) == \
json.dumps(dict(_get_durable_pynamo_obj(data['dynamodb']['NewImage'], DurableS3Model)), sort_keys=True)
# Unmock:
historical.common.proxy.HISTORICAL_TECHNOLOGY = old_tech
def test_simple_schema():
"""Tests that the simple durable schema itself."""
import historical.common.proxy
old_tech = historical.common.proxy.HISTORICAL_TECHNOLOGY
historical.common.proxy.HISTORICAL_TECHNOLOGY = 's3'
from historical.common.proxy import make_proper_simple_record
from historical.models import SimpleDurableSchema
# Small object
new_bucket = S3_BUCKET.copy()
new_bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
del new_bucket['eventSource']
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=new_bucket),
eventName='INSERT')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
test_blob = make_proper_simple_record(data)
# Test loading from the schema:
sds = SimpleDurableSchema(strict=True)
result = sds.loads(test_blob).data
test_blob = json.loads(test_blob)
assert json.dumps(result, sort_keys=True) == json.dumps(test_blob, sort_keys=True)
assert json.dumps(json.loads(sds.dumps(result).data), sort_keys=True) == json.dumps(test_blob, sort_keys=True)
serialized = sds.serialize_me(test_blob['arn'], test_blob['event_time'], test_blob['tech'], item=test_blob['item'])
assert json.dumps(json.loads(serialized), sort_keys=True) == json.dumps(result, sort_keys=True)
# Big object:
new_bucket['configuration'] = new_bucket['configuration'].copy()
new_bucket['configuration']['VeryLargeConfigItem'] = 'a' * 262144
ddb_record = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=new_bucket),
eventName='INSERT')
new_item = DynamoDBRecordsFactory(records=[ddb_record])
data = json.loads(json.dumps(new_item, default=serialize))['Records'][0]
assert math.ceil(sys.getsizeof(json.dumps(data)) / 1024) >= 200
test_blob = make_proper_simple_record(data)
result = sds.loads(test_blob).data
test_blob = json.loads(test_blob)
assert json.dumps(result, sort_keys=True) == json.dumps(test_blob, sort_keys=True)
assert json.dumps(json.loads(sds.dumps(result).data), sort_keys=True) == json.dumps(test_blob, sort_keys=True)
serialized = sds.serialize_me(test_blob['arn'], test_blob['event_time'], test_blob['tech'])
assert json.dumps(json.loads(serialized), sort_keys=True) == json.dumps(result, sort_keys=True)
# Unmock:
historical.common.proxy.HISTORICAL_TECHNOLOGY = old_tech
# pylint: disable=R0914,R0915,W0613
def test_proxy_dynamodb_differ(historical_role, current_s3_table, durable_s3_table, mock_lambda_environment,
buckets):
"""This mostly checks that the differ is able to properly load the reduced dataset from the Proxy."""
# Create the item in the current table:
from historical.s3.collector import handler as current_handler
from historical.s3.differ import handler as diff_handler
from historical.s3.models import CurrentS3Model, DurableS3Model
from historical.common.proxy import shrink_blob
# Mock out the loggers:
import historical.common.dynamodb
old_logger = historical.common.dynamodb.LOG
mocked_logger = MagicMock()
historical.common.dynamodb.LOG = mocked_logger
now = datetime.utcnow().replace(tzinfo=None, microsecond=0)
create_event = CloudwatchEventFactory(
detail=DetailFactory(
requestParameters={
"bucketName": "testbucket1"
},
eventSource="aws.s3",
eventName="CreateBucket",
eventTime=now
)
)
data = json.dumps(create_event, default=serialize)
data = RecordsFactory(records=[SQSDataFactory(body=data)])
data = json.dumps(data, default=serialize)
data = json.loads(data)
current_handler(data, mock_lambda_environment)
result = list(CurrentS3Model.query("arn:aws:s3:::testbucket1"))
assert len(result) == 1
    # Mock out the DDB Stream for this creation and for an item that is NOT in the current table:
ttl = int(time.time() + TTL_EXPIRY)
new_bucket = S3_BUCKET.copy()
new_bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
new_bucket['ttl'] = ttl
ddb_existing_item = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=new_bucket,
Keys={
'arn': new_bucket['arn']
},
OldImage=new_bucket),
eventName='INSERT')
missing_bucket = S3_BUCKET.copy()
missing_bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
missing_bucket['ttl'] = ttl
missing_bucket['BucketName'] = 'notinthecurrenttable'
missing_bucket['arn'] = 'arn:aws:s3:::notinthecurrenttable'
ddb_missing_item = DynamoDBRecordFactory(
dynamodb=DynamoDBDataFactory(
NewImage=missing_bucket,
Keys={
'arn': 'arn:aws:s3:::notinthecurrenttable'
},
OldImage=new_bucket),
eventName='INSERT')
# Get the shrunken blob:
shrunken_existing = json.dumps(shrink_blob(json.loads(json.dumps(ddb_existing_item, default=serialize)), False))
shrunken_missing = json.dumps(shrink_blob(json.loads(json.dumps(ddb_missing_item, default=serialize)), False))
# Also try one without the SNS data factory -- it should still work properly on de-serialization:
records = RecordsFactory(
records=[SQSDataFactory(body=json.dumps(SnsDataFactory(Message=shrunken_existing), default=serialize)),
SQSDataFactory(body=json.dumps(shrunken_missing, default=serialize))]
)
records_event = json.loads(json.dumps(records, default=serialize))
# Run the differ:
diff_handler(records_event, mock_lambda_environment)
# Verify that the existing bucket in the Current table is in the Durable table with the correct configuration:
result = list(DurableS3Model.query("arn:aws:s3:::testbucket1"))
    assert len(result)
import numpy as np
import matplotlib.pyplot as plt
from math import pi
import math, os, datetime
class data_manipulation_module:
def __init__(self):
self.a = 1
self.list_x = None
self.list_y = None
def init_graph_list(self):
self.list_x = []
self.list_y = []
def add_graph_list(self, element_x, element_y):
self.list_x.append(element_x)
self.list_y.append(element_y)
    # Simply extends the length of the array to the target length.
    # The remaining part is filled with zeros.
def data_stretched_no_modify(self, data :np.ndarray, target_length :int):
self.a = 1
        if data.size > target_length:
            print("sizes are wrong")
            return -1
ret = np.zeros(target_length)
ret[:data.size] = data
return ret
    # Performs an operation similar to np.interp.
    # Unlike interp, all points outside the range of origin_axis are filled with 0.
    # (interp fills only the low side with 0.)
    # target_axis
    # - the x coordinates at which the y values are evaluated.
    # - order does not matter.
    # origin_axis
    # - the original x coordinates.
    # - x[i] <= x[j] for all i <= j
    # data
    # - the original y values.
    # - must have the same size as origin_axis.
def data_interp_zeros(self, target_axis :np.ndarray, origin_axis :np.ndarray, data :np.ndarray):
self.a = 1
# if data.size is not origin_axis.size:
# print("DataManipulation__data_interp_zeros : origin data sizes are wrong %d %d" % (data.size, origin_axis.size))
return np.interp(target_axis, origin_axis, data) * ((origin_axis[0] <= target_axis) & (target_axis <= origin_axis[-1]))
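    # A minimal usage sketch (the arrays below are illustrative only): values requested
    # outside [origin_axis[0], origin_axis[-1]] come back as 0 instead of the edge value.
    #
    #   dm = data_manipulation_module()
    #   origin = np.array([1.0, 2.0, 3.0])
    #   data = np.array([10.0, 20.0, 30.0])
    #   target = np.array([0.5, 1.5, 3.5])
    #   dm.data_interp_zeros(target, origin, data)   # -> array([ 0., 15.,  0.])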
    # Computes the x-axis arrays for the time domain and the frequency domain of the measured data.
    # Time domain
    # - N or n : nanoseconds
    # - U or u : microseconds
    # - M or m : milliseconds
    # Frequency domain
    # - G or g : giga
    # - M or m : mega
    # - K or k : kilo
def get_sample_spacing(self, samples_per_second :int, size :int, unit_output_time :str, unit_output_freq :str):
self.a = 1
if unit_output_time[0] == 'N' or unit_output_time[0] == 'n':
u_output_time = 1e9
elif unit_output_time[0] == 'U' or unit_output_time[0] == 'u':
u_output_time = 1e6
elif unit_output_time[0] == 'M' or unit_output_time[0] == 'm':
u_output_time = 1e3
else:
u_output_time = 1
if unit_output_freq[0] == 'G' or unit_output_freq[0] == 'g':
u_output_freq = 1e-9
elif unit_output_freq[0] == 'M' or unit_output_freq[0] == 'm':
u_output_freq = 1e-6
        elif unit_output_freq[0] == 'K' or unit_output_freq[0] == 'k':
u_output_freq = 1e-3
else:
u_output_freq = 1
ret_time = np.arange(size) * u_output_time / samples_per_second
ret_freq = np.arange(size) * u_output_freq * samples_per_second / size
return ret_time, ret_freq
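    # A minimal usage sketch (the sample rate and size are illustrative only):
    #
    #   dm = data_manipulation_module()
    #   t_axis, f_axis = dm.get_sample_spacing(samples_per_second=1000000000, size=1024,
    #                                          unit_output_time='ns', unit_output_freq='MHz')
    #   # t_axis is in nanoseconds and f_axis in megahertz, each of length 1024.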
    # Converts the x-axis unit of the time or frequency domain of a signal while keeping the number of samples.
    # If the time-domain unit changes, the frequency domain changes accordingly.
    # If the frequency-domain unit changes, the time-domain unit changes accordingly.
    #
    # time_x : time-domain x coordinates before conversion
    # freq_x : frequency-domain x coordinates before conversion
    # delta_before : magnitude of the unit before conversion (e.g. the 10 in 10MHz)
    # delta_after : magnitude of the unit after conversion
    # unit_before : unit before conversion (e.g. MHz in 10MHz, ms in 8.2ms); must match unit_after in being time or frequency
    # unit_after : unit after conversion; must match unit_before in being time or frequency
def get_new_sample_spacing(self, time_x :np.ndarray, freq_x :np.ndarray, delta_before :float, delta_after :float, unit_before :str, unit_after :str):
        if unit_before[0] == 'H' or unit_before[0] == 'h' or unit_before[1] == 'H' or unit_before[1] == 'h':
            mode_is_freq = True
        elif unit_before[0] == 'S' or unit_before[0] == 's' or unit_before[1] == 'S' or unit_before[1] == 's':
            mode_is_freq = False
        else:
            print("unit_before is wrong")
            return None
        if (unit_after[0] == 'H' or unit_after[0] == 'h' or unit_after[1] == 'H' or unit_after[1] == 'h') and mode_is_freq is False:
            print("Input : time, Output : freq -> Wrong")
            return None
        elif (unit_after[0] == 'S' or unit_after[0] == 's' or unit_after[1] == 'S' or unit_after[1] == 's') and mode_is_freq is True:
            print("Input : freq, Output : time -> Wrong")
            return None
if mode_is_freq:
if unit_before[0] == 'G' or unit_before[0] == 'g':
c = 1000
elif unit_before[0] == 'M' or unit_before[0] == 'm':
c = 1
elif unit_before[0] == 'K' or unit_before[0] == 'k':
c = 0.001
elif unit_before[0] == 'H' or unit_before[0] == 'h':
c = 0.000001
else:
print("Unit of frequency is too small")
return None
if unit_after[0] == 'G' or unit_after[0] == 'g':
d = 0.001
elif unit_after[0] == 'M' or unit_after[0] == 'm':
d = 1
elif unit_after[0] == 'K' or unit_after[0] == 'k':
d = 1000
elif unit_after[0] == 'H' or unit_after[0] == 'h':
d = 1000000
else:
print("Unit of frequency is too small")
return None
ret_freq = freq_x * c * d * delta_after / delta_before
ret_time = time_x * delta_before / (c * d * delta_after)
else:
            if unit_before[0] == 'P' or unit_before[0] == 'p':
                c = 0.000001
            elif unit_before[0] == 'N' or unit_before[0] == 'n':
                c = 0.001
            elif unit_before[0] == 'U' or unit_before[0] == 'u':
                c = 1
            elif unit_before[0] == 'M' or unit_before[0] == 'm':
                c = 1000
            elif unit_before[0] == 'S' or unit_before[0] == 's':
                c = 1000000
            else:
                print("Unit of time is too large")
                return None
            if unit_after[0] == 'P' or unit_after[0] == 'p':
                d = 1000000
            elif unit_after[0] == 'N' or unit_after[0] == 'n':
                d = 1000
            elif unit_after[0] == 'U' or unit_after[0] == 'u':
                d = 1
            elif unit_after[0] == 'M' or unit_after[0] == 'm':
                d = 0.001
            elif unit_after[0] == 'S' or unit_after[0] == 's':
                d = 0.000001
            else:
                print("Unit of time is too large")
                return None
ret_time = time_x * c * d * delta_after / delta_before
ret_freq = freq_x * delta_before / (c * d * delta_after)
return ret_time, ret_freq
def _resizing(self, x_t, y_t):
self.a = 1
x_size = x_t.size
y_size = y_t.size
if x_size > y_size:
z = np.zeros(x_size)
z[:y_size] = y_t
return x_t, z
elif x_size < y_size:
z = np.zeros(y_size)
z[:x_size] = x_t
return z, y_t
else:
return x_t, y_t
def _return_mode(self, data, mode: str=None):
self.a = 1
        if mode is None:
            return data
        elif mode == "complex" or mode == "cpx":
            return np.real(data), np.imag(data)
        elif mode[:4] == "real":
            return np.real(data)
        elif mode[:4] == "imag":
            return np.imag(data)
        elif mode[:3] == "abs" or mode[:3] == "Abs":
            return np.abs(data)
        else:
            return data
def convert_deconvolution(self, x_t, y_t, any_value, output_mode: str=None):
x_t, y_t = self._resizing(x_t, y_t)
x_f = np.fft.fft(x_t)
y_f = np.fft.fft(y_t)
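        # Force the DC bin of X(f) to 1 so the division below cannot divide by zero
        # (note that this alters the DC component of the recovered response).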
x_f[0] = 1
h_f = y_f / x_f
h_t = np.fft.ifft(h_f)
return self._return_mode(h_t, output_mode)
def convert_selective_divide(self, x_t, y_t, threshold, output_mode: str=None):
x_t, y_t = self._resizing(x_t, y_t)
x_f = np.fft.fft(x_t)
y_f = np.fft.fft(y_t)
sizes = len(x_f)
        h_f = np.zeros(sizes, dtype=complex)
for i in range(sizes):
if np.abs(x_f[i]) >= threshold:
h_f[i] = y_f[i]/x_f[i]
h_t = np.fft.ifft(h_f)
return self._return_mode(h_t, output_mode)
def convert_wiener_convolution(self, x_t, y_t, snr_dB, output_mode: str=None):
x_t, y_t = self._resizing(x_t, y_t)
x_f = np.fft.fft(x_t)
y_f = np.fft.fft(y_t)
snr = math.pow(10, snr_dB/10)
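        # Wiener deconvolution gain: G(f) = conj(X(f)) / (|X(f)|^2 + 1/SNR),
        # using the linear SNR converted from dB above.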
g_f = np.conj(x_f) / (np.square(np.absolute(x_f)) + 1 / snr)
h_f = y_f * g_f
h_t = np.fft.ifft(h_f)
return self._return_mode(h_t, output_mode)
    # y_t is held fixed.
    # cor[0] = x_t[-1] * y_t[0].
    # The right end of x_t starts overlapping with the head of y_t.
    # Only the data from the point where the start of x_t meets the start of y_t is meaningful.
def convert_cross_correlation(self, x_t, y_t, output_mode: str=None):
x_t, y_t = self._resizing(x_t, y_t)
length = x_t.size
h_cor = np.correlate(y_t, x_t, 'full')
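        # h_cor has length 2*N - 1; index N - 1 is the zero-lag term sum(x_t * y_t),
        # so h_t[0] below corresponds to the two signals starting together.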
h_t = h_cor[length-1:]
return self._return_mode(h_t, output_mode)
def convert_filter_lpf_f(self, x_t, ff, output_mode: str=None):
x_f = np.fft.fft(x_t)
for i in range(ff, x_t.size):
x_f[i] = 0
x_t = np.fft.ifft(x_f)
return self._return_mode(x_t, output_mode)
def convert_filter_lpf_t(self, x_t, ff, output_mode: str=None):
w0 = 2 * pi * ff
fs = 1
mothers = w0+2
y_t = np.zeros(x_t.size)
y_t[0] = 2*x_t[0]/mothers
for i in range(1, x_t.size):
y_t[i] = 2/mothers*x_t[i] - 2/mothers*x_t[i-1] - (w0-2)/mothers*y_t[i-1]
return self._return_mode(y_t, output_mode)
    # arr_x must have the same dimensions as arr_y.
    # arr_x is a 3-dimensional list:
    # (row, col, data)
def graphing_1D(self, arr_x=None, arr_y=None, isDot :bool=False, isCpx :bool=False):
if arr_x is None:
arr_x = self.list_x
if arr_y is None:
arr_y = self.list_y
if arr_x is None or arr_y is None:
print("list_x and list_y should be filled")
return None
        if len(arr_x) != len(arr_y):
            print("size of row is different")
            return None
        if len(arr_x[0]) != len(arr_y[0]):
            print("size of col is different")
            return None
size_row = len(arr_x)
size_col = len(arr_x[0])
fig = | |
#!/usr/bin/env python2
import sys
import os
import pickle
import errno
import random
class RenPyArchive:
file = None
handle = None
files = {}
indexes = {}
version = None
padlength = 0
key = None
verbose = False
RPA2_MAGIC = 'RPA-2.0 '
RPA3_MAGIC = 'RPA-3.0 '
def __init__(self, file = None, version = 3, padlength = 0, key = 0xDEADBEEF, verbose = False):
self.padlength = padlength
self.key = key
self.verbose = verbose
if file is not None:
self.load(file)
else:
self.version = version
def __del__(self):
if self.handle is not None:
self.handle.close()
# Determine archive version.
def get_version(self):
self.handle.seek(0)
magic = self.handle.readline().decode('utf-8')
if magic.startswith(self.RPA3_MAGIC):
return 3
elif magic.startswith(self.RPA2_MAGIC):
return 2
elif self.file.endswith('.rpi'):
return 1
raise ValueError('the given file is not a valid Ren\'Py archive, or an unsupported version')
# Extract file indexes from opened archive.
def extract_indexes(self):
self.handle.seek(0)
indexes = None
if self.version == 2 or self.version == 3:
# Fetch metadata.
metadata = self.handle.readline()
offset = int(metadata[8:24], 16)
if self.version == 3:
self.key = 0
for subkey in metadata[25:].split():
self.key ^= int(subkey, 16)
# Load in indexes.
self.handle.seek(offset)
indexes = pickle.loads(self.handle.read().decode('zlib'))
# Deobfuscate indexes.
if self.version == 3:
obfuscated_indexes = indexes
indexes = {}
for i in obfuscated_indexes.keys():
if len(obfuscated_indexes[i][0]) == 2:
indexes[i] = [ (offset ^ self.key, length ^ self.key) for offset, length in obfuscated_indexes[i] ]
else:
indexes[i] = [ (offset ^ self.key, length ^ self.key, prefix) for offset, length, prefix in obfuscated_indexes[i] ]
else:
indexes = pickle.loads(self.handle.read().decode('zlib'))
return indexes
# Generate pseudorandom padding (for whatever reason).
def generate_padding(self):
length = random.randint(1, self.padlength)
padding = ''
while length > 0:
padding += chr(random.randint(1, 255))
length -= 1
return padding
# Converts a filename to archive format.
def convert_filename(self, filename):
(drive, filename) = os.path.splitdrive(os.path.normpath(filename).replace(os.sep, '/'))
return filename
# Debug (verbose) messages.
def verbose_print(self, message):
if self.verbose:
print(message)
# List files in archive and current internal storage.
def list(self):
return self.indexes.keys() + self.files.keys()
# Check if a file exists in the archive.
def has_file(self, filename):
return filename in self.indexes.keys() or filename in self.files.keys()
# Read file from archive or internal storage.
def read(self, filename):
filename = self.convert_filename(filename)
# Check if the file exists in our indexes.
if filename not in self.files and filename not in self.indexes:
raise IOError(errno.ENOENT, 'the requested file {0} does not exist in the given Ren\'Py archive'.format(filename))
# If it's in our opened archive index, and our archive handle isn't valid, something is obviously wrong.
if filename not in self.files and filename in self.indexes and self.handle is None:
raise IOError(errno.ENOENT, 'the requested file {0} does not exist in the given Ren\'Py archive'.format(filename))
# Check our simplified internal indexes first, in case someone wants to read a file they added before without saving, for some unholy reason.
if filename in self.files:
self.verbose_print('Reading file {0} from internal storage...'.format(filename.encode('utf-8')))
return self.files[filename]
# We need to read the file from our open archive.
else:
# Read offset and length, seek to the offset and read the file contents.
if len(self.indexes[filename][0]) == 3:
(offset, length, prefix) = self.indexes[filename][0]
else:
(offset, length) = self.indexes[filename][0]
prefix = ''
self.verbose_print('Reading file {0} from data file {1}... (offset = {2}, length = {3} bytes)'.format(filename.encode('utf-8'), self.file, offset, length))
self.handle.seek(offset)
return prefix + self.handle.read(length - len(prefix))
# Modify a file in archive or internal storage.
def change(self, filename, contents):
# Our 'change' is basically removing the file from our indexes first, and then re-adding it.
self.remove(filename)
self.add(filename, contents)
# Add a file to the internal storage.
def add(self, filename, contents):
filename = unicode(self.convert_filename(filename), 'utf-8')
if filename in self.files or filename in self.indexes:
raise ValueError('file {0} already exists in archive'.format(filename))
self.verbose_print('Adding file {0} to archive... (length = {1} bytes)'.format(filename.encode('utf-8'), len(contents)))
self.files[filename] = contents
# Remove a file from archive or internal storage.
def remove(self, filename):
filename = unicode(self.convert_filename(filename), 'utf-8')
if filename in self.files:
self.verbose_print('Removing file {0} from internal storage...'.format(filename.encode('utf-8')))
del self.files[filename]
elif filename in self.indexes:
self.verbose_print('Removing file {0} from archive indexes...'.format(filename.encode('utf-8')))
del self.indexes[filename]
else:
raise IOError(errno.ENOENT, 'the requested file {0} does not exist in this archive'.format(filename.encode('utf-8')))
# Load archive.
def load(self, filename):
if self.handle is not None:
self.handle.close()
self.file = filename
self.files = {}
self.handle = open(self.file, 'rb')
self.version = self.get_version()
self.indexes = self.extract_indexes()
# Save current state into a new file, merging archive and internal storage, rebuilding indexes, and optionally saving in another format version.
def save(self, filename = None):
if filename is None:
filename = self.file
if filename is None:
raise ValueError('no target file found for saving archive')
if self.version != 2 and self.version != 3:
raise ValueError('saving is only supported for version 2 and 3 archives')
self.verbose_print('Rebuilding archive index...')
# Fill our own files structure with the files added or changed in this session.
files = self.files
# First, read files from the current archive into our files structure.
for file in self.indexes.keys():
content = self.read(file)
# Remove from indexes array once read, add to our own array.
del self.indexes[file]
files[file] = content
# Predict header length, we'll write that one last.
offset = 0
        if self.version == 3:
            offset = 34
        elif self.version == 2:
            offset = 25
archive = open(filename, 'wb')
archive.seek(offset)
# Build our own indexes while writing files to the archive.
indexes = {}
self.verbose_print('Writing files to archive file...')
for file, content in files.items():
# Generate random padding, for whatever reason.
if self.padlength > 0:
padding = self.generate_padding()
archive.write(padding)
offset += len(padding)
archive.write(content)
# Update index.
if self.version == 3:
indexes[file] = [ (offset ^ self.key, len(content) ^ self.key) ]
elif self.version == 2:
indexes[file] = [ (offset, len(content)) ]
offset += len(content)
# Write the indexes.
self.verbose_print('Writing archive index to archive file...')
archive.write(pickle.dumps(indexes, pickle.HIGHEST_PROTOCOL).encode('zlib'))
# Now write the header.
self.verbose_print('Writing header to archive file... (version = RPAv{0})'.format(self.version))
archive.seek(0)
if self.version == 3:
archive.write('RPA-3.0 %016x %08x\n' % (offset, self.key))
else:
archive.write('RPA-2.0 %016x\n' % (offset))
# We're done, close it.
archive.close()
# Reload the file in our inner database.
self.load(filename)
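# A minimal usage sketch of the class above (the archive path and file names are
# hypothetical):
#
#   archive = RenPyArchive('game/archive.rpa', verbose=True)
#   for name in archive.list():
#       print(name)
#   data = archive.read('script.rpyc')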
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='A tool for working with Ren\'Py archive files.',
epilog='The FILE argument can optionally be in ARCHIVE=REAL format, mapping a file in the archive file system to a file on your real file system. An example of this: rpatool -x test.rpa script.rpyc=/home/foo/test.rpyc',
add_help=False)
parser.add_argument('archive', metavar='ARCHIVE', help='The Ren\'py archive file to operate on.')
parser.add_argument('files', metavar='FILE', nargs='*', action='append', help='Zero or more files to operate on.')
parser.add_argument('-l', '--list', action='store_true', help='List files in archive ARCHIVE.')
parser.add_argument('-x', '--extract', action='store_true', help='Extract FILEs from ARCHIVE.')
    parser.add_argument('-c', '--create', action='store_true', help='Create ARCHIVE from FILEs.')
parser.add_argument('-d', '--delete', action='store_true', help='Delete FILEs from ARCHIVE.')
parser.add_argument('-a', '--append', action='store_true', help='Append FILEs to ARCHIVE.')
parser.add_argument('-2', '--two', action='store_true', help='Use the RPAv2 format for creating/appending to archives.')
parser.add_argument('-3', '--three', action='store_true', help='Use the RPAv3 format for creating/appending to archives (default).')
parser.add_argument('-k', '--key', metavar='KEY', help='The obfuscation key used for creating RPAv3 archives, in hexadecimal (default: 0xDEADBEEF).')
parser.add_argument('-p', '--padding', metavar='COUNT', help='The maximum number of bytes of padding to add between files (default: 0).')
parser.add_argument('-o', '--outfile', help='An alternative output archive file when appending to or deleting from archives, or output directory when extracting.')
parser.add_argument('-h', '--help', action='help', help='Print this help and exit.')
parser.add_argument('-v', '--verbose', action='store_true', help='Be a bit more verbose while performing operations.')
parser.add_argument('-V', '--version', action='version', version='rpatool v0.8', help='Show version information.')
arguments = parser.parse_args()
# Determine RPA version.
if arguments.two:
version = 2
else:
version = 3
# Determine RPAv3 key.
if 'key' in arguments and arguments.key is not None:
key = int(arguments.key, 16)
else:
key = 0xDEADBEEF
# Determine padding bytes.
if 'padding' in arguments and arguments.padding is not None:
padding = int(arguments.padding)
else:
padding = 0
# Determine output file/directory and input archive
if arguments.create:
archive = None
output | |
fnodes = global_mesh.nodes[ends[0]].get('far')
dmask = mask + 0
did = 0
while True:
did += 1
dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1)
if did > 3:
break
ffnode = [fnode for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0 and mask[fnode[0], fnode[1]] == 0 and\
global_mesh.nodes[fnode].get('inpaint_id') != inpaint_id + 1)]
if len(ffnode) > 0:
fnode = ffnode[0]
break
if len(ffnode) == 0:
continue
fpath.append((fnode[0], fnode[1]))
barrel_dir = np.array([[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]])
n2f_dir = (int(fnode[0] - npath[0][0]), int(fnode[1] - npath[0][1]))
while True:
if barrel_dir[0, 0] == n2f_dir[0] and barrel_dir[0, 1] == n2f_dir[1]:
n2f_barrel = barrel_dir.copy()
break
barrel_dir = np.roll(barrel_dir, 1, axis=0)
for step in range(0, len(npath)):
if step == 0:
continue
elif step == 1:
next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
while True:
if barrel_dir[0, 0] == next_dir[0] and barrel_dir[0, 1] == next_dir[1]:
next_barrel = barrel_dir.copy()
break
barrel_dir = np.roll(barrel_dir, 1, axis=0)
barrel_pair = np.stack((n2f_barrel, next_barrel), axis=0)
n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
elif step > 1:
next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
while True:
if barrel_pair[1, 0, 0] == next_dir[0] and barrel_pair[1, 0, 1] == next_dir[1]:
next_barrel = barrel_pair.copy()
break
barrel_pair = np.roll(barrel_pair, 1, axis=1)
n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
new_locs = []
if abs(n2f_dir[0]) == 1:
new_locs.append((npath[step][0] + n2f_dir[0], npath[step][1]))
if abs(n2f_dir[1]) == 1:
new_locs.append((npath[step][0], npath[step][1] + n2f_dir[1]))
if len(new_locs) > 1:
new_locs = sorted(new_locs, key=lambda xx: np.hypot((xx[0] - fpath[-1][0]), (xx[1] - fpath[-1][1])))
break_flag = False
for new_loc in new_locs:
new_loc_nes = [xx for xx in [(new_loc[0] + 1, new_loc[1]), (new_loc[0] - 1, new_loc[1]),
(new_loc[0], new_loc[1] + 1), (new_loc[0], new_loc[1] - 1)]\
if xx[0] >= 0 and xx[0] < fpath_map.shape[0] and xx[1] >= 0 and xx[1] < fpath_map.shape[1]]
if np.all([(fpath_map[nlne[0], nlne[1]] == -1) for nlne in new_loc_nes]) != True:
break
if npath_map[new_loc[0], new_loc[1]] != -1:
if npath_map[new_loc[0], new_loc[1]] != edge_id:
break_flag = True
break
else:
continue
if valid_map[new_loc[0], new_loc[1]] == 0:
break_flag = True
break
fpath.append(new_loc)
if break_flag is True:
break
if step != len(npath) - 1:
for xx in npath[step:]:
if npath_map[xx[0], xx[1]] == edge_id:
npath_map[xx[0], xx[1]] = -1
npath = npath[:step]
if len(fpath) > 0:
for fp_node in fpath:
fpath_map[fp_node[0], fp_node[1]] = edge_id
fpaths[edge_id] = fpath
npaths[edge_id] = npath
fpath_map[valid_near_edge != 0] = -1
if len(fpath) > 0:
iter_fpath = copy.deepcopy(fpaths[edge_id])
for node in iter_fpath:
if valid_near_edge[node[0], node[1]] != 0:
fpaths[edge_id].remove(node)
return fpath_map, npath_map, False, npaths, fpaths, invalid_edge_ids
def plan_path_e2e(mesh, cc, end_pts, global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None):
my_npath_map = np.zeros_like(input_edge) - 1
my_fpath_map = np.zeros_like(input_edge) - 1
sub_mesh = mesh.subgraph(list(cc)).copy()
ends_1, ends_2 = end_pts[0], end_pts[1]
edge_id = global_mesh.nodes[ends_1]['edge_id']
npath = [*netx.shortest_path(sub_mesh, (ends_1[0], ends_1[1]), (ends_2[0], ends_2[1]), weight='length')]
for np_node in npath:
my_npath_map[np_node[0], np_node[1]] = edge_id
fpath = []
if global_mesh.nodes[ends_1].get('far') is None:
print("None far")
else:
fnodes = global_mesh.nodes[ends_1].get('far')
dmask = mask + 0
while True:
dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1)
ffnode = [fnode for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0 and mask[fnode[0], fnode[1]] == 0 and\
global_mesh.nodes[fnode].get('inpaint_id') != inpaint_id + 1)]
if len(ffnode) > 0:
fnode = ffnode[0]
break
e_fnodes = global_mesh.nodes[ends_2].get('far')
dmask = mask + 0
while True:
dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1)
e_ffnode = [e_fnode for e_fnode in e_fnodes if (dmask[e_fnode[0], e_fnode[1]] > 0 and mask[e_fnode[0], e_fnode[1]] == 0 and\
global_mesh.nodes[e_fnode].get('inpaint_id') != inpaint_id + 1)]
if len(e_ffnode) > 0:
e_fnode = e_ffnode[0]
break
fpath.append((fnode[0], fnode[1]))
if len(e_ffnode) == 0 or len(ffnode) == 0:
return my_npath_map, my_fpath_map, [], []
barrel_dir = np.array([[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]])
n2f_dir = (int(fnode[0] - npath[0][0]), int(fnode[1] - npath[0][1]))
while True:
if barrel_dir[0, 0] == n2f_dir[0] and barrel_dir[0, 1] == n2f_dir[1]:
n2f_barrel = barrel_dir.copy()
break
barrel_dir = np.roll(barrel_dir, 1, axis=0)
for step in range(0, len(npath)):
if step == 0:
continue
elif step == 1:
next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
while True:
if barrel_dir[0, 0] == next_dir[0] and barrel_dir[0, 1] == next_dir[1]:
next_barrel = barrel_dir.copy()
break
barrel_dir = np.roll(barrel_dir, 1, axis=0)
barrel_pair = np.stack((n2f_barrel, next_barrel), axis=0)
n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
elif step > 1:
next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
while True:
if barrel_pair[1, 0, 0] == next_dir[0] and barrel_pair[1, 0, 1] == next_dir[1]:
next_barrel = barrel_pair.copy()
break
barrel_pair = np.roll(barrel_pair, 1, axis=1)
n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
new_locs = []
if abs(n2f_dir[0]) == 1:
new_locs.append((npath[step][0] + n2f_dir[0], npath[step][1]))
if abs(n2f_dir[1]) == 1:
new_locs.append((npath[step][0], npath[step][1] + n2f_dir[1]))
if len(new_locs) > 1:
new_locs = sorted(new_locs, key=lambda xx: np.hypot((xx[0] - fpath[-1][0]), (xx[1] - fpath[-1][1])))
break_flag = False
for new_loc in new_locs:
new_loc_nes = [xx for xx in [(new_loc[0] + 1, new_loc[1]), (new_loc[0] - 1, new_loc[1]),
(new_loc[0], new_loc[1] + 1), (new_loc[0], new_loc[1] - 1)]\
if xx[0] >= 0 and xx[0] < my_fpath_map.shape[0] and xx[1] >= 0 and xx[1] < my_fpath_map.shape[1]]
if fpath_map is not None and np.sum([fpath_map[nlne[0], nlne[1]] for nlne in new_loc_nes]) != 0:
break_flag = True
break
if my_npath_map[new_loc[0], new_loc[1]] != -1:
continue
if npath_map is not None and npath_map[new_loc[0], new_loc[1]] != edge_id:
break_flag = True
break
fpath.append(new_loc)
if break_flag is True:
break
if (e_fnode[0], e_fnode[1]) not in fpath:
fpath.append((e_fnode[0], e_fnode[1]))
if step != len(npath) - 1:
for xx in npath[step:]:
if my_npath_map[xx[0], xx[1]] == edge_id:
my_npath_map[xx[0], xx[1]] = -1
npath = npath[:step]
if len(fpath) > 0:
for fp_node in fpath:
my_fpath_map[fp_node[0], fp_node[1]] = edge_id
return my_fpath_map, my_npath_map, npath, fpath
def plan_path(mesh, info_on_pix, cc, end_pt, global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None, npath=None):
my_npath_map = np.zeros_like(input_edge) - 1
my_fpath_map = np.zeros_like(input_edge) - 1
sub_mesh = mesh.subgraph(list(cc)).copy()
pnodes = netx.periphery(sub_mesh)
ends = [*end_pt]
edge_id = global_mesh.nodes[ends[0]]['edge_id']
pnodes = sorted(pnodes,
key=lambda x: np.hypot((x[0] - ends[0][0]), (x[1] - ends[0][1])),
reverse=True)[0]
if npath is None:
npath = [*netx.shortest_path(sub_mesh, (ends[0][0], ends[0][1]), pnodes, weight='length')]
else:
if (ends[0][0], ends[0][1]) == npath[0]:
npath = npath
elif (ends[0][0], ends[0][1]) == npath[-1]:
npath = npath[::-1]
else:
import pdb; pdb.set_trace()
for np_node in npath:
my_npath_map[np_node[0], np_node[1]] = edge_id
fpath = []
if global_mesh.nodes[ends[0]].get('far') is None:
print("None far")
else:
fnodes = global_mesh.nodes[ends[0]].get('far')
dmask = mask + 0
did = 0
while True:
did += 1
if did > 3:
return my_fpath_map, my_npath_map, -1
dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1)
ffnode = [fnode for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0 and mask[fnode[0], fnode[1]] == 0 and\
global_mesh.nodes[fnode].get('inpaint_id') != inpaint_id + 1)]
if len(ffnode) > 0:
fnode = ffnode[0]
break
fpath.append((fnode[0], fnode[1]))
disp_diff = 0.
for n_loc in npath:
if mask[n_loc[0], n_loc[1]] != 0:
disp_diff = abs(abs(1. / info_on_pix[(n_loc[0], n_loc[1])][0]['depth']) - abs(1. / ends[0][2]))
break
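        # barrel_dir lists the 8 neighbouring pixel offsets in a fixed circular order;
        # it is rotated below until its first entry matches the near-to-far direction n2f_dir.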
barrel_dir = np.array([[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]])
n2f_dir = (int(fnode[0] - npath[0][0]), int(fnode[1] - npath[0][1]))
while True:
if barrel_dir[0, 0] == n2f_dir[0] and barrel_dir[0, 1] == n2f_dir[1]:
n2f_barrel = barrel_dir.copy()
break
barrel_dir = np.roll(barrel_dir, 1, axis=0)
for step in range(0, len(npath)):
if step == 0:
continue
elif step == 1:
next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
while True:
if barrel_dir[0, 0] == next_dir[0] and barrel_dir[0, 1] == next_dir[1]:
next_barrel = barrel_dir.copy()
break
barrel_dir = np.roll(barrel_dir, 1, axis=0)
barrel_pair = np.stack((n2f_barrel, next_barrel), axis=0)
n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
elif step > 1:
next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
while True:
if barrel_pair[1, 0, 0] == next_dir[0] and barrel_pair[1, 0, 1] == next_dir[1]:
next_barrel = barrel_pair.copy()
break
barrel_pair = np.roll(barrel_pair, 1, axis=1)
n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
new_locs = []
if abs(n2f_dir[0]) == 1:
new_locs.append((npath[step][0] + n2f_dir[0], npath[step][1]))
if abs(n2f_dir[1]) == 1:
new_locs.append((npath[step][0], npath[step][1] + n2f_dir[1]))
if len(new_locs) > 1:
new_locs = sorted(new_locs, key=lambda xx: np.hypot((xx[0] - fpath[-1][0]), (xx[1] - fpath[-1][1])))
break_flag = False
for new_loc | |
#!/usr/bin/env python3
##############################################################################
# Project: arrayfunc
# Purpose: Generate the C code for math operations.
# Language: Python 3.4
# Date: 30-Dec-2017
#
###############################################################################
#
# Copyright 2014 - 2020 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
# ==============================================================================
import itertools
import codegen_common
# ==============================================================================
mathops_head = """//------------------------------------------------------------------------------
// Project: arrayfunc
// Module: %(funclabel)s.c
// Purpose: Calculate the %(funclabel)s of values in an array.
// Language: C
// Date: 15-Nov-2017.
//
//------------------------------------------------------------------------------
//
// Copyright 2014 - 2020 <NAME> <<EMAIL>>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------
/*--------------------------------------------------------------------------- */
// This must be defined before "Python.h" in order for the pointers in the
// argument parsing functions to work properly.
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#include <limits.h>
#include <math.h>
#include "arrayerrs.h"
#include "arrayparams_base.h"
#include "arrayparams_comp.h"
#include "simddefs.h"
#ifdef AF_HASSIMD_X86
#include "%(funclabel)s_simd_x86.h"
#endif
#if defined(AF_HASSIMD_ARMv7_32BIT) || defined(AF_HASSIMD_ARM_AARCH64)
#include "arm_neon.h"
#endif
#if defined(AF_HASSIMD_ARMv7_32BIT)
#include "%(funclabel)s_simd_armv7.h"
#endif
#if defined(AF_HASSIMD_ARM_AARCH64)
#include "%(funclabel)s_simd_armv8.h"
#endif
/*--------------------------------------------------------------------------- */
"""
# ==============================================================================
# The actual compare operations (non-SIMD).
ops_comp = """
/*--------------------------------------------------------------------------- */
/* The following series of functions reflect the different parameter options possible.
arraylen = The length of the data arrays.
data1 = The first data array.
data2 = The second data array.
param = The parameter to be applied to each array element.
*/
// param_arr_num
char %(funclabel)s_%(funcmodifier)s_1(Py_ssize_t arraylen, int nosimd, %(arraytype)s *data1, %(arraytype)s param) {
// array index counter.
Py_ssize_t x;
%(simd_call_1)s
for (x = 0; x < arraylen; x++) {
if (!(data1[x] %(copname)s param)) { return 0; }
}
return 1;
}
// param_num_arr
char %(funclabel)s_%(funcmodifier)s_3(Py_ssize_t arraylen, int nosimd, %(arraytype)s param, %(arraytype)s *data2) {
// array index counter.
Py_ssize_t x;
%(simd_call_3)s
for (x = 0; x < arraylen; x++) {
if (!(param %(copname)s data2[x])) { return 0; }
}
return 1;
}
// param_arr_arr
char %(funclabel)s_%(funcmodifier)s_5(Py_ssize_t arraylen, int nosimd, %(arraytype)s *data1, %(arraytype)s *data2) {
// array index counter.
Py_ssize_t x;
%(simd_call_5)s
for (x = 0; x < arraylen; x++) {
if (!(data1[x] %(copname)s data2[x])) { return 0; }
}
return 1;
}
"""
# ==============================================================================
# ==============================================================================
# The actual compare operations using SIMD operations for x86-64.
ops_simdsupport_x86 = """
/*--------------------------------------------------------------------------- */
/* The following series of functions reflect the different parameter options possible.
arraylen = The length of the data arrays.
data1 = The first data array.
data2 = The second data array.
param = The parameter to be applied to each array element.
*/
// param_arr_num for array code: %(arraycode)s
#if defined(AF_HASSIMD_X86)
char %(funclabel)s_%(funcmodifier)s_1_simd(Py_ssize_t arraylen, %(arraytype)s *data1, %(arraytype)s param) {
// array index counter.
Py_ssize_t index;
// SIMD related variables.
Py_ssize_t alignedlength;
unsigned int y;
%(simdattr)s datasliceleft, datasliceright;
%(simdattr)s resultslice%(SIMD_x86_compslice)s;
%(arraytype)s compvals[%(simdwidth)s];
// Initialise the comparison values.
for (y = 0; y < %(simdwidth)s; y++) {
compvals[y] = param;
}
datasliceright = %(vldinstr)s compvals);
	// Calculate array lengths for arrays whose lengths are not even
	// multiples of the SIMD slice length.
alignedlength = arraylen - (arraylen %% %(simdwidth)s);
// Perform the main operation using SIMD instructions.
// On x86 we have to do this in a round-about fashion for some
// types of comparison operations due to how SIMD works on that
// platform.
for (index = 0; index < alignedlength; index += %(simdwidth)s) {
datasliceleft = %(vldinstr)s &data1[index]);
%(SIMD_x86_arr_num)s
}
	// Compare the left over elements at the end of the array.
for (index = alignedlength; index < arraylen; index++) {
if (!(data1[index] %(compare_ops)s param)) {
return 0;
}
}
return 1;
}
// param_num_arr
char %(funclabel)s_%(funcmodifier)s_3_simd(Py_ssize_t arraylen, %(arraytype)s param, %(arraytype)s *data2) {
// array index counter.
Py_ssize_t index;
// SIMD related variables.
Py_ssize_t alignedlength;
unsigned int y;
%(simdattr)s datasliceleft, datasliceright;
%(simdattr)s resultslice%(SIMD_x86_compslice)s;
%(arraytype)s compvals[%(simdwidth)s];
// Initialise the comparison values.
for (y = 0; y < %(simdwidth)s; y++) {
compvals[y] = param;
}
datasliceleft = %(vldinstr)s compvals);
	// Calculate array lengths for arrays whose lengths are not even
	// multiples of the SIMD slice length.
alignedlength = arraylen - (arraylen %% %(simdwidth)s);
// Perform the main operation using SIMD instructions.
// On x86 we have to do this in a round-about fashion for some
// types of comparison operations due to how SIMD works on that
// platform.
for (index = 0; index < alignedlength; index += %(simdwidth)s) {
datasliceright = %(vldinstr)s &data2[index]);
%(SIMD_x86_num_arr)s
}
	// Compare the left over elements at the end of the array.
for (index = alignedlength; index < arraylen; index++) {
if (!(param %(compare_ops)s data2[index])) {
return 0;
}
}
return 1;
}
// param_arr_arr
char %(funclabel)s_%(funcmodifier)s_5_simd(Py_ssize_t arraylen, %(arraytype)s *data1, %(arraytype)s *data2) {
// array index counter.
Py_ssize_t index;
// SIMD related variables.
Py_ssize_t alignedlength;
%(simdattr)s datasliceleft, datasliceright;
%(simdattr)s resultslice%(SIMD_x86_compslice)s;
	// Calculate array lengths for arrays whose lengths are not even
	// multiples of the SIMD slice length.
alignedlength = arraylen - (arraylen %% %(simdwidth)s);
// Perform the main operation using SIMD instructions.
// On x86 we have to do this in a round-about fashion for some
// types of comparison operations due to how SIMD works on that
// platform.
for (index = 0; index < alignedlength; index += %(simdwidth)s) {
datasliceleft = %(vldinstr)s &data1[index]);
datasliceright = %(vldinstr)s &data2[index]);
%(SIMD_x86_arr_arr)s
}
	// Compare the left over elements at the end of the array.
for (index = alignedlength; index < arraylen; index++) {
if (!(data1[index] %(compare_ops)s data2[index])) {
return 0;
}
}
return 1;
}
#endif
/*--------------------------------------------------------------------------- */
"""
# The actual compare operations using SIMD operations for ARMv7 NEON 32 bit.
ops_simdsupport_armv7 = """
/*--------------------------------------------------------------------------- */
/* ARMv7 32 bit SIMD.
The following series of functions reflect the different parameter options possible.
arraylen = The length of the data arrays.
data1 = The first data array.
data2 = The second data array.
param = The parameter to be applied to each array element.
*/
// param_arr_num
#if defined(AF_HASSIMD_ARMv7_32BIT)
char %(funclabel)s_%(funcmodifier)s_1_simd(Py_ssize_t arraylen, %(arraytype)s *data1, %(arraytype)s param) {
// array index counter.
Py_ssize_t index;
// SIMD related variables.
Py_ssize_t alignedlength;
unsigned int y;
%(simdattr)s datasliceleft, datasliceright;
%(simdrsltattr)s resultslice;
%(arraytype)s compvals[%(simdwidth)s];
// Initialise the comparison values.
for (y = 0; y < %(simdwidth)s; y++) {
compvals[y] = param;
}
datasliceright = %(vldinstr)s( compvals);
	// Calculate array lengths for arrays whose lengths are not even
	// multiples of the SIMD slice length.
alignedlength = arraylen - (arraylen %% %(simdwidth)s);
// Perform the main operation using SIMD instructions.
for (index = 0; index < alignedlength; index += %(simdwidth)s) {
datasliceleft = %(vldinstr)s( &data1[index]);
// The actual SIMD operation.
resultslice = %(SIMD_ARM_comp)s(datasliceleft, datasliceright);
// Compare the results of the SIMD operation.
if (!(%(vresult)s)) {
return 0;
}
}
	// Compare the left over elements at the end of the array.
for (index = alignedlength; index < arraylen; index++) {
if (!(data1[index] %(compare_ops)s param)) {
return 0;
}
}
return 1;
}
// param_num_arr
char %(funclabel)s_%(funcmodifier)s_3_simd(Py_ssize_t arraylen, %(arraytype)s param, %(arraytype)s *data2) {
// array index counter.
Py_ssize_t index;
// SIMD related variables.
Py_ssize_t alignedlength;
unsigned int y;
%(simdattr)s datasliceleft, datasliceright;
%(simdrsltattr)s resultslice;
%(arraytype)s compvals[%(simdwidth)s];
// Initialise the comparison values.
for (y = 0; y < %(simdwidth)s; y++) {
compvals[y] = param;
}
datasliceleft = %(vldinstr)s( compvals);
	// Calculate array lengths for arrays whose lengths are not even
	// multiples of the SIMD slice length.
alignedlength = arraylen - (arraylen %% %(simdwidth)s);
// Perform the main operation using SIMD instructions.
for (index = 0; index < alignedlength; index += %(simdwidth)s) {
datasliceright = %(vldinstr)s( &data2[index]);
// The actual SIMD operation.
resultslice = %(SIMD_ARM_comp)s(datasliceleft, datasliceright);
// Compare the results of the SIMD operation.
if (!(%(vresult)s)) {
return 0;
}
}
	// Compare the left over elements at the end of the array.
for (index = alignedlength; index < arraylen; index++) {
if (!(param %(compare_ops)s data2[index])) {
return 0;
}
}
return 1;
}
// param_arr_arr
char %(funclabel)s_%(funcmodifier)s_5_simd(Py_ssize_t arraylen, %(arraytype)s *data1, %(arraytype)s *data2) {
// array index counter.
Py_ssize_t index;
// SIMD related variables.
Py_ssize_t alignedlength;
%(simdattr)s datasliceleft, datasliceright;
%(simdrsltattr)s resultslice;
	// Calculate array lengths for arrays whose lengths are not even
	// multiples of the SIMD slice length.
alignedlength = arraylen - (arraylen %% %(simdwidth)s);
// Perform the main operation using SIMD instructions.
for (index = 0; index < alignedlength; index += %(simdwidth)s) {
datasliceleft = %(vldinstr)s( &data1[index]);
datasliceright = %(vldinstr)s( &data2[index]);
// The actual SIMD operation.
resultslice = %(SIMD_ARM_comp)s(datasliceleft, datasliceright);
// Compare the results of the SIMD operation.
if (!(%(vresult)s)) {
return 0;
}
}
	// Compare the left over elements at the end of the array.
for (index = alignedlength; index < | |
4 , int , 0 , 255 )
PlotBg = ThemeElementTData( 1 , 0 , 6 , 4 , int , 0 , 255 )
PlotBorder = ThemeElementTData( 1 , 0 , 7 , 4 , int , 0 , 255 )
LegendBg = ThemeElementTData( 1 , 0 , 8 , 4 , int , 0 , 255 )
LegendBorder = ThemeElementTData( 1 , 0 , 9 , 4 , int , 0 , 255 )
LegendText = ThemeElementTData( 1 , 0 , 10 , 4 , int , 0 , 255 )
TitleText = ThemeElementTData( 1 , 0 , 11 , 4 , int , 0 , 255 )
InlayText = ThemeElementTData( 1 , 0 , 12 , 4 , int , 0 , 255 )
XAxis = ThemeElementTData( 1 , 0 , 13 , 4 , int , 0 , 255 )
XAxisGrid = ThemeElementTData( 1 , 0 , 14 , 4 , int , 0 , 255 )
YAxis = ThemeElementTData( 1 , 0 , 15 , 4 , int , 0 , 255 )
YAxisGrid = ThemeElementTData( 1 , 0 , 16 , 4 , int , 0 , 255 )
YAxis2 = ThemeElementTData( 1 , 0 , 17 , 4 , int , 0 , 255 )
YAxisGrid2 = ThemeElementTData( 1 , 0 , 18 , 4 , int , 0 , 255 )
YAxis3 = ThemeElementTData( 1 , 0 , 19 , 4 , int , 0 , 255 )
YAxisGrid3 = ThemeElementTData( 1 , 0 , 20 , 4 , int , 0 , 255 )
Selection = ThemeElementTData( 1 , 0 , 21 , 4 , int , 0 , 255 )
Query = ThemeElementTData( 1 , 0 , 22 , 4 , int , 0 , 255 )
Crosshairs = ThemeElementTData( 1 , 0 , 23 , 4 , int , 0 , 255 )
# Style
LineWeight = ThemeElementTData( 1 , 1 , 0 , 1 , float, 0.0 , 5.0 )
Marker = ThemeElementTData( 1 , 1 , 1 , 1 , float, 0.0 , None )
MarkerSize = ThemeElementTData( 1 , 1 , 2 , 1 , float, 0.0 , 10.0 )
MarkerWeight = ThemeElementTData( 1 , 1 , 3 , 1 , float, 0.0 , 5.0 )
FillAlpha = ThemeElementTData( 1 , 1 , 4 , 1 , float, 0.0 , 1.0 )
ErrorBarSize = ThemeElementTData( 1 , 1 , 5 , 1 , float, 0.0 , 10.0 )
ErrorBarWeight = ThemeElementTData( 1 , 1 , 6 , 1 , float, 0.0 , 5.0 )
DigitalBitHeight = ThemeElementTData( 1 , 1 , 7 , 1 , float, 0.0 , 20.0 )
DigitalBitGap = ThemeElementTData( 1 , 1 , 8 , 1 , float, 0.0 , 20.0 )
PlotBorderSize = ThemeElementTData( 1 , 1 , 9 , 1 , float, 0.0 , 2.0 )
MinorAlpha = ThemeElementTData( 1 , 1 , 10 , 1 , float, 0.0 , 1.0 )
MajorTickLen = ThemeElementTData( 1 , 1 , 11 , 2 , float, 0.0 , 20.0 )
MinorTickLen = ThemeElementTData( 1 , 1 , 12 , 2 , float, 0.0 , 20.0 )
MajorTickSize = ThemeElementTData( 1 , 1 , 13 , 2 , float, 0.0 , 2.0 )
MinorTickSize = ThemeElementTData( 1 , 1 , 14 , 2 , float, 0.0 , 2.0 )
MajorGridSize = ThemeElementTData( 1 , 1 , 15 , 2 , float, 0.0 , 2.0 )
MinorGridSize = ThemeElementTData( 1 , 1 , 16 , 2 , float, 0.0 , 2.0 )
PlotPadding = ThemeElementTData( 1 , 1 , 17 , 2 , float, 0.0 , 20.0 )
LabelPadding = ThemeElementTData( 1 , 1 , 18 , 2 , float, 0.0 , 20.0 )
LegendPadding = ThemeElementTData( 1 , 1 , 19 , 2 , float, 0.0 , 20.0 )
LegendInnerPadding = ThemeElementTData( 1 , 1 , 20 , 2 , float, 0.0 , 10.0 )
LegendSpacing = ThemeElementTData( 1 , 1 , 21 , 2 , float, 0.0 , 5.0 )
MousePosPadding = ThemeElementTData( 1 , 1 , 22 , 2 , float, 0.0 , 20.0 )
AnnotationPadding = ThemeElementTData( 1 , 1 , 23 , 2 , float, 0.0 , 5.0 )
FitPadding = ThemeElementTData( 1 , 1 , 24 , 2 , float, 0.0 , 0.2 )
PlotDefaultSize = ThemeElementTData( 1 , 1 , 25 , 2 , int , 0.0 , 1000 )
PlotMinSize = ThemeElementTData( 1 , 1 , 26 , 2 , int , 0.0 , 300 )
class ThemeCategoryNode(_ThemeCategoryT, Enum):
# Color
NodeBackground = ThemeElementTData( 2 , 0 , 0 , 4 , int , 0 , 255 )
NodeBackgroundHovered = ThemeElementTData( 2 , 0 , 1 , 4 , int , 0 , 255 )
NodeBackgroundSelected = ThemeElementTData( 2 , 0 , 2 , 4 , int , 0 , 255 )
NodeOutline = ThemeElementTData( 2 , 0 , 3 , 4 , int , 0 , 255 )
TitleBar = ThemeElementTData( 2 , 0 , 4 , 4 , int , 0 , 255 )
TitleBarHovered = ThemeElementTData( 2 , 0 , 5 , 4 , int , 0 , 255 )
TitleBarSelected = ThemeElementTData( 2 , 0 , 6 , 4 , int , 0 , 255 )
Link = ThemeElementTData( 2 , 0 , 7 , 4 , int , 0 , 255 )
LinkHovered = ThemeElementTData( 2 , 0 , 8 , 4 , int , 0 , 255 )
LinkSelected = ThemeElementTData( 2 , 0 , 9 , 4 , int , 0 , 255 )
Pin = ThemeElementTData( 2 , 0 , 10 , 4 , int , 0 , 255 )
PinHovered = ThemeElementTData( 2 , 0 , 11 , 4 , int , 0 , 255 )
BoxSelector = ThemeElementTData( 2 , 0 , 12 , 4 , int , 0 , 255 )
BoxSelectorOutline = ThemeElementTData( 2 , 0 , 13 , 4 , int , 0 , 255 )
GridBackground = ThemeElementTData( 2 , 0 , 14 , 4 , int , 0 , 255 )
GridLine = ThemeElementTData( 2 , 0 , 15 , 4 , int , 0 , 255 )
GridLinePrimary = ThemeElementTData( 2 , 0 , 16 , 4 , int , 0 , 255 )
MiniMapBackground = ThemeElementTData( 2 , 0 , 17 , 4 , int , 0 , 255 )
MiniMapBackgroundHovered = ThemeElementTData( 2 , 0 , 18 , 4 , int , 0 , 255 )
MiniMapOutline = ThemeElementTData( 2 , 0 , 19 , 4 , int , 0 , 255 )
MiniMapOutlineHovered = ThemeElementTData( 2 , 0 , 20 , 4 , int , 0 , 255 )
MiniMapNodeBackground = ThemeElementTData( 2 , 0 , 21 , 4 , int , 0 , 255 )
MiniMapNodeBackgroundHovered = ThemeElementTData( 2 , 0 , 22 , 4 , int , 0 , 255 )
MiniMapNodeBackgroundSelected = ThemeElementTData( 2 , 0 , 23 , 4 , int , 0 , 255 )
MiniMapNodeOutline = ThemeElementTData( 2 , 0 , 24 , 4 , int , 0 , 255 )
MiniMapLink = ThemeElementTData( 2 , 0 , 25 , 4 , int , 0 , 255 )
MiniMapLinkSelected = ThemeElementTData( 2 , 0 , 26 , 4 , int , 0 , 255 )
MiniMapCanvas = ThemeElementTData( 2 , 0 , 27 , 4 , int , 0 , 255 )
MiniMapCanvasOutline = ThemeElementTData( 2 , 0 , 28 , 4 , int , 0 , 255 )
# Style
GridSpacing = ThemeElementTData( 2 , 1 , 0 , 1 , float, 0.0 , None )
NodeCornerRounding = ThemeElementTData( 2 , 1 , 1 , 1 , float, 0.0 , None | |
    # wo_zp (flags: {osx, ind, siz} = {1, 1, 1})
def test_wo_zp_stk_relative_indirect_word(self):
stdout = StringIO()
mon = Monitor(stdout = stdout)
mpu = mon._mpu
mpu.osx = True; mpu.ind = True; mpu.siz = True;
# write kernel stack relative indirect byte
mpu.p = mpu.p | 0x20 # set Mode bit
mpu.pc = 0x200
mpu.sp[1] = 0x1FD
self.outVal = 0x55AA
sp = 0x01
mpu.memory[mpu.pc] = sp
mpu.memory[mpu.sp[1] + 1] = 0x01
mpu.memory[mpu.sp[1] + 2] = 0x02
pc = mpu.pc + 1
sk = mpu.sp[1]
mpu.wo_zp(self.reg)
tmp1 = mpu.byteMask & mpu.memory[0x201]
tmp2 = mpu.byteMask & mpu.memory[0x202]
data = mpu.wordMask & ((tmp2 << 8) + tmp1)
self.assertEqual(data, self.outVal)
self.assertEqual(pc, mpu.pc)
self.assertEqual(sk, mpu.sp[1])
        # write user stack relative indirect word
mpu.p = mpu.p & 0xDF # clr Mode bit
mpu.pc = 0x200
mpu.sp[0] = 0x17D
self.outVal = 0x66
sp = 0x01
mpu.memory[mpu.pc] = sp
mpu.memory[mpu.sp[0] + 1] = 0x03
mpu.memory[mpu.sp[0] + 2] = 0x02
pc = mpu.pc + 1
su = mpu.sp[0]
mpu.wo_zp(self.reg)
tmp1 = mpu.byteMask & mpu.memory[0x203]
tmp2 = mpu.byteMask & mpu.memory[0x204]
data = mpu.wordMask & ((tmp2 << 8) + tmp1)
self.assertEqual(data, self.outVal)
self.assertEqual(pc, mpu.pc)
self.assertEqual(su, mpu.sp[0])
# rmw_zp (flags: {osx, ind, siz} = {0, 0, 0})
def test_rmw_zp_byte(self):
stdout = StringIO()
mon = Monitor(stdout = stdout)
mpu = mon._mpu
mpu.osx = False; mpu.ind = False; mpu.siz = False;
mpu.pc = 0x200
self.rmwVal = 0x55
self.rtnVal = 0xAB
self.mask = mpu.byteMask
zp = 0x80
mpu.memory[mpu.pc] = zp
mpu.memory[zp] = self.rmwVal
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
data = mpu.byteMask & mpu.memory[zp]
self.assertEqual(self.rtnVal, data)
self.assertEqual(pc, mpu.pc)
# rmw_zp (flags: {osx, ind, siz} = {0, 0, 1})
def test_rmw_zp_word(self):
stdout = StringIO()
mon = Monitor(stdout = stdout)
mpu = mon._mpu
mpu.osx = False; mpu.ind = False; mpu.siz = True;
# rmw zero page word w/o wrap-around
mpu.pc = 0x200
self.rmwVal = 0x55AA
self.rtnVal = 0xAA56
self.mask = mpu.wordMask
zp = 0x80
mpu.memory[mpu.pc] = zp
mpu.memory[zp] = 0xAA
mpu.memory[zp + 1] = 0x55
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
tmp1 = mpu.byteMask & mpu.memory[zp]
tmp2 = mpu.byteMask & mpu.memory[zp + 1]
data = mpu.wordMask & ((tmp2 << 8) + tmp1)
self.assertEqual(self.rtnVal, data)
self.assertEqual(pc, mpu.pc)
# rmw zero page word w/ wrap-around
mpu.pc = 0x200
self.rmwVal = 0x6633
self.rtnVal = 0x99CD
self.mask = mpu.wordMask
zp = 0xFF
mpu.memory[mpu.pc] = zp
mpu.memory[zp] = 0x33
mpu.memory[mpu.byteMask & (zp + 1)] = 0x66
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
tmp1 = mpu.byteMask & mpu.memory[zp]
tmp2 = mpu.byteMask & mpu.memory[mpu.byteMask & (zp + 1)]
data = mpu.wordMask & ((tmp2 << 8) + tmp1)
self.assertEqual(self.rtnVal, data)
self.assertEqual(pc, mpu.pc)
# rmw_zp (flags: {osx, ind, siz} = {0, 1, 0})
def test_rmw_zp_indirect_byte(self):
stdout = StringIO()
mon = Monitor(stdout = stdout)
mpu = mon._mpu
mpu.osx = False; mpu.ind = True; mpu.siz = False;
# zero page indirect rmw byte w/o wrap-around
mpu.pc = 0x200
self.rmwVal = 0x55
self.rtnVal = 0xAB
self.mask = mpu.byteMask
zp = 0x80
mpu.memory[mpu.pc] = zp
mpu.memory[0x201] = self.rmwVal
mpu.memory[zp] = 0x01
mpu.memory[zp + 1] = 0x02
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
data = mpu.byteMask & mpu.memory[0x201]
self.assertEqual(data, self.rtnVal)
self.assertEqual(pc, mpu.pc)
# zero page indirect rmw byte w/ wrap-around
mpu.pc = 0x200
self.rmwVal = 0x66
self.rtnVal = 0x9A
self.mask = mpu.byteMask
zp = 0xFF
mpu.memory[mpu.pc] = zp
mpu.memory[0x203] = self.rmwVal
mpu.memory[zp] = 0x03
mpu.memory[mpu.byteMask & (zp + 1)] = 0x02
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
data = mpu.byteMask & mpu.memory[0x203]
self.assertEqual(data, self.rtnVal)
self.assertEqual(pc, mpu.pc)
# rmw_zp (flags: {osx, ind, siz} = {0, 1, 1})
def test_rmw_zp_indirect_word(self):
stdout = StringIO()
mon = Monitor(stdout = stdout)
mpu = mon._mpu
mpu.osx = False; mpu.ind = True; mpu.siz = True;
        # zero page indirect rmw word w/o wrap-around
mpu.pc = 0x200
self.rmwVal = 0x55AA
self.rtnVal = 0xAA56
self.mask = mpu.wordMask
zp = 0x80
mpu.memory[mpu.pc] = zp
mpu.memory[0x201] = 0xAA
mpu.memory[0x202] = 0x55
mpu.memory[zp] = 0x01
mpu.memory[zp + 1] = 0x02
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
tmp1 = mpu.byteMask & mpu.memory[0x201]
tmp2 = mpu.byteMask & mpu.memory[0x202]
data = mpu.wordMask & ((tmp2 << 8) + tmp1)
self.assertEqual(data, self.rtnVal)
self.assertEqual(pc, mpu.pc)
        # zero page indirect rmw word w/ wrap-around
mpu.pc = 0x200
self.rmwVal = 0x6633
self.rtnVal = 0x99CD
self.mask = mpu.wordMask
zp = 0xFF
mpu.memory[mpu.pc] = zp
mpu.memory[0x203] = 0x33
mpu.memory[0x204] = 0x66
mpu.memory[zp] = 0x03
mpu.memory[mpu.byteMask & (zp + 1)] = 0x02
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
tmp1 = mpu.byteMask & mpu.memory[0x203]
tmp2 = mpu.byteMask & mpu.memory[0x204]
data = mpu.wordMask & ((tmp2 << 8) + tmp1)
self.assertEqual(data, self.rtnVal)
self.assertEqual(pc, mpu.pc)
# rmw_zp (flags: {osx, ind, siz} = {1, 0, 0})
def test_rmw_zp_stk_relative_byte(self):
stdout = StringIO()
mon = Monitor(stdout = stdout)
mpu = mon._mpu
mpu.osx = True; mpu.ind = False; mpu.siz = False;
# kernel stk relative rmw byte
mpu.p = mpu.p | 0x20 # set M flag
mpu.pc = 0x200
mpu.sp[1] = 0x1FE
self.rmwVal = 0x55
self.rtnVal = 0xAB
self.mask = mpu.byteMask
zp = 0x01
mpu.memory[mpu.pc] = zp
mpu.memory[mpu.sp[1] + 1] = self.rmwVal
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
data = mpu.byteMask & mpu.memory[mpu.sp[1] + 1]
self.assertEqual(self.rtnVal, data)
self.assertEqual(pc, mpu.pc)
# user stk relative rmw byte
mpu.p = mpu.p & 0xDF # clr M flag
mpu.pc = 0x200
mpu.sp[0] = 0x17E
self.rmwVal = 0x33
self.rtnVal = 0xCD
self.mask = mpu.byteMask
zp = 0x01
mpu.memory[mpu.pc] = zp
mpu.memory[mpu.sp[0] + 1] = self.rmwVal
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
data = mpu.byteMask & mpu.memory[mpu.sp[0] + 1]
self.assertEqual(self.rtnVal, data)
self.assertEqual(pc, mpu.pc)
# rmw_zp (flags: {osx, ind, siz} = {1, 0, 1})
def test_rmw_zp_stk_relative_word(self):
stdout = StringIO()
mon = Monitor(stdout = stdout)
mpu = mon._mpu
mpu.osx = True; mpu.ind = False; mpu.siz = True;
# kernel stk relative rmw word
mpu.p = mpu.p | 0x20 # set M flag
mpu.pc = 0x200
mpu.sp[1] = 0x1FD
self.rmwVal = 0x55AA
self.rtnVal = 0xAA56
self.mask = mpu.wordMask
zp = 0x01
mpu.memory[mpu.pc] = zp
mpu.memory[mpu.sp[1] + 1] = 0xAA
mpu.memory[mpu.sp[1] + 2] = 0x55
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
tmp1 = mpu.byteMask & mpu.memory[mpu.sp[1] + 1]
tmp2 = mpu.byteMask & mpu.memory[mpu.sp[1] + 2]
data = mpu.wordMask & ((tmp2 << 8) + tmp1)
self.assertEqual(self.rtnVal, data)
self.assertEqual(pc, mpu.pc)
# user stk relative rmw word
mpu.p = mpu.p & 0xDF # clr M flag
mpu.pc = 0x200
mpu.sp[0] = 0x17D
self.rmwVal = 0x6633
self.rtnVal = 0x99CD
self.mask = mpu.wordMask
zp = 0x01
mpu.memory[mpu.pc] = zp
mpu.memory[mpu.sp[0] + 1] = 0x33
mpu.memory[mpu.sp[0] + 2] = 0x66
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
tmp1 = mpu.byteMask & mpu.memory[mpu.sp[0] + 1]
tmp2 = mpu.byteMask & mpu.memory[mpu.sp[0] + 2]
data = mpu.wordMask & ((tmp2 << 8) + tmp1)
self.assertEqual(self.rtnVal, data)
self.assertEqual(pc, mpu.pc)
# rmw_zp (flags: {osx, ind, siz} = {1, 1, 0})
def test_rmw_zp_stk_relative_indirect_byte(self):
stdout = StringIO()
mon = Monitor(stdout = stdout)
mpu = mon._mpu
mpu.osx = True; mpu.ind = True; mpu.siz = False;
# kernel stk relative indirect rmw byte
mpu.p = mpu.p | 0x20 # set M flag
mpu.pc = 0x200
mpu.sp[1] = 0x1FD
self.rmwVal = 0x55
self.rtnVal = 0xAB
self.mask = mpu.byteMask
zp = 0x01
mpu.memory[mpu.pc] = zp
mpu.memory[mpu.sp[1] + 1] = 0x01
mpu.memory[mpu.sp[1] + 2] = 0x02
mpu.memory[0x201] = 0x55
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
data = mpu.byteMask & mpu.memory[0x201]
self.assertEqual(self.rtnVal, data)
self.assertEqual(pc, mpu.pc)
# user stk relative indirect rmw byte
mpu.p = mpu.p & 0xDF # clr M flag
mpu.pc = 0x200
mpu.sp[0] = 0x17D
self.rmwVal = 0x33
self.rtnVal = 0xCD
self.mask = mpu.byteMask
zp = 0x01
mpu.memory[mpu.pc] = zp
mpu.memory[mpu.sp[0] + 1] = 0x03
mpu.memory[mpu.sp[0] + 2] = 0x02
mpu.memory[0x203] = 0x33
pc = mpu.pc + 1
mpu.rmw_zp(self.rmw)
data = mpu.byteMask & mpu.memory[0x203]
self.assertEqual(self.rtnVal, data)
self.assertEqual(pc, mpu.pc)
# rmw_zp (flags: {osx, ind, siz}
# * initial spins
that.S1 = R( this.S1, start_index ); that.S2 = R( this.S2, start_index )
that.X1 = R( this.X1, start_index ); that.X2 = R( this.X2, start_index )
# * initial angular momenta
that.L1 = R( this.L1, start_index ); that.L2 = R( this.L2, start_index )
# * initial positions / position time series / maybe velocities
that.R1 = R( this.R1, start_index ); that.R2 = R( this.R2, start_index )
# * others
that.S = R( this.S, start_index ); that.J = R( this.J, start_index )
that.L = R( this.L, start_index )
# If source dynamics time series is stored, then rotate that too
if 'dynamics' in this.__dict__:
alert('Attempting to rotate dynamics timeseries in this.dynamics')
#
times_used = this.dynamics['waveform_times']
#
J_ = this.dynamics['J'].copy()
L_ = this.dynamics['L'].copy()
S_ = this.dynamics['S'].copy()
L1_ = this.dynamics['L1'].copy()
L2_ = this.dynamics['L2'].copy()
R1_ = this.dynamics['R1'].copy()
R2_ = this.dynamics['R2'].copy()
S1_ = this.dynamics['S1'].copy()
S2_ = this.dynamics['S2'].copy()
if not angles_are_arrays:
#
# print J.shape, len(J.T), alpha
J = array([rotate3(j, alpha[0], beta[0], gamma[0])
for j in J_])
L = array([rotate3(l, alpha[0], beta[0], gamma[0])
for l in L_])
S = array([rotate3(s, alpha[0], beta[0], gamma[0])
for s in S_])
L1 = array([rotate3(j, alpha[0], beta[0], gamma[0])
for j in L1_])
L2 = array([rotate3(l, alpha[0], beta[0], gamma[0])
for l in L2_])
S1 = array([rotate3(s, alpha[0], beta[0], gamma[0])
for s in S1_])
S2 = array([rotate3(s, alpha[0], beta[0], gamma[0])
for s in S2_])
R1 = array([rotate3(s, alpha[0], beta[0], gamma[0])
for s in R1_])
R2 = array([rotate3(s, alpha[0], beta[0], gamma[0])
for s in R2_])
else:
#
if transform_domain == 'td':
a = spline(this.t, alpha)(times_used)
b = spline(this.t, beta)(times_used)
g = spline(this.t, gamma)(times_used)
J = array([rotate3(J_[k], a[k], b[k], g[k])
for k in range(len(J_[:, 0]))])
L = array([rotate3(L_[k], a[k], b[k], g[k])
for k in range(len(L_[:, 0]))])
S = array([rotate3(S_[k], a[k], b[k], g[k])
for k in range(len(S_[:, 0]))])
R1 = array([rotate3(R1_[k], a[k], b[k], g[k])
for k in range(len(R1_[:, 0]))])
R2 = array([rotate3(R2_[k], a[k], b[k], g[k])
for k in range(len(R2_[:, 0]))])
S1 = array([rotate3(S1_[k], a[k], b[k], g[k])
for k in range(len(S1_[:, 0]))])
S2 = array([rotate3(S2_[k], a[k], b[k], g[k])
for k in range(len(S2_[:, 0]))])
L1 = array([rotate3(L1_[k], a[k], b[k], g[k])
for k in range(len(L1_[:, 0]))])
L2 = array([rotate3(L2_[k], a[k], b[k], g[k])
for k in range(len(L2_[:, 0]))])
else:
#
warning('Dynamics rotations will '+bold(red('not'))+' be performed because FD angles were given. There may be a way to determine the relevant TD angles.')
J,L,S,L1,L2,S1,S2,R1,R2 = J_,L_,S_,L1_,L2_,S1_,S2_,R1_,R2_
# from numpy import pi
# f = this[2,2]['psi4'].dphi/(2*pi)
# _alpha = spline( this.f )
# a = spline(this.t, _alpha)(times_used)
# b = spline(this.t, _beta)(times_used)
# g = spline(this.t, _gamma)(times_used)
# J = array([rotate3(J_[k], a[k], b[k], g[k])
# for k in range(len(J_[:, 0]))])
# L = array([rotate3(L_[k], a[k], b[k], g[k])
# for k in range(len(L_[:, 0]))])
# S = array([rotate3(S_[k], a[k], b[k], g[k])
# for k in range(len(S_[:, 0]))])
# R1 = array([rotate3(R1_[k], a[k], b[k], g[k])
# for k in range(len(R1_[:, 0]))])
# R2 = array([rotate3(R2_[k], a[k], b[k], g[k])
# for k in range(len(R2_[:, 0]))])
# S1 = array([rotate3(S1_[k], a[k], b[k], g[k])
# for k in range(len(S1_[:, 0]))])
# S2 = array([rotate3(S2_[k], a[k], b[k], g[k])
# for k in range(len(S2_[:, 0]))])
# L1 = array([rotate3(L1_[k], a[k], b[k], g[k])
# for k in range(len(L1_[:, 0]))])
# L2 = array([rotate3(L2_[k], a[k], b[k], g[k])
# for k in range(len(L2_[:, 0]))])
#
that.dynamics['J'] = J
that.dynamics['L'] = L
that.dynamics['S'] = S
that.dynamics['L1'] = L1
that.dynamics['L2'] = L2
that.dynamics['S1'] = S1
that.dynamics['S2'] = S2
that.dynamics['R1'] = R1
that.dynamics['R2'] = R2
# Rotate system radiated and remnant quantities
if not ( 'remnant' in this.__dict__ ) :
this.__calc_radiated_quantities__(use_mask=False)
that.remnant = this.remnant
that.radiated = this.radiated
that.old_remnant = copy.deepcopy(this.remnant)
that.old_radiated = copy.deepcopy(this.radiated)
# NOTE that the "old" quantities will be correct values for non-intertial (dynamical) angles
for key in this.remnant:
if isinstance(this.remnant[key],ndarray):
# print key, len(this.remnant[key].shape)
if this.remnant[key].shape[-1] == 3:
if len(alpha) == len(this.remnant['time_used']):
for k in range(len(alpha)):
that.remnant[key][k,:] = R( this.remnant[key][k,:], k )
elif len(alpha)==1:
for k in range( that.remnant[key].shape[0] ):
that.remnant[key][k,:] = R( this.remnant[key][k,:], 0 )
else:
warning('cannot rotate radiated quantities, length mismatch: len alpha is %i, but times are %i'%(len(alpha),len(this.remnant['time_used'])))
print alpha
for key in this.radiated:
if isinstance(this.radiated[key],ndarray):
# print key, len(this.radiated[key].shape)
if (this.radiated[key].shape[0] > 1) and (this.radiated[key].shape[-1] == 3):
if len(alpha) == len(this.radiated['time_used']):
if len(this.radiated[key].shape)>1:
for k in range(len(alpha)):
that.radiated[key][k,:] = R( this.radiated[key][k,:], k )
else:
if key=='J0':
that.radiated[key] = R( this.radiated[key], 0 )
elif len(alpha)==1:
if len(this.radiated[key].shape)>1:
for k in range( that.radiated[key].shape[0] ):
that.old_radiated[key][k,:] = R( this.radiated[key][k,:], 0 )
else:
that.old_radiated[key] = R( this.radiated[key], 0 )
else:
warning('cannot rotate radiated quantities, length mismatch: len alpha is %i, but times are %i'%(len(alpha),len(this.radiated['time_used'])))
print alpha
#
alert('Note that metadata at the scentry level (i.e. this.__scentry__) have not been rotated, but this.Sf, this.R1 and others have been rotated. This includes radiated and remnant quantities.')
# Return answer
return that
# Estimate remnant BH mass and spin from a gwylm object. This is done by "brute" force here (i.e. an actual calculation), but NOTE that values for final mass and spin are automatically loaded within each scentry; however, some of these values may be incorrect -- especially for BAM simulations. Here we make a rough estimate of the remnant mass and spin based on a ringdown fit.
def brute_masspin( this, # IMR gwylm object
T0 = 20, # Time relative to peak strain to start ringdown
T1 = None, # Time relative to peak lum where ringdown ends (if None, gwylm.ringdown sets its value to the end of the waveform approx at noise floor)
apply_result = False, # If true, apply result to input this object
guess=None,
verbose = False ): # Let the people know
'''Estimate Remnant BH mass and spin from gwylm object. This is done by "brute"
force here (i.e. an actual calculation), but NOTE that values for final mass
and spin are automatically loaded within each scentry; however, some of
these values may be incorrect -- especially for BAM simulations. Here we make
a rough estimate of the remnant mass and spin based on a ringdown fit.'''
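# Illustrative usage (hedged sketch, not runnable standalone; assumes ``y`` is an
# existing gwylm object already loaded from simulation data):
#   mf, xf, Q, fitobj = y.brute_masspin( T0=20, apply_result=True, verbose=True )
#   print mf, xf   # estimated remnant mass and dimensionless spin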
# Import useful things
thisfun='gwylm.brute_masspin'
from scipy.optimize import minimize
from nrutils import jf14067295,Mf14067295,remnant
from kerr import qnmfit
# Validate first input type
is_number = isinstance(this,(float,int))
is_gwylm = False if is_number else 'gwylm'==this.__class__.__name__
if not is_gwylm:
msg = 'First input must be member of gwylm class from nrutils.'
error(msg)
# Get the ringdown part starting from 20M after the peak luminosity
g = this.ringdown(T0=T0,T1=T1)
# Define a work function
def action( Mfxf ):
# NOTE that the first psi4 multipole is referenced below.
# There was only one loaded here, so it has to be for l=m=2
f = qnmfit(g.lm[2,2]['psi4'],Mfxf=Mfxf,statsfit=not True,greedy=True)
# f = qnmfit(g.ylm[0],Mfxf=Mfxf)
return f.frmse
if guess is None:
# Use PhenomD fit for guess
eta = this.m1*this.m2/((this.m1+this.m2)**2)
chi1, chi2 = this.S1[-1]/(this.m1**2), this.S2[-1]/(this.m2**2)
# guess_xf = jf14067295( this.m1,this.m2,chi1,chi2 )
# guess_Mf = Mf14067295( this.m1,this.m2,chi1,chi2 )
guess_Mf,guess_xf = remnant(this.m1,this.m2,this.X1[-1],this.X2[-1])
guess = (guess_Mf,guess_xf)
print guess
# Perform the minimization
Q = minimize( action,guess, bounds=[(1-0.999,1),(-0.999,0.999)] )
# Extract the solution
mf,xf = Q.x
#
fo = qnmfit(g.lm[2,2]['psi4'],Mfxf=Q.x,statsfit= not True,greedy=not True)
# Apply to the input gwylm object if requested
if apply_result:
this.mf = mf
this.xf = xf
this.Xf = this.Sf / (mf*mf)
attr = [ 'ylm', 'hlm', 'flm' ]
for atr in attr:
for y in this.__dict__[atr]:
y.mf, y.xf = mf, xf
if ('Sf' in y.__dict__) and ('Xf' in y.__dict__):
y.Xf = y.Sf / (mf*mf)
# Return stuff, including the fit object
return mf,xf,Q,fo
# Estimate the energy radiated for the current collection of GW multipoles
def __calc_radiated_quantities__(this, # The current object
use_mask = True, # Toggle for chopping of noisy data. NOTE use_mask = False is useful if you need radiated quantities of the same length as the original waveforms
ref_orientation = None,
enforce_initial_J_consistency=True,
verbose=False # Toggle for letting the people know
):
''' Reference: https://arxiv.org/pdf/0707.4654.pdf '''
# Import usefuls
from numpy import trapz,pi,arange,isfinite,vstack,array,ones,sign
# Construct
<filename>qiskit/chemistry/aqua_extensions/components/variational_forms/uccsd.py
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This trial wavefunction is a Unitary Coupled-Cluster Single and Double excitations
variational form.
For more information, see https://arxiv.org/abs/1805.04340
"""
import logging
import sys
import numpy as np
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.tools import parallel_map
from qiskit.tools.events import TextProgressBar
from qiskit.aqua import aqua_globals
from qiskit.aqua.operators import WeightedPauliOperator, Z2Symmetries
from qiskit.aqua.components.variational_forms import VariationalForm
from qiskit.chemistry.fermionic_operator import FermionicOperator
logger = logging.getLogger(__name__)
class UCCSD(VariationalForm):
"""
This trial wavefunction is a Unitary Coupled-Cluster Single and Double excitations
variational form.
For more information, see https://arxiv.org/abs/1805.04340
"""
CONFIGURATION = {
'name': 'UCCSD',
'description': 'UCCSD Variational Form',
'input_schema': {
'$schema': 'http://json-schema.org/draft-07/schema#',
'id': 'uccsd_schema',
'type': 'object',
'properties': {
'depth': {
'type': 'integer',
'default': 1,
'minimum': 1
},
'num_orbitals': {
'type': 'integer',
'default': 4,
'minimum': 1
},
'num_particles': {
'type': ['array', 'integer'],
'default': [1, 1],
'contains': {
'type': 'integer'
},
'minItems': 2,
'maxItems': 2
},
'active_occupied': {
'type': ['array', 'null'],
'default': None
},
'active_unoccupied': {
'type': ['array', 'null'],
'default': None
},
'qubit_mapping': {
'type': 'string',
'default': 'parity',
'enum': ['jordan_wigner', 'parity', 'bravyi_kitaev']
},
'two_qubit_reduction': {
'type': 'boolean',
'default': True
},
'num_time_slices': {
'type': 'integer',
'default': 1,
'minimum': 1
},
},
'additionalProperties': False
},
'depends': [
{
'pluggable_type': 'initial_state',
'default': {
'name': 'HartreeFock',
}
},
],
}
def __init__(self, num_qubits, depth, num_orbitals, num_particles,
active_occupied=None, active_unoccupied=None, initial_state=None,
qubit_mapping='parity', two_qubit_reduction=True, num_time_slices=1,
shallow_circuit_concat=True, z2_symmetries=None):
"""Constructor.
Args:
num_qubits (int): number of qubits
depth (int): number of replica of basic module
num_orbitals (int): number of spin orbitals
num_particles (Union(list, int)): number of particles, if it is a list,
the first number is alpha and the second number is beta.
active_occupied (list): list of occupied orbitals to consider as active space
active_unoccupied (list): list of unoccupied orbitals to consider as active space
initial_state (InitialState): An initial state object.
qubit_mapping (str): qubit mapping type.
two_qubit_reduction (bool): two qubit reduction is applied or not.
num_time_slices (int): parameters for dynamics.
z2_symmetries (Z2Symmetries): represent the Z2 symmetries, including symmetries,
sq_paulis, sq_list, tapering_values, and cliffords
shallow_circuit_concat (bool): indicate whether to use shallow (cheap) mode for
circuit concatenation
Raises:
ValueError: Computed qubits do not match actual value
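Example:
Illustrative sketch (the H2-sized numbers below are assumed, not taken from
this module; the HartreeFock initial state is omitted for brevity)::
# 4 spin orbitals, 1 alpha + 1 beta electron; parity mapping with
# two-qubit reduction leaves num_qubits = 4 - 2 = 2.
var_form = UCCSD(num_qubits=2, depth=1, num_orbitals=4,
num_particles=[1, 1], qubit_mapping='parity',
two_qubit_reduction=True)
circuit = var_form.construct_circuit(np.zeros(var_form._num_parameters))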
"""
self.validate(locals())
super().__init__()
self._z2_symmetries = Z2Symmetries([], [], [], []) \
if z2_symmetries is None else z2_symmetries
self._num_qubits = num_orbitals if not two_qubit_reduction else num_orbitals - 2
self._num_qubits = self._num_qubits if self._z2_symmetries.is_empty() \
else self._num_qubits - len(self._z2_symmetries.sq_list)
if self._num_qubits != num_qubits:
raise ValueError('Computed num qubits {} does not match actual {}'
.format(self._num_qubits, num_qubits))
self._depth = depth
self._num_orbitals = num_orbitals
if isinstance(num_particles, list):
self._num_alpha = num_particles[0]
self._num_beta = num_particles[1]
else:
logger.info("We assume that the number of alphas and betas are the same.")
self._num_alpha = num_particles // 2
self._num_beta = num_particles // 2
self._num_particles = [self._num_alpha, self._num_beta]
if sum(self._num_particles) > self._num_orbitals:
raise ValueError('# of particles must be less than or equal to # of orbitals.')
self._initial_state = initial_state
self._qubit_mapping = qubit_mapping
self._two_qubit_reduction = two_qubit_reduction
self._num_time_slices = num_time_slices
self._shallow_circuit_concat = shallow_circuit_concat
self._single_excitations, self._double_excitations = \
UCCSD.compute_excitation_lists([self._num_alpha, self._num_beta], self._num_orbitals,
active_occupied, active_unoccupied)
self._hopping_ops, self._num_parameters = self._build_hopping_operators()
self._bounds = [(-np.pi, np.pi) for _ in range(self._num_parameters)]
self._logging_construct_circuit = True
@property
def single_excitations(self):
"""
Getter of single excitation list
Returns:
list[list[int]]: single excitation list
"""
return self._single_excitations
@property
def double_excitations(self):
"""
Getter of double excitation list
Returns:
list[list[int]]: double excitation list
"""
return self._double_excitations
def _build_hopping_operators(self):
if logger.isEnabledFor(logging.DEBUG):
TextProgressBar(sys.stderr)
results = parallel_map(UCCSD._build_hopping_operator,
self._single_excitations + self._double_excitations,
task_args=(self._num_orbitals,
self._num_particles, self._qubit_mapping,
self._two_qubit_reduction, self._z2_symmetries),
num_processes=aqua_globals.num_processes)
hopping_ops = []
s_e_list = []
d_e_list = []
for op, index in results:
if op is not None and not op.is_empty():
hopping_ops.append(op)
if len(index) == 2:  # for single excitation
s_e_list.append(index)
else: # for double excitation
d_e_list.append(index)
self._single_excitations = s_e_list
self._double_excitations = d_e_list
num_parameters = len(hopping_ops) * self._depth
return hopping_ops, num_parameters
@staticmethod
def _build_hopping_operator(index, num_orbitals, num_particles, qubit_mapping,
two_qubit_reduction, z2_symmetries):
h_1 = np.zeros((num_orbitals, num_orbitals))
h_2 = np.zeros((num_orbitals, num_orbitals, num_orbitals, num_orbitals))
if len(index) == 2:
i, j = index
h_1[i, j] = 1.0
h_1[j, i] = -1.0
elif len(index) == 4:
i, j, k, m = index
h_2[i, j, k, m] = 1.0
h_2[m, k, j, i] = -1.0
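# The +1/-1 antisymmetric pairing above builds T - T^dagger, i.e. an
# anti-Hermitian generator, so the evolution performed in construct_circuit
# yields a unitary excitation operator.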
dummy_fer_op = FermionicOperator(h1=h_1, h2=h_2)
qubit_op = dummy_fer_op.mapping(qubit_mapping)
if two_qubit_reduction:
qubit_op = Z2Symmetries.two_qubit_reduction(qubit_op, num_particles)
if not z2_symmetries.is_empty():
symm_commuting = True
for symmetry in z2_symmetries.symmetries:
symmetry_op = WeightedPauliOperator(paulis=[[1.0, symmetry]])
symm_commuting = qubit_op.commute_with(symmetry_op)
if not symm_commuting:
break
qubit_op = z2_symmetries.taper(qubit_op) if symm_commuting else None
if qubit_op is None:
logger.debug('Excitation (%s) is skipped since it does not commute '
'with symmetries', ','.join([str(x) for x in index]))
return qubit_op, index
def construct_circuit(self, parameters, q=None):
"""
Construct the variational form, given its parameters.
Args:
parameters (numpy.ndarray): circuit parameters
q (QuantumRegister): Quantum Register for the circuit.
Returns:
QuantumCircuit: a quantum circuit with given `parameters`
Raises:
ValueError: the number of parameters is incorrect.
"""
if len(parameters) != self._num_parameters:
raise ValueError('The number of parameters has to be {}'.format(self._num_parameters))
if q is None:
q = QuantumRegister(self._num_qubits, name='q')
if self._initial_state is not None:
circuit = self._initial_state.construct_circuit('circuit', q)
else:
circuit = QuantumCircuit(q)
if logger.isEnabledFor(logging.DEBUG) and self._logging_construct_circuit:
logger.debug("Evolving hopping operators:")
TextProgressBar(sys.stderr)
self._logging_construct_circuit = False
num_excitations = len(self._hopping_ops)
results = parallel_map(UCCSD._construct_circuit_for_one_excited_operator,
[(self._hopping_ops[index % num_excitations], parameters[index])
for index in range(self._depth * num_excitations)],
task_args=(q, self._num_time_slices),
num_processes=aqua_globals.num_processes)
for qc in results:
if self._shallow_circuit_concat:
circuit.data += qc.data
else:
circuit += qc
return circuit
@staticmethod
def _construct_circuit_for_one_excited_operator(qubit_op_and_param, qr, num_time_slices):
qubit_op, param = qubit_op_and_param
qc = qubit_op.evolve(state_in=None, evo_time=param * -1j,
num_time_slices=num_time_slices, quantum_registers=qr)
return qc
@property
def preferred_init_points(self):
"""Getter of preferred initial points based on the given initial state."""
if self._initial_state is None:
return None
else:
bitstr = self._initial_state.bitstr
if bitstr is not None:
return np.zeros(self._num_parameters, dtype=np.float)
else:
return None
@staticmethod
def compute_excitation_lists(num_particles, num_orbitals, active_occ_list=None,
active_unocc_list=None, same_spin_doubles=True):
"""
Computes single and double excitation lists
Args:
num_particles (Union(list, int)): number of particles, if it is a list,
the first number is alpha and the second number is beta.
num_orbitals (int): Total number of spin orbitals
active_occ_list (list): List of occupied orbitals to include, indices are
0 to n where n is max(num_alpha, num_beta)
active_unocc_list (list): List of unoccupied orbitals to include, indices are
0 to m where m is num_orbitals // 2 - min(num_alpha, num_beta)
same_spin_doubles (bool): True to include alpha,alpha and beta,beta double excitations
as well as alpha,beta pairings. False includes only alpha,beta
Returns:
list: Single excitation list
list: Double excitation list
Raises:
ValueError: invalid setting of number of particles
ValueError: invalid setting of number of orbitals
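Example:
For an H2-sized problem (4 spin orbitals, one alpha and one beta electron)
this is expected to return two singles and one double (illustrative only;
the exact ordering follows the loops below)::
singles, doubles = UCCSD.compute_excitation_lists([1, 1], 4)
# singles -> [[0, 1], [2, 3]]
# doubles -> [[0, 1, 2, 3]]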
"""
if isinstance(num_particles, list):
num_alpha = num_particles[0]
num_beta = num_particles[1]
else:
logger.info("We assume that the number of alphas and betas are the same.")
num_alpha = num_particles // 2
num_beta = num_particles // 2
num_particles = num_alpha + num_beta
if num_particles < 2:
raise ValueError('Invalid number of particles {}'.format(num_particles))
if num_orbitals < 4 or num_orbitals % 2 != 0:
raise ValueError('Invalid number of orbitals {}'.format(num_orbitals))
if num_orbitals <= num_particles:
raise ValueError('No unoccupied orbitals')
# convert the user-defined active space for alpha and beta respectively
active_occ_list_alpha = []
active_occ_list_beta = []
active_unocc_list_alpha = []
active_unocc_list_beta = []
if active_occ_list is not None:
active_occ_list = \
[i if i >= 0 else i + max(num_alpha, num_beta) for i in active_occ_list]
for i in active_occ_list:
if i < num_alpha:
active_occ_list_alpha.append(i)
else:
raise ValueError(
'Invalid index {} in active active_occ_list {}'.format(i, active_occ_list))
if i < num_beta:
active_occ_list_beta.append(i)
else:
raise ValueError(
'Invalid index {} in active active_occ_list {}'.format(i, active_occ_list))
else:
active_occ_list_alpha = list(range(0, num_alpha))
active_occ_list_beta = list(range(0, num_beta))
if active_unocc_list is not None:
active_unocc_list = [i + min(num_alpha, num_beta) if i >=
0 else i + num_orbitals // 2 for i in active_unocc_list]
for i in active_unocc_list:
if i >= num_alpha:
active_unocc_list_alpha.append(i)
else:
raise ValueError('Invalid index {} in active active_unocc_list {}'
.format(i,
to solve at point",um
return None
solver.setBiasConfig([])
d1 = robot.distance(n.qa,qm)
d2 = robot.distance(qm,n.qb)
#print d1,d2
#print qm,"->",robot.getConfig()
qm = robot.getConfig()
d1 = robot.distance(n.qa,qm)
d2 = robot.distance(qm,n.qb)
dtotal += d1+d2 - d0
if dtotal > dorig*growthTol or (d1 > scalecond*d0) or (d2 > scalecond*d0):
print "cartesian_interpolate_bisect(): Excessive growth condition reached",d0,d1,d2,"at point",um
print n.qa
print qm
print n.qb
return None
if feasibilityTest and not feasibilityTest(qm):
print "cartesian_interpolate_bisect(): Violation of feasibility test","at point",um
return None
n.left = BisectNode(n.a,m,n.ua,um,n.qa,qm)
n.left.d = d1
n.right = BisectNode(m,n.b,um,n.ub,qm,n.qb)
n.right.d = d2
if d1 < d2:
q.append(n.left)
q.append(n.right)
else:
q.append(n.right)
q.append(n.left)
#done resolving, now output path from left to right of tree
res = RobotTrajectory(robot,[0],[startConfig])
q = [root]
while len(q) > 0:
n = q.pop(-1)
if n.left is None:
#leaf node
res.times.append(n.ub)
res.milestones.append(n.qb)
else:
q.append(n.right)
q.append(n.left)
return res
def cartesian_path_interpolate(robot,path,constraints,
startConfig='robot',endConfig=None,
delta=1e-2,
method='any',
solver=None,
feasibilityTest=None,
numSamples=1000,
maximize=False):
"""Resolves a continuous robot trajectory that follows a cartesian path for a single
link of a robot. Note that the output path is only a kinematic resolution, and may not
respect the robot's velocity / acceleration limits.
Args:
robot: the RobotModel or SubRobotModel.
path: a list of milestones, or a Trajectory for the parameters of the given constraints. In the former
case the milestones are spaced 1s apart in time.
constraints: one or more link indices, link names, or IKObjective's giving the manner
in which the Cartesian space is defined. Interpreted as follows:
* int or str: the specified link's entire pose is constrained
* IKObjective: the links, constraint types, local positions, and local axes are used as constraints.
The world space elements are considered temporary and will change to match the Cartesian trajectory.
* list of int, list of str, or list of IKObjective: concatenates the specified constraints together
startConfig (optional): either 'robot' (configuration taken from the robot), a configuration, or None (any configuration)
endConfig (optional): same type as startConfig.
delta (float, optional): the maximum configuration space distance between points on the output path
method: method used. Can be 'any', 'pointwise', or 'roadmap'.
solver (IKSolver, optional): if provided, an IKSolver configured with the desired parameters for IK
constraint solving.
feasibilityTest (function, optional): a function f(q) that returns false when a configuration q is infeasible
numSamples (int, optional): if 'roadmap' or 'any' method is used, the # of configuration space samples that are used.
maximize (bool, optional): if true, goes as far as possible along the path.
Returns:
RobotTrajectory: a configuration space path that interpolates the Cartesian path, or None if no
solution can be found.
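Example:
Illustrative sketch only (not from the test suite); assumes ``robot`` is a loaded
RobotModel, "tool" is the name of the constrained link, and T0, T1 are
constraint-space milestones for that link (e.g. flattened se3 transforms):
traj = cartesian_path_interpolate(robot, [T0, T1], "tool",
startConfig='robot', delta=1e-2, method='any')
if traj is not None:
print "resolved, duration", traj.duration()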
"""
assert delta > 0,"Spatial resolution must be positive"
if hasattr(path,'__iter__'):
path = Trajectory(range(len(path)),path)
constraints,startConfig,endConfig,solver = _make_canonical(robot,constraints,startConfig,endConfig,solver)
#correct start and goal configurations, if specified
if startConfig:
robot.setConfig(startConfig)
set_cartesian_constraints(path.milestones[0],constraints,solver)
if not solver.isSolved():
if not solver.solve():
print "cartesian_path_interpolate(): Error, initial configuration cannot be solved to match initial Cartesian coordinates"
return None
print "cartesian_path_interpolate(): Warning, initial configuration does not match initial Cartesian coordinates, solving"
startConfig = robot.getConfig()
if endConfig:
robot.setConfig(endConfig)
set_cartesian_constraints(path.milestones[-1],constraints,solver)
if not solver.isSolved():
if not solver.solve():
print "cartesian_path_interpolate(): Error, final configuration cannot be solved to match final Cartesian coordinates"
return None
print "cartesian_path_interpolate(): Warning, final configuration does not match final Cartesian coordinates, solving"
endConfig = robot.getConfig()
#now we're at a canonical setup
if method == 'any' or method == 'pointwise':
#try pointwise resolution first
if startConfig is None:
if ik.solve_global(constraints,solver.getMaxIters(),solver.getTolerance(),solver.getActiveDofs(),max(100,numSamples),feasibilityTest):
startConfig = robot.getConfig()
else:
print "cartesian_path_interpolate(): Error: could not solve for start configuration"
return None
res = RobotTrajectory(robot)
res.times.append(path.times[0])
res.milestones.append(startConfig)
infeasible = False
for i in xrange(len(path.milestones)-1):
if endConfig is None:
segEnd = None
else:
u = (path.times[i+1] - path.times[i])/(path.times[-1] - path.times[i])
segEnd = robot.interpolate(res.milestones[-1],endConfig,u)
robot.setConfig(segEnd)
if solve_cartesian(path.milestones[i+1],constraints,solver):
segEnd = robot.getConfig()
if segEnd is None:
seg = cartesian_interpolate_linear(robot,path.milestones[i],path.milestones[i+1],constraints,
startConfig=res.milestones[-1],endConfig=segEnd,delta=delta,solver=solver,feasibilityTest=feasibilityTest)
else:
seg = cartesian_interpolate_bisect(robot,path.milestones[i],path.milestones[i+1],constraints,
startConfig=res.milestones[-1],endConfig=segEnd,delta=delta,solver=solver,feasibilityTest=feasibilityTest)
if not seg:
print "cartesian_path_interpolate(): Found infeasible cartesian interpolation segment at time",path.times[i+1]
infeasible = True
break
#concatenate
dt = path.times[i+1] - path.times[i]
seg.times = [t*dt for t in seg.times]
res = res.concat(seg,relative=True)
if not infeasible:
#print "Resolved with pointwise interpolation!"
return res
if method == 'pointwise' and maximize:
return res
if method == 'any' or method == 'roadmap':
#TODO: sample on continuous parameterization of path
if path.duration() > 0:
#manual discretization using config.interpolate
numdivs = 20
divpts = [path.startTime() + path.duration()*float(i)/(numdivs-1) for i in xrange(numdivs)]
oldseg = 0
oldu = 0
times = [0]
milestones = [path.milestones[0]]
for t in divpts:
s,u = path.getSegment(t)
if s+1 >= len(path.milestones):
s = len(path.milestones)-2
u = 1
if s == oldseg:
if u != oldu:
times.append(t)
milestones.append(config.interpolate(constraints,path.milestones[s],path.milestones[s+1],u))
else:
for i in range(oldseg+1,s+1):
times.append(path.times[i])
milestones.append(path.milestones[i])
times.append(t)
print s,u
milestones.append(config.interpolate(constraints,path.milestones[s],path.milestones[s+1],u))
oldseg,oldu = s,u
path = path.constructor()(times,milestones)
import random
#mark whether we need to sample the end or start
pathIndices = range(1,len(path.milestones)-1)
if startConfig is None:
pathIndices = [0] + pathIndices
if endConfig is None:
pathIndices = pathIndices + [len(path.milestones)-1]
samp = 0
if startConfig is None:
#need to seed a start configuration
while samp < numSamples:
samp += 1
solver.sampleInitial()
if solve_cartesian(path.milestones[0],constraints,solver):
if feasibilityTest is None or feasibilityTest(robot.getConfig()):
startConfig = robot.getConfig()
break
if endConfig is None:
#need to seed an end configuration
samp = 0
while samp < numSamples:
samp += 1
if samp > 0:
solver.sampleInitial()
else:
robot.setConfig(startConfig)
if solve_cartesian(path.milestones[-1],constraints,solver):
if feasibilityTest is None or feasibilityTest(robot.getConfig()):
endConfig = robot.getConfig()
break
if startConfig is None or endConfig is None:
print "cartesian_path_interpolate(): Exhausted all samples, perhaps endpoints are unreachable"
return None
selfMotionManifolds = [[] for i in path.milestones]
nodes = []
configs = []
ccs = []
edges = []
def findpath(depth):
#start and goal are connected! find a path through the edges list using BFS
eadj = [[] for n in nodes]
for (i,j,p) in edges:
eadj[i].append((j,p))
q = deque()
parent = [None]*len(nodes)
for c in selfMotionManifolds[0]:
q.append(c)
#print "Adjacency list"
#for i,alist in enumerate(eadj):
# print nodes[i],": ",' '.join(str(nodes[j]) for (j,p) in alist)
while len(q) > 0:
n = q.pop()
for c,p in eadj[n]:
if parent[c] is not None:
continue
parent[c] = n
if nodes[c][0] == depth:
print "cartesian_path_interpolate(): Found a path using roadmap after",samp,"samples"
#arrived at goal node, trace parent list back
npath = []
n = c
while c is not None:
npath.append(c)
c = parent[c]
npath = [n for n in reversed(npath)]
print ' '.join(str(nodes[n]) for n in npath)
assert nodes[npath[0]][0] == 0,"Didn't end up at a start configuration?"
res = RobotTrajectory(robot)
res.times.append(path.times[0])
res.milestones.append(configs[npath[0]])
for i,n in enumerate(npath[:-1]):
found = False
for j,p in eadj[n]:
if j == npath[i+1]:
#print "Suffix",p.times[0],p.times[-1]
#print res.times[0],res.times[-1]
res = res.concat(p,relative=False)
#print "Resulting range",res.times[0],res.times[-1]
found = True
break
assert found,"Internal error? "+str(nodes[npath[i]])+" -> "+str(nodes[npath[i+1]])
return res
q.append(c)
print "cartesian_path_interpolate(): Path to depth",depth,"could not be found"
return None
selfMotionManifolds[0].append(0)
configs.append(startConfig)
nodes.append((0,0))
ccs.append(0)
selfMotionManifolds[-1].append(1)
configs.append(endConfig)
nodes.append((len(path.milestones)-1,0))
ccs.append(1)
for samp in xrange(samp,numSamples):
irand = random.choice(pathIndices)
solver.sampleInitial()
#check for successful sample on self motion manifold, test feasibility
if not solve_cartesian(path.milestones[irand],constraints,solver):
continue
x = robot.getConfig()
if feasibilityTest is not None and not feasibilityTest(x):
continue
#add to data structure
nx = len(nodes)
nodes.append((irand,len(selfMotionManifolds[irand])))
ccs.append(nx)
assert len(ccs) == nx+1
selfMotionManifolds[irand].append(nx)
configs.append(x)
#try connecting to other nodes
k = int(math.log(samp+2)) + 2
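# (the number of candidate neighbours grows logarithmically with the sample count)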
#brute force k-nearest neighbor
d = []
for i,n in enumerate(nodes[:-1]):
if n[0] == irand:
continue
dist = config.distance(constraints,path.milestones[n[0]],path.milestones[irand])
dist = robot.distance(x,configs[i])
d.append((dist,i))
k = min(k,len(d))
print "cartesian_path_interpolate(): Sampled at time point",irand,"checking",k,"potential connections"
totest = [v[1] for v in sorted(d)[:k]]
for n in totest:
i = irand
j = nodes[n][0]
qi = x
qj = configs[n]
ni = nx
nj = n
if ccs[ni] == ccs[nj]:
#same connected component, use visibility graph technique
continue
if i > j:
i,j = j,i
qi,qj = qj,qi
ni,nj = nj,ni
pij = path.constructor()(path.times[i:j+1],path.milestones[i:j+1])
#try connecting edges
t = cartesian_path_interpolate(robot,pij,constraints,
startConfig=qi,endConfig=qj,delta=delta,method='pointwise',solver=solver,feasibilityTest=feasibilityTest)
#t = cartesian_interpolate_bisect(robot,path.milestones[i],path.milestones[j],constraints,qi,qj,delta=delta,solver=solver,feasibilityTest=feasibilityTest)
if t is None:
print " Failed edge",nodes[ni],"->",nodes[nj]
continue
#t.times = [path.times[i] + v*(path.times[j]-path.times[i]) for v in t.times]
print " Added edge",nodes[ni],"->",nodes[nj]
edges.append((ni,nj,t))
if ccs[ni] != ccs[nj]:
#not in same connected component,
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# The methods found in this file are adapted from a repository under Apache 2.0:
# eBay's Pythonic Bayesian Belief Network Framework.
# @online{
# author = {<NAME>,<NAME>},
# title = {bayesian-belief-networks},
# organisation = {eBay},
# codebase = {https://github.com/eBay/bayesian-belief-networks},
# }
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import copy
from collections import Counter
import pytest
from causalnex.ebaybbn import (
BBNNode,
JoinTree,
JoinTreeCliqueNode,
SepSet,
build_bbn,
build_bbn_from_conditionals,
build_join_tree,
combinations,
make_moralized_copy,
make_node_func,
make_undirected_copy,
priority_func,
triangulate,
)
from causalnex.ebaybbn.exceptions import (
VariableNotInGraphError,
VariableValueNotInDomainError,
)
from causalnex.ebaybbn.graph import Node, UndirectedNode
from causalnex.ebaybbn.utils import get_args, get_original_factors, make_key
def r3(x):
return round(x, 3)
def r5(x):
return round(x, 5)
class TestBBN:
def test_get_graphviz_source(self, sprinkler_graph):
gv_src = """digraph G {
graph [ dpi = 300 bgcolor="transparent" rankdir="LR"];
Cloudy [ shape="ellipse" color="blue"];
Rain [ shape="ellipse" color="blue"];
Sprinkler [ shape="ellipse" color="blue"];
WetGrass [ shape="ellipse" color="blue"];
Cloudy -> Rain;
Cloudy -> Sprinkler;
Rain -> WetGrass;
Sprinkler -> WetGrass;
}
"""
assert sprinkler_graph.get_graphviz_source() == gv_src
def test_get_original_factors(self, huang_darwiche_nodes):
original_factors = get_original_factors(huang_darwiche_nodes)
assert original_factors["a"] == huang_darwiche_nodes[0]
assert original_factors["b"] == huang_darwiche_nodes[1]
assert original_factors["c"] == huang_darwiche_nodes[2]
assert original_factors["d"] == huang_darwiche_nodes[3]
assert original_factors["e"] == huang_darwiche_nodes[4]
assert original_factors["f"] == huang_darwiche_nodes[5]
assert original_factors["g"] == huang_darwiche_nodes[6]
assert original_factors["h"] == huang_darwiche_nodes[7]
def test_build_graph(self, huang_darwiche_nodes):
bbn = build_bbn(huang_darwiche_nodes)
nodes = {node.name: node for node in bbn.nodes}
assert nodes["f_a"].parents == []
assert nodes["f_b"].parents == [nodes["f_a"]]
assert nodes["f_c"].parents == [nodes["f_a"]]
assert nodes["f_d"].parents == [nodes["f_b"]]
assert nodes["f_e"].parents == [nodes["f_c"]]
assert nodes["f_f"].parents == [nodes["f_d"], nodes["f_e"]]
assert nodes["f_g"].parents == [nodes["f_c"]]
assert nodes["f_h"].parents == [nodes["f_e"], nodes["f_g"]]
def test_make_undirecred_copy(self, huang_darwiche_dag):
ug = make_undirected_copy(huang_darwiche_dag)
nodes = {node.name: node for node in ug.nodes}
assert set(nodes["f_a"].neighbours) == set([nodes["f_b"], nodes["f_c"]])
assert set(nodes["f_b"].neighbours) == set([nodes["f_a"], nodes["f_d"]])
assert set(nodes["f_c"].neighbours) == set(
[nodes["f_a"], nodes["f_e"], nodes["f_g"]]
)
assert set(nodes["f_d"].neighbours) == set([nodes["f_b"], nodes["f_f"]])
assert set(nodes["f_e"].neighbours) == set(
[nodes["f_c"], nodes["f_f"], nodes["f_h"]]
)
assert set(nodes["f_f"].neighbours) == set([nodes["f_d"], nodes["f_e"]])
assert set(nodes["f_g"].neighbours) == set([nodes["f_c"], nodes["f_h"]])
assert set(nodes["f_h"].neighbours) == set([nodes["f_e"], nodes["f_g"]])
def test_make_moralized_copy(self, huang_darwiche_dag):
gu = make_undirected_copy(huang_darwiche_dag)
gm = make_moralized_copy(gu, huang_darwiche_dag)
nodes = {node.name: node for node in gm.nodes}
assert set(nodes["f_a"].neighbours) == set([nodes["f_b"], nodes["f_c"]])
assert set(nodes["f_b"].neighbours) == set([nodes["f_a"], nodes["f_d"]])
assert set(nodes["f_c"].neighbours) == set(
[nodes["f_a"], nodes["f_e"], nodes["f_g"]]
)
assert set(nodes["f_d"].neighbours) == set(
[nodes["f_b"], nodes["f_f"], nodes["f_e"]]
)
assert set(nodes["f_e"].neighbours) == set(
[nodes["f_c"], nodes["f_f"], nodes["f_h"], nodes["f_d"], nodes["f_g"]]
)
assert set(nodes["f_f"].neighbours) == set([nodes["f_d"], nodes["f_e"]])
assert set(nodes["f_g"].neighbours) == set(
[nodes["f_c"], nodes["f_h"], nodes["f_e"]]
)
assert set(nodes["f_h"].neighbours) == set([nodes["f_e"], nodes["f_g"]])
def test_triangulate(self, huang_darwiche_moralized):
# Because of ties in the priority q we will
# override the priority function here to
# insert tie breakers to ensure the same
# elimination ordering as Darwiche Huang.
def priority_func_override(node):
introduced_arcs = 0
cluster = [node] + node.neighbours
for node_a, node_b in combinations(cluster, 2):
if node_a not in node_b.neighbours:
assert node_b not in node_a.neighbours
introduced_arcs += 1
introduced_arcs_dict = {
"f_h": [introduced_arcs, 0],
"f_g": [introduced_arcs, 1],
"f_c": [introduced_arcs, 2],
"f_b": [introduced_arcs, 3],
"f_d": [introduced_arcs, 4],
"f_e": [introduced_arcs, 5],
"others": [introduced_arcs, 10],
}
if node.name in introduced_arcs_dict:
return introduced_arcs_dict[node.name]
return introduced_arcs_dict["others"]
cliques, elimination_ordering = triangulate(
huang_darwiche_moralized, priority_func_override
)
nodes = {node.name: node for node in huang_darwiche_moralized.nodes}
assert len(cliques) == 6
assert cliques[0].nodes == set([nodes["f_e"], nodes["f_g"], nodes["f_h"]])
assert cliques[1].nodes == set([nodes["f_c"], nodes["f_e"], nodes["f_g"]])
assert cliques[2].nodes == set([nodes["f_d"], nodes["f_e"], nodes["f_f"]])
assert cliques[3].nodes == set([nodes["f_a"], nodes["f_c"], nodes["f_e"]])
assert cliques[4].nodes == set([nodes["f_a"], nodes["f_b"], nodes["f_d"]])
assert cliques[5].nodes == set([nodes["f_a"], nodes["f_d"], nodes["f_e"]])
assert elimination_ordering == [
"f_h",
"f_g",
"f_f",
"f_c",
"f_b",
"f_d",
"f_e",
"f_a",
]
# Now lets ensure the triangulated graph is
# the same as Darwiche Huang fig. 2 pg. 13
nodes = {node.name: node for node in huang_darwiche_moralized.nodes}
assert set(nodes["f_a"].neighbours) == set(
[nodes["f_b"], nodes["f_c"], nodes["f_d"], nodes["f_e"]]
)
assert set(nodes["f_b"].neighbours) == set([nodes["f_a"], nodes["f_d"]])
assert set(nodes["f_c"].neighbours) == set(
[nodes["f_a"], nodes["f_e"], nodes["f_g"]]
)
assert set(nodes["f_d"].neighbours) == set(
[nodes["f_b"], nodes["f_f"], nodes["f_e"], nodes["f_a"]]
)
assert set(nodes["f_e"].neighbours) == set(
[
nodes["f_c"],
nodes["f_f"],
nodes["f_h"],
nodes["f_d"],
nodes["f_g"],
nodes["f_a"],
]
)
assert set(nodes["f_f"].neighbours) == set([nodes["f_d"], nodes["f_e"]])
assert set(nodes["f_g"].neighbours) == set(
[nodes["f_c"], nodes["f_h"], nodes["f_e"]]
)
assert set(nodes["f_h"].neighbours) == set([nodes["f_e"], nodes["f_g"]])
def test_triangulate_no_tie_break(self, huang_darwiche_moralized):
# Now let's see what happens if
# we don't enforce the tie-breakers...
# It seems the triangulated graph is
# different, adding edges from d to c
# and b to c
# Will be interesting to see whether
# inference will still be correct.
triangulate(huang_darwiche_moralized)
nodes = {node.name: node for node in huang_darwiche_moralized.nodes}
assert set(nodes["f_a"].neighbours) == set([nodes["f_b"], nodes["f_c"]])
assert set(nodes["f_b"].neighbours) == set(
[nodes["f_a"], nodes["f_d"], nodes["f_c"]]
)
assert set(nodes["f_c"].neighbours) == set(
[nodes["f_a"], nodes["f_e"], nodes["f_g"], nodes["f_b"], nodes["f_d"]]
)
assert set(nodes["f_d"].neighbours) == set(
[nodes["f_b"], nodes["f_f"], nodes["f_e"], nodes["f_c"]]
)
assert set(nodes["f_e"].neighbours) == set(
[nodes["f_c"], nodes["f_f"], nodes["f_h"], nodes["f_d"], nodes["f_g"]]
)
assert set(nodes["f_f"].neighbours) == set([nodes["f_d"], nodes["f_e"]])
assert set(nodes["f_g"].neighbours) == set(
[nodes["f_c"], nodes["f_h"], nodes["f_e"]]
)
assert set(nodes["f_h"].neighbours) == set([nodes["f_e"], nodes["f_g"]])
def test_build_join_tree(self, huang_darwiche_dag):
def priority_func_override(node):
introduced_arcs = 0
cluster = [node] + node.neighbours
for node_a, node_b in combinations(cluster, 2):
if node_a not in node_b.neighbours:
assert node_b not in node_a.neighbours
introduced_arcs += 1
introduced_arcs_dict = {
"f_h": [introduced_arcs, 0],
"f_g": [introduced_arcs, 1],
"f_c": [introduced_arcs, 2],
"f_b": [introduced_arcs, 3],
"f_d": [introduced_arcs, 4],
"f_e": [introduced_arcs, 5],
"others": [introduced_arcs, 10],
}
if node.name in introduced_arcs_dict:
return introduced_arcs_dict[node.name]
return introduced_arcs_dict["others"]
jt = build_join_tree(huang_darwiche_dag, priority_func_override)
for node in jt.sepset_nodes:
assert {n.clique for n in node.neighbours} == {node.sepset.X, node.sepset.Y}
# clique nodes.
def test_initialize_potentials(self, huang_darwiche_jt, huang_darwiche_dag):
# Seems like there can be multiple assignments so
# for this test we will set the assignments explicitly
cliques = {node.name: node for node in huang_darwiche_jt.nodes}
bbn_nodes = {node.name: node for node in huang_darwiche_dag.nodes}
assignments = {
cliques["Clique_ACE"]: [bbn_nodes["f_c"], bbn_nodes["f_e"]],
cliques["Clique_ABD"]: [
bbn_nodes["f_a"],
bbn_nodes["f_b"],
bbn_nodes["f_d"],
],
}
huang_darwiche_jt.initialize_potentials(assignments, huang_darwiche_dag)
for node in huang_darwiche_jt.sepset_nodes:
for v in node.potential_tt.values():
assert v == 1
# Note that in H&D there are two places that show
# initial potentials, one is for ABD and AD
# and the second is for ACE and CE
# We should test both here but we must enforce
# the assignments above because alternate and
# equally correct Junction Trees will give
# different potentials.
def r(x):
return round(x, 3)
tt = cliques["Clique_ACE"].potential_tt
assert r(tt[("a", True), ("c", True), ("e", True)]) == 0.21
assert r(tt[("a", True), ("c", True), ("e", False)]) == 0.49
assert r(tt[("a", True), ("c", False), ("e", True)]) == 0.18
assert r(tt[("a", True), ("c", False), ("e", False)]) == 0.12
assert r(tt[("a", False), ("c", True), ("e", True)]) == 0.06
assert r(tt[("a", False), ("c", True), ("e", False)]) == 0.14
assert r(tt[("a", False), ("c", False), ("e", True)]) == 0.48
assert r(tt[("a", False), ("c", False), ("e", False)]) == 0.32
tt = cliques["Clique_ABD"].potential_tt
assert r(tt[("a", True), ("b", True), ("d", True)]) == 0.225
assert r(tt[("a", True), ("b", True), ("d", False)]) == 0.025
assert r(tt[("a", True), ("b", False), ("d", True)]) == 0.125
assert r(tt[("a", True), ("b", False), ("d", False)]) == 0.125
assert r(tt[("a", False), ("b", True), ("d", True)]) == 0.180
assert r(tt[("a", False), ("b", True), ("d", False)]) == 0.020
assert r(tt[("a", False), ("b", False), ("d", True)]) == 0.150
assert r(tt[("a", False), | |
initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`, `optional`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- A path or url to a model folder containing a `flax checkpoint file` in `.msgpack` format (e.g,
``./flax_model/`` containing ``flax_model.msgpack``). In this case, ``from_flax`` should be set
to :obj:`True`.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
model_args (sequence of positional arguments, `optional`):
All remaning positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`Union[PretrainedConfig, str, os.PathLike]`, `optional`):
Can be either:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`,
- a string or path valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`Union[str, os.PathLike]`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
from_flax (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a Flax checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
checkpoint with 3 labels).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (i.e., do not try to download the model).
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
mirror(:obj:`str`, `optional`):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility
problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
Please refer to the mirror site for more information.
_fast_init(:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to disable fast initialization.
torch_dtype (:obj:`str` or :obj:`torch.dtype`, `optional`):
Override the default ``torch.dtype`` and load the model under this dtype. If ``"auto"`` is passed the
dtype will be automatically derived from the model's weights.
.. warning::
One should only disable `_fast_init` to ensure backwards compatibility with
``transformers.__version__ < 4.6.0`` for seeded model initialization. This argument will be removed
at the next major version. See `pull request 11471
<https://github.com/huggingface/transformers/pull/11471>`__ for more information.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
.. note::
Activate the special `"offline-mode"
<https://huggingface.co/transformers/installation.html#offline-mode>`__ to use this method in a firewalled
environment.
Examples::
>>> from transformers import BertConfig, BertModel
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BertModel.from_pretrained('bert-base-uncased')
>>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
>>> model = BertModel.from_pretrained('./test/saved_model/')
>>> # Update configuration during loading.
>>> model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True)
>>> assert model.config.output_attentions == True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
>>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
>>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower)
>>> model = BertModel.from_pretrained('bert-base-uncased', from_flax=True)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
from_flax = kwargs.pop("from_flax", False)
ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
_fast_init = kwargs.pop("_fast_init", True)
torch_dtype = kwargs.pop("torch_dtype", None)
from_pt = not (from_tf | from_flax)
user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
_from_auto=from_auto_class,
_from_pipeline=from_pipeline,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from
= 0.17 )
########################################################################
# Plotting functions that don't have supported dependencies
########################################################################
def get_power_spectrum_RProfile(yprof_path, rprof_path, r0, t_lim=None, t_res=None, l_max=6):
'''
Calculates power spectra of radial velocity
Parameters
----------
yprof_path/rprof_path: strings
paths for matching rprofile and yprofile files
r0: float
radius to examine
t_lim: 2-element array
[t_min, t_max], time range of the data to examine
t_res: int
number of data points between [t_min, t_max]; None = ndumps
l_max: int
maximum spherical harmonic degree l
Returns
--------
t,l,power : time, spherical harmonic degree l, and power vectors
'''
yprof = ppm.yprofile(yprof_path, filename_offset=-1)
n_buckets = 80
rp_set = rprofile.rprofile_set(rprof_path)
n_dumps = rp_set.dumps[-1]
if t_lim is None:
t_min = rp_set.get_dump(1).get('time')
t_max = rp_set.get_dump(n_dumps).get('time')
t_lim = [t_min, t_max]
if t_res is None:
t_res = n_dumps
t = np.linspace(t_lim[0], t_lim[1], t_res)
l = np.arange(0, l_max + 1)
power = np.zeros((len(t), l_max + 1))
for i in range(len(t)):
dump = int(yprof.get('Ndump', fname=t[i], numtype='t', silent=True)[-1])
if dump > n_dumps:
dump = n_dumps
rp = rp_set.get_dump(dump)
r = rp.get_table('y')
idx0 = np.argmin(np.abs(r - r0))
vx = rp.get_table('ux')
vy = rp.get_table('uy')
vz = rp.get_table('uz')
centers = rp.get_centers()
lat = np.zeros(n_buckets)
lon = np.zeros(n_buckets)
vr = np.zeros(n_buckets)
for bucket in range(n_buckets):
x = centers[0, bucket]
y = centers[1, bucket]
z = centers[2, bucket]
bucket_r = (x**2 + y**2 + z**2)**0.5
lat[bucket] = 90. - (180./np.pi)*np.arccos(z/bucket_r)
lon[bucket] = (180./np.pi)*np.arctan2(y, x)
r_norm = np.array([x, y, z])/bucket_r
v = np.array([vx[0, idx0, bucket+1], \
vy[0, idx0, bucket+1], \
vz[0, idx0, bucket+1]])
vr[bucket] = np.dot(v, r_norm)
coeffs, _ = SHExpandLSQ(vr, lat, lon, l_max)
power[i, :] = spectrum(coeffs, convention='power', unit='per_l')
return t, l, power
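# Minimal usage sketch (not part of the original module): it assumes only that
# yprof_path/rprof_path point at a matching YProfile/RProfile pair as described in
# the docstring above, and it chains the plotting routine defined just below.
def example_power_spectrum(yprof_path, rprof_path, r0, ifig=1):
    """Compute and plot the radial-velocity power spectrum at radius r0."""
    t, l, power = get_power_spectrum_RProfile(yprof_path, rprof_path, r0, l_max=6)
    plot_power_spectrum_RProfile(t, l, power, ifig=ifig, title='r0 = {:.1f}'.format(r0))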
def plot_power_spectrum_RProfile(t, l, power, ifig=1, title='', vmin=1e-2, vmax=1.):
'''
Plots power spectra of radial velocity
Parameters
----------
t,l,power : arrays
time, spherical harmonic degree and power generated by
get_power_spectrum_RProfile()
title : string
title
'''
ifig = ifig; pl.close(ifig); pl.figure(ifig, figsize=(8., 5.), dpi=125)
extent = (t[0]/60., t[-1]/60., l[0] - 0.5, l[-1] + 0.5)
aspect = 0.5*(extent[1] - extent[0])/(extent[3] - extent[2])
max_power = np.max(power)
norm = LogNorm(vmin=vmin*max_power, vmax=vmax*max_power, clip=True)
#norm = Normalize(vmin=0., vmax=max_power, clip=True)
pl.imshow(np.transpose(np.abs(power)), origin='lower', extent=extent, aspect=aspect, \
interpolation='nearest', norm=norm, cmap='viridis')
cb = pl.colorbar()
cb.set_label('m$^2$ (s$^2$ l)$^{-1}$')
pl.xlabel('t / min')
pl.ylabel('l')
pl.title(title, y=1.025)
ax0= pl.gca()
ax0.get_yaxis().set_tick_params(direction='out')
ax0.get_xaxis().set_tick_params(direction='out')
def bucket_map(rprofile, quantity, limits = None, ticks = None, file_name = None, time = None):
'''
Plots a Mollweide projection of the rprofile object using the mpl_toolkits.basemap package
Parameters
-----------
rprofile: rprofile object
rprofile dump used just for geometry
quantity: array
data to be passed into the projection
limits: 2 index array
colormap limits; the colormap is scaled from limits[0] = min to
limits[1] = max
ticks:
passed to matplotlib.colorbar.ColorbarBase; see the ColorbarBase documentation
file_name: string
file name: '/path/filename' to save the image as
time: float
time to display as the title
'''
q = quantity#rp.get_table(quantity)[0, :, :]
#r = rp.get('y')
#idx_r0 = np.argmin(np.abs(r - r0))
corners = rprofile.get_corners()
corners_per_bucket = corners.shape[1]
n_buckets = corners.shape[2]
points_per_side = 10
points_per_bucket = corners_per_bucket*points_per_side
x = np.zeros((n_buckets, points_per_bucket))
y = np.zeros((n_buckets, points_per_bucket))
z = np.zeros((n_buckets, points_per_bucket))
t = np.linspace(1., 0., num = points_per_side)
for i in range(n_buckets):
for k in range(corners_per_bucket):
idx_range = list(range(points_per_side*k, points_per_side*(k + 1)))
x[i, idx_range] = t*corners[0, k - 1, i] + (1. - t)*corners[0, k, i]
y[i, idx_range] = t*corners[1, k - 1, i] + (1. - t)*corners[1, k, i]
z[i, idx_range] = t*corners[2, k - 1, i] + (1. - t)*corners[2, k, i]
radius = (x**2 + y**2 + z**2)**0.5
phi = np.arctan2(y, x)
theta = np.pi/2. - np.arccos(z/radius)
eps = 1e-3
for i in range(phi.shape[0]):
for k in range(phi.shape[1] - 1):
# if the vertex k is at one of the poles
if (np.abs(theta[i, k] - 0.5*np.pi) < eps or
np.abs(theta[i, k] + 0.5*np.pi) < eps):
if (theta[i, k] == theta[i, k - 1] and
phi[i, k] == phi[i, k - 1]):
phi[i, k - 1] = phi[i, k - 2]
phi[i, k] = phi[i, k + 1]
# A first estimate of how many rows will be needed. We need more
# than n_buckets, because we have to slice the polygons that
# lie at the boundaries of the plot.
n_rows_est = int(np.round(1.25*n_buckets))
phi2 = np.zeros((n_rows_est, points_per_bucket))
theta2 = np.zeros((n_rows_est, points_per_bucket))
value = np.zeros(n_rows_est)
n_rows = 0
for i in range(n_buckets):
# Add more rows if necessary.
if n_rows >= phi2.shape[0]:
n_rows_add = int(np.round(0.25*phi2.shape[0]))
phi2 = np.vstack((phi2, np.zeros((n_rows_add, points_per_bucket))))
theta2 = np.vstack((theta2, np.zeros((n_rows_add, points_per_bucket))))
value = np.append(value, np.zeros(n_rows_add))
this_phi = np.copy(phi[i, :])
this_theta = np.copy(theta[i, :])
this_value = q[i]# np.log10(q[idx_r0, i])
if not (np.min(this_phi) < -0.5*np.pi and np.max(this_phi) > 0.5*np.pi):
# This polygon doesn't touch the boundaries of the plot. Original
# coordinates can be used directly.
phi2[n_rows, :] = this_phi
theta2[n_rows, :] = this_theta
value[n_rows] = this_value
n_rows += 1
else:
# This polygon lies on the boundary of the plot. We have to slice it into
# two polygons -- one on the left side of the plot and one on the right.
# First add the one on the right.
this_phi2 = np.copy(this_phi)
for k in range(points_per_bucket):
if this_phi2[k] <= -0.:
this_phi2[k] = np.pi
phi2[n_rows, :] = this_phi2
theta2[n_rows, :] = this_theta
value[n_rows] = this_value
n_rows += 1
# Now add the one on the left.
this_phi2 = np.copy(this_phi)
for k in range(points_per_bucket):
if this_phi2[k] >= 0.:
this_phi2[k] = -np.pi
phi2[n_rows, :] = this_phi2
theta2[n_rows, :] = this_theta
value[n_rows] = this_value
n_rows += 1
# Trim the arrays to the actual size of the data.
if n_rows < phi2.shape[0]:
phi2 = phi2[0:n_rows, :]
theta2 = theta2[0:n_rows, :]
value = value[0:n_rows]
#ifig = 1; plt.close(ifig); fig = plt.figure(ifig, figsize = (9, 4))
#ifig = 1; plt.close(ifig); fig = plt.figure(ifig, figsize = (3.39, 2.4))
pl.clf()
gs = gridspec.GridSpec(2, 1, height_ratios = [12, 1])
ax0 = pl.subplot(gs[0])
ax1 = pl.subplot(gs[1])
m = Basemap(projection = 'moll', lon_0 = 0., ax = ax0)
cmap_min = np.min(quantity)
cmap_max = np.max(quantity)
cmap_avg = np.sum(quantity)/float(len(quantity))
if limits is not None:
cmap_min = limits[0]
cmap_max = limits[1]
cmap_avg = 0.5*(cmap_min + cmap_max)
if len(limits) > 2:
cmap_avg = limits[2]
cmap_avg_rel = (cmap_avg - cmap_min)/(cmap_max - cmap_min)
gamma = 1.0
c1 = np.array([95, 158, 209])/255.
c2 = np.array([255, 255, 255])/255.
c3 = np.array([200, 82, 0])/255.
cmap_points = {'red': ((0.0, 0.0, c1[0]),
(cmap_avg_rel**gamma, c2[0], c2[0]),
(1.0, c3[0], 0.0)),
'green': ((0.0, 0.0, c1[1]),
(cmap_avg_rel**gamma, c2[1], c2[1]),
(1.0, c3[1], 0.0)),
'blue': ((0.0, 0.0, c1[2]),
(cmap_avg_rel**gamma, c2[2], c2[2]),
(1.0, c3[2], 0.0))
}
cmap = LinearSegmentedColormap('my_cmap', cmap_points, gamma = gamma)
#cmap_name = 'gist_earth_r'
#cmap = plt.get_cmap(cmap_name)
for i in range(phi2.shape[0]):
t = (value[i] - cmap_min)/(cmap_max - cmap_min)
if t < 0: t = 0.
if t > 1: t = 1.
facecolor = cmap(t)
x, y = m((180./np.pi)*phi2[i, :], (180./np.pi)*theta2[i, :])
xy = list(zip(x, y))
poly = Polygon(xy, facecolor = facecolor, edgecolor = facecolor, lw = 0.25)
ax0.add_patch(poly)
#m.drawmapboundary(color = 'k', linewidth = 1.5)
m.drawmapboundary(color = 'k', fill_color = 'none', zorder = 10000)
#ax0.set_title(cmap_name)
if time is not None:
ax0.set_title('t = {:.0f} min'.format(time/60.))
def fmt(x, pos):
return '{: .2f}'.format(x)
norm = matplotlib.colors.Normalize(vmin = cmap_min, vmax = cmap_max)
cb = ColorbarBase(ax1, cmap = cmap, norm = norm, ticks = ticks, \
format = ticker.FuncFormatter(fmt), orientation='horizontal')
cb.set_label(r'$\Delta$r$_\mathrm{ub}$ / Mm')
#pl.tight_layout(h_pad = 2.)
pl.show()
if file_name is not None:
#plt.savefig(file_name + '_' + cmap_name + '.pdf', bbox_inches = 'tight', facecolor = 'w', dpi = 332.7)
pl.savefig(file_name, bbox_inches = 'tight', facecolor = 'w', dpi = 332.7)
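# Hedged usage sketch (not from the original module): rp_set is assumed to be an
# rprofile_set as used above, and quantity is assumed to be an array with one value
# per bucket for the chosen dump.
def example_bucket_map(rp_set, dump, quantity, limits=None, file_name=None):
    """Render one dump's per-bucket quantity as a Mollweide map."""
    rp = rp_set.get_dump(dump)
    bucket_map(rp, quantity, limits=limits, file_name=file_name, time=rp.get('time'))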
def plot_Mollweide(rp_set, dump_min, dump_max, r1, r2, output_dir = None, Filename = None, ifig = 2):
'''
Plot Mollweide spherical projection plot
Parameters
----------
dump_min/dump_max: int
Range of file numbers you want to use in the histogram
r1/r2: float
This function will only search for the convective
boundary in the range between r1 and r2
output_dir: string
path to output directory
Filename: string
name for output file, None: no output
Examples
--------
data_path = "/rpod2/PPM/RProfiles/AGBTP_M2.0Z1.e-5/F4"
rp_set = rprofile.rprofile_set(data_path)
plot_Mollweide(rp_set, 100,209,7.4,8.4)
'''
pl.close(ifig); fig = pl.figure(ifig, figsize = (3.384, 2.))
fig.patch.set_facecolor('w')
fig.patch.set_alpha(1.)
dr_ub_avg = np.zeros(80)
n = 0
ract in replayactions:
item = getitem(ract, "addChatItemAction", "item", "liveChatTextMessageRenderer")
if item:
addchatitem(item)
item = getitem(act, "addChatItemAction", "item", "liveChatTextMessageRenderer")
if item:
addchatitem(item)
return cmtlist, ms
def monitorchat(self, js):
while True:
cont = getitem(js, "continuationContents", "liveChatContinuation", "continuations", 0, "invalidationContinuationData", "continuation")
respjson = self.yt.getlivechat(cont)
if self.args.debug:
print("============ comment req")
print(respjson.decode('utf-8'))
print()
js = json.loads(respjson)
cmtlist, newms = self.extractchat(js)
for author, time, comment in cmtlist:
print("--->", time, author)
print(extracttext(comment))
sys.stdout.flush()
import time  # rebind the name locally: the loop variable above shadows "time"
time.sleep(1)
class CommentReader:
"""
class which can recursively print comments
"""
def __init__(self, args, yt, cfg):
self.args = args
self.yt = yt
self.contclick, self.xsrf = self.getcommentinfo(cfg)
def recursecomments(self, cc=None, level=0):
if not cc:
cc = self.contclick
while cc:
cmtjson = self.yt.getcomments(cc, self.xsrf, replies=(level>0))
if self.args.debug:
print("============ comment req")
print(cmtjson.decode('utf-8'))
print()
if not cmtjson:
raise Exception("empty response")
js = json.loads(cmtjson)
if type(js)==list:
# this is for 'replies', which returns an array instead of a dict as the top-level response.
js = getitem(js, ("response",))
cmtlist, cc = self.extractcomments(js)
for author, when, comment, likes, replies, subcc in cmtlist:
if self.args.verbose:
print("---" * (level+1) + ">", "%s ; %s ; %s likes ; %s replies" % (author, when, likes, replies))
else:
print("---" * (level+1) + ">", author)
print(extracttext(comment))
if subcc:
self.recursecomments(subcc, level+1)
def getcommentinfo(self, cfg):
"""
Find the base parameters for querying the video's comments.
"""
item = getitem(cfg, "initdata", "contents", "twoColumnWatchNextResults", "results", "results", "contents")
cont = getcontinuation(getitem(item, ("itemSectionRenderer",), "itemSectionRenderer"))
xsrftoken = getitem(cfg, "ytcfg", "XSRF_TOKEN")
xsrffield = getitem(cfg, "ytcfg", "XSRF_FIELD_NAME")
xsrfdict = { xsrffield: xsrftoken } if xsrftoken else {}
return cont, xsrfdict
def getcomment(self, p):
"""
Return info for a single comment.
"""
if "commentThreadRenderer" in p:
p = p["commentThreadRenderer"]
c = p
r = p
if "comment" in c:
c = c["comment"]
if "commentRenderer" in c:
c = c["commentRenderer"]
if "replies" in r:
r = r["replies"]
author = getitem(c, "authorText", "simpleText")
content = getitem(c, "contentText")
likes = getitem(c, "likeCount")
nrreplies = getitem(c, "replyCount")
when = extracttext(getitem(c, "publishedTimeText"))
replies = getitem(r, "commentRepliesRenderer")
if replies:
cont = getcontinuation(replies)
else:
cont = None
return author, when, content, int(likes or 0), int(nrreplies or 0), cont
def extractcomments(self, js):
"""
Extract a list of comments from comment dictionary
"""
p = getitem(js, "response", "continuationContents")
if not p:
print("non contents found in continuation")
return [], None
if "itemSectionContinuation" in p:
p = p["itemSectionContinuation"]
elif "commentRepliesContinuation" in p:
p = p["commentRepliesContinuation"]
cmtlist = []
contents = p.get("contents")
if contents:
for c in contents:
cmtlist.append(self.getcomment(c))
# header.commentsHeaderRenderer -> commentsCount at same level as 'contents'
return cmtlist, getcontinuation(p)
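# Hypothetical wrapper (not part of the original script): args, yt and cfg are
# assumed to come from the surrounding command-line and page-config setup.
def example_dump_comments(args, yt, cfg):
    """Print a video's full, nested comment tree using the class above."""
    CommentReader(args, yt, cfg).recursecomments()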
class SearchReader:
def __init__(self, args, yt, cfg):
self.args = args
self.yt = yt
self.cfg = cfg
def getresults(self, js):
ct = getitem(js, "contents", "twoColumnSearchResultsRenderer", "primaryContents", "sectionListRenderer", "contents")
if not ct:
ct = getitem(js, "onResponseReceivedCommands", 0, "appendContinuationItemsAction", "continuationItems")
resultlist = getitem(ct, ("itemSectionRenderer",), "itemSectionRenderer", "contents")
cont = getitem(ct, ("continuationItemRenderer",), "continuationItemRenderer", "continuationEndpoint", "continuationCommand", "token")
return resultlist, cont
def recursesearch(self):
resultlist, cont = self.getresults(getitem(self.cfg, "initdata"))
while True:
for item in resultlist:
if video := item.get("videoRenderer"):
vid = getitem(video, "videoId")
pub = getitem(video, "publishedTimeText", "simpleText")
title = getitem(video, "title")
# title -> runs
# descriptionSnippet -> runs
# publishedTimeText -> simpleText
# lengthText -> simpleText
# viewCountText -> simpleText
# ownerText -> runs
print("%s - %s" % (vid, extracttext(title)))
elif chan := item.get("channelRenderer"):
cid = getitem(chan, "channelId")
title = getitem(chan, "title", "simpleText")
# "videoCountText" -> runs
# subscriberCountText -> simpleText
# descriptionSnippet -> runs
print("%s - %s" % (cid, title))
jstext = self.yt.getsearch(cont)
js = json.loads(jstext)
resultlist, cont = self.getresults(js)
class DetailReader:
"""
Extract some details for a video from the config.
"""
def __init__(self, args, yt, cfg):
self.args = args
self.yt = yt
self.cfg = cfg
def output(self):
vd = getitem(self.cfg, "initplayer", "videoDetails")
mf = getitem(self.cfg, "initplayer", "microformat", "playerMicroformatRenderer")
twocol = getitem(self.cfg, "initdata", "contents", "twoColumnWatchNextResults", "results", "results", "contents")
sentiment = getitem(twocol, ("videoPrimaryInfoRenderer",), "videoPrimaryInfoRenderer", "sentimentBar", "sentimentBarRenderer", "tooltip")
if not mf:
print("microformat not found")
return
vc = int(mf.get("viewCount"))
ls = cvseconds(mf.get("lengthSeconds"))
pd = cvdate(mf.get("publishDate"))
ud = cvdate(mf.get("uploadDate"))
desc = getitem(mf, "description", "simpleText")
vid = vd.get("videoId")
title = getitem(mf, "title", "simpleText")
owner = getitem(mf, "ownerChannelName")
print("%s - %s" % (vid, title))
print("By: %s" % (owner))
print()
print("viewcount: %d, length: %s, sentiment: %s, published: %s%s" % (vc, ls, sentiment, pd, "" if pd==ud else ", uploaded at: %s" % ud))
print()
print("%s" % desc)
print()
class SubtitleReader:
"""
class which can print a video's subtitles
"""
def __init__(self, args, yt, cfg):
self.args = args
self.yt = yt
self.cfg = cfg
def languagematches(self, language, ct):
"""
Match a captionTrack record to the language filter.
"""
if language == 'asr' and ct.get('kind') == 'asr':
return True
if ct["name"]["simpleText"] == language:
return True
if ct["languageCode"] == language:
return True
def output(self):
js = getitem(self.cfg, "initplayer")
p = getitem(js, "captions", "playerCaptionsTracklistRenderer", "captionTracks")
if not p:
print("no subtitles found")
return
captiontracks = p
# filter subtitles based on language
if self.args.language:
captiontracks = self.filtertracks(self.args.language, captiontracks)
for ct in captiontracks:
if len(captiontracks) > 1:
print("### %s ###" % ct["name"]["simpleText"])
self.outputsubtitles(ct["baseUrl"])
if len(captiontracks) > 1:
print()
def filtertracks(self, language, captiontracks):
matchedtracks = defaultdict(list)
for ct in captiontracks:
if not self.languagematches(language, ct):
continue
matchedtracks[ct["languageCode"]].append(ct)
filteredlist = []
for lang, tracks in matchedtracks.items():
if len(tracks) > 1:
# prefer non automated translation
tracks = filter(lambda ct:ct.get("kind") != "asr", tracks)
filteredlist.extend(tracks)
return filteredlist
def outputsubtitles(self, cturl):
ttxml = self.yt.httpreq(cturl)
if self.args.debug:
print("========== timedtext xml")
print(ttxml.decode('utf-8'))
print()
tt = self.extractxmltext(ttxml)
if self.args.srt:
self.output_srt(tt)
elif self.args.verbose:
for t0, t1, txt in tt:
print("%s %s" % (self.formattime(t0), txt))
else:
for t0, t1, txt in tt:
print(txt)
@staticmethod
def formattime(t):
m = int(t/60) ; t -= 60*m
h = int(m/60) ; m -= 60*h
return "%d:%02d:%06.3f" % (h, m, t)
@staticmethod
def srttime(t):
return SubtitleReader.formattime(t).replace('.', ',')
@staticmethod
def output_srt(tt):
n = 1
for t0, t1, txt in tt:
print(n)
print("%s --> %s" % (SubtitleReader.srttime(t0), SubtitleReader.srttime(t1)))
print(txt)
print()
@staticmethod
def unhtml(htmltext):
"""
Removes html font tags, and decodes html entities
"""
return html.unescape(re.sub(r'</?font[^>]*>', '', htmltext))
def extractxmltext(self, xml):
"""
Returns a list of tuples: time, endtime, text
"""
lines = []
tstart = None
tend = None
text = None
def handle_begin_element(elem, attr):
nonlocal text, tstart, tend
if elem == 'text':
text = ""
tstart = float(attr.get('start'))
tend = tstart + float(attr.get('dur'))
def handle_end_element(elem):
nonlocal text
if elem == 'text':
lines.append((tstart, tend, self.unhtml(text)))
text = None
def handle_data(data):
nonlocal text
if text is not None:
text += data
parser = ParserCreate()
parser.StartElementHandler = handle_begin_element
parser.EndElementHandler = handle_end_element
parser.CharacterDataHandler = handle_data
parser.Parse(xml, 1)
return lines
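# Small illustration (not in the original): the two timestamp helpers above differ
# only in the decimal separator, matching the SRT convention of a comma.
def example_subtitle_timestamps(t=3723.5):
    """Return e.g. ('1:02:03.500', '1:02:03,500') for t = 3723.5 seconds."""
    return SubtitleReader.formattime(t), SubtitleReader.srttime(t)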
class PlaylistReader:
"""
class which can print a playlist's contents.
"""
def __init__(self, args, yt, cfg):
self.args = args
self.yt = yt
self.cfg = cfg
def output(self):
# ==== [ 'playlistVideoRenderer', 1, 'contents', 'playlistVideoListRenderer', 0, 'contents', 'itemSectionRenderer', 0, 'contents', 'sectionListRenderer', 'content', 'tabRenderer', 0, 'tabs', 'twoColumnBrowseResultsRenderer', 'contents', 'response', 1]
# ==== ['gridVideoRenderer', 1, 'items', 'horizontalListRenderer', 'content', 'shelfRenderer', 0,
# 'contents', 'itemSectionRenderer', 1, 'contents', 'sectionListRenderer', 'content', 'tabRenderer', 0,
# 'tabs', 'twoColumnBrowseResultsRenderer', 'contents', 'response', 1]
playlist = getitem(self.cfg, "initdata", "contents", "twoColumnWatchNextResults", "playlist")
if playlist:
print("Title: %s" % getitem(playlist, "playlist", "title"))
for entry in getitem(playlist, "playlist", "contents"):
vid = getitem(entry, "playlistPanelVideoRenderer", "videoId")
title = getitem(entry, "playlistPanelVideoRenderer", "title", "simpleText")
length = getitem(entry, "playlistPanelVideoRenderer", "lengthText", "simpleText")
if self.args.verbose:
print("%s - %s %s" % (vid, length, title))
else:
print("%s - %s" % (vid, title))
return
tabs = getitem(self.cfg, "initdata", "contents", "twoColumnBrowseResultsRenderer", "tabs", 0, "tabRenderer", "content")
ct1 = getitem(tabs, "sectionListRenderer", "contents", 0, "itemSectionRenderer", "contents", 0)
playlist = getitem(ct1, "playlistVideoListRenderer")
list_tag = "contents"
entry_tag = "playlistVideoRenderer"
if not playlist:
playlist = getitem(ct1, "shelfRenderer", "content", 'horizontalListRenderer')
list_tag = "items"
entry_tag = "gridVideoRenderer"
if playlist:
cont = None
for entry in playlist[list_tag]:
vid = getitem(entry, entry_tag, "videoId")
title = getitem(entry, entry_tag, "title")
if vid and title:
print("%s - %s" % (vid, extracttext(title)))
c = getitem(entry, "continuationItemRenderer", "continuationEndpoint", "continuationCommand", "token")
if c:
cl = getitem(entry, "continuationItemRenderer", "continuationEndpoint", "clickTrackingParams")
cont = c, cl
if not cont:
cont = getcontinuation(playlist)
while cont:
browsejson = self.yt.browse(cont)
# weak_strong_convergence_revision.py
from l96 import l96
from l96 import l96_jacobian
from l96 import alpha
from l96 import rho
import numpy as np
import pickle
import ipdb
import sys
########################################################################################################################
# Euler-Maruyama path
def em_step_path(x, xi, h, args):
"""This will propagate the state x one step forward by euler-murayama
step size is h and the weiner process is assumed to have a scalar diffusion coefficient"""
# unpack the arguments for the integration step
[f, diffusion] = args
# rescale the standard normal to variance h
W = xi * np.sqrt(h)
# step forward by interval h
x_step = x + h * l96(x, f) + diffusion * W
return x_step
########################################################################################################################
# Stochastic Runge-Kutta, 4 step
# This is the four-step Runge-Kutta scheme for Stratonovich calculus, described in Hansen and Penland 2005
# The rule has strong convergence order 1
def rk_step_path(x, xi, h, args):
"""One step of integration rule for l96 4 step stratonovich runge kutta
Here it is assumed that the Brownian motion is given a priori, and we wish to reconstruct the path"""
# unpack the arguments
[f, diffusion] = args
# rescale the standard normal to variance h
W = xi * np.sqrt(h)
# Define the four terms of the RK scheme recursively
k1 = l96(x, f) * h + diffusion * W
k2 = l96(x + .5 * k1, f) * h + diffusion * W
k3 = l96(x + .5 * k2, f) * h + diffusion * W
k4 = l96(x + k3, f) * h + diffusion * W
return x + (1 / 6) * (k1 + 2*k2 + 2*k3 + k4)
########################################################################################################################
# 2nd order strong taylor SDE step
# This method is derived from page 359, NUMERICAL SOLUTIONS OF STOCHASTIC DIFFERENTIAL EQUATIONS, KLOEDEN & PLATEN;
# this uses the approximate Stratonovich integrals defined on page 202
# this depends on rho and alpha as above
def ty_step_path(x, xi, h, args):
"""One step of integration rule for l96 second order taylor rule
Here it is assumed that the Brownian motion is given a priori, and we wish to reconstruct it using this
discretization scheme"""
# Infer system dimension
sys_dim = len(x)
# unpack the args for the integration step
# note that a and b are computed directly via the brownian bridge process, up to a truncation of b. This is
# performed outside of the integration step for this conceptual only simulation
[a, b, p, f, diffusion] = args
# Compute the deterministic dxdt and the jacobian equations
dx = l96(x, f)
Jac_x = l96_jacobian(x)
# vector of first order Stratonovich integrals
J_pdelta = (h / 2) * (np.sqrt(h) * xi + a)
def Psi(l, j):
# psi will be a generic function of the indices l and j; we will define psi plus and psi minus via this
psi = h**2 * xi[l] * xi[j] / 3 + h**(1.5) * (xi[l] * a[j] + xi[j] * a[l]) / 4 + h * a[l] * a[j] / 2 \
- h**(1.5) * (xi[l] * b[j] + xi[j] * b[l]) / (2 * np.pi)
return psi
# we define the approximations of the second order Stratonovich integral
psi_plus = np.array([Psi((i-1) % sys_dim, (i+1) % sys_dim) for i in range(sys_dim)])
psi_minus = np.array([Psi((i-2) % sys_dim, (i-1) % sys_dim) for i in range(sys_dim)])
# the final vectorized step forward is given as
x_step = x + dx * h + h**2 * .5 * Jac_x @ dx # deterministic taylor step
x_step += diffusion * np.sqrt(h) * xi # stochastic euler step
x_step += diffusion * Jac_x @ J_pdelta # stochastic first order taylor step
x_step += diffusion**2 * (psi_plus - psi_minus) # stochastic second order taylor step
return x_step
########################################################################################################################
# auxiliary function to compute the fourier coefficients of the brownian bridge
def compute_a_b(w, fine_delta, coarse_delta, p):
# the array w is the data over which we compute the brownian bridge
# Delta is the time step of the fine scale Brownian process
# p is the order of truncation of the Brownian bridge process
[sys_dim, n_steps] = np.shape(w)
# compute the cumulative brownian paths
w = np.cumsum(w, axis=1)
# and the weighted vector of the final value, with respect to tau / coarse_delta
W_tau_delta = np.tile( np.linspace( 1 / n_steps, 1, n_steps), [sys_dim, 1]).transpose() * np.squeeze(w[:, -1])
# the brownian bridge is computed at each point
bridge = (w.transpose() - W_tau_delta).transpose()
# compute a directly from the zeroth order fourier coefficient, via the definition of the right reimann sum
a = (2 / coarse_delta) * np.sum( bridge * fine_delta, axis=1)
# we compute the b by the sin fourier components, up to the truncation at p
b = np.zeros(sys_dim)
for r in range(1, p+1):
# define the matrix of the sin coefficients for the discretized brownian bridge
sin_cof = np.sin( 2 * np.pi * r * np.linspace( 1 / n_steps, 1, n_steps) )
sin_cof = np.tile(sin_cof, [sys_dim, 1])
b += (1 / r) * (2 / coarse_delta) * np.sum( bridge * sin_cof * fine_delta, axis=1)
return a, b
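# Hedged sketch (not part of the original experiment): advances all three coarse
# schemes one step of size h from the same state x, driven by the same fine-scale
# noise. W is assumed to hold standard normals per fine step of size fine_delta
# (as in experiment() below); the rescaling to a coarse increment here is this
# sketch's own convention, not taken from the original call sites.
def example_single_coarse_step(x, W, h, fine_delta, f, diffusion, p=1):
    """Return (x_em, x_rk, x_ty) after one coarse step of size h."""
    dW = W * np.sqrt(fine_delta)              # fine Brownian increments
    xi = np.sum(dW, axis=1) / np.sqrt(h)      # equivalent coarse standard normal
    a, b = compute_a_b(dW, fine_delta, h, p)  # Brownian-bridge Fourier coefficients
    x_em = em_step_path(x, xi, h, [f, diffusion])
    x_rk = rk_step_path(x, xi, h, [f, diffusion])
    x_ty = ty_step_path(x, xi, h, [a, b, p, f, diffusion])
    return x_em, x_rk, x_ty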
########################################################################################################################
def experiment(args):
"""This experiment will test strong convergence of the EM, RK and order 2.0 Taylor scheme.
We will initialize a fine realization of the EM scheme with discretization step Delta**gamma (0.5**18 below) as the basis for
comparison. We run the EM, RK and Taylor at coarser discretizations to test at what order they match the above"""
####################################################################################################################
# we load the arguments for the experiment, including initial condition, diffusion coefficient, forcing f, and
# initial seed
[x_init, sample_num, diff, p, f] = args
sys_dim = len(x_init)
# number of ensemble members generated from the initial condition
N_ens = 2
# T is the time horizon, this should be order .5 or less
T = 0.125
# Delta is the base for the coarse and fine integration step sizes
Delta = .5
# gamma is the order of exponent to which the fine integration step size
# will be taken, defined by Delta**gamma
gamma = 18
# fine scale integration step for the benchmark euler-maruyama scheme
fine_Delta = Delta**gamma
# beta controls how fine the coarse simulation is --- note that gamma
# must be greater than 2 X beta for the second order scheme to benchmark
# against the first order scheme
beta = 9
# coarse integration step sizes
dt = Delta**np.arange(5, beta + 1)
# storage for comparison between simulations
ty = np.zeros([len(dt), N_ens, sys_dim])
em = np.zeros([len(dt), N_ens, sys_dim])
rk = np.zeros([len(dt), N_ens, sys_dim])
truth = np.ones([N_ens, sys_dim])
for N in range(N_ens):
# reset the seed over the ensemble number for independent noise sequences from the same initial condition
# seed = N
#np.random.seed(seed)
# we compute the integration recursively in the state x, reinitialize with the same state but different
# noise realizations
x = x_init
W = np.zeros([sys_dim, int(round(T / fine_Delta))])
for i in range(int(round(T / fine_Delta))):
# step forward the reference path
w = np.random.standard_normal(sys_dim)
x = em_step_path(x, w, fine_Delta, [f, diff])
W[:, i] = w
# we store the true solution only at the final time when we make the analysis of the absolute difference
truth[N, :] = x
# then with respect to the N-th ensemble, noise process
for i in range(len(dt)):
# cycle through the coarse grained discretizations
h = dt[i]
# define the number of discretization points in the coarse scale, over the forecast time T
nanl = int(round(T / h))
# define the coarse time windows of length t_w
t_w = int(len(W[0, :]) / nanl)
# initialize the euler maruyama, taylor and runge-kutta schemes
x_em = x_init
x_ty = x_init
x_rk = x_init
for k in range(nanl):
# define the coarse brownian increments over the time windows
# of length t_w, but rescaled to standard normals
xi = np.sum( W[:, k *
# CMS/calibration_rhod.py
#! /usr/bin/env python3
from argparse import ArgumentParser
import csv
from datetime import datetime
import logging
import json
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
import optuna
import cmsmodel
import hatmodel_ug_rhod
import hatmodel_mw
builders = {
"hatmodel_ug_rhod": hatmodel_ug_rhod.build_model,
"hatmodel_mw": hatmodel_mw.build_model
}
import clr
# Normally I would do this down in if __name__ == "__main__", but we need the path to compartments[.exe] for the clr.AddReference call below.
parser = ArgumentParser()
# The default value here will work if the .NET assembly "compartments" is in the PYTHONPATH.
# If you are using the pycms docker container, this will be the case. Note that the default value
# doesn't have ".exe" at the end of it.
parser.add_argument("-c", "--compartments", default="bin/compartments", help="Specify full path to compartments.exe")
parser.add_argument("-b", "--database", type=str, default="sqlite:///hat-studies.db", help="Study database name ['hat-studies.db']")
parser.add_argument("-d", "--data", type=Path, default=Path("HAT_ug_Tbr_calibr.csv"), help="Target case data for calibration")
parser.add_argument("-m", "--model", type=str, default="hatmodel_ug_rhod", help=f"model name - [{', '.join(list(builders.keys()))}]")
parser.add_argument("-n", "--name", type=str, default="default", help="Study name ['default']")
parser.add_argument("-p", "--png", action="store_true", help="Save output to a .png file")
parser.add_argument("-r", "--repetitions", type=int, default=3, help="Number of CMS solver repetitions per trial")
parser.add_argument("-s", "--surveillance", type=float, default=0.0833, help="Estimated surveillance rate")
parser.add_argument("-t", "--trials", type=int, default=10, help="Number of trials in study [10]")
parser.add_argument("-v", "--verbosity", type=str, default="INFO", help="{DEBUG|INFO|WARN|ERROR|CRITICAL}")
parser.add_argument("-w", "--working", type=Path, default=None, help="Working directory path")
parser.add_argument("-y", "--years", type=int, default=45, help="Number of years in simulation [45]")
args = parser.parse_args()
args.working = args.working if args.working else Path.cwd().absolute() / "studies" / f"{datetime.strftime(datetime.now(), '%Y-%m-%d-%H-%M')}-{args.name}"
logger = logging.getLogger(__name__)
logger.info(f"Loading CMS from {args.compartments}")
clr.AddReference(args.compartments)
from compartments.emodl import EmodlLoader
from compartments import Configuration as cfg
from compartments.emod.utils import SolverFactory as solvers
def configure_logging(level: str = "INFO", directory: Path = None):
levels = {
"DEBUG": logging.DEBUG, # 10
"INFO": logging.INFO, # 20
"WARN": logging.WARN, # 30
"WARNING": logging.WARNING, # 30
"ERROR": logging.ERROR, # 40
"CRITICAL": logging.CRITICAL, # 50
"FATAL": logging.FATAL # 50
}
level = levels[level] if isinstance(level, str) else int(level)
# logger = logging.getLogger() # did this at module scope above
logger.setLevel(level)
console = logging.StreamHandler()
console.setLevel(level)
formatter = logging.Formatter("%(levelname)s:%(message)s")
console.setFormatter(formatter)
logger.addHandler(console)
logfile = (directory / f"{__file__}-{datetime.now():%Y%m%d-%H%M%S}.log").absolute()
print(f"Logging to {logfile}")
disk = logging.FileHandler(logfile)
disk.setLevel(level)
disk.setFormatter(formatter)
logger.addHandler(disk)
return
args.working.mkdir(exist_ok=True)
configure_logging(args.verbosity, args.working)
df = pd.read_csv(args.data)
case_data = np.array(df.groupby("Year")["New_HAT_cases"].sum(), dtype=np.uint32)
def objective(trial):
beta_h = trial.suggest_uniform('beta_h', 0.0001, 0.001)
config = {
"solver": "TAU",
# "prng_seed": 20201025, # use this for stochasticity: datetime.now().microsecond
"prng_seed": datetime.now().microsecond,
"tau-leaping": {
"epsilon": 0.001,
"Nc": 10,
"Multiple": 10.0,
"SSARuns": 100
}
}
cfg.CurrentConfiguration = cfg.ConfigurationFromString(json.dumps(config))
model_name = args.model.lower()
if model_name in builders:
model_description = builders[model_name](beta_h, **{
"human-susceptible":7_642-204-240,
"human-infectious-one":204,
"human-infectious-two": 240,
"tsetse-susceptible":7_642*6.56*(1-0.006818574),
"tsetse-infectious":7_642*6.56*0.006818574,
"reservoir-sd-susceptible":7_642*0.06*(1-0.013547841),
"reservoir-sd-infectious":7_642*0.06*0.013547841,
"reservoir-rd-susceptible":7_642*0.16*(1-0.011662679),
"reservoir-rd-infectious":7_642*0.16*0.011662679,
"reservoir-sw-susceptible":7_642*(0.06/2)*(1-0.017688), #later do the ratio as fitted, currently just setting to 1/2 domestic #s
"reservoir-sw-infectious":7_642*(0.06/2)*0.017688, #later do the ratio as fitted, currently just setting to 1/2 domestic #s
"reservoir-rw-susceptible":7_642*(0.16/2)*(1-0.021505376),#later do the ratio as fitted, currently just setting to 1/2 domestic #s
"reservoir-rw-infectious":7_642*(0.16/2)*0.021505376,#later do the ratio as fitted, currently just setting to 1/2 domestic #s
"non-reservoir-hosts":7_642/10})
else:
raise RuntimeError(f"Model{model_name}is not a known model.")
model_info = load_model(model_description)
# Create a solver - could just specify "SSA" or "TAU" here. Run for sim_duration days and record num_samples intermediate states of the system.
args.repetitions = 3  # NOTE: overrides the command-line -r value
args.years = 45  # NOTE: overrides the command-line -y value
sim_duration = args.years*365 + 1 # args.years years + 1 more day
num_samples = sim_duration
t_start = datetime.now()
solver = solvers.CreateSolver(config["solver"], model_info, args.repetitions, sim_duration, num_samples)
t_create = datetime.now()
logger.info(f"{t_create - t_start} for creating the CMS solver.")
solver.Solve() # Run the solver
t_solve = datetime.now()
logger.info(f"{t_solve - t_create} for solving the model {args.repetitions} times ({sim_duration} simulated days).")
datafile = args.working / f"trajectories-{trial.number:03}.csv"
save_trajectories_to_file(solver, datafile)
save_plots(solver, trial, args.working)
# extract relevant trajectories
trajectories = []
data = solver.GetTrajectoryData() # Retrieve the recorded data (all observables, see build_model())
max_sample = num_samples - 1
extra_data = max_sample - (args.years * 365)
print(f"Ignoring {extra_data} additional samples") if extra_data else None
year_indices = np.array(range(args.years+1))*365
for index, label in enumerate(solver.GetTrajectoryLabels()):
if label.startswith("human-infection-cumulative"):
trajectory = np.array(list(data[index]), dtype=np.uint32)
trajectory = trajectory[year_indices]
# transform cumulative counts to incidence (t - t-1)
trajectory[1:] -= trajectory[0:-1]
trajectories.append(trajectory)
# aggregate counts by year
trajectories = np.array(trajectories)
# take mean of all trajectories
mean = trajectories[0, :] if trajectories.shape[0] == 1 else np.mean(trajectories, axis=0)
# fitness is sum(squares) last N years
num_years_data = len(case_data)
fitness = case_data - (mean[-num_years_data:] * args.surveillance)
fitness *= fitness
score = np.sum(fitness)
return score
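# Hedged sketch (not necessarily how the original script wires it up): the objective
# above is meant to be handed to an optuna study; study name and storage reuse the
# command-line arguments parsed at module scope.
def example_run_study(n_trials=None):
    """Create (or resume) the optuna study and run the calibration trials."""
    study = optuna.create_study(study_name=args.name, storage=args.database,
                                direction="minimize", load_if_exists=True)
    study.optimize(objective, n_trials=n_trials or args.trials)
    return study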
def build_model(beta_h = 0.002, **kwargs):
# See hatmodel.md
model = cmsmodel.CmsModel("hat")
# The "odd" construction of `kwargs[...] if ... in kwargs else 0` allows you to selectively specify
# some initial populations in the call to build_model() and those values will be used to initialize
# the population of those species. If you do not specify a value, the initial population will be 0.
species = [
{"name":"human-susceptible", "population": kwargs["human-susceptible"] if "human-susceptible" in kwargs else 0, "observe":True},
{"name":"human-exposed", "population": kwargs["human-exposed"] if "human-exposed" in kwargs else 0, "observe":True},
{"name":"human-infectious-one", "population": kwargs["human-infectious-one"] if "human-infectious-one" in kwargs else 0, "observe":True},
{"name":"human-infectious-two", "population": kwargs["human-infectious-two"] if "human-infectious-two" in kwargs else 0, "observe":True},
{"name":"human-recovered", "population": kwargs["human-recovered"] if "human-recovered" in kwargs else 0, "observe":True},
{"name":"human-infection-cumulative", "population": kwargs["human-infection-cumulative"] if "human-infection-cumulative" in kwargs else 0, "observe":True},
{"name":"human-dead", "population": kwargs["human-dead"] if "human-dead" in kwargs else 0, "observe":True},
{"name":"tsetse-susceptible", "population": kwargs["tsetse-susceptible"] if "tsetse-susceptible" in kwargs else 0, "observe":True},
{"name":"tsetse-exposed", "population": kwargs["tsetse-exposed"] if "tsetse-exposed" in kwargs else 0, "observe":True},
{"name":"tsetse-infectious", "population": kwargs["tsetse-infectious"] if "tsetse-infectious" in kwargs else 0, "observe":True},
{"name":"tsetse-non-susceptible", "population": kwargs["tsetse-non-susceptible"] if "tsetse-non-susceptible" in kwargs else 0, "observe":True},
{"name":"reservoir-sd-susceptible", "population": kwargs["reservoir-sd-susceptible"] if "reservoir-sd-susceptible" in kwargs else 0, "observe":True},
{"name":"reservoir-sd-exposed", "population": kwargs["reservoir-sd-exposed"] if "reservoir-sd-exposed" in kwargs else 0, "observe":True},
{"name":"reservoir-sd-infectious", "population": kwargs["reservoir-sd-infectious"] if "reservoir-sd-infectious" in kwargs else 0, "observe":True},
{"name":"reservoir-sd-recovered", "population": kwargs["reservoir-sd-recovered"] if "reservoir-sd-recovered" in kwargs else 0, "observe":True},
{"name":"reservoir-rd-susceptible", "population": kwargs["reservoir-rd-susceptible"] if "reservoir-rd-susceptible" in kwargs else 0, "observe":True},
{"name":"reservoir-rd-exposed", "population": kwargs["reservoir-rd-exposed"] if "reservoir-rd-exposed" in kwargs else 0, "observe":True},
{"name":"reservoir-rd-infectious", "population": kwargs["reservoir-rd-infectious"] if "reservoir-rd-infectious" in kwargs else 0, "observe":True},
{"name":"reservoir-rd-recovered", "population": kwargs["reservoir-rd-recovered"] if "reservoir-rd-recovered" in kwargs else 0, "observe":True},
{"name":"reservoir-sw-susceptible", "population": kwargs["reservoir-sw-susceptible"] if "reservoir-sw-susceptible" in kwargs else 0, "observe":True},
{"name":"reservoir-sw-exposed", "population": kwargs["reservoir-sw-exposed"] if "reservoir-sw-exposed" in kwargs else 0, "observe":True},
{"name":"reservoir-sw-infectious", "population": kwargs["reservoir-sw-infectious"] if "reservoir-sw-infectious" in kwargs else 0, "observe":True},
{"name":"reservoir-sw-recovered", "population": kwargs["reservoir-sw-recovered"] if "reservoir-sw-recovered" in kwargs else 0, "observe":True},
{"name":"reservoir-rw-susceptible", "population": kwargs["reservoir-rw-susceptible"] if "reservoir-rw-susceptible" in kwargs else 0, "observe":True},
{"name":"reservoir-rw-exposed", "population": kwargs["reservoir-rw-exposed"] if "reservoir-rw-exposed" in kwargs else 0, "observe":True},
{"name":"reservoir-rw-infectious", "population": kwargs["reservoir-rw-infectious"] if "reservoir-rw-infectious" in kwargs else 0, "observe":True},
{"name":"reservoir-rw-recovered", "population": kwargs["reservoir-rw-recovered"] if "reservoir-rw-recovered" in kwargs else 0, "observe":True},
{"name":"non-reservoir-hosts", "population": kwargs["non-reservoir-hosts"] if "non-reservoir-hosts" in kwargs else 0, "observe":True}
]
def _add_species(name: str, population: int, observe: bool):
model.add_species(name, population, observe)
for specie in species:
_add_species(**specie)
parameters = [
{"name":"sigma-h", "value":0.083333333}, # incubation rate (human E->I1)
{"name":"phi-h", "value":1/23}, # progression from human I1->I2
{"name":"omega-h", "value":1/(50-23)}, # rate of progression from I2-> death
{"name":"beta-v", "value":0.212}, # beta for tsetse fly infection from infectious human or reservoir animal
{"name":"p-human-feed", "value":0.05}, # probability of human feed
#{"name":"p-reservoir-feed", "value":0.85}, # probability of reservoir host feed
{"name":"sigma-v", "value":0.06}, # incubation rate (tsetse E->I)
{"name":"mu-v", "value":0.03846154}, # tsetse fly mortality rate
{"name":"treatment-one", "value":0}, # probability a human is treated in stage 1
{"name":"treatment-two", "value":0.083333333}, # probability a human is treated in stage 2
# _not_ from the paper referenced above
{"name":"p-feed", "value":1.0/3}, # probability of feeding in a given 24 hours
{"name":"beta-h", "value":beta_h}, # beta for human infection by infectious tsetse fly
{"name":"beta-r", "value":0.1345}, # beta for reservoir infection by infectious tsetse fly; FIT
{"name":"phi-r-s", "value":0.14285714}, # reservoir incubation rate, swine
{"name":"phi-r-r", "value":0.083333333}, # reservoir incubation rate, ruminants
{"name":"omega-r-nt-s", "value":1/182.5}, # reservoir recovery rate with treatment (assume no recovery otherwise); assume treatment q6 months. FIT
{"name":"omega-r-nt-r", "value":1/225}, # reservoir recovery rate with treatment (assume no recovery otherwise); assume treatment q6 months. FIT
{"name":"omega-r-t", "value":1/91.25}, # reservoir recovery rate without treatment (assume no recovery otherwise); assume treatment q6 months. FIT
{"name":"treatment-reservoir-d", "value":0.5}, # probability domestic reservoir is treated (assume lifelong infection otherwise)
{"name":"treatment-reservoir-w", "value":0}, # probability wild reservoir is treated (assume lifelong infection otherwise)
{"name":"mu-h", "value":0.00053}, # human mortality rate
{"name":"mu-r-sd", "value":0.001369863}, # reservoir mortality rate
{"name":"mu-r-rd", "value":0.000176757}, # reservoir mortality rate
{"name":"mu-r-sw", "value":0.000156556}, # reservoir mortality rate
{"name":"mu-r-rw", "value":0.000182648}, # reservoir mortality rate
{"name":"wane_immunity", "value":1/50} # same for animals and humans
]
def _add_parameter(name: str, value: float):
model.add_parameter(name, value)
| |
u'AA', u'AE', u'AF', u'AG', u'AO', u'CI',
u'E8', u'IA', u'MA'])
),
Element(
u'AAA04', Properties(
desc=u'Follow-up Action Code', req_sit=u'R',
data_type=(u'ID', u'1', u'1'), position=4,
codes=[u'C', u'N', u'R', u'W', u'X', u'Y'])
),
),
parsed_271_2115D,
Segment(
u'MSG', Properties(
syntax=u'C0302', position=u'2500', req_sit=u'S', repeat=u'10',
desc=u'Message Text'),
Element(
u'MSG01', Properties(
desc=u'Free-form Message Text', req_sit=u'R',
data_type=(u'AN', u'1', u'264'), position=1,
codes=[])
),
Element(
u'MSG02', Properties(
desc=u'Printer Carriage Control Code', req_sit=u'N',
data_type=(u'ID', u'2', u'2'), position=2,
codes=[])
),
Element(
u'MSG03',
Properties(
desc=u'Number', req_sit=u'N', data_type=(u'N0', u'1', u'9'),
position=3,
codes=[])
),
), Segment(
u'III', Properties(
syntax=u'P0102 L030405', position=u'2600', req_sit=u'S',
repeat=u'1',
desc=u'Dependent Eligibility or Benefit Additional '
u'Information'),
Element(
u'III01',
Properties(
desc=u'Code List Qualifier Code', req_sit=u'S',
data_type=(u'ID', u'1', u'3'), position=1,
codes=[u'GR', u'NI', u'ZZ'])
),
Element(
u'III02', Properties(
desc=u'Industry Code', req_sit=u'S',
data_type=(u'AN', u'1', u'30'), position=2,
codes=[])
),
Element(
u'III03', Properties(
desc=u'Code Category', req_sit=u'S',
data_type=(u'ID', u'2', u'2'), position=3,
codes=[u'44'])
),
Element(
u'III04', Properties(
desc=u'Free-form Message Text', req_sit=u'S',
data_type=(u'AN', u'1', u'264'), position=4,
codes=[])
),
Element(
u'III05', Properties(
desc=u'Quantity', req_sit=u'N',
data_type=(u'R', u'1', u'15'), position=5,
codes=[])
),
Composite(
u'C001', Properties(
req_sit=u'N', repeat='', refdes='', seq=u'06',
desc=u'Composite Unit of Measure'),
),
Element(
u'III07',
Properties(
desc=u'Surface/Layer/Position Code', req_sit=u'N',
data_type=(u'ID', u'2', u'2'), position=7,
codes=[])
),
Element(
u'III08',
Properties(
desc=u'Surface/Layer/Position Code', req_sit=u'N',
data_type=(u'ID', u'2', u'2'), position=8,
codes=[])
),
Element(
u'III09',
Properties(
desc=u'Surface/Layer/Position Code', req_sit=u'N',
data_type=(u'ID', u'2', u'2'), position=9,
codes=[])
),
),
)
parsed_271_2100D = Loop(
u'2100D',
Properties(
position=u'0300', looptype='', repeat=u'1', req_sit=u'R', desc=u'Dependent Name'),
Segment(
u'NM1',
Properties(
syntax=u'P0809 C1110 C1203', position=u'0300', req_sit=u'R', repeat=u'1',
desc=u'Dependent Name'),
Element(
u'NM101', Properties(
desc=u'Entity Identifier Code', req_sit=u'R',
data_type=(u'ID', u'2', u'3'), position=1,
codes=[u'03'])
),
Element(
u'NM102', Properties(
desc=u'Entity Type Qualifier', req_sit=u'R',
data_type=(u'ID', u'1', u'1'), position=2,
codes=[u'1'])
),
Element(
u'NM103', Properties(
desc=u'Name Last or Organization Name', req_sit=u'S',
data_type=(u'AN', u'1', u'60'), position=3,
codes=[])
),
Element(
u'NM104',
Properties(
desc=u'Name First', req_sit=u'S', data_type=(u'AN', u'1', u'35'),
position=4,
codes=[])
),
Element(
u'NM105',
Properties(
desc=u'Name Middle', req_sit=u'S', data_type=(u'AN', u'1', u'25'),
position=5,
codes=[])
),
Element(
u'NM106',
Properties(
desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN', u'1', u'10'),
position=6,
codes=[])
),
Element(
u'NM107',
Properties(
desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN', u'1', u'10'),
position=7,
codes=[])
),
Element(
u'NM108', Properties(
desc=u'Identification Code Qualifier', req_sit=u'N',
data_type=(u'ID', u'1', u'2'), position=8,
codes=[])
),
Element(
u'NM109', Properties(
desc=u'Identification Code', req_sit=u'N',
data_type=(u'AN', u'2', u'80'), position=9,
codes=[])
),
Element(
u'NM110', Properties(
desc=u'Entity Relationship Code', req_sit=u'N',
data_type=(u'ID', u'2', u'2'), position=10,
codes=[])
),
Element(
u'NM111', Properties(
desc=u'Entity Identifier Code', req_sit=u'N',
data_type=(u'ID', u'2', u'3'), position=11,
codes=[])
),
Element(
u'NM112', Properties(
desc=u'Name Last or Organization Name', req_sit=u'N',
data_type=(u'AN', u'1', u'60'), position=12,
codes=[])
),
),
Segment(
u'REF', Properties(
syntax=u'R0203', position=u'0400', req_sit=u'S', repeat=u'9',
desc=u'Dependent Additional Identification'),
Element(
u'REF01', Properties(
desc=u'Reference Identification Qualifier', req_sit=u'R',
data_type=(u'ID', u'2', u'3'), position=1,
codes=[u'18', u'1L', u'1W', u'49', u'6P', u'CE', u'CT',
u'EA', u'EJ', u'F6', u'GH', u'HJ', u'IF', u'IG',
u'MRC', u'N6', u'NQ', u'Q4', u'SY', u'Y4'])
),
Element(
u'REF02', Properties(
desc=u'Reference Identification', req_sit=u'R',
data_type=(u'AN', u'1', u'50'), position=2,
codes=[])
),
Element(
u'REF03',
Properties(
desc=u'Description', req_sit=u'S', data_type=(u'AN', u'1', u'80'),
position=3,
codes=[])
),
Composite(
u'C040', Properties(
req_sit=u'N', repeat='', refdes='', seq=u'04',
desc=u'Reference Identifier'),
),
),
Segment(
u'N3', Properties(
syntax='', position=u'0600', req_sit=u'S', repeat=u'1',
desc=u'Dependent Address'),
Element(
u'N301', Properties(
desc=u'Address Information', req_sit=u'R',
data_type=(u'AN', u'1', u'55'), position=1,
codes=[])
),
Element(
u'N302', Properties(
desc=u'Address Information', req_sit=u'S',
data_type=(u'AN', u'1', u'55'), position=2,
codes=[])
),
),
Segment(
u'N4',
Properties(
syntax=u'E0207 C0605 C0704', position=u'0700', req_sit=u'S', repeat=u'1',
desc=u'Dependent City, State, ZIP Code'),
Element(
u'N401',
Properties(
desc=u'City Name', req_sit=u'R', data_type=(u'AN', u'2', u'30'),
position=1,
codes=[])
),
Element(
u'N402', Properties(
desc=u'State or Province Code', req_sit=u'S',
data_type=(u'ID', u'2', u'2'), position=2,
codes=[])
),
Element(
u'N403',
Properties(
desc=u'Postal Code', req_sit=u'S', data_type=(u'ID', u'3', u'15'),
position=3,
codes=[])
),
Element(
u'N404',
Properties(
desc=u'Country Code', req_sit=u'S', data_type=(u'ID', u'2', u'3'),
position=4,
codes=[])
),
Element(
u'N405', Properties(
desc=u'Location Qualifier', req_sit=u'N',
data_type=(u'ID', u'1', u'2'), position=5,
codes=[])
),
Element(
u'N406', Properties(
desc=u'Location Identifier', req_sit=u'N',
data_type=(u'AN', u'1', u'30'), position=6,
codes=[])
),
Element(
u'N407', Properties(
desc=u'Country Subdivision Code', req_sit=u'S',
data_type=(u'ID', u'1', u'3'), position=7,
codes=[])
),
),
Segment(
u'AAA', Properties(
syntax='', position=u'0850', req_sit=u'S', repeat=u'9',
desc=u'Dependent Request Validation'),
Element(
u'AAA01', Properties(
desc=u'Yes/No Condition or Response Code', req_sit=u'R',
data_type=(u'ID', u'1', u'1'), position=1,
codes=[u'N', u'Y'])
),
Element(
u'AAA02', Properties(
desc=u'Agency Qualifier Code', req_sit=u'N',
data_type=(u'ID', u'2', u'2'), position=2,
codes=[])
),
Element(
u'AAA03', Properties(
desc=u'Reject Reason Code', req_sit=u'R',
data_type=(u'ID', u'2', u'2'), position=3,
codes=[u'15', u'35', u'42', u'43', u'45', u'47', u'48',
u'49', u'51', u'52', u'56', u'57', u'58', u'60',
u'61', u'62', u'63', u'64', u'65', u'66', u'67',
u'68', u'71', u'77'])
),
Element(
u'AAA04', Properties(
desc=u'Follow-up Action Code', req_sit=u'R',
data_type=(u'ID', u'1', u'1'), position=4,
codes=[u'C', u'N', u'R', u'S', u'W', u'X', u'Y'])
),
),
Segment(
u'PRV', Properties(
syntax=u'P0203', position=u'0900', req_sit=u'S', repeat=u'1',
desc=u'Provider Information'),
Element(
u'PRV01',
Properties(
desc=u'Provider Code', req_sit=u'R', data_type=(u'ID', u'1', u'3'),
position=1,
codes=[u'AD', u'AT', u'BI', u'CO', u'CV', u'H', u'HH', u'LA', u'OT',
u'P1', u'P2', u'PC', u'PE', u'R', u'RF', u'SK', u'SU'])
),
Element(
u'PRV02', Properties(
desc=u'Reference Identification Qualifier', req_sit=u'S',
data_type=(u'ID', u'2', u'3'), position=2,
codes=[u'PXC'])
),
Element(
u'PRV03', Properties(
desc=u'Reference Identification', req_sit=u'S',
data_type=(u'AN', u'1', u'50'), position=3,
codes=[])
),
Element(
u'PRV04', Properties(
desc=u'State or Province Code', req_sit=u'N',
data_type=(u'ID', u'2', u'2'), position=4,
codes=[])
),
Composite(
u'C035', Properties(
req_sit=u'N', repeat='', refdes='', seq=u'05',
desc=u'Provider Specialty Information'),
),
Element(
u'PRV06', Properties(
desc=u'Provider Organization Code', req_sit=u'N',
data_type=(u'ID', u'3', u'3'), position=6,
codes=[])
),
),
Segment(
u'DMG',
Properties(
syntax=u'P0102 P1011 C1105', position=u'1000', req_sit=u'S', repeat=u'1',
desc=u'Dependent Demographic Information'),
Element(
u'DMG01', Properties(
desc=u'Date Time Period Format Qualifier', req_sit=u'S',
data_type=(u'ID', u'2', u'3'), position=1,
codes=[u'D8'])
),
Element(
u'DMG02', Properties(
desc=u'Date Time Period', req_sit=u'S',
data_type=(u'AN', u'1', u'35'), position=2,
codes=[])
),
Element(
u'DMG03',
Properties(
desc=u'Gender Code', req_sit=u'S', data_type=(u'ID', u'1', u'1'),
position=3,
codes=[u'F', u'M', u'U'])
),
Element(
u'DMG04', Properties(
desc=u'Marital Status Code', req_sit=u'N',
data_type=(u'ID', u'1', u'1'), position=4,
codes=[])
),
Composite(
u'C056', Properties(
req_sit=u'N', repeat=u'10', refdes='', seq=u'05',
desc=u'Composite Race or Ethnicity Information'),
),
Element(
u'DMG06', Properties(
desc=u'Citizenship Status Code', req_sit=u'N',
data_type=(u'ID', u'1', u'2'), position=6,
codes=[])
),
Element(
u'DMG07',
Properties(
desc=u'Country Code', req_sit=u'N', data_type=(u'ID', u'2', u'3'),
position=7,
codes=[])
),
Element(
u'DMG08', Properties(
desc=u'Basis of Verification Code', req_sit=u'N',
data_type=(u'ID', u'1', u'2'), position=8,
codes=[])
),
Element(
u'DMG09',
Properties(
desc=u'Quantity', req_sit=u'N', data_type=(u'R', u'1', u'15'),
position=9,
codes=[])
),
Element(
u'DMG10', Properties(
desc=u'Code List Qualifier Code', req_sit=u'N',
data_type=(u'ID', u'1', u'3'), position=10,
codes=[])
),
Element(
u'DMG11',
Properties(
desc=u'Industry Code', req_sit=u'N', data_type=(u'AN', u'1', u'30'),
position=11,
codes=[])
),
),
Segment(
u'INS', Properties(
syntax=u'P1112', position=u'1100', req_sit=u'S', repeat=u'1',
desc=u'Dependent Relationship'),
Element(
u'INS01', Properties(
desc=u'Yes/No Condition or Response Code', req_sit=u'R',
data_type=(u'ID', u'1', u'1'), position=1,
codes=[u'N'])
),
Element(
u'INS02', Properties(
desc=u'Individual Relationship Code', req_sit=u'R',
data_type=(u'ID', u'2', u'2'), position=2,
codes=[u'01', u'19', u'20', u'21', u'39', u'40', u'53',
u'G8'])
),
Element(
u'INS03', Properties(
desc=u'Maintenance Type Code', req_sit=u'S',
data_type=(u'ID', u'3', u'3'), position=3,
codes=[u'001'])
),
Element(
u'INS04', Properties(
desc=u'Maintenance Reason Code', req_sit=u'S',
data_type=(u'ID', u'2', u'3'), position=4,
codes=[u'25'])
),
Element(
u'INS05', Properties(
desc=u'Benefit Status Code', req_sit=u'N',
data_type=(u'ID', u'1', u'1'), position=5,
codes=[])
),
Composite(
u'C052', Properties(
req_sit=u'N', repeat='', refdes='', seq=u'06',
desc=u'Medicare Status Code'),
),
Element(
u'INS07', Properties(
desc=u'Consolidated Omnibus Budget Reconciliation Act (COBRA) Qualifying',
req_sit=u'N', data_type=(u'ID', u'1', u'2'), position=7,
codes=[])
),
Element(
u'INS08', Properties(
desc=u'Employment Status Code', req_sit=u'N',
data_type=(u'ID', u'2', u'2'), position=8,
codes=[])
),
Element(
u'INS09', Properties(
desc=u'Student Status Code', req_sit=u'N',
data_type=(u'ID', u'1', u'1'), position=9,
codes=[])
),
Element(
u'INS10', Properties(
desc=u'Yes/No Condition or Response Code', req_sit=u'N',
data_type=(u'ID', u'1', u'1'), position=10,
codes=[])
),
Element(
u'INS11', Properties(
desc=u'Date Time Period Format Qualifier', req_sit=u'N',
data_type=(u'ID', u'2', u'3'), position=11,
codes=[])
),
Element(
u'INS12', Properties(
desc=u'Date Time Period', req_sit=u'N',
data_type=(u'AN', u'1', u'35'), position=12,
codes=[])
),
Element(
u'INS13', Properties(
desc=u'Confidentiality Code', req_sit=u'N',
data_type=(u'ID', u'1', u'1'), position=13,
codes=[])
),
Element(
u'INS14',
Properties(
desc=u'City Name', req_sit=u'N', data_type=(u'AN', u'2', u'30'),
position=14,
codes=[])
),
Element(
u'INS15', Properties(
desc=u'State or Province Code', req_sit=u'N',
data_type=(u'ID', u'2', u'2'), position=15,
codes=[])
),
Element(
u'INS16',
Properties(
desc=u'Country Code', req_sit=u'N', data_type=(u'ID', u'2', u'3'),
position=16,
codes=[])
),
Element(
u'INS17',
Properties(
desc=u'Number', req_sit=u'S', data_type=(u'N0', u'1', u'9'),
position=17,
codes=[])
),
),
Segment(
u'HI', Properties(
syntax='', position=u'1150', req_sit=u'S', repeat=u'1',
desc=u'Dependent Health Care Diagnosis Code'),
Composite(
u'C022', Properties(
req_sit=u'R', repeat='', refdes='', seq=u'01',
desc=u'Health Care Code Information'),
Element(
u'HI01-01', Properties(
desc=u'Code List Qualifier Code', req_sit=u'R',
data_type=(u'ID', u'1', u'3'), position=0,
codes=[u'ABK', u'BK'])
),
Element(
u'HI01-02', Properties(
desc=u'Industry Code', req_sit=u'R',
data_type=(u'AN', u'1', u'30'), position=1,
codes=[])
),
Element(
u'HI01-03',
Properties(
desc=u'Date Time Period Format Qualifier', req_sit=u'N',
data_type=(u'ID', u'2', u'3'), position=2,
codes=[])
),
Element(
u'HI01-04', Properties(
desc=u'Date Time Period', req_sit=u'N',
data_type=(u'AN', u'1', u'35'), position=3,
codes=[])
),
Element(
u'HI01-05', Properties(
desc=u'Monetary Amount', req_sit=u'N',
data_type=(u'R', u'1', u'18'), position=4,
codes=[])
),
| |
import logging
from collections import Counter
from pathlib import Path
from typing import Dict, List, Optional, Set, Union
import jsonschema
import pyam
import pydantic
import yaml
from nomenclature.codelist import PYAM_AGG_KWARGS
from nomenclature.definition import DataStructureDefinition
from nomenclature.error.region import (
ModelMappingCollisionError,
RegionNameCollisionError,
RegionNotDefinedError,
ExcludeRegionOverlapError,
)
from nomenclature.processor.utils import get_relative_path
from pyam import IamDataFrame
from pyam.logging import adjust_log_level
from pydantic import BaseModel, root_validator, validate_arguments, validator
from pydantic.error_wrappers import ErrorWrapper
from pydantic.types import DirectoryPath, FilePath
AGG_KWARGS = PYAM_AGG_KWARGS + ["region-aggregation"]
logger = logging.getLogger(__name__)
here = Path(__file__).parent.absolute()
class NativeRegion(BaseModel):
"""Defines a model native region.
Can optionally have a renaming attribute which is applied in the region processing.
Attributes
----------
name : str
Name of the model native region.
rename: Optional[str]
Optional second name that the region will be renamed to.
"""
name: str
rename: Optional[str]
@property
def target_native_region(self) -> str:
"""Returns the resulting name, i.e. either rename or, if not given, name.
Returns
-------
str
Resulting name.
"""
return self.rename if self.rename is not None else self.name
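# Illustration (hypothetical region names): renaming is optional, so
#   NativeRegion(name="region_a").target_native_region                      -> "region_a"
#   NativeRegion(name="region_a", rename="Region A").target_native_region   -> "Region A"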
class CommonRegion(BaseModel):
"""Common region used for model intercomparison.
Parameters
----------
name : str
Name of the common region.
constituent_regions:
List of strings which refer to the original (not renamed, see
:class:`NativeRegion`) names of model native regions.
"""
name: str
constituent_regions: List[str]
class RegionAggregationMapping(BaseModel):
"""Holds information for region processing on a per-model basis.
Region processing is comprised of native region selection and potentially renaming
as well as aggregation to "common regions" (regions used for reporting and
comparison by multiple models).
Attributes
----------
model: str
Name of the model for which RegionAggregationMapping is defined.
file: FilePath
File path of the mapping file. Saved mostly for error reporting purposes.
native_regions: Optional[List[NativeRegion]]
Optionally, list of model native regions to select and potentially rename.
common_regions: Optional[List[CommonRegion]]
Optionally, list of common regions where aggregation will be performed.
"""
model: List[str]
file: FilePath
native_regions: Optional[List[NativeRegion]]
common_regions: Optional[List[CommonRegion]]
exclude_regions: Optional[List[str]]
@validator("model", pre=True)
def convert_to_list(cls, v):
return pyam.utils.to_list(v)
@validator("native_regions")
def validate_native_regions(cls, v, values):
target_names = [nr.target_native_region for nr in v]
duplicates = [
item for item, count in Counter(target_names).items() if count > 1
]
if duplicates:
# Raise the custom RegionNameCollisionError and give the parameters
# duplicates and file.
raise RegionNameCollisionError(
location="native regions", duplicates=duplicates, file=values["file"]
)
return v
@validator("common_regions")
def validate_common_regions(cls, v, values):
names = [cr.name for cr in v]
duplicates = [item for item, count in Counter(names).items() if count > 1]
if duplicates:
raise RegionNameCollisionError(
location="common regions", duplicates=duplicates, file=values["file"]
)
return v
@root_validator(skip_on_failure=True)
def check_native_or_common_regions(cls, values):
# Check that we have at least one of the two: native and common regions
if (
values.get("native_regions") is None
and values.get("common_regions") is None
):
raise ValueError(
"At least one of the two: 'native_regions', 'common_regions' must be"
f"given in {values['file']}"
)
return values
@root_validator(skip_on_failure=True)
def check_illegal_renaming(cls, values):
"""Check if any renaming overlaps with common regions"""
# Skip if only either native-regions or common-regions are specified
if values.get("native_regions") is None or values.get("common_regions") is None:
return values
native_region_names = {
nr.target_native_region for nr in values["native_regions"]
}
common_region_names = {cr.name for cr in values["common_regions"]}
overlap = list(native_region_names & common_region_names)
if overlap:
raise RegionNameCollisionError(
location="native and common regions",
duplicates=overlap,
file=values["file"],
)
return values
@root_validator(skip_on_failure=True)
def check_exclude_native_region_overlap(cls, values):
return _check_exclude_region_overlap(values, "native_regions")
@root_validator(skip_on_failure=True)
def check_exclude_common_region_overlap(cls, values):
return _check_exclude_region_overlap(values, "common_regions")
@classmethod
def from_file(cls, file: Union[Path, str]):
"""Initialize a RegionAggregationMapping from a file.
Parameters
----------
file : Union[Path, str]
Path to a yaml file which contains region aggregation information for one
model.
Returns
-------
RegionAggregationMapping
The resulting region aggregation mapping.
Raises
------
jsonschema.ValidationError
Raised in case there are any errors in the provided yaml mapping file.
Notes
-----
This function is used to convert a model mapping yaml file into a dictionary
which is used to initialize a RegionAggregationMapping.
"""
SCHEMA_FILE = here / "../validation_schemas" / "region_mapping_schema.yaml"
file = Path(file) if isinstance(file, str) else file
with open(file, "r") as f:
mapping_input = yaml.safe_load(f)
with open(SCHEMA_FILE, "r") as f:
schema = yaml.safe_load(f)
# Validate the input data using jsonschema
try:
jsonschema.validate(mapping_input, schema)
except jsonschema.ValidationError as e:
# Add file information in case of error
raise jsonschema.ValidationError(
f"{e.message} in {get_relative_path(file)}"
)
# Add the file name to mapping_input
mapping_input["file"] = get_relative_path(file)
# Reformat the "native_regions"
if "native_regions" in mapping_input:
native_region_list: List[Dict] = []
for nr in mapping_input["native_regions"]:
if isinstance(nr, str):
native_region_list.append({"name": nr})
elif isinstance(nr, dict):
native_region_list.append(
{"name": list(nr)[0], "rename": list(nr.values())[0]}
)
mapping_input["native_regions"] = native_region_list
# Reformat the "common_regions"
if "common_regions" in mapping_input:
common_region_list: List[Dict[str, List[Dict[str, str]]]] = []
for cr in mapping_input["common_regions"]:
cr_name = list(cr)[0]
common_region_list.append(
{
"name": cr_name,
"constituent_regions": cr[cr_name],
}
)
mapping_input["common_regions"] = common_region_list
return cls(**mapping_input)
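# Usage sketch (file name and region names are hypothetical; the yaml layout mirrors
# the reformatting logic above):
#
#   # mappings/model_a.yaml
#   # model: model_a
#   # native_regions:
#   #   - region_a: Region A        # rename on import
#   # common_regions:
#   #   - World:
#   #     - region_a
#
#   mapping = RegionAggregationMapping.from_file("mappings/model_a.yaml")
#   mapping.rename_mapping   # {'region_a': 'Region A'}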
@property
def all_regions(self) -> List[str]:
# For the native regions we take the **renamed** (if given) names
nr_list = [x.target_native_region for x in self.native_regions or []]
return nr_list + self.common_region_names
@property
def model_native_region_names(self) -> List[str]:
# List of the **original** model native region names
return [x.name for x in self.native_regions or []]
@property
def common_region_names(self) -> List[str]:
# List of the common region names
return [x.name for x in self.common_regions or []]
@property
def rename_mapping(self) -> Dict[str, str]:
return {r.name: r.target_native_region for r in self.native_regions or []}
def validate_regions(self, dsd: DataStructureDefinition) -> None:
if hasattr(dsd, "region"):
invalid = [c for c in self.all_regions if c not in dsd.region]
if invalid:
raise RegionNotDefinedError(region=invalid, file=self.file)
class RegionProcessor(BaseModel):
"""Region aggregation mappings for scenario processing"""
mappings: Dict[str, RegionAggregationMapping]
@classmethod
@validate_arguments
def from_directory(cls, path: DirectoryPath):
"""Initialize a RegionProcessor from a directory of model-aggregation mappings.
Parameters
----------
path : DirectoryPath
Directory which holds all the mappings.
Returns
-------
RegionProcessor
The resulting region processor object.
Raises
------
ModelMappingCollisionError
Raised in case there are multiple mappings defined for the same model.
"""
mapping_dict: Dict[str, RegionAggregationMapping] = {}
errors: List[ErrorWrapper] = []
for file in (f for f in path.glob("**/*") if f.suffix in {".yaml", ".yml"}):
try:
mapping = RegionAggregationMapping.from_file(file)
for m in mapping.model:
if m not in mapping_dict:
mapping_dict[m] = mapping
else:
errors.append(
ErrorWrapper(
ModelMappingCollisionError(
model=m,
file1=mapping.file,
file2=mapping_dict[m].file,
),
"__root__",
)
)
except (pydantic.ValidationError, jsonschema.ValidationError) as e:
errors.append(ErrorWrapper(e, "__root__"))
if errors:
raise pydantic.ValidationError(errors, model=RegionProcessor)
return cls(mappings=mapping_dict)
def validate_mappings(self, dsd: DataStructureDefinition) -> None:
"""Check if all mappings are valid and collect all errors."""
errors = []
for mapping in self.mappings.values():
try:
mapping.validate_regions(dsd)
except RegionNotDefinedError as rnde:
errors.append(ErrorWrapper(rnde, f"mappings -> {mapping.model}"))
if errors:
raise pydantic.ValidationError(errors, model=self.__class__)
def apply(self, df: IamDataFrame, dsd: DataStructureDefinition) -> IamDataFrame:
"""Apply region processing
Parameters
----------
df : IamDataFrame
Input data that the region processing is applied to
dsd : DataStructureDefinition
Used for region validation and variable information for performing region
processing
Returns
-------
IamDataFrame
Processed data
Raises
------
ValueError
* If *df* contains regions that are not listed in the model mapping, or
* If the region-processing results in an empty **IamDataFrame**.
"""
processed_dfs: List[IamDataFrame] = []
for model in df.model:
model_df = df.filter(model=model)
# If no mapping is defined the data frame is returned unchanged
if model not in self.mappings:
logger.info(f"No model mapping found for model {model}")
processed_dfs.append(model_df)
# Otherwise we first rename, then aggregate
else:
# before aggregating, check that all regions are valid
self.mappings[model].validate_regions(dsd)
logger.info(
f"Applying region-processing for model {model} from file "
f"{self.mappings[model].file}"
)
# Check for regions not mentioned in the model mapping
_check_unexpected_regions(model_df, self.mappings[model])
_processed_dfs = []
# Silence pyam's empty filter warnings
with adjust_log_level(logger="pyam", level="ERROR"):
# Rename
if self.mappings[model].native_regions is not None:
_df = model_df.filter(
region=self.mappings[model].model_native_region_names
)
if not _df.empty:
_processed_dfs.append(
_df.rename(region=self.mappings[model].rename_mapping)
)
# Aggregate
if self.mappings[model].common_regions is not None:
vars = self._filter_dict_args(model_df.variable, dsd)
vars_default_args = [
var for var, kwargs in vars.items() if not kwargs
]
# TODO skip if required weight does not exist
vars_kwargs = {
var: kwargs
for var, kwargs in vars.items()
if var not in vars_default_args
}
for cr in self.mappings[model].common_regions:
regions = [cr.name, cr.constituent_regions]
# First, perform 'simple' aggregation (no arguments)
_processed_dfs.append(
model_df.aggregate_region(vars_default_args, *regions)
)
# Second, special weighted aggregation
for var, kwargs in vars_kwargs.items():
if "region-aggregation" not in kwargs:
_df = _aggregate_region(
model_df,
var,
*regions,
**kwargs,
)
if _df is not None and not _df.empty:
_processed_dfs.append(_df)
else:
for rename_var in kwargs["region-aggregation"]:
for _rename, _kwargs in rename_var.items():
_df | |
os.path.exists(fig_path)):
os.makedirs(fig_path)
plt.savefig(os.path.join(fig_path, fig_name),
pad_inches=1,
bbox_inches='tight')
plt.close(fig)
def variable_genes(adata,
show_texts=False,
n_texts=10,
size=8,
text_size=10,
pad=1.08,
w_pad=None,
h_pad=None,
fig_size=(4, 4),
save_fig=None,
fig_path=None,
fig_name='plot_variable_genes.pdf',
**kwargs):
"""Plot highly variable genes.
Parameters
----------
adata : `Anndata`
Annotated data matrix.
show_texts : `bool`, optional (default: False)
If True, text annotation will be shown.
n_texts : `int`, optional (default: 10)
The number of texts to plot.
size : `int`, optional (default: 8)
The marker size
text_size : `int`, optional (default: 10)
The text size
pad: `float`, optional (default: 1.08)
Padding between the figure edge and the edges of subplots,
as a fraction of the font size.
h_pad, w_pad: `float`, optional (default: None)
Padding (height/width) between edges of adjacent subplots,
as a fraction of the font size. Defaults to pad.
fig_size: `tuple`, optional (default: (3,3))
figure size.
save_fig: `bool`, optional (default: False)
if True,save the figure.
fig_path: `str`, optional (default: None)
If save_fig is True, specify figure path.
fig_name: `str`, optional (default: 'plot_variable_genes.pdf')
if `save_fig` is True, specify figure name.
**kwargs: `dict`, optional
Other keyword arguments are passed through to ``plt.scatter``
Returns
-------
None
"""
if fig_size is None:
fig_size = mpl.rcParams['figure.figsize']
if save_fig is None:
save_fig = settings.save_fig
if fig_path is None:
fig_path = os.path.join(settings.workdir, 'figures')
means = adata.var['means']
variances_norm = adata.var['variances_norm']
mask = adata.var['highly_variable']
genes = adata.var_names
fig, ax = plt.subplots(figsize=fig_size)
ax.scatter(means[~mask],
variances_norm[~mask],
s=size,
c='#1F2433',
**kwargs)
ax.scatter(means[mask],
variances_norm[mask],
s=size,
c='#ce3746',
**kwargs)
ax.set_xscale(value='log')
if show_texts:
ids = variances_norm.values.argsort()[-n_texts:][::-1]
texts = [plt.text(means[i], variances_norm[i], genes[i],
fontdict={'family': 'serif',
'color': 'black',
'weight': 'normal',
'size': text_size})
for i in ids]
adjust_text(texts,
arrowprops=dict(arrowstyle='-', color='black'))
ax.set_xlabel('average expression')
ax.set_ylabel('standardized variance')
ax.locator_params(axis='x', tight=True)
ax.locator_params(axis='y', tight=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
if(save_fig):
if(not os.path.exists(fig_path)):
os.makedirs(fig_path)
fig.savefig(os.path.join(fig_path, fig_name),
pad_inches=1,
bbox_inches='tight')
plt.close(fig)
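# Usage sketch: assumes `adata.var` already carries the 'means', 'variances_norm'
# and 'highly_variable' columns read above (e.g. from a prior highly-variable-gene
# selection step).
#   variable_genes(adata, show_texts=True, n_texts=5, save_fig=False)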
def _scatterplot2d(df,
x,
y,
list_hue=None,
hue_palette=None,
drawing_order='sorted',
dict_drawing_order=None,
size=8,
show_texts=False,
texts=None,
text_size=10,
text_expand=(1.05, 1.2),
fig_size=None,
fig_ncol=3,
fig_legend_ncol=1,
fig_legend_order=None,
vmin=None,
vmax=None,
alpha=0.8,
pad=1.08,
w_pad=None,
h_pad=None,
save_fig=None,
fig_path=None,
fig_name='scatterplot2d.pdf',
copy=False,
**kwargs):
"""2d scatter plot
Parameters
----------
df: `pd.DataFrame`
Input data structure of shape (n_samples, n_features).
x: `str`
Variable in `df` that specifies positions on the x axis.
y: `str`
Variable in `df` that specifies positions on the y axis.
list_hue: `str`, optional (default: None)
A list of variables that will produce points with different colors.
drawing_order: `str` (default: 'sorted')
The order in which values are plotted, This can be
one of the following values
- 'original': plot points in the same order as in input dataframe
- 'sorted' : plot points with higher values on top.
- 'random' : plot points in a random order
fig_size: `tuple`, optional (default: None)
figure size.
fig_ncol: `int`, optional (default: 3)
the number of columns of the figure panel
fig_legend_order: `dict`,optional (default: None)
Specified order for the appearance of the annotation keys.
Only valid for categorical/string variable
e.g. fig_legend_order = {'ann1':['a','b','c'],'ann2':['aa','bb','cc']}
fig_legend_ncol: `int`, optional (default: 1)
The number of columns that the legend has.
vmin,vmax: `float`, optional (default: None)
The min and max values are used to normalize continuous values.
If None, the respective min and max of continuous values is used.
alpha: `float`, optional (default: 0.8)
0.0 transparent through 1.0 opaque
pad: `float`, optional (default: 1.08)
Padding between the figure edge and the edges of subplots,
as a fraction of the font size.
h_pad, w_pad: `float`, optional (default: None)
Padding (height/width) between edges of adjacent subplots,
as a fraction of the font size. Defaults to pad.
save_fig: `bool`, optional (default: False)
if True,save the figure.
fig_path: `str`, optional (default: None)
If save_fig is True, specify figure path.
fig_name: `str`, optional (default: 'scatterplot2d.pdf')
if save_fig is True, specify figure name.
Returns
-------
None
"""
if fig_size is None:
fig_size = mpl.rcParams['figure.figsize']
if save_fig is None:
save_fig = settings.save_fig
if fig_path is None:
fig_path = os.path.join(settings.workdir, 'figures')
list_ax = list()
if list_hue is None:
list_hue = [None]
else:
for hue in list_hue:
if(hue not in df.columns):
raise ValueError(f"could not find {hue}")
if hue_palette is None:
hue_palette = dict()
assert isinstance(hue_palette, dict), "`hue_palette` must be dict"
legend_order = {hue: np.unique(df[hue]) for hue in list_hue
if (is_string_dtype(df[hue])
or is_categorical_dtype(df[hue]))}
if(fig_legend_order is not None):
if(not isinstance(fig_legend_order, dict)):
raise TypeError("`fig_legend_order` must be a dictionary")
for hue in fig_legend_order.keys():
if(hue in legend_order.keys()):
legend_order[hue] = fig_legend_order[hue]
else:
print(f"{hue} is ignored for ordering legend labels"
"due to incorrect name or data type")
if dict_drawing_order is None:
dict_drawing_order = dict()
assert drawing_order in ['sorted', 'random', 'original'],\
"`drawing_order` must be one of ['original', 'sorted', 'random']"
if(len(list_hue) < fig_ncol):
fig_ncol = len(list_hue)
fig_nrow = int(np.ceil(len(list_hue)/fig_ncol))
fig = plt.figure(figsize=(fig_size[0]*fig_ncol*1.05, fig_size[1]*fig_nrow))
for i, hue in enumerate(list_hue):
ax_i = fig.add_subplot(fig_nrow, fig_ncol, i+1)
if hue is None:
sc_i = sns.scatterplot(ax=ax_i,
x=x,
y=y,
data=df,
alpha=alpha,
linewidth=0,
s=size,
**kwargs)
else:
if(is_string_dtype(df[hue]) or is_categorical_dtype(df[hue])):
if hue in hue_palette.keys():
palette = hue_palette[hue]
else:
palette = None
if hue in dict_drawing_order.keys():
param_drawing_order = dict_drawing_order[hue]
else:
param_drawing_order = drawing_order
if param_drawing_order == 'sorted':
df_updated = df.sort_values(by=hue)
elif param_drawing_order == 'random':
df_updated = df.sample(frac=1, random_state=100)
else:
df_updated = df
sc_i = sns.scatterplot(ax=ax_i,
x=x,
y=y,
hue=hue,
hue_order=legend_order[hue],
data=df_updated,
alpha=alpha,
linewidth=0,
palette=palette,
s=size,
**kwargs)
ax_i.legend(bbox_to_anchor=(1, 0.5),
loc='center left',
ncol=fig_legend_ncol,
frameon=False,
)
else:
vmin_i = df[hue].min() if vmin is None else vmin
vmax_i = df[hue].max() if vmax is None else vmax
if hue in dict_drawing_order.keys():
param_drawing_order = dict_drawing_order[hue]
else:
param_drawing_order = drawing_order
if param_drawing_order == 'sorted':
df_updated = df.sort_values(by=hue)
elif param_drawing_order == 'random':
df_updated = df.sample(frac=1, random_state=100)
else:
df_updated = df
sc_i = ax_i.scatter(df_updated[x],
df_updated[y],
c=df_updated[hue],
vmin=vmin_i,
vmax=vmax_i,
alpha=alpha,
s=size,
**kwargs)
cbar = plt.colorbar(sc_i,
ax=ax_i,
pad=0.01,
fraction=0.05,
aspect=40)
cbar.solids.set_edgecolor("face")
cbar.ax.locator_params(nbins=5)
if show_texts:
if texts is not None:
plt_texts = [plt.text(df[x][t],
df[y][t],
t,
fontdict={'family': 'serif',
'color': 'black',
'weight': 'normal',
'size': text_size})
for t in texts]
adjust_text(plt_texts,
expand_text=text_expand,
expand_points=text_expand,
expand_objects=text_expand,
arrowprops=dict(arrowstyle='->', color='black'))
ax_i.set_xlabel(x)
ax_i.set_ylabel(y)
ax_i.locator_params(axis='x', nbins=5)
ax_i.locator_params(axis='y', nbins=5)
ax_i.tick_params(axis="both", labelbottom=True, labelleft=True)
ax_i.set_title(hue)
list_ax.append(ax_i)
plt.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
if save_fig:
if(not os.path.exists(fig_path)):
os.makedirs(fig_path)
plt.savefig(os.path.join(fig_path, fig_name),
pad_inches=1,
bbox_inches='tight')
plt.close(fig)
if copy:
return list_ax
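# Usage sketch (hypothetical dataframe columns): plot a 2D embedding colored by a
# categorical label and a continuous score, drawing points in random order.
#   _scatterplot2d(df, x='dim1', y='dim2', list_hue=['cluster', 'score'],
#                  drawing_order='random', fig_ncol=2)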
# def _scatterplot2d_plotly(df,
# x,
# y,
# list_hue=None,
# hue_palette=None,
# drawing_order='sorted',
# fig_size=None,
# fig_ncol=3,
# fig_legend_order=None,
# alpha=0.8,
# save_fig=None,
# fig_path=None,
# **kwargs):
# """interactive 2d scatter plot by Plotly
# Parameters
# ----------
# data: `pd.DataFrame`
# Input data structure of shape (n_samples, n_features).
# x: `str`
# Variable in `data` that specify positions on the x axis.
# y: `str`
# Variable in `data` that specify positions on the x axis.
# list_hue: `str`, optional (default: None)
# A list of variables that will produce points with different colors.
# drawing_order: `str` (default: 'sorted')
# The order in which values are plotted, This can be
# one of the following values
# - 'original': plot points in the same order as in input dataframe
# - 'sorted' : plot points with higher values on top.
# - 'random' : plot points in a random order
# fig_size: `tuple`, optional (default: None)
# figure size.
# fig_ncol: `int`, optional (default: 3)
# the number of columns of the figure panel
# fig_legend_order: `dict`,optional (default: None)
# Specified order for the appearance of the annotation keys.
# Only valid for categorical/string variable
# e.g. fig_legend_order = {'ann1':['a','b','c'],
# 'ann2':['aa','bb','cc']}
# fig_legend_ncol: `int`, optional (default: 1)
# The number of columns that the legend has.
# vmin,vmax: `float`, optional (default: None)
# The min and max values are used to normalize continuous values.
# If None, the respective min and max of continuous values is used.
# alpha: `float`, optional (default: 0.8)
# 0.0 transparent through 1.0 opaque
# pad: `float`, optional (default: 1.08)
# Padding between the figure edge and the edges of subplots,
# as a fraction of the font size.
# h_pad, w_pad: `float`, optional (default: None)
# Padding (height/width) between edges of adjacent subplots,
# as a fraction of the font size. Defaults to pad.
# save_fig: `bool`, optional (default: False)
# if True,save the figure.
# fig_path: `str`, optional (default: None)
# If save_fig is True, specify figure path.
# fig_name: `str`, optional (default: 'scatterplot2d.pdf')
# if save_fig is True, specify figure name.
# Returns
# -------
# None
# """
# if fig_size is None:
# fig_size = mpl.rcParams['figure.figsize']
# if save_fig is None:
# save_fig = settings.save_fig
# if fig_path is None:
# fig_path = os.path.join(settings.workdir, 'figures')
# for hue in list_hue:
# if(hue not in df.columns):
# raise ValueError(f"could not find {hue} in `df.columns`")
# if hue_palette is None:
# hue_palette = dict()
# assert | |
<filename>plugins/levelops-gcloud.py
#!/usr/bin/env python
##################################################################
# Copyright (C) 2019-2020 LevelOps Inc <<EMAIL>>
#
# This file is part of the LevelOps Inc Tools.
#
# This tool is licensed under Apache License, Version 2.0
##################################################################
import logging
import os
import shlex
import subprocess
from argparse import ArgumentParser
from queue import Empty, Queue
from sys import exit
from threading import Thread
from time import sleep
from uuid import uuid4
from io import StringIO
from kubernetes import client as k_client
from kubernetes import config as k_config
from kubernetes.client.configuration import Configuration
from kubernetes.client.rest import ApiException
try:
from ujson import dump as jdump
from ujson import dumps
from ujson import load as jload
from ujson import loads
except:
from json import dump as jdump
from json import dumps
from json import load as jload
from json import loads
from yaml import dump as ydump
from yaml import dump_all, full_load, full_load_all
log = logging.getLogger(__name__)
excluded_namespaces = ['kube-system']
cleanup = []
p_pool = []
r_pool = []
backup = {}
SUCCESS = 0
items = [
["filestore", "instances", "list"],
["pubsub", "topics", "list"],
["bigtable", "instances", "list"],
["bigtable", "clusters", "list"],
["compute", "backend-services", "list"],
["dataflow", "jobs", "list"],
["dataproc", "clusters", "list"],
["dataproc", "jobs", "list"],
["deployment-manager", "deployments", "list"],
["kms", "--location", "us", "list"],
["redis", "instances", "list", "--region", "asia-east1"],
["redis", "instances", "list", "--region", "asia-east2"],
["redis", "instances", "list", "--region", "asia-northeast1"],
["redis", "instances", "list", "--region", "asia-northeast2"],
["redis", "instances", "list", "--region", "asia-south1"],
["redis", "instances", "list", "--region", "asia-southeast1"],
["redis", "instances", "list", "--region", "australia-southeast1"],
["redis", "instances", "list", "--region", "europe-north1"],
["redis", "instances", "list", "--region", "europe-west1"],
["redis", "instances", "list", "--region", "europe-west2"],
["redis", "instances", "list", "--region", "europe-west3"],
["redis", "instances", "list", "--region", "europe-west4"],
["redis", "instances", "list", "--region", "europe-west5"],
["redis", "instances", "list", "--region", "europe-west6"],
["redis", "instances", "list", "--region", "northamerica-northeast1"],
["redis", "instances", "list", "--region", "southamerica-east1"],
["redis", "instances", "list", "--region", "us-central1"],
["redis", "instances", "list", "--region", "us-east1"],
["redis", "instances", "list", "--region", "us-east2"],
["redis", "instances", "list", "--region", "us-west1"],
["redis", "instances", "list", "--region", "us-west2"],
["sql", "instances", "list"]
# ["resource-manager", "folders"]
]
class Control(object):
def __init__(self):
self.terminate = False
def stop(self):
self.terminate = True
class Report(object):
def __init__(self):
self.orgs = []
self.projects = []
self.folders = {}
self.errors = []
def add_org(self, org):
self.orgs.append(org)
def add_project(self, project):
if not isinstance(project, Project):
raise Exception("projects need to be of type 'Project'")
self.projects.append(project)
def add_folder(self, org_id, folder):
self.folders["%s-%s"%(org_id, folder.id)] = folder
def add_error(self, error):
self.errors.append(error)
class Resource(object):
def __init__(self, r_id, name, state=None):
self.id = r_id
self.name = name
self.state = state
class Organization(Resource):
def __init__(self, r_id, name, state=None):
super().__init__(r_id=r_id, name=name, state=state)
self.folders = []
def add_folder(self, folder):
if not isinstance(folder, Resource):
raise Exception("a folder needs to be of type 'Resource'")
self.folders.append(folder)
class Project(Resource):
def __init__(self, r_id, name, state, parent=None, apis=None, paas=None, services=None, k8s=None):
super().__init__(r_id=r_id, name=name, state=state)
if paas:
self.paas = paas
else:
self.paas = []
if apis:
self.apis = apis
else:
self.apis = []
if services:
self.services = services
else:
self.services = []
if k8s:
self.k8s = k8s
else:
self.k8s = []
self.parent = parent
self.k8s_total_count = 0
def add_api(self, api):
if not isinstance(api, Service):
raise Exception("api needs to be of type 'Service'")
self.apis.append(api)
def add_paas(self, paas):
if not isinstance(paas, dict):
raise Exception("paas needs to be of type 'Resource'")
self.paas.append(paas)
def add_service(self, service):
if not isinstance(service, Service):
raise Exception("service needs to be of type 'Service'")
self.services.append(service)
def add_k8s_cluster(self, cluster):
if not isinstance(cluster, KCluster):
raise Exception("cluster needs to be of type 'KCluster'")
self.k8s.append(cluster)
def add_all_k8s_clusters(self, clusters):
if not isinstance(clusters, list):
raise Exception("clusters needs to be of type 'list[KCluster]' but was '%s'" % str(type(clusters)))
self.k8s.extend(clusters)
class Service(Resource):
def __init__(self, r_id, name, state, meta = None):
super().__init__(r_id=r_id, name=name, state=state)
# self.meta = meta
class KComponent(Resource):
def __init__(self, r_id, name, kind, namespace, selectors=None, ports=None, containers=None, labels=None, state=None, meta=None, subtype=None):
super().__init__(r_id=r_id, name=name, state=state)
self.meta = meta
self.kind = kind
self.labels = labels
self.namespace = namespace
self.containers = containers
self.selectors = selectors
self.ports = ports
self.subtype = subtype
class KCluster(Resource):
def __init__(self, r_id, name, state, zone, location=None, locations=None, creation_time=None, masters_version=None, nodes_version=None, initial_version=None, resources=None):
super().__init__(r_id=r_id, name=name, state=state)
self.zone = zone
self.location = location
self.locations = locations
self.creation_time = creation_time
self.masters_version = masters_version
self.nodes_version = nodes_version
self.initial_version = initial_version
if not resources:
self.resources = []
else:
self.resources = resources
def add_resources(self, resources):
if not isinstance(resources, list):
raise Exception("'resources' needs to be of type 'list[KComponent]' but was '%s'" % str(type(resources)))
self.resources.extend(resources)
def enable_api(apis, project):
return _change_apis(apis=apis, project=project, state="enable")
def disable_apis(apis, project):
return _change_apis(apis=apis, project=project, state="disable")
def _change_apis(apis, project, state):
args = ["gcloud", "services", state,"--project", project, "--quiet"]
args.extend(apis)
p_apis = subprocess.run(args=args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if p_apis.returncode != SUCCESS:
message = "[%s] Couldn't change the state of the API(s) '%s' to '%s': %s" % (project.name, str(apis), state, p_apis.stderr)
report.add_error(message)
log.debug(message)
return False
return "finished successfully" in p_apis.stdout
def get_google_credentials(report):
config_file = "/tmp/%s.google" % str(uuid4())
log.info("google credentials: %s", config_file)
p_credentials = subprocess.run(args=shlex.split("gcloud config config-helper --format=json"), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cleanup.append(config_file)
if p_credentials.returncode != SUCCESS:
message = "Coudn't get the google credentials for the current user."
report.add_error(message)
log.debug(message)
return None
with open(config_file,'w') as f:
f.write(p_credentials.stdout)
return config_file
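# Note: the JSON written above comes from `gcloud config config-helper`; its
# 'credential.token_expiry' and 'credential.access_token' fields are read back in
# get_k8s_resources() to refresh the kubeconfig's auth-provider token.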
def get_cluster_credentials(report, project, zone, cluster_name):
config_file = "/tmp/%s.config" % str(uuid4())
log.info("k8s config file: %s", config_file)
env = {}
env.update(os.environ)
env['KUBECONFIG'] = config_file
p_credentials = subprocess.run(args=["gcloud", "container", "clusters", "get-credentials", cluster_name, "--zone", zone, "--project", project.name, "--quiet"], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cleanup.append(config_file)
if p_credentials.returncode != SUCCESS:
message = "[%s] Coudn't get the cluster credentials for the cluster '%s': %s" % (project.name, cluster_name, p_credentials.stderr)
report.add_error(message)
log.debug(message)
return None
return config_file
def get_k8s_clusters(report, project):
p_clusters = subprocess.run(args=["gcloud", "container", "clusters", "list", "--project", project.name, "--format", "json", "--quiet"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if p_clusters.returncode != SUCCESS:
message = "[%s] %s" % (project.name, p_clusters.stderr)
report.add_error(message)
log.debug(message)
return None
clusters = loads(p_clusters.stdout)
project.k8s_total_count = len(clusters)
k8s = []
for cluster in clusters:
k8s.append(
KCluster(
location=cluster['location'],
locations=cluster['locations'],
name=cluster['name'],
state=cluster['status'],
zone=cluster['zone'],
masters_version=cluster['currentMasterVersion'],
nodes_version=cluster['currentNodeVersion'],
initial_version=cluster['initialClusterVersion'],
r_id=cluster['labelFingerprint'],
creation_time=cluster['createTime']
)
)
return k8s
def get_k8s_resources(report, cluster, project):
config_file = get_cluster_credentials(report=report, project=project, zone=cluster.zone, cluster_name=cluster.name)
if not config_file:
return None
with open(g_credentials, 'r') as f:
google = jload(f)
with open(config_file, 'r') as f:
kcfg = full_load(f)
for user in kcfg.get('users'):
config = user.get('user').get('auth-provider').get('config')
config['expiry'] = google['credential']['token_expiry']
config['access-token'] = google['credential']['access_token']
with open(config_file, 'w') as f:
ydump(kcfg, f)
configuration = Configuration()
k_config.load_kube_config(config_file=config_file, client_configuration=configuration)
api_client = k_client.ApiClient(configuration=configuration)
k = k_client.CoreV1Api(api_client=api_client)
apps = k_client.AppsV1Api(api_client=api_client)
resources = []
set_names = [
"list_deployment_for_all_namespaces",
"list_replica_set_for_all_namespaces",
"list_daemon_set_for_all_namespaces",
"list_stateful_set_for_all_namespaces",
"list_service_for_all_namespaces"
]
for set_name in set_names:
if set_name.startswith("list_service"):
k_set = getattr(k, set_name)()
else:
k_set = getattr(apps, set_name)()
collection = {}
for s in k_set.items:
if s.metadata.namespace in excluded_namespaces:
log.debug("Skipping resource since it is located in the excluded namespace '%s'", s.metadata.namespace)
continue
if s.kind:
kind = s.kind
elif 'deployment' in str(type(s)).lower():
kind = "Deployment"
elif 'service' in str(type(s)).lower():
kind = "Service"
elif 'replicaset' in str(type(s)).lower():
kind = "ReplicaSet"
elif 'daemonset' in str(type(s)).lower():
kind = "DaemonSet"
elif 'statefulset' in str(type(s)).lower():
kind = "StatefulSet"
s_type = None
ports = None
selectors = None
meta = None
containers = None
if kind == "Service":
s_type = s.spec.type
ports = [{'protocol': port.protocol, 'target': port.target_port, 'port': port.port} for port in s.spec.ports]
selectors = s.spec.selector
if s_type == 'ExternalName':
meta = {"external_ip": s.spec.externalIPs}
elif kind == "Deployment":
containers = [x.image for x in s.spec.template.spec.containers]
else:
containers = [x.image for x in s.spec.template.spec.containers]
if kind == "Deployment" or kind == "Service" or not s.metadata.labels:
_id = s.metadata.self_link
else:
_id = s.metadata.self_link.replace('-'+s.metadata.labels.get('pod-template-hash',''),'')
version = int(s.metadata.resource_version)
ref = collection.get(_id, {"version": -1, "resource": None})
if ref['version'] < version:
collection[_id] = {"version": version, "resource": KComponent(
r_id=s.metadata.name,
name=s.metadata.name,
namespace=s.metadata.namespace,
kind=kind,
labels=s.metadata.labels,
containers=containers,
ports=ports,
selectors=selectors,
subtype=s_type,
meta=meta)}
for _id in collection:
resources.append(collection[_id]["resource"])
return resources
def restore_permisions(backup_file):
with open(backup_file, 'r') as f:
backup = jload(f)
# get current enabled apis
# disable apis that should not be enabled
def __boot(name, thread_count, thread_queue_size, pool, control, report, timeout=2):
queue = Queue(maxsize=thread_count*thread_queue_size)
for i in range(0, thread_count):
t = Thread(name=name+"-"+str(i), daemon=True, target=_worker, args=(queue, report, control, timeout))
t.start()
pool.append(t)
return queue
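# Usage sketch (names hypothetical): start a small worker pool and stop it later.
#   control = Control()
#   queue = __boot("scan", thread_count=4, thread_queue_size=2, pool=p_pool,
#                  control=control, report=report)
#   queue.put(some_task)      # picked up by the _worker threads below
#   control.stop()            # workers exit after their next queue timeout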
def _worker(queue, report, control, timeout=2):
log.debug('Starting...')
while not control.terminate:
try:
task = queue.get(block=True, timeout=timeout)
except Empty as e:
log.debug('Empty response from the queue, will try again...')
continue
log.debug('Task | |
if realAddr not in self.goTo:
self.goTo.append(realAddr)
unknown.add(realAddr)
return unknown
HIGHLIGHT_OPS = {'Yield': '#ff00ff',
'Gosub': '#ff00ff',
'Return': '#ff00ff'}
INVOC_THRESHOLDS = [0, 1, 2, 10, 100, 1000]
PAGE_THRESHOLDS = [0, 1, 2, 8, 64, 256]
def graphStr(self, schemaInfo):
def bar(count, thresholds):
if count == thresholds[0]:
return "<font color='#ffffff'>||||| </font>"
elif count == thresholds[1]:
return ("<font color='#4aba4a'>|</font>" +
"<font color='#ffffff'>|||| </font>")
elif count == thresholds[2]:
return ("<font color='#119125'>|</font>" +
"<font color='#ffffff'>|||| </font>")
elif count <= thresholds[3]:
return ("<font color='#119125'>||</font>" +
"<font color='#ffffff'>||| </font>")
elif count <= thresholds[4]:
return ("<font color='#cccf3c'>|||</font>" +
"<font color='#ffffff'>|| </font>")
elif count <= thresholds[5]:
return ("<font color='#ad1316'>||||</font>" +
"<font color='#ffffff'>| </font>")
else:
return "<font color='#ad1316'>||||| </font>"
if HAVE_COUNTS:
s = bar(self.invocCount or 0, self.INVOC_THRESHOLDS)
s += bar(self.pageCount or 0, self.PAGE_THRESHOLDS)
else:
s = ''
if self.usesCursor:
s += "<font color='%s'>%d %s [%s]</font>" % (
self.usesCursor.color, self.addr, self.name,
self.usesCursor.handle)
elif self.name in self.HIGHLIGHT_OPS:
s += "%d <font color='%s'>%s</font>" % (
self.addr, self.HIGHLIGHT_OPS[self.name],
escapeHTML(self.name))
else:
s += '%d %s' % (self.addr, escapeHTML(self.name))
if self.affectedByCursors:
cursors = list(self.affectedByCursors)
cursors.sort(lambda a, b: a.handle - b.handle)
cursorStrings = []
for cursor in cursors:
if cursor == self.usesCursor:
continue
cursorStrings.append(
"<font color='%s'>%d</font>" % (
cursor.color, cursor.handle))
if cursorStrings:
s += ' (' + ' '.join(cursorStrings) + ')'
if self.usesCursor and self.births:
if self.usesCursor in self.births and self.usesCursor.on:
s += ' %s' % (escapeHTML(self.usesCursor.on.name),)
if self.usesImmediate is not None:
s += ' imm %s' % (self.usesImmediate)
if self.usesColumns:
schema = self.usesCursor.on.schema
if schema:
colNames = []
for colNum in self.usesColumns:
colNames.append(escapeHTML(schema.columns[colNum]))
s += ' col %s' % (', '.join(colNames))
if self.regReads:
s += ' using r%s' % (', r'.join(map(str, self.regReads)),)
if self.regWrites:
s += ' to r%s' % (', r'.join(map(str, self.regWrites)),)
if self.comment:
s += " <font color='#888888'>%s</font>" % (escapeHTML(self.comment),
)
if HAVE_COUNTS:
s = ("<tr><td align='left'>%s</td>" +
" <td align='left'>%d</td><td>%d</td></tr>") % (
s, self.invocCount or 0, self.pageCount or 0)
else:
s = "<tr><td align='left'>%s</td></tr>" % (s,)
return s
def dump(self):
if self.comeFrom:
print ' ', self.comeFrom, '---->'
print '%d %s' % (self.addr, self.name),
print ' reads: %s writes: %s' % (self.regReads, self.regWrites)
if self.goTo:
print ' ---->', self.goTo
class ExplainGrokker(object):
def __init__(self):
self.ephemeralTables = []
self.virtualTables = []
#: maps "vtab:*:*" strings to Table instances...
self.vtableObjs = {}
self.realTables = []
self.realTablesByName = {}
self.pseudoTables = []
self.allTables = []
self.indices = []
self.realIndicesByName = {}
self.cursors = []
self.code = []
self.cursorByHandle = {}
self.resultRowOps = []
def _newEphemeralTable(self, **kwargs):
table = Table(ephemeral=True, openedAt=self.op, **kwargs)
self.ephemeralTables.append(table)
self.allTables.append(table)
self.op.births.append(table)
return table
def _newVirtualTable(self, vtabkey, **kwargs):
table = Table(virtual=True, openedAt=self.op, **kwargs)
self.vtableObjs[vtabkey] = table
self.virtualTables.append(table)
self.allTables.append(table)
self.op.births.append(table)
return table
def _newRealTable(self, nameAndInfo, **kwargs):
rootIdb, name = nameAndInfo.split('; ')
if name in self.realTablesByName:
table = self.realTablesByName[name]
else:
table = Table(name=name, openedAt=self.op, **kwargs)
self.realTables.append(table)
self.realTablesByName[name] = table
self.allTables.append(table)
self.op.births.append(table)
return table
def _newPseudoTable(self, **kwargs):
table = Table(pseudo=True,openedAt=self.op, **kwargs)
self.pseudoTables.append(table)
self.allTables.append(table)
self.op.births.append(table)
return table
def _parseKeyinfo(self, indexDetails):
# see displayP4 in the source
# right now we just care about the first arg, nField which is the
# "number of key colums in the index"
keyparts = indexDetails[2:-1].split(',')
numColumns = int(keyparts[0])
return numColumns
def _newIndexOn(self, indexDetails, nameAndInfo, **kwargs):
# indexDetails is of the form: "keyinfo(%d,"...
numColumns = self._parseKeyinfo(indexDetails)
rootIdb, name = nameAndInfo.split('; ')
if name in self.realIndicesByName:
index = self.realIndicesByName[name]
else:
index = Index(columns=numColumns, openedAt=self.op, name=name,
**kwargs)
self.indices.append(index)
self.realIndicesByName[name] = index
self.op.births.append(index)
return index
def _newCursor(self, handle, thing, **kwargs):
if handle in self.cursorByHandle:
# R/W change is okay
cursor = self.cursorByHandle[handle]
if cursor.on != thing:
raise Exception('ERROR! Cursor handle collision!')
else:
cursor = Cursor(handle=handle, on=thing, openedAt=self.op, **kwargs)
self.cursors.append(cursor)
self.cursorByHandle[handle] = cursor
self.op.births.append(cursor)
self.op.usesCursor = cursor
self.op.affectedByCursors.add(cursor)
return cursor
def _getCursor(self, handle, write=False, seek=False):
cursor = self.cursorByHandle[handle]
self.op.usesCursor = cursor
self.op.writesCursor = write
self.op.seeksCursor = seek
self.op.affectedByCursors.add(cursor)
return cursor
def _getVtable(self, vtabkey, write=False):
'''
Given a P4-resident "vtab:*:*"
'''
if vtabkey in self.cursorByHandle:
cursor = self.cursorByHandle[vtabkey]
else:
cursor = Cursor(handle=vtabkey, on=None)
self.cursors.append(cursor)
self.cursorByHandle[vtabkey] = cursor
self.op.usesCursor = cursor
self.op.writesCursor = write
self.op.affectedByCursors.add(cursor)
return cursor
def _killThing(self, thing):
self.op.kills.append(thing)
thing.closedAt = self.op
if thing.on:
self._killThing(thing.on)
def _killCursor(self, handle):
if handle not in self.cursorByHandle:
print 'Warning; tried to close a non-open cursor; might be our bad'
return
cursor = self._getCursor(handle)
self._killThing(cursor)
def _op_OpenCommon(self, params, writable):
# if P4 is a keyinfo, then it's an index and comment is the name of the
# index.
# if P4 is not a keyinfo, then it's the number of columns in the table
# and comment is the name of the table.
cursorNum = params[0]
if isinstance(params[3], basestring):
indexDetails = params[3]
cursorOn = self._newIndexOn(indexDetails,
self.op.comment)
else:
cursorOn = self._newRealTable(self.op.comment,
columns=params[3])
self._newCursor(cursorNum, cursorOn, writable=writable)
def _op_OpenRead(self, params):
self._op_OpenCommon(params, False)
def _op_OpenWrite(self, params):
self._op_OpenCommon(params, True)
def _op_OpenPseudo(self, params):
# a pseudo-table is a 'fake table with a single row of data'
# cursor is P1
# the pseudo-table is stored into a blob in P2
# number of fields/columns is p3
# XXX our Column opcode might benefit from being aware that it's dealing
# with a cursor to a pseudo table so that it can translate its actions
# into actions on the underlying register too. but we no longer really
# care about that so much these days.
cursorNum = params[0]
table = self._newPseudoTable(
name=("pseudo%d" % (cursorNum,)),
columns=params[2])
self._newCursor(cursorNum, table)
self.op.regWrites.append(params[1])
def _op_VOpen(self, params):
# p1: cursor number
# p4: vtable structure
cursorNum = params[0]
table = self._newVirtualTable(params[3],
name=("virtual%d" % (cursorNum,)))
self._newCursor(cursorNum, table)
def _op_OpenEphemeral(self, params):
cursorNum = params[0]
numColumns = params[1]
indexDetails = params[3]
table = self._newEphemeralTable(
name=("ephemeral%d" % (cursorNum,)),
columns=numColumns)
if indexDetails:
cursorOn = self._newIndexOn(indexDetails,
table=table,
name="eindex%d" % (cursorNum,))
else:
cursorOn = table
self._newCursor(cursorNum, cursorOn)
def _op_SorterOpen(self, params):
# per docs, this is just like OpenEphemeral but it's for "large tables
# using an external merge-sort algorithm".
pass
def _op_SorterInsert(self, params):
# same as IdxInsert
pass
def _op_SorterSort(self, params):
# same as Sort
pass
def _op_SorterData(self, params):
# Its own thing
pass
def _op_SorterNext(self, params):
# advance read cursor to next sorted element
pass
def _op_Permute(self, params):
# it just prints "intarray" at least for non-debug. not very helpful!
pass
def _op_Compare(self, params):
# P1..(P1+P3-1)
self.op.regReads.extend([params[0] + x for x in range(params[2])])
# P2..(P2+P3-1)
self.op.regReads.extend([params[1] + x for x in range(params[2])])
# uh, we don't use this yet.
self._parseKeyinfo(params[3])
# we contaminate the jump decision...
self.op.regWrites.append('for_jump')
def _condJump(self, regs, target):
if regs:
if isinstance(regs, list):
self.op.regReads.extend(regs)
else:
self.op.regReads.append(regs)
self.op.goTo.append(self.op.addr + 1)
self.op.goTo.append(target)
self.op.usesImmediate = target
def _jump(self, target):
self.op.goTo.append(target)
self.op.usesImmediate = target
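# Example: a conditional opcode at addr 7 that tests register 3 and may jump to 12
# would call self._condJump(3, 12); that records a read of r3 and the two possible
# successors 8 (fall-through) and 12. Unconditional jumps go through self._jump(12).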
def _op_Goto(self, params):
self._jump(params[1])
def _op_Init(self, params):
# says to jump to P2 if P2 is not zero
if (params[1]):
self._jump(params[1])
def _op_Jump(self, params):
# we base our decision on the result of the last compare
self.op.regReads.append('for_jump')
self._jump(params[0])
self._jump(params[1])
self._jump(params[2])
self.op.usesImmediate = None # too many for now... XXX
def _op_Gosub(self, params):
self.op.regWrites.append(params[0])
self.op.dynamicWritePC = params[0]
self._jump(params[1])
if NO_YIELDS:
self.op.goTo.append(self.op.addr + 1)
# def _op_InitCoroutine(self, params):
# pass
# def _op_EndCoroutine(self, params):
# pass
def _op_Yield(self, params):
self.op.regReads.append(params[0])
self.op.regWrites.append(params[0])
if not NO_YIELDS:
self.op.dynamicWritePC = params[0]
# we won't know where our goTo goes to until dataflow analysis, nor
# where we would 'come from' to the next opcode. But we do know that
# after us is a basic block break, so let's hint that.
self.op.dynamicGoTo = params[0]
# do not arbitrarily flow to the next dude!
self.op.terminate = True
def _op_Return(self, params):
# just like for Yield, we have no idea where we are going until
# dataflow.
self.op.regReads.append(params[0])
self.op.dynamicGoTo = params[0]
def _op_NullRow(self, params):
# moves us to a no-op row
self._getCursor(params[0], False, True)
def _op_Seek(self, params):
self._getCursor(params[0], False, True)
self.op.regReads.append(params[1])
def _op_SeekCommon(self, params, comparison):
cursor = self._getCursor(params[0], False, True)
if isinstance(cursor.on, Table):
self.op.regReads.append(params[2])
else:
for x in range(params[3]):
self.op.regReads.append(params[2] + x)
if params[1]:
self._condJump(None, params[1])
def _op_SeekLT(self, params):
self._op_SeekCommon(params, '<')
def _op_SeekLE(self, params):
self._op_SeekCommon(params, '<=')
def _op_SeekGE(self, params):
self._op_SeekCommon(params, '>=')
def _op_SeekGT(self, params):
self._op_SeekCommon(params, '>')
def _op_IdxCommon(self, params, comparison):
self._getCursor(params[0])
| |
from collections import namedtuple
import numpy as np
from os.path import join, dirname
import pandas as pd
from scipy.interpolate import splrep, splev
from scipy.stats import norm
from ...util import ResError
_P = namedtuple('PowerCurve', 'ws cf')
_synthetic_power_curve_data = None
def synthetic_power_curve_data() -> pd.DataFrame:
"""
Reads the data used for creating a synthetic power curve.
Returns
-------
pandas DataFrame
DataFrame with the data to create a synthetic power curve.
"""
global _synthetic_power_curve_data
if _synthetic_power_curve_data is None:
_synthetic_power_curve_data = pd.read_csv(
join(dirname(__file__), "data", "synthetic_turbine_params.csv"), header=1)
return _synthetic_power_curve_data
def compute_specific_power(capacity, rotor_diam, **k):
"""
Calculates the corresponding specific power for a wind turbine in W/m2 from values of capacity in kW and rotor diameter in m.
Parameters
----------
capacity : float or array-like
Turbine's nominal capacity in kW.
rotor_diam : float or array-like
Turbine's rotor diameter in m.
Returns
-------
float or array-like
Specific power in W/m2
"""
return capacity * 1000 / rotor_diam**2 / np.pi * 4
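# Worked example (illustrative numbers): a 3450 kW turbine with a 136 m rotor gives
#   3450 * 1000 / 136**2 / pi * 4  ~= 237.5
# i.e. roughly 237 W of rated capacity per m2 of rotor swept area.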
class PowerCurve():
"""
Creates a wind turbine's power curve represented by a set of (wind-speed,capacity-factor) pairs.
Initialization:
Parameters
----------
wind_speed : array-like
The wind speeds values
capacity_factor : array-like
The corresponding capacity factor
Returns
-------
PowerCurve object
"""
def __init__(self, wind_speed, capacity_factor):
self.wind_speed = np.array(wind_speed)
self.capacity_factor = np.array(capacity_factor)
def __str__(self):
out = ""
for ws, cf in zip(self.wind_speed, self.capacity_factor):
out += "%6.2f - %4.2f\n" % (ws, cf)
return out
def _repr_svg_(self):
# return str(self)
import matplotlib.pyplot as plt
from io import BytesIO
plt.figure(figsize=(7, 3))
plt.plot(self.wind_speed, self.capacity_factor, color=(0, 91 / 255, 130 / 255), linewidth=3)
plt.tick_params(labelsize=12)
plt.xlabel("wind speed [m/s]", fontsize=13)
plt.ylabel("capacity output", fontsize=13)
plt.tight_layout()
plt.grid()
f = BytesIO()
plt.savefig(f, format="svg", dpi=100)
plt.close()
f.seek(0)
return f.read().decode('ascii')
@staticmethod
def from_specific_power(specific_power, cutout=25):
"""
Creates a synthetic wind turbine power curve based on observed relationships between turbine specific power and known power curves according to Ryberg et al. [1].
Parameters
----------
specific_power : float
Turbine's specific power in W/m2, as returned by compute_specific_power.
cutout : int, optional
Cut out wind speed in m/s, by default 25
Returns
-------
PowerCurve
Sources
-------
[1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2019). The future of European onshore wind energy potential: Detailed distribution and simulation of advanced turbine designs. Energy. https://doi.org/10.1016/j.energy.2019.06.052
See also
--------
PowerCurve.from_capacity_and_rotor_diam( <turbine capacity>, <turbine rotor diameter> )
"""
# Create ws
ws = [0, ]
spcd = synthetic_power_curve_data()
ws.extend(np.exp(spcd.const + spcd.scale * np.log(specific_power)))
ws.extend(np.linspace(ws[-1], cutout, 20)[1:])
ws = np.array(ws)
# create capacity factor output
cf = [0, ]
cf.extend(spcd.perc_capacity / 100)
cf.extend([1] * 19)
cf = np.array(cf)
# Done!
return PowerCurve(ws, cf)
@staticmethod
def from_capacity_and_rotor_diam(capacity, rotor_diam, cutout=25):
"""
Creates a synthetic wind turbine power curve based on observed relationships between turbine's capacity, rotor diameter and known power curves according to Ryberg et al. [1].
Parameters
----------
capacity : numeric
Baseline turbine capacity in kW.
rotor_diam : numeric
turbine rotor diameter in m
cutout : int, optional
Cut out wind speed in m/s, by default 25
Returns
-------
PowerCurve
Sources
-------
[1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2019). The future of European onshore wind energy potential: Detailed distribution and simulation of advanced turbine designs. Energy. https://doi.org/10.1016/j.energy.2019.06.052
See also
--------
PowerCurve.from_specific_power( <turbine specific power> )
"""
return PowerCurve.from_specific_power(compute_specific_power(capacity, rotor_diam))
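# Usage sketch (hypothetical turbine): build a synthetic curve and evaluate it.
#   pc = PowerCurve.from_capacity_and_rotor_diam(capacity=3450, rotor_diam=136)
#   pc.simulate([4.0, 8.0, 12.0])   # capacity factors at those wind speeds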
def simulate(self, wind_speed):
"""
Applies the invoking power curve to the given wind speeds.
Parameters
----------
wind_speed : array_like
Local average wind speed close to or at the hub height.
Returns
-------
array_like
Corresponding capacity factors for the given wind speeds
"""
powerCurveInterp = splrep(self.wind_speed, self.capacity_factor)
output = splev(wind_speed, powerCurveInterp)
if isinstance(wind_speed, pd.DataFrame):
output = pd.DataFrame(output, index=wind_speed.index, columns=wind_speed.columns)
return output
def expected_capacity_factor_from_weibull(self, mean_wind_speed=5, weibull_shape=2):
"""
Computes the expected average capacity factor of a wind turbine based on a Weibull distribution of wind speeds.
Parameters
----------
mean_wind_speed : int, optional
mean wind speed at the location in m/s, by default 5
weibull_shape : int, optional
Weibull shape parameter, by default 2
Returns
-------
numeric
Average capacity factor
See also
-------
PowerCurve.expected_capacity_factor_from_distribution
"""
from scipy.special import gamma
from scipy.stats import exponweib
# Get windspeed distribution
lam = mean_wind_speed / gamma(1 + 1 / weibull_shape)
dws = 0.001
ws = np.arange(0, 40, dws)
pdf = exponweib.pdf(ws, 1, weibull_shape, scale=lam)
# Estimate generation
power_curveInterp = splrep(self.wind_speed, self.capacity_factor)
gen = splev(ws, power_curveInterp)
# Do some "just in case" clean-up
cutin = self.wind_speed.min() # use the first defined windspeed as the cut in
cutout = self.wind_speed.max() # use the last defined windspeed as the cut out
gen[gen < 0] = 0 # floor to zero
gen[ws < cutin] = 0 # Drop power to zero before cutin
gen[ws > cutout] = 0 # Drop power to zero after cutout
# Done
meanCapFac = (gen * pdf).sum() * dws
return meanCapFac
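# e.g. pc.expected_capacity_factor_from_weibull(mean_wind_speed=6.5, weibull_shape=2)
# returns the long-run mean capacity factor implied by that (illustrative) Rayleigh-like
# wind regime.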
def expected_capacity_factor_from_distribution(self, wind_speed_values, wind_speed_counts):
"""
Computes the expected average capacity factor of a wind turbine based on an explicitly-provided wind speed distribution
Parameters
----------
wind_speed_values : numeric or array-like
wind speed values in m/s
wind_speed_counts : numeric or array-like
corresponding counts (number of occurrence) of the given wind speed values.
Counts will be normalized within the function
Example
-------
pc.expected_capacity_factor_from_distribution(
wind_speed_values=[ 1, 2, 3, 4, 5, 6], # Units of m/s
wind_speed_counts=[0.1, 0.3, 0.5, 0.3, 0.1, 0.025 ] # Units of "counts"
)
Returns
-------
numeric
Average capacity factor
See also
-------
PowerCurve.expected_capacity_factor_from_weibull
"""
wind_speed_values = np.array(wind_speed_values)
wind_speed_counts = np.array(wind_speed_counts)
if not len(wind_speed_values.shape) == 1:
raise ResError("wind_speed_values must be 1-dimensional")
# Handle 2 dimensional counts with 1 dimensional wind speeds
if len(wind_speed_counts.shape) > 1:
if not wind_speed_counts.shape[0] == wind_speed_values.shape[0]:
raise ResError("Dimensional incompatibility")
wind_speed_values = np.reshape(wind_speed_values, (wind_speed_counts.shape[0], 1))
# Estimate generation distribution
gen = np.interp(wind_speed_values, self.wind_speed, self.capacity_factor, left=0, right=0) * wind_speed_counts
meanGen = gen.sum(0) / wind_speed_counts.sum(0)
# Done
return meanGen
def convolute_by_gaussian(self, scaling=0.06, base=0.1, extend_beyond_cut_out=True, _min_speed=0.01, _max_speed=40, _steps=4000):
"""
Convolutes a turbine power curve by a normal distribution function with wind-speed-dependent standard deviation.
Parameters
----------
scaling : float, optional
scaling factor, by default 0.06
base : float, optional
base value, by default 0.1
extend_beyond_cut_out : bool, optional
extend the estimation beyond the turbine's cut out wind speed, by default True
_min_speed : float, optional
minimum wind speed value in m/s to be considered, by default 0.01
_max_speed : int, optional
maximum wind speed value in m/s to be considered, by default 40
_steps : int, optional
number of steps in between the wind speed range, by default 4000
Returns
-------
PowerCurve
The resulting convoluted power curve
Notes
------
The wind-speed-dependent standard deviation is computed with: std = wind_speed * scaling + base
"""
# Initialize windspeed axis
ws = np.linspace(_min_speed, _max_speed, _steps)
dws = ws[1] - ws[0]
# check if we have enough resolution
tmp = (scaling * 5 + base) / dws
if tmp < 1.0: # manually checked threshold
if tmp < 0.25: # manually checked threshold
raise ResError("Insufficient number of 'steps'")
else:
print("WARNING: 'steps' may not be high enough to properly compute the convoluted power curve. Check results or use a higher number of steps")
# Initialize vanilla power curve
selfInterp = splrep(ws, np.interp(ws, self.wind_speed, self.capacity_factor))
cf = np.zeros(_steps)
sel = ws < self.wind_speed.max()
cf[sel] = splev(ws[sel], selfInterp)
cf[ws < self.wind_speed.min()] = 0 # set all windspeed less than cut-in speed to 0
cf[ws > self.wind_speed.max()] = 0 # set all windspeed greater than cut-out speed to 0 (just in case)
cf[cf < 0] = 0 # force a floor of 0
# cf[cf>self[:,1].max()] = self[:,1].max() # force a ceiling of the max capacity
# Begin convolution
convolutedCF = np.zeros(_steps)
for i, ws_ in enumerate(ws):
convolutedCF[i] = (norm.pdf(ws, loc=ws_, scale=scaling * ws_ + base) * cf).sum() * dws
# Correct cutoff, maybe
if not extend_beyond_cut_out:
convolutedCF[ws > self.wind_speed[-1]] = 0
# Done!
ws = ws[::40]
convolutedCF = convolutedCF[::40]
return PowerCurve(ws, convolutedCF)
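# Illustrative usage (added comment, not from the library docs): given an
# existing PowerCurve instance `pc`,
#
#     smoothed = pc.convolute_by_gaussian(scaling=0.06, base=0.1)
#
# returns a new PowerCurve whose capacity factors account for wind speed
# uncertainty around each simulated value; the smoothed curve can then be fed
# to the expected-capacity-factor methods above. The argument values shown are
# simply the defaults.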
def apply_loss_factor(self, loss):
"""
Applies a loss factor to the power curve. It can be a single value, or a function which takes a 'capacity factor' array as input.
Parameters
----------
loss : numeric or function
If | |
""" models of Generators, Endoders and Discriminators at various image sizes
following deep convolutionnal model of DCGAN
cf https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html
and https://github.com/pytorch/examples/tree/master/dcgan
the final non linearity of the generator should be tanh ( positive and negative values, centered at zero) for GAN, but sigmoid for VAE,
where image pixel values are coded as probabilities between 0 and 1
"""
from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
from Param import nz, nc, device
class VAE_Attention_model(nn.Module):
def __init__(self, nz=nz, nef=8, ngf=8, nc=nc):
super(VAE_Attention_model, self).__init__()
self.nz=nz
self.nc=nc
assert nef == ngf , "encoder and decoder outputs should have the same dimensions at each level"
###########################################
# Encoder's layers
###########################################
self.Encoder_layer1 = nn.Sequential(
# input is (nc) x 512 x 512
nn.Conv2d(nc, nef, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef),
nn.LeakyReLU(0.2, inplace=True))
self.Encoder_layer2 = nn.Sequential(
# state size is (nef) x 256 x 256
nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 2),
nn.LeakyReLU(0.2, inplace=True))
self.Encoder_layer3 = nn.Sequential(
# state size. (nef*2) x 128 x 128
nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 4),
nn.LeakyReLU(0.2, inplace=True))
self.Encoder_layer4 = nn.Sequential(
# state size. (nef*4) x 64 x 64
nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 8),
nn.LeakyReLU(0.2, inplace=True))
self.Encoder_layer5 = nn.Sequential(
# state size. (nef*8) x 32 x 32
nn.Conv2d(nef*8, nef * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 16),
nn.LeakyReLU(0.2, inplace=True))
self.Encoder_layer6 = nn.Sequential(
# state size. (nef*16) x 16 x 16
nn.Conv2d(nef * 16, nef * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 32),
nn.LeakyReLU(0.2, inplace=True))
self.Encoder_layer7 = nn.Sequential(
# state size. (nef*32) x 8 x 8
nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 64),
nn.LeakyReLU(0.2, inplace=True))
self.Encoder_layer8 = nn.Sequential(
# state size. (nef*64) x 4 x 4
nn.Conv2d(nef * 64, nef * 128, 3, 1, 0, bias=False),
nn.BatchNorm2d(nef * 128),
nn.LeakyReLU(0.2, inplace=True))
self.Encoder_layer9 = nn.Sequential(
# state size. (nef*128) x 2 x 2
nn.Conv2d(nef * 128, nz, 2, 1, 0, bias=True),
nn.Sigmoid()
# state size. (nz) x 1 x 1
)
###########################################
# Decoder's layers
###########################################
# state size. (nz) x 1 x 1
self.Decoder_layer1 = nn.Sequential(
nn.ConvTranspose2d(nz, ngf *128 , 2, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 128),
nn.ReLU(True))
self.Decoder_layer2 = nn.Sequential(
# size ngf*128 x2 x2
nn.ConvTranspose2d(ngf * 128, ngf * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 64),
nn.ReLU(True))
self.Decoder_layer3 = nn.Sequential(
# size ngf*64 x4 x4
nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 32),
nn.ReLU(True))
self.Decoder_layer4 = nn.Sequential(
# size ngf*32 x8 x8
nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(True))
self.Decoder_layer5 = nn.Sequential(
# state size. (ngf*16) x 16 x16
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True))
self.Decoder_layer6 = nn.Sequential(
# state size. (ngf*8) x 32 x 32
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True))
self.Decoder_layer7 = nn.Sequential(
# state size. (ngf*4) x 64 x 64
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True))
self.Decoder_layer8 = nn.Sequential(
# state size. (ngf*2) x 128 x 128
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True))
self.Decoder_layer9 = nn.Sequential(
# state size. (ngf) x 256 x 256
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
# nn.Tanh()
nn.Sigmoid() # for VAE
# state size. (nc) x 512 x 512
)
#------------------------
# Attention Modules
#------------------------
self.attention_layer = Attention_layer(nef)
self.attention_layer2 = Attention_layer(nef*2)
self.attention_layer4 = Attention_layer(nef*4)
self.attention_layer8 = Attention_layer(nef*8)
self.attention_layer16 = Attention_layer(nef*16)
self.attention_layer32 = Attention_layer(nef*32)
self.attention_layer64 = Attention_layer(nef*64)
self.attention_layer128 = Attention_layer(nef*128)
#
self.fc1 = nn.Linear(nz, 64)
self.fc2 = nn.Linear(nz, 64)
self.fc3 = nn.Linear(64, nz)
def reparametrize(self, mu, logvar):
# Reparametrization trick: z = mu + exp(0.5 * logvar) * eps with eps ~ N(0, I),
# which keeps the sampling step differentiable with respect to mu and logvar.
std = logvar.mul(0.5).exp_()
eps = torch.FloatTensor(std.size()).normal_().to(device)
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, input):
batch_size = input.shape[0]
num_channels = input.shape[1]
input_img = input[:,:3,:,:]
input_flow = input[:,3:num_channels,:,:]
#########################
# Encoding
#########################
# N x nc x 512 x 512
x1 = self.Encoder_layer1(input_img)
m_x1 = self.Encoder_layer1(input_flow)
# N x nef x 256 x 256
x2 = self.Encoder_layer2(x1)
m_x2 = self.Encoder_layer2(m_x1)
# N x nef*2 x 128 x 128
x3 = self.Encoder_layer3(x2)
m_x3 = self.Encoder_layer3(m_x2)
# N x nef*4 x 64 x 64
x4 = self.Encoder_layer4(x3)
m_x4 = self.Encoder_layer4(m_x3)
# N x nef*8 x 32 x 32
x5 = self.Encoder_layer5(x4)
m_x5 = self.Encoder_layer5(m_x4)
# N x nef*16 x 16 x 16
x6 = self.Encoder_layer6(x5)
m_x6 = self.Encoder_layer6(m_x5)
# N x nef*32 x 8 x 8
x7 = self.Encoder_layer7(x6)
m_x7 = self.Encoder_layer7(m_x6)
# N x nef*64 x 4 x 4
x8 = self.Encoder_layer8(x7)
m_x8 = self.Encoder_layer8(m_x7)
# N x nef*128 x 2 x 2
Encoder_out = self.Encoder_layer9(x8)
m_Encoder_out = self.Encoder_layer9(m_x8)
# N x nz x 1 x 1
Encoder_out = Encoder_out.reshape(batch_size, self.nz, 1, 1)
m_Encoder_out = m_Encoder_out.reshape(batch_size, self.nz, 1, 1)
Encoder_out = Encoder_out.view(batch_size, self.nz)
m_Encoder_out = m_Encoder_out.view(batch_size, self.nz)
mu = self.fc1(Encoder_out) #fc1
logvar = self.fc2(Encoder_out) #fc2
z = self.reparametrize(mu, logvar)
z = self.fc3(z).reshape(-1, self.nz, 1, 1) #fc3
m_mu = self.fc1(m_Encoder_out) #fc1
m_logvar = self.fc2(m_Encoder_out) #fc2
m_z = self.reparametrize(m_mu, m_logvar)
m_z = self.fc3(m_z).reshape(-1, self.nz, 1, 1) #fc3
#########################
# Decoding
#########################
# N x nz x 1 x 1
y1 = self.Decoder_layer1(z)
m_y1 = self.Decoder_layer1(m_z)
#print(m_x8.shape, m_y1.shape)
weight1 = self.attention_layer128(m_x8, m_y1)
# N x ngf*128 x 2 x 2
a1 = torch.mul(x8, weight1) + y1
y2 = self.Decoder_layer2(a1)
m_y2 = self.Decoder_layer2(m_y1)
weight2 = self.attention_layer64(m_x7, m_y2)
del y1, m_y1, a1, weight1, m_x7, m_x8, x8
# N x ngf*64 x 4 x 4
a2 = torch.mul(x7, weight2) + y2
y3 = self.Decoder_layer3(a2)
m_y3 = self.Decoder_layer3(m_y2)
weight3 = self.attention_layer32(m_x6, m_y3)
del y2, m_y2, a2, weight2, m_x6, x7
# N x ngf*32 x 8 x 8
a3 = torch.mul(x6, weight3) + y3
y4 = self.Decoder_layer4(a3)
m_y4 = self.Decoder_layer4(m_y3)
weight4 = self.attention_layer16(m_x5,m_y4)
del y3, m_y3, a3, weight3, m_x5, x6
# N x ngf*16 x 16 x 16
a4 = torch.mul(x5, weight4) + y4
y5 = self.Decoder_layer5(a4)
m_y5 = self.Decoder_layer5(m_y4)
weight5 = self.attention_layer8(m_x4,m_y5)
del y4, m_y4, a4, weight4, m_x4, x5
# N x ngf*8 x 32 x 32
a5 = torch.mul(x4, weight5) + y5
y6 = self.Decoder_layer6(a5)
m_y6 = self.Decoder_layer6(m_y5)
weight6 = self.attention_layer4(m_x3,m_y6)
del y5, m_y5, a5, weight5, m_x3, x4
# N x ngf*4 x 64 x 64
a6 = torch.mul(x3, weight6) + y6
y7 = self.Decoder_layer7(a6)
m_y7 = self.Decoder_layer7(m_y6)
weight7 = self.attention_layer2(m_x2,m_y7)
del y6, m_y6, a6, weight6, m_x2, x3
# N x ngf*2 x 128 x 128
a7 = torch.mul(x2, weight7) + y7
y8 = self.Decoder_layer8(a7)
m_y8 = self.Decoder_layer8(m_y7)
weight8 = self.attention_layer(m_x1,m_y8)
del y7, m_y7, a7, weight7, m_x1, x2
# N x ngf x 256 x 256
a8 = torch.mul(x1, weight8) + y8
out = self.Decoder_layer9(a8)
m_out = self.Decoder_layer9(m_y8)
del y8, m_y8, a8, weight8, x1
torch.cuda.empty_cache()
return out.reshape(batch_size, self.nc, 512, 512), mu, logvar
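# Illustrative usage sketch (added comment, not from the original authors). The
# channel split in forward() implies nc == 3: channels 0..2 carry the RGB frame
# and channels 3..5 a 3-channel flow map in the same tensor, e.g.
#
#     model = VAE_Attention_model().to(device)
#     batch = torch.randn(2, 6, 512, 512).to(device)   # hypothetical input
#     recon, mu, logvar = model(batch)                  # recon: (2, 3, 512, 512)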
def attention_logit(f):
return nn.Conv2d(f, f, 1, 1, 0, bias=True), nn.Sigmoid() # nn.Softmax() #
class Attention_layer(nn.Module):
def __init__(self, n):
self.n = n
super().__init__()
self.attention_logit = nn.Sequential(*attention_logit(self.n))
def forward(self, Enc, Dec):
b1, n1, h1, w1 = Enc.shape
b2, n2, h2, w2 = Dec.shape
assert b1 == b2 and n1 == n2 and h1 == h2 and w1 == w2, "encoder and decoder outputs should have the same dimensions"
h, w, n = h1, w1, n1
batch_size = b1
y = self.attention_logit(Enc) # y dimension: n x h x w
out = torch.ones(y.shape).to(device) - y
#x = torch.mul(Dec, y) # pixel-wise multiplication
#out = x + Dec
return out.view(batch_size, n, h, w)
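# Minimal shape-check sketch (added for illustration; not part of the original
# training code). It assumes the `device` imported from Param is usable and that
# the gate is applied to matching encoder/decoder feature maps, as in the
# forward pass above.
def _attention_gate_shape_check(batch=2, channels=8, size=16):
    gate = Attention_layer(channels).to(device)
    enc = torch.randn(batch, channels, size, size).to(device)
    dec = torch.randn(batch, channels, size, size).to(device)
    weight = gate(enc, dec)  # elementwise values in (0, 1): 1 - sigmoid(conv(enc))
    assert weight.shape == enc.shape
    return weight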
class Renet(nn.Module):
def __init__(self, size, in_channel, out_channel):
super(Renet, self).__init__()
self.size = size
self.in_channel = in_channel
self.out_channel = out_channel
self.vertical = nn.LSTM(input_size=in_channel, hidden_size=256, batch_first=True,
bidirectional=True) # each row
self.horizontal = nn.LSTM(input_size=512, hidden_size=256, batch_first=True,
bidirectional=True) # each column
self.conv = nn.Conv2d(512, out_channel, 1)
def forward(self, *input):
x = input[0]
temp = []
x = torch.transpose(x, 1, 3) # batch, width, height, in_channel
for i in range(self.size):
h, _ = self.vertical(x[:, :, i, :])
temp.append(h) # batch, width, 512
x = torch.stack(temp, dim=2) # batch, width, height, 512
temp = | |
:data:`False` or a string).
:returns: A dynamically constructed subclass of
:class:`custom_property` with the given options.
To construct an instance:
:param args: The first positional argument is the function that's
called to compute the value of the property.
:returns: A :class:`custom_property` instance corresponding to the
class whose constructor was called.
Here's an example of how the subclass constructor can be used to
dynamically construct custom properties with specific options:
.. code-block:: python
from property_manager import custom_property
class WritableCachedPropertyDemo(object):
@custom_property(cached=True, writable=True)
def customized_test_property(self):
return 42
The example above defines and uses a property whose computed value is
cached and which supports assignment of new values. The example could
have been made even simpler:
.. code-block:: python
from property_manager import cached_property
class WritableCachedPropertyDemo(object):
@cached_property(writable=True)
def customized_test_property(self):
return 42
Basically you can take any of the custom property classes defined in
the :mod:`property_manager` module and call the class with keyword
arguments corresponding to the options you'd like to change.
"""
if options:
# Keyword arguments construct subclasses.
name = args[0] if args else 'customized_property'
options['dynamic'] = True
return type(name, (cls,), options)
else:
# Positional arguments construct instances.
return super(custom_property, cls).__new__(cls, *args)
def __init__(self, *args, **kw):
"""
Initialize a :class:`custom_property` object.
:param args: Any positional arguments are passed on to the initializer
of the :class:`property` class.
:param kw: Any keyword arguments are passed on to the initializer of
the :class:`property` class.
Automatically calls :func:`inject_usage_notes()` during initialization
(only if :data:`USAGE_NOTES_ENABLED` is :data:`True`).
"""
# It's not documented so I went to try it out and apparently the
# property class initializer performs absolutely no argument
# validation. The first argument doesn't have to be a callable,
# in fact none of the arguments are even mandatory?! :-P
super(custom_property, self).__init__(*args, **kw)
# Explicit is better than implicit so I'll just go ahead and check
# whether the value(s) given by the user make sense :-).
self.ensure_callable('fget')
# We only check the 'fset' and 'fdel' values when they are not None
# because both of these arguments are supposed to be optional :-).
for name in 'fset', 'fdel':
if getattr(self, name) is not None:
self.ensure_callable(name)
# Copy some important magic members from the decorated method.
for name in '__doc__', '__module__', '__name__':
value = getattr(self.fget, name, None)
if value is not None:
setattr(self, name, value)
# Inject usage notes when running under Sphinx.
if USAGE_NOTES_ENABLED:
self.inject_usage_notes()
def ensure_callable(self, role):
"""
Ensure that a decorated value is in fact callable.
:param role: The value's role (one of 'fget', 'fset' or 'fdel').
:raises: :exc:`exceptions.ValueError` when the value isn't callable.
"""
value = getattr(self, role)
if not callable(value):
msg = "Invalid '%s' value! (expected callable, got %r instead)"
raise ValueError(msg % (role, value))
def inject_usage_notes(self):
"""
Inject the property's semantics into its documentation.
Calls :func:`compose_usage_notes()` to get a description of the property's
semantics and appends it to the property's documentation. If the property
doesn't have any documentation the notes are not added.
"""
if self.usage_notes and self.__doc__ and isinstance(self.__doc__, basestring):
notes = self.compose_usage_notes()
if notes:
self.__doc__ = "\n\n".join([
textwrap.dedent(self.__doc__),
".. note:: %s" % " ".join(notes),
])
def compose_usage_notes(self):
"""
Get a description of the property's semantics to include in its documentation.
:returns: A list of strings describing the semantics of the
:class:`custom_property` in reStructuredText_ format with
Sphinx_ directives.
.. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
.. _Sphinx: http://sphinx-doc.org/
"""
template = DYNAMIC_PROPERTY_NOTE if self.dynamic else CUSTOM_PROPERTY_NOTE
cls = custom_property if self.dynamic else self.__class__
dotted_path = "%s.%s" % (cls.__module__, cls.__name__)
notes = [format(template, name=self.__name__, type=dotted_path)]
if self.environment_variable:
notes.append(format(ENVIRONMENT_PROPERTY_NOTE, variable=self.environment_variable))
if self.required:
notes.append(format(REQUIRED_PROPERTY_NOTE, name=self.__name__))
if self.key:
notes.append(KEY_PROPERTY_NOTE)
if self.writable:
notes.append(WRITABLE_PROPERTY_NOTE)
if self.cached:
notes.append(CACHED_PROPERTY_NOTE)
if self.resettable:
if self.cached:
notes.append(RESETTABLE_CACHED_PROPERTY_NOTE)
else:
notes.append(RESETTABLE_WRITABLE_PROPERTY_NOTE)
return notes
def __get__(self, obj, type=None):
"""
Get the assigned, cached or computed value of the property.
:param obj: The instance that owns the property.
:param type: The class that owns the property.
:returns: The value of the property.
"""
if obj is None:
# Called to get the attribute of the class.
return self
else:
# Called to get the attribute of an instance. We calculate the
# property's dotted name here once to minimize string creation.
dotted_name = format_property(obj, self.__name__)
if self.key or self.writable or self.cached:
# Check if a value has been assigned or cached.
value = obj.__dict__.get(self.__name__, NOTHING)
if value is not NOTHING:
logger.spam("%s reporting assigned or cached value (%r) ..", dotted_name, value)
return value
# Check if the property has an environment variable. We do this
# after checking for an assigned value so that the `writable' and
# `environment_variable' options can be used together.
if self.environment_variable:
value = os.environ.get(self.environment_variable, NOTHING)
if value is not NOTHING:
logger.spam("%s reporting value from environment variable (%r) ..", dotted_name, value)
return value
# Compute the property's value.
value = super(custom_property, self).__get__(obj, type)
logger.spam("%s reporting computed value (%r) ..", dotted_name, value)
if self.cached:
# Cache the computed value.
logger.spam("%s caching computed value ..", dotted_name)
set_property(obj, self.__name__, value)
return value
def __set__(self, obj, value):
"""
Override the computed value of the property.
:param obj: The instance that owns the property.
:param value: The new value for the property.
:raises: :exc:`~exceptions.AttributeError` if :attr:`writable` is
:data:`False`.
"""
# Calculate the property's dotted name only once.
dotted_name = format_property(obj, self.__name__)
# Evaluate the property's setter (if any).
try:
logger.spam("%s calling setter with value %r ..", dotted_name, value)
super(custom_property, self).__set__(obj, value)
except AttributeError:
logger.spam("%s setter raised attribute error, falling back.", dotted_name)
if self.writable:
# Override a computed or previously assigned value.
logger.spam("%s overriding computed value to %r ..", dotted_name, value)
set_property(obj, self.__name__, value)
else:
# Check if we're setting a key property during initialization.
if self.key and obj.__dict__.get(self.__name__, None) is None:
# Make sure we were given a hashable value.
if not isinstance(value, Hashable):
msg = "Invalid value for key property '%s'! (expected hashable object, got %r instead)"
raise ValueError(msg % (self.__name__, value))
# Set the key property's value.
logger.spam("%s setting initial value to %r ..", dotted_name, value)
set_property(obj, self.__name__, value)
else:
# Refuse to override the computed value.
msg = "%r object attribute %r is read-only"
raise AttributeError(msg % (obj.__class__.__name__, self.__name__))
def __delete__(self, obj):
"""
Reset the assigned or cached value of the property.
:param obj: The instance that owns the property.
:raises: :exc:`~exceptions.AttributeError` if :attr:`resettable` is
:data:`False`.
Once the property has been deleted the next read will evaluate the
decorated function to compute the value.
"""
# Calculate the property's dotted name only once.
dotted_name = format_property(obj, self.__name__)
# Evaluate the property's deleter (if any).
try:
logger.spam("%s calling deleter ..", dotted_name)
super(custom_property, self).__delete__(obj)
except AttributeError:
logger.spam("%s deleter raised attribute error, falling back.", dotted_name)
if self.resettable:
# Reset the computed or overridden value.
logger.spam("%s clearing assigned or computed value ..", dotted_name)
clear_property(obj, self.__name__)
else:
msg = "%r object attribute %r is read-only"
raise AttributeError(msg % (obj.__class__.__name__, self.__name__))
class writable_property(custom_property):
"""
A computed property that supports assignment.
This is a variant of :class:`custom_property`
that has the :attr:`~custom_property.writable`
option enabled by default.
"""
writable = True
class required_property(writable_property):
"""
A property that requires a value to be set.
This is a variant of :class:`writable_property` that has the
:attr:`~custom_property.required` option enabled by default. Refer to the
documentation of the :attr:`~custom_property.required` option for an
example.
"""
required = True
class key_property(custom_property):
"""
A property whose value is used for comparison and hashing.
This is a variant of :class:`custom_property` that has the
:attr:`~custom_property.key` and :attr:`~custom_property.required`
options enabled by default.
"""
key = True
required = True
class mutable_property(writable_property):
"""
A computed property that can be assigned and reset.
This is a variant of :class:`writable_property` that
has the :attr:`~custom_property.resettable`
option enabled by default.
"""
resettable = True
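# Example (illustrative, not part of the library): a mutable_property accepts
# assignment and can be reset with `del`, after which the next read recomputes:
#
#     class Person(object):
#         @mutable_property
#         def name(self):
#             return "unknown"
#
#     p = Person()
#     p.name            # -> "unknown" (computed)
#     p.name = "Alice"  # overrides the computed value
#     del p.name        # clears the override; next read computes again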
class lazy_property(custom_property):
"""
A computed property whose value is computed once and cached.
This is a variant of :class:`custom_property` that
has the :attr:`~custom_property.cached`
option enabled by default.
"""
cached = True
class cached_property(lazy_property):
"""
A computed property whose value is computed once and cached, but can be reset.
| |
# Source file: mapss/static/packages/arches/tests/importer/jsonld_import_tests.py
import os
import json
import csv
import base64
import datetime
from io import BytesIO
from tests import test_settings
from operator import itemgetter
from django.core import management
from django.test.client import RequestFactory, Client
from django.contrib.auth.models import User, Group, AnonymousUser
from django.urls import reverse
from django.db import connection
from tests.base_test import ArchesTestCase, CREATE_TOKEN_SQL
from arches.app.utils.skos import SKOSReader
from arches.app.models.models import TileModel, ResourceInstance
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from arches.app.utils.data_management.resources.importer import BusinessDataImporter
from arches.app.utils.data_management.resources.exporter import ResourceExporter as BusinessDataExporter
from arches.app.utils.data_management.resource_graphs.importer import import_graph as ResourceGraphImporter
from arches.app.utils.data_management.resources.formats import rdffile
from arches.app.utils.data_management.resources.formats.rdffile import JsonLdReader
from pyld.jsonld import expand
# these tests can be run from the command line via
# python manage.py test tests/importer/jsonld_import_tests.py --settings="tests.test_settings"
class JsonLDImportTests(ArchesTestCase):
@classmethod
def setUpClass(cls):
# This runs once per instantiation
cls.loadOntology()
cls.factory = RequestFactory()
cls.token = "abc123"
cls.client = Client(HTTP_AUTHORIZATION="Bearer %s" % cls.token)
sql_str = CREATE_TOKEN_SQL.format(token=cls.token, user_id=1)
cursor = connection.cursor()
cursor.execute(sql_str)
skos = SKOSReader()
rdf = skos.read_file("tests/fixtures/jsonld_base/rdm/jsonld_test_thesaurus.xml")
ret = skos.save_concepts_from_skos(rdf)
skos = SKOSReader()
rdf = skos.read_file("tests/fixtures/jsonld_base/rdm/jsonld_test_collections.xml")
ret = skos.save_concepts_from_skos(rdf)
skos = SKOSReader()
rdf = skos.read_file("tests/fixtures/jsonld_base/rdm/5098-thesaurus.xml")
ret = skos.save_concepts_from_skos(rdf)
skos = SKOSReader()
rdf = skos.read_file("tests/fixtures/jsonld_base/rdm/5098-collections.xml")
ret = skos.save_concepts_from_skos(rdf)
# Load up the models and data only once
with open(os.path.join("tests/fixtures/jsonld_base/models/test_1_basic_object.json"), "rU") as f:
archesfile = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile["graph"])
with open(os.path.join("tests/fixtures/jsonld_base/models/test_2_complex_object.json"), "rU") as f:
archesfile2 = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile2["graph"])
skos = SKOSReader()
rdf = skos.read_file("tests/fixtures/jsonld_base/rdm/5098-thesaurus.xml")
ret = skos.save_concepts_from_skos(rdf)
skos = SKOSReader()
rdf = skos.read_file("tests/fixtures/jsonld_base/rdm/5098-collections.xml")
ret = skos.save_concepts_from_skos(rdf)
with open(os.path.join("tests/fixtures/jsonld_base/models/5098_concept_list.json"), "rU") as f:
archesfile = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile["graph"])
management.call_command("datatype", "register", source="tests/fixtures/datatypes/color.py")
management.call_command("datatype", "register", source="tests/fixtures/datatypes/semantic_like.py")
with open(os.path.join("tests/fixtures/jsonld_base/models/5299-basic.json"), "rU") as f:
archesfile2 = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile2["graph"])
with open(os.path.join("tests/fixtures/jsonld_base/models/5299_complex.json"), "rU") as f:
archesfile2 = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile2["graph"])
skos = SKOSReader()
rdf = skos.read_file("tests/fixtures/jsonld_base/rdm/5600-external-thesaurus.xml")
ret = skos.save_concepts_from_skos(rdf)
skos = SKOSReader()
rdf = skos.read_file("tests/fixtures/jsonld_base/rdm/5600-external-collections.xml")
ret = skos.save_concepts_from_skos(rdf)
# Load up the models and data only once
with open(os.path.join("tests/fixtures/jsonld_base/models/5121_false_ambiguity.json"), "rU") as f:
archesfile = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile["graph"])
with open(os.path.join("tests/fixtures/jsonld_base/models/5121_external_model.json"), "rU") as f:
archesfile = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile["graph"])
with open(os.path.join("tests/fixtures/jsonld_base/models/6235_parenttile_id.json"), "rU") as f:
archesfile = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile["graph"])
def setUp(self):
pass
@classmethod
def tearDownClass(cls):
pass
def tearDown(self):
pass
def _create_url(self, graph_id, resource_id):
base_url = reverse(
"resources_graphid",
kwargs={"graphid": graph_id, "resourceid": resource_id},
)
return base_url + "?format=json-ld"
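# For example, _create_url("bf734b4e-...", "221d1154-...") resolves through the
# "resources_graphid" route to something like
# "/resources/<graphid>/<resourceid>?format=json-ld"; the exact prefix depends
# on the project's URL configuration, so treat this as illustrative only.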
def test_context_caching(self):
data = {
"@context": "https://linked.art/ns/v1/linked-art.json",
"id": "https://linked.art/example/object/3",
"type": "HumanMadeObject",
"_label": "Black and White Photograph of '<NAME>'",
"classified_as": [{"id": "http://vocab.getty.edu/aat/300128359", "type": "Type", "_label": "Black and White Photograph"}],
}
fetch = rdffile.fetch
def tempFetch(url):
raise Exception("This should not happen because we cached the doc")
# rdffile.fetch = tempFetch
# # first we test that we can override the fetch function and confirm that it gets called
# with self.assertRaises(Exception):
# jsonld_document = expand(data)
# now set the function back and test normally
rdffile.fetch = fetch
jsonld_document = expand(data)
self.assertTrue(data["@context"] in rdffile.docCache)
# now set it to the temp fetch and confirm that the tempFetch isn't called on subsequent uses as it was initially
rdffile.fetch = tempFetch
jsonld_document = expand(data)
rdffile.fetch = fetch
# now invalidate the cache and make sure it refreshes the doc
rdffile.docCache[data["@context"]]["expires"] = datetime.datetime.now()
jsonld_document = expand(data)
self.assertTrue(rdffile.docCache[data["@context"]]["expires"] > datetime.datetime.now())
self.assertTrue(data["@context"] in rdffile.docCache)
def test_1_basic_import(self):
data = """{
"@id": "http://localhost:8000/resources/221d1154-fa8e-11e9-9cbb-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E22_Man-Made_Object",
"http://www.cidoc-crm.org/cidoc-crm/P3_has_note": "test!"
}"""
url = self._create_url(
graph_id="bf734b4e-f6b5-11e9-8f09-a4d18cec433a",
resource_id="221d1154-fa8e-11e9-9cbb-3af9d3b32b71",
)
response = self.client.put(url, data=data, HTTP_AUTHORIZATION=f"Bearer {self.token}")
self.assertEqual(response.status_code, 201)
js = response.json()
if type(js) == list:
js = js[0]
self.assertTrue("@id" in js)
self.assertTrue(js["@id"] == "http://localhost:8000/resources/221d1154-fa8e-11e9-9cbb-3af9d3b32b71")
self.assertTrue("http://www.cidoc-crm.org/cidoc-crm/P3_has_note" in js)
self.assertTrue(js["http://www.cidoc-crm.org/cidoc-crm/P3_has_note"] == "test!")
def test_1b_basic_post(self):
data = """{
"@type": "http://www.cidoc-crm.org/cidoc-crm/E22_Man-Made_Object",
"http://www.cidoc-crm.org/cidoc-crm/P3_has_note": "test!"
}"""
url = self._create_url(
graph_id="bf734b4e-f6b5-11e9-8f09-a4d18cec433a",
resource_id="",
)
response = self.client.post(url, data=data, content_type="application/json", HTTP_AUTHORIZATION=f"Bearer {self.token}")
self.assertEqual(response.status_code, 201)
js = response.json()
if type(js) == list:
js = js[0]
self.assertTrue("@id" in js)
self.assertTrue("http://www.cidoc-crm.org/cidoc-crm/P3_has_note" in js)
self.assertTrue(js["http://www.cidoc-crm.org/cidoc-crm/P3_has_note"] == "test!")
def test_2_complex_import_data(self):
# Note that this tests #5136, as the P101 -> P2 is a concept with a concept
data = """
{
"@id": "http://localhost:8000/resources/12345678-abcd-11e9-9cbb-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E22_Man-Made_Object",
"http://www.cidoc-crm.org/cidoc-crm/P101_had_as_general_use": {
"@id": "http://localhost:8000/concepts/fb457e76-e018-41e7-9be3-0f986816450a",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E55_Type",
"http://www.cidoc-crm.org/cidoc-crm/P2_has_type": {
"@id": "http://localhost:8000/concepts/14c92c17-5e2f-413a-95c2-3c5e41ee87d2",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E55_Type",
"http://www.w3.org/2000/01/rdf-schema#label": "Meta Type A"
},
"http://www.w3.org/2000/01/rdf-schema#label": "Test Type A"
},
"http://www.cidoc-crm.org/cidoc-crm/P160_has_temporal_projection": {
"@id": "http://localhost:8000/tile/9c1ec6b9-1094-427f-acf6-e9c3fca643b6/node/127193ea-fa6d-11e9-b369-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E52_Time-Span",
"http://www.cidoc-crm.org/cidoc-crm/P79_beginning_is_qualified_by": "example",
"http://www.cidoc-crm.org/cidoc-crm/P82a_begin_of_the_begin": {
"@type": "http://www.w3.org/2001/XMLSchema#dateTime",
"@value": "2019-10-01"
}
},
"http://www.cidoc-crm.org/cidoc-crm/P2_has_type": {
"@id": "http://localhost:8000/concepts/6bac5802-a6f8-427c-ba5f-d4b30d5b070e",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E55_Type",
"http://www.w3.org/2000/01/rdf-schema#label": "Single Type A"
},
"http://www.cidoc-crm.org/cidoc-crm/P3_has_note": "Test Data",
"http://www.cidoc-crm.org/cidoc-crm/P45_consists_of": [
{
"@id": "http://localhost:8000/concepts/9b61c995-71d8-4bce-987b-0ffa3da4c71c",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E57_Material",
"http://www.w3.org/2000/01/rdf-schema#label": "material b"
},
{
"@id": "http://localhost:8000/concepts/36c8d7a3-32e7-49e4-bd4c-2169a06b240a",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E57_Material",
"http://www.w3.org/2000/01/rdf-schema#label": "material a"
}
],
"http://www.cidoc-crm.org/cidoc-crm/P57_has_number_of_parts": 12
}
"""
url = self._create_url(
graph_id="ee72fb1e-fa6c-11e9-b369-3af9d3b32b71",
resource_id="12345678-abcd-11e9-9cbb-3af9d3b32b71",
)
response = self.client.put(url, data=data, HTTP_AUTHORIZATION=f"Bearer {self.token}")
self.assertEqual(response.status_code, 201)
js = response.json()
if type(js) == list:
js = js[0]
self.assertTrue("@id" in js)
self.assertTrue(js["@id"] == "http://localhost:8000/resources/12345678-abcd-11e9-9cbb-3af9d3b32b71")
hagu = "http://www.cidoc-crm.org/cidoc-crm/P101_had_as_general_use"
p2 = "http://www.cidoc-crm.org/cidoc-crm/P2_has_type"
temp = "http://www.cidoc-crm.org/cidoc-crm/P160_has_temporal_projection"
qual = "http://www.cidoc-crm.org/cidoc-crm/P79_beginning_is_qualified_by"
note = "http://www.cidoc-crm.org/cidoc-crm/P3_has_note"
pts = "http://www.cidoc-crm.org/cidoc-crm/P57_has_number_of_parts"
self.assertTrue(hagu in js)
use = js[hagu]
self.assertTrue("@id" in use)
self.assertTrue(use["@id"] == "http://localhost:8000/concepts/fb457e76-e018-41e7-9be3-0f986816450a")
self.assertTrue(p2 in use)
self.assertTrue(use[p2]["@id"] == "http://localhost:8000/concepts/14c92c17-5e2f-413a-95c2-3c5e41ee87d2")
self.assertTrue(temp in js)
proj = js[temp]
self.assertTrue(qual in proj)
self.assertTrue(proj[qual] == "example")
self.assertTrue(note in js)
self.assertTrue(js[note] == "Test Data")
self.assertTrue(pts in js)
self.assertTrue(js[pts] == 12)
def test_2b_complex_multiple(self):
data = """
{
"@id": "http://localhost:8000/resources/5e9baff0-109b-11ea-957a-acde48001122",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E22_Man-Made_Object",
"http://www.cidoc-crm.org/cidoc-crm/P101_had_as_general_use": {
"@id": "http://localhost:8000/concepts/fb457e76-e018-41e7-9be3-0f986816450a",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E55_Type",
"http://www.cidoc-crm.org/cidoc-crm/P2_has_type": {
"@id": "http://localhost:8000/concepts/dcd28b8a-0840-4a7f-a0d6-0341438552e6",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E55_Type",
"http://www.w3.org/2000/01/rdf-schema#label": "Meta Type B"
},
"http://www.w3.org/2000/01/rdf-schema#label": "Test Type A"
},
"http://www.cidoc-crm.org/cidoc-crm/P160_has_temporal_projection": [
{
"@id": "http://localhost:8000/tile/7e0371da-c62f-46c1-899b-d1e9419a76d5/node/127193ea-fa6d-11e9-b369-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E52_Time-Span",
"http://www.cidoc-crm.org/cidoc-crm/P79_beginning_is_qualified_by": "example 2"
},
{
"@id": "http://localhost:8000/tile/8cc347a4-265d-4a06-8327-e198e1d1d0c5/node/127193ea-fa6d-11e9-b369-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E52_Time-Span",
"http://www.cidoc-crm.org/cidoc-crm/P79_beginning_is_qualified_by": "example",
"http://www.cidoc-crm.org/cidoc-crm/P82a_begin_of_the_begin": {
"@type": "http://www.w3.org/2001/XMLSchema#dateTime",
"@value": "1903-10-28"
}
},
{
"@id": "http://localhost:8000/tile/6011c512-47e9-46c3-b6f3-034dcc6f2a9d/node/127193ea-fa6d-11e9-b369-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E52_Time-Span",
"http://www.cidoc-crm.org/cidoc-crm/P82a_begin_of_the_begin": {
"@type": "http://www.w3.org/2001/XMLSchema#dateTime",
"@value": "2019-11-15"
}
},
{
"@id": "http://localhost:8000/tile/7d42af30-4d00-434f-95d4-7a3b3f9bfec8/node/127193ea-fa6d-11e9-b369-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E52_Time-Span",
"http://www.cidoc-crm.org/cidoc-crm/P79_beginning_is_qualified_by": "example"
}
],
"http://www.cidoc-crm.org/cidoc-crm/P2_has_type": {
"@id": "http://localhost:8000/concepts/6bac5802-a6f8-427c-ba5f-d4b30d5b070e",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E55_Type",
"http://www.w3.org/2000/01/rdf-schema#label": "Single Type A"
},
"http://www.cidoc-crm.org/cidoc-crm/P3_has_note": [
"asdfasdfa",
"1903-10-21"
],
"http://www.cidoc-crm.org/cidoc-crm/P45_consists_of": {
"@id": "http://localhost:8000/concepts/36c8d7a3-32e7-49e4-bd4c-2169a06b240a",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E57_Material",
"http://www.w3.org/2000/01/rdf-schema#label": "material a"
},
"http://www.cidoc-crm.org/cidoc-crm/P57_has_number_of_parts": [
2,
1
]
}
"""
url = self._create_url(
graph_id="ee72fb1e-fa6c-11e9-b369-3af9d3b32b71",
resource_id="5e9baff0-109b-11ea-957a-acde48001122",
)
response = self.client.put(url, data=data, HTTP_AUTHORIZATION=f"Bearer {self.token}")
self.assertEqual(response.status_code, 201)
js = response.json()
if type(js) == list:
js = js[0]
self.assertTrue("@id" in js)
self.assertTrue(js["@id"] == "http://localhost:8000/resources/5e9baff0-109b-11ea-957a-acde48001122")
pts = "http://www.cidoc-crm.org/cidoc-crm/P57_has_number_of_parts"
note = "http://www.cidoc-crm.org/cidoc-crm/P3_has_note"
temp = "http://www.cidoc-crm.org/cidoc-crm/P160_has_temporal_projection"
qual = "http://www.cidoc-crm.org/cidoc-crm/P79_beginning_is_qualified_by"
botb = "http://www.cidoc-crm.org/cidoc-crm/P82a_begin_of_the_begin"
self.assertTrue(pts in js)
self.assertTrue(set(js[pts]) == set([1, 2]))
self.assertTrue(note in js)
self.assertTrue(set(js[note]) == set(["asdfasdfa", "1903-10-21"]))
self.assertTrue(temp in js)
temps = js[temp]
self.assertTrue(len(temps) == 4)
for t in temps:
if qual in t:
self.assertTrue(t[qual] in ["example", "example 2"])
if botb in t:
self.assertTrue(t[botb]["@value"] in ["2019-11-15", "1903-10-28"])
def test_3_5098_concepts(self):
data = """
{
"@id": "http://localhost:8000/resources/0b4439a8-beca-11e9-b4dc-0242ac160002",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E21_Person",
"http://www.cidoc-crm.org/cidoc-crm/P67i_is_referred_to_by": {
"@id": "http://localhost:8000/tile/cad329aa-1802-416e-bbce-5f71e21b1a47/node/accb030c-bec9-11e9-b4dc-0242ac160002",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E33_Linguistic_Object",
"http://www.cidoc-crm.org/cidoc-crm/P2_has_type": [
{
"@id": "http://localhost:8000/concepts/c3c4b8a8-39bb-41e7-af45-3a0c60fa4ddf",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E55_Type",
"http://www.w3.org/2000/01/rdf-schema#label": "Concept 2"
},
{
"@id": "http://localhost:8000/concepts/0bb450bc-8fe3-46cb-968e-2b56849e6e96",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E55_Type",
"http://www.w3.org/2000/01/rdf-schema#label": "Concept 1"
}
]
}
}
"""
url = self._create_url(
graph_id="92ccf5aa-bec9-11e9-bd39-0242ac160002",
resource_id="0b4439a8-beca-11e9-b4dc-0242ac160002",
)
response = self.client.put(url, data=data, HTTP_AUTHORIZATION=f"Bearer {self.token}")
self.assertEqual(response.status_code, 201)
js = response.json()
if type(js) == list:
js = js[0]
print(f"Got JSON for test 3: {js}")
self.assertTrue("@id" in js)
self.assertTrue(js["@id"] == "http://localhost:8000/resources/0b4439a8-beca-11e9-b4dc-0242ac160002")
types = js["http://www.cidoc-crm.org/cidoc-crm/P67i_is_referred_to_by"]["http://www.cidoc-crm.org/cidoc-crm/P2_has_type"]
self.assertTrue(type(types) == list)
self.assertTrue(len(types) == 2)
cids = [
"http://localhost:8000/concepts/c3c4b8a8-39bb-41e7-af45-3a0c60fa4ddf",
"http://localhost:8000/concepts/0bb450bc-8fe3-46cb-968e-2b56849e6e96",
]
self.assertTrue(types[0]["@id"] in cids)
self.assertTrue(types[1]["@id"] in cids)
self.assertTrue(types[0]["@id"] != types[1]["@id"])
def test_4_5098_resinst(self):
# Make instances for this new one to reference
BusinessDataImporter("tests/fixtures/jsonld_base/data/test_2_instances.json").import_business_data()
data = """
{
"@id": "http://localhost:8000/resources/abcd1234-1234-1129-b6e7-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E22_Man-Made_Object",
"http://www.cidoc-crm.org/cidoc-crm/P130_shows_features_of": [
{
"@id": "http://localhost:8000/resources/12bbf5bc-fa85-11e9-91b8-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E22_Man-Made_Object"
},
{
"@id": "http://localhost:8000/resources/24d0d25a-fa75-11e9-b369-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E22_Man-Made_Object"
}
],
"http://www.cidoc-crm.org/cidoc-crm/P3_has_note": "res inst list import"
}
"""
url = self._create_url(
graph_id="ee72fb1e-fa6c-11e9-b369-3af9d3b32b71",
resource_id="abcd1234-1234-1129-b6e7-3af9d3b32b71",
)
response = self.client.put(url, data=data, HTTP_AUTHORIZATION=f"Bearer {self.token}")
print(f"Test 4: {response.content}")
self.assertEqual(response.status_code, 201)
js = response.json()
if type(js) == list:
js = js[0]
# print(f"Got json for test 4: {js}")
self.assertTrue("@id" in js)
self.assertTrue(js["@id"] == "http://localhost:8000/resources/abcd1234-1234-1129-b6e7-3af9d3b32b71")
self.assertTrue("http://www.cidoc-crm.org/cidoc-crm/P130_shows_features_of" in js)
feats = js["http://www.cidoc-crm.org/cidoc-crm/P130_shows_features_of"]
self.assertTrue(type(feats) == list)
self.assertTrue(len(feats) == 2)
rids = [
"http://localhost:8000/resources/12bbf5bc-fa85-11e9-91b8-3af9d3b32b71",
"http://localhost:8000/resources/24d0d25a-fa75-11e9-b369-3af9d3b32b71",
]
self.assertTrue(feats[0]["@id"] in rids)
self.assertTrue(feats[1]["@id"] in rids)
# test that the default ontologyProperties and inverseOntologyProperties are used
tiles = TileModel.objects.filter(resourceinstance_id="abcd1234-1234-1129-b6e7-3af9d3b32b71")
for tile in tiles:
if "ae93f844-fa6d-11e9-b369-3af9d3b32b71" in tile.data:
self.assertEqual(
tile.data["ae93f844-fa6d-11e9-b369-3af9d3b32b71"][0]["ontologyProperty"],
"http://www.cidoc-crm.org/cidoc-crm/P62_depicts",
)
self.assertEqual(
tile.data["ae93f844-fa6d-11e9-b369-3af9d3b32b71"][0]["inverseOntologyProperty"],
"http://www.cidoc-crm.org/cidoc-crm/P62i_is_depicted_by",
)
def test_5_5098_resinst_branch(self):
# 2019-11-01 - Conversely this fails, as it is in a branch
BusinessDataImporter("tests/fixtures/jsonld_base/data/test_2_instances.json").import_business_data()
data = """
{
"@id": "http://localhost:8000/resources/7fffffff-faa1-11e9-84de-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E22_Man-Made_Object",
"http://www.cidoc-crm.org/cidoc-crm/P67i_is_referred_to_by": {
"@id": "http://localhost:8000/tile/a4896405-5c73-49f4-abd3-651911e82fde/node/51c3ede8-faa1-11e9-84de-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E33_Linguistic_Object",
"http://www.cidoc-crm.org/cidoc-crm/P128i_is_carried_by": [
{
"@id": "http://localhost:8000/resources/24d0d25a-fa75-11e9-b369-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E22_Man-Made_Object"
},
{
"@id": "http://localhost:8000/resources/12bbf5bc-fa85-11e9-91b8-3af9d3b32b71",
"@type": "http://www.cidoc-crm.org/cidoc-crm/E22_Man-Made_Object"
}
]
}
}
"""
# Load up the models and data only once
with open(os.path.join("tests/fixtures/jsonld_base/models/5098_b_resinst.json"), "rU") as f:
archesfile = JSONDeserializer().deserialize(f)
ResourceGraphImporter(archesfile["graph"])
url = self._create_url(
graph_id="40dbcffa-faa1-11e9-84de-3af9d3b32b71",
resource_id="7fffffff-faa1-11e9-84de-3af9d3b32b71",
)
response = self.client.put(url, data=data, HTTP_AUTHORIZATION=f"Bearer {self.token}")
self.assertEqual(response.status_code, 201)
js = response.json()
if type(js) == list:
js = js[0]
print(f"Got json for test 5: {js}")
self.assertTrue("@id" in js)
| |
Path being processed
3. segment_index (int) Segment index of the YAML Path to process
Keyword Arguments:
* parent (ruamel.yaml node) The parent node from which this query
originates
* parentref (Any) The Index or Key of data within parent
* traverse_lists (Boolean) Indicate whether KEY searches against lists
are permitted to automatically traverse into the list; Default=True
* translated_path (YAMLPath) YAML Path indicating precisely which node
is being evaluated
* ancestry (List[AncestryEntry]) Stack of ancestors preceding the
present node under evaluation
Returns: (Generator[Any, None, None]) Each node coordinate or list of
node coordinates as they are matched. You must check with isinstance()
to determine whether you have received a NodeCoords or a
List[NodeCoords].
Raises:
- `NotImplementedError` when the segment indicates an unknown
PathSegmentTypes value.
"""
parent: Any = kwargs.pop("parent", None)
parentref: Any = kwargs.pop("parentref", None)
traverse_lists: bool = kwargs.pop("traverse_lists", True)
translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath(""))
ancestry: List[AncestryEntry] = kwargs.pop("ancestry", [])
segments = yaml_path.escaped
if not (segments and len(segments) > segment_index):
self.logger.debug(
"Bailing out because there are not {} segments in:"
.format(segment_index),
prefix="Processor::_get_nodes_by_path_segment: ",
data=segments)
return
pathseg: PathSegment = yaml_path.unescaped[segment_index]
(unesc_type, unesc_attrs) = pathseg
(segment_type, stripped_attrs) = segments[segment_index]
# Disallow traversal recursion (because it creates a denial-of-service)
if segment_index > 0 and segment_type == PathSegmentTypes.TRAVERSE:
(prior_segment_type, _) = segments[segment_index - 1]
if prior_segment_type == PathSegmentTypes.TRAVERSE:
raise YAMLPathException(
"Repeating traversals are not allowed because they cause"
" recursion which leads to excessive CPU and RAM"
" consumption while yielding no additional useful data",
str(yaml_path), "**")
# NodeCoords cannot be directly evaluated as data, so pull out their
# wrapped data for evaluation.
if isinstance(data, NodeCoords):
ancestry = data.ancestry
translated_path = YAMLPath(data.path)
parent = data.parent
parentref = data.parentref
data = data.node
node_coords: Any = None
if segment_type == PathSegmentTypes.KEY:
node_coords = self._get_nodes_by_key(
data, yaml_path, segment_index, traverse_lists=traverse_lists,
translated_path=translated_path, ancestry=ancestry)
elif segment_type == PathSegmentTypes.INDEX:
node_coords = self._get_nodes_by_index(
data, yaml_path, segment_index,
translated_path=translated_path, ancestry=ancestry)
elif segment_type == PathSegmentTypes.ANCHOR:
node_coords = self._get_nodes_by_anchor(
data, yaml_path, segment_index,
translated_path=translated_path, ancestry=ancestry)
elif (
segment_type == PathSegmentTypes.KEYWORD_SEARCH
and isinstance(stripped_attrs, SearchKeywordTerms)
):
node_coords = self._get_nodes_by_keyword_search(
data, yaml_path, stripped_attrs, parent=parent,
parentref=parentref, traverse_lists=traverse_lists,
translated_path=translated_path, ancestry=ancestry,
relay_segment=pathseg)
elif (
segment_type == PathSegmentTypes.SEARCH
and isinstance(stripped_attrs, SearchTerms)
):
node_coords = self._get_nodes_by_search(
data, stripped_attrs, parent=parent, parentref=parentref,
traverse_lists=traverse_lists, translated_path=translated_path,
ancestry=ancestry)
elif (
unesc_type == PathSegmentTypes.COLLECTOR
and isinstance(unesc_attrs, CollectorTerms)
):
node_coords = self._get_nodes_by_collector(
data, yaml_path, segment_index, unesc_attrs, parent=parent,
parentref=parentref, translated_path=translated_path,
ancestry=ancestry)
elif segment_type == PathSegmentTypes.TRAVERSE:
node_coords = self._get_nodes_by_traversal(
data, yaml_path, segment_index, parent=parent,
parentref=parentref, translated_path=translated_path,
ancestry=ancestry)
else:
raise NotImplementedError
for node_coord in node_coords:
yield node_coord
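# Illustrative note (added comment): a path such as `spec.containers[0].image`
# is consumed one segment at a time by the dispatch above: KEY (spec),
# KEY (containers), INDEX ([0]), then KEY (image); each helper yields
# NodeCoords for the next segment to evaluate.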
def _get_nodes_by_key(
self, data: Any, yaml_path: YAMLPath, segment_index: int, **kwargs: Any
) -> Generator[NodeCoords, None, None]:
"""
Get nodes from a Hash by their unique key name.
Returns zero or more NodeCoords identified by a dict key found at a
specific segment of a YAML Path within the present data context.
Parameters:
1. data (ruamel.yaml data) The parsed YAML data to process
2. yaml_path (yamlpath.Path) The YAML Path being processed
3. segment_index (int) Segment index of the YAML Path to process
Keyword Arguments:
* traverse_lists (Boolean) Indicate whether KEY searches against lists
are permitted to automatically traverse into the list; Default=True
* translated_path (YAMLPath) YAML Path indicating precisely which node
is being evaluated
* ancestry (List[AncestryEntry]) Stack of ancestors preceding the
present node under evaluation
Returns: (Generator[NodeCoords, None, None]) Each NodeCoords as they
are matched
Raises: N/A
"""
traverse_lists: bool = kwargs.pop("traverse_lists", True)
translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath(""))
ancestry: List[AncestryEntry] = kwargs.pop("ancestry", [])
pathseg: PathSegment = yaml_path.escaped[segment_index]
(_, stripped_attrs) = pathseg
str_stripped = str(stripped_attrs)
next_ancestry: List[AncestryEntry] = []
self.logger.debug((
"Seeking KEY node, {}, in data:"
).format(str_stripped),
prefix="Processor::_get_nodes_by_key: ",
data={"KEY": stripped_attrs,
"DATA": data})
if isinstance(data, dict):
next_translated_path = (translated_path +
YAMLPath.escape_path_section(
str_stripped, translated_path.seperator))
next_ancestry = ancestry + [(data, stripped_attrs)]
if stripped_attrs in data:
self.logger.debug(
"Processor::_get_nodes_by_key: FOUND key node by name at"
" {}."
.format(str_stripped))
yield NodeCoords(
data[stripped_attrs], data, stripped_attrs,
next_translated_path, next_ancestry, pathseg)
else:
# Check for a string/int type mismatch
try:
intkey = int(str_stripped)
if intkey in data:
yield NodeCoords(
data[intkey], data, intkey, next_translated_path,
ancestry + [(data, intkey)], pathseg)
except ValueError:
pass
elif isinstance(data, list):
try:
# Try using the ref as a bare Array index
idx = int(str_stripped)
if len(data) > idx:
self.logger.debug(
"Processor::_get_nodes_by_key: FOUND key node as a"
" bare Array index at [{}]."
.format(str_stripped))
next_translated_path = translated_path + "[{}]".format(idx)
next_ancestry = ancestry + [(data, idx)]
yield NodeCoords(
data[idx], data, idx,
next_translated_path, next_ancestry, pathseg)
except ValueError:
# Pass-through search against possible Array-of-Hashes, if
# allowed.
if not traverse_lists:
self.logger.debug(
"Processor::_get_nodes_by_key: Refusing to traverse a"
" list.")
return
for eleidx, element in enumerate(data):
next_translated_path = translated_path + "[{}]".format(
eleidx)
next_ancestry = ancestry + [(data, stripped_attrs)]
for node_coord in self._get_nodes_by_path_segment(
element, yaml_path, segment_index, parent=data,
parentref=eleidx, traverse_lists=traverse_lists,
translated_path=next_translated_path,
ancestry=next_ancestry):
self.logger.debug(
"Processor::_get_nodes_by_key: FOUND key node "
" via pass-through Array-of-Hashes search at {}."
.format(next_translated_path))
yield node_coord
elif isinstance(data, (set, CommentedSet)):
for ele in data:
if ele == stripped_attrs or (
isinstance(ele, TaggedScalar)
and ele.value == stripped_attrs
):
self.logger.debug((
"Processor::_get_nodes_by_key: FOUND set node by"
" name at {}."
).format(str_stripped))
next_translated_path = (translated_path +
YAMLPath.escape_path_section(
ele, translated_path.seperator))
next_ancestry = ancestry + [(data, ele)]
yield NodeCoords(
ele, data, stripped_attrs,
next_translated_path, next_ancestry, pathseg)
break
# pylint: disable=locally-disabled,too-many-locals
def _get_nodes_by_index(
self, data: Any, yaml_path: YAMLPath, segment_index: int, **kwargs
) -> Generator[NodeCoords, None, None]:
"""
Get nodes from a List by their index.
Returns zero or more NodeCoords identified by a list element index
found at a specific segment of a YAML Path within the present data
context.
Parameters:
1. data (Any) The parsed YAML data to process
2. yaml_path (YAMLPath) The YAML Path being processed
3. segment_index (int) Segment index of the YAML Path to process
Keyword Arguments:
* translated_path (YAMLPath) YAML Path indicating precisely which node
is being evaluated
* ancestry (List[AncestryEntry]) Stack of ancestors preceding the
present node under evaluation
Returns: (Generator[NodeCoords, None, None]) Each NodeCoords as they
are matched
Raises: N/A
"""
translated_path: YAMLPath = kwargs.pop("translated_path", YAMLPath(""))
ancestry: List[AncestryEntry] = kwargs.pop("ancestry", [])
pathseg: PathSegment = yaml_path.escaped[segment_index]
(_, stripped_attrs) = pathseg
(_, unstripped_attrs) = yaml_path.unescaped[segment_index]
str_stripped = str(stripped_attrs)
self.logger.debug(
"Processor::_get_nodes_by_index: Seeking INDEX node at {}."
.format(str_stripped))
if ':' in str_stripped:
# Array index or Hash key slice
slice_parts: List[str] = str_stripped.split(':', 1)
min_match: str = slice_parts[0]
max_match: str = slice_parts[1]
if isinstance(data, list):
try:
intmin: int = int(min_match)
intmax: int = int(max_match)
except ValueError as wrap_ex:
raise YAMLPathException(
"{} is not an integer array slice"
.format(str_stripped),
str(yaml_path),
str(unstripped_attrs)
) from wrap_ex
if intmin == intmax and len(data) > intmin:
yield NodeCoords(
[data[intmin]], data, intmin,
translated_path + "[{}]".format(intmin),
ancestry + [(data, intmin)], pathseg)
else:
sliced_elements = []
for slice_index in range(intmin, intmax):
sliced_elements.append(NodeCoords(
data[slice_index], data, intmin,
translated_path + "[{}]".format(slice_index),
ancestry + [(data, slice_index)], pathseg))
yield NodeCoords(
sliced_elements, data, intmin,
translated_path + "[{}:{}]".format(intmin, intmax),
ancestry + [(data, intmin)], pathseg)
elif isinstance(data, dict):
for key, val in data.items():
if min_match <= key <= max_match:
yield NodeCoords(
val, data, key,
translated_path + YAMLPath.escape_path_section(
key, translated_path.seperator),
ancestry + [(data, key)], pathseg)
elif isinstance(data, (CommentedSet, set)):
for ele in data:
if min_match <= ele <= max_match:
yield NodeCoords(
ele, data, ele,
translated_path + YAMLPath.escape_path_section(
ele, translated_path.seperator),
ancestry + [(data, ele)], pathseg)
else:
try:
idx: int = int(str_stripped)
except ValueError as wrap_ex:
raise YAMLPathException(
"{} is not an integer array index"
.format(str_stripped),
str(yaml_path),
str(unstripped_attrs)
) from wrap_ex
if isinstance(data, list) and len(data) > idx:
yield NodeCoords(
data[idx], data, idx, translated_path + "[{}]".format(idx),
ancestry + [(data, idx)], pathseg)
elif isinstance(data, (CommentedSet, set)):
raise YAMLPathException(
"Array indexing is invalid against unordered set data"
" because element positioning is not guaranteed in"
" unordered data; rather, match set entries by their"
" actual values. This error was encountered",
str(yaml_path),
str(unstripped_attrs)
)
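# Illustrative examples for the branches above (added comment): `items[2]`
# yields the single element at index 2; `items[1:3]` yields the elements at
# indices 1 and 2 wrapped in one NodeCoords; against a Hash, `letters[b:d]`
# matches every key k with "b" <= k <= "d" per the inclusive comparison in the
# dict branch.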
def _get_nodes_by_anchor(
self, data: Any, yaml_path: YAMLPath, segment_index: int, **kwargs
) -> Generator[NodeCoords, None, None]:
"""
Get nodes matching an Anchor name.
Returns zero or more NodeCoords identified by an Anchor name found at a
specific segment of a YAML Path within the | |
#!/usr/bin/env python3
import os
import time
import scipy
import mnist
import pickle
import matplotlib
import numpy as np
import itertools as it
from numpy import random
matplotlib.use("agg")
from matplotlib import pyplot as plt
from scipy.special import softmax
mnist_data_directory = os.path.join(os.path.dirname(__file__), "data")
# TODO add any additional imports and global variables
def load_MNIST_dataset():
PICKLE_FILE = os.path.join(mnist_data_directory, "MNIST.pickle")
try:
dataset = pickle.load(open(PICKLE_FILE, "rb"))
except:
# load the MNIST dataset
mnist_data = mnist.MNIST(mnist_data_directory, return_type="numpy", gz=True)
Xs_tr, Lbls_tr = mnist_data.load_training()
Xs_tr = Xs_tr.transpose() / 255.0
Ys_tr = np.zeros((10, 60000))
for i in range(60000):
Ys_tr[Lbls_tr[i], i] = 1.0 # one-hot encode each label
# shuffle the training data
np.random.seed(8675309)
perm = np.random.permutation(60000)
Xs_tr = np.ascontiguousarray(Xs_tr[:, perm])
Ys_tr = np.ascontiguousarray(Ys_tr[:, perm])
Xs_te, Lbls_te = mnist_data.load_testing()
Xs_te = Xs_te.transpose() / 255.0
Ys_te = np.zeros((10, 10000))
for i in range(10000):
Ys_te[Lbls_te[i], i] = 1.0 # one-hot encode each label
Xs_te = np.ascontiguousarray(Xs_te)
Ys_te = np.ascontiguousarray(Ys_te)
dataset = (Xs_tr, Ys_tr, Xs_te, Ys_te)
pickle.dump(dataset, open(PICKLE_FILE, "wb"))
return dataset
# compute the gradient of the multinomial logistic regression objective, with regularization (SAME AS PROGRAMMING ASSIGNMENT 2)
#
# Xs training examples (d * n)
# Ys training labels (c * n)
# ii the list/vector of indexes of the training examples to compute the gradient with respect to
# gamma L2 regularization constant
# W0 parameters (c * d)
#
# returns the average gradient of the regularized loss of the examples in vector ii with respect to the model parameters
def multinomial_logreg_grad_i(Xs, Ys, ii, gamma, W0):
# TODO students should use their implementation from programming assignment 2
d, n = Xs.shape
c, n = Ys.shape
batchSize = len(ii)
X_batch = Xs[:, ii]
Y_batch = Ys[:, ii]
yHat = softmax(np.matmul(W0, X_batch), axis=0) - Y_batch
ans = np.matmul(yHat, X_batch.T) + batchSize * gamma * W0
return ans / batchSize
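# In matrix form the update computed above is
#   grad = (softmax(W0 @ X_batch) - Y_batch) @ X_batch.T / |ii| + gamma * W0,
# i.e. the average example gradient plus the L2 regularization term.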
# compute the error of the classifier (SAME AS PROGRAMMING ASSIGNMENT 1)
#
# Xs examples (d * n)
# Ys labels (c * n)
# W0 parameters (c * d)
#
# returns the model error as a percentage of incorrect labels
def multinomial_logreg_error(Xs, Ys, W0):
# TODO students should use their implementation from programming assignment 1
Ys = Ys.T
yHat = softmax(np.dot(W0, Xs), axis=0).T
count = 0
for i in range(len(Ys)):
pred = np.argmax(yHat[i])
if Ys[i, pred] != 1:
count += 1
return count / len(Ys)
# compute the cross-entropy loss of the classifier
#
# Xs examples (d * n)
# Ys labels (c * n)
# gamma L2 regularization constant
# W0 parameters (c * d)
#
# returns the model cross-entropy loss
def multinomial_logreg_loss(Xs, Ys, gamma, W0):
# TODO students should implement this
(d, n) = Xs.shape
ret = 0
# Numpy Code
y_hat = softmax(np.dot(W0, Xs), axis=0)
log_y_hat = -1 * np.log(y_hat)
y_dot_y_hat = np.multiply(log_y_hat, Ys)
L_y_y_hat = np.sum(y_dot_y_hat)
ret = L_y_y_hat + (gamma / 2) * (np.linalg.norm(W0, "fro")) ** 2
return ret / n
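# The value computed above is the regularized average cross-entropy
#   L(W) = -(1/n) * sum_i y_i . log(softmax(W x_i)) + (gamma/2) * ||W||_F^2,
# whose gradient is exactly what multinomial_logreg_grad_i returns when ii
# covers all n examples. The sketch below (added for illustration, not part of
# the assignment scaffold) cross-checks the two with central finite differences
# on a tiny random problem.
def _check_logreg_gradient(eps=1e-5, gamma=0.01, seed=0):
    rng = np.random.RandomState(seed)
    d, c, n = 5, 3, 4
    Xs = rng.rand(d, n)
    Ys = np.eye(c)[rng.randint(c, size=n)].T  # one-hot labels, shape (c, n)
    W = rng.randn(c, d)
    analytic = multinomial_logreg_grad_i(Xs, Ys, list(range(n)), gamma, W)
    numeric = np.zeros_like(W)
    for i in range(c):
        for j in range(d):
            Wp, Wm = W.copy(), W.copy()
            Wp[i, j] += eps
            Wm[i, j] -= eps
            numeric[i, j] = (multinomial_logreg_loss(Xs, Ys, gamma, Wp)
                             - multinomial_logreg_loss(Xs, Ys, gamma, Wm)) / (2 * eps)
    return np.max(np.abs(analytic - numeric))  # expected to be tiny (numerical noise)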
# gradient descent (SAME AS PROGRAMMING ASSIGNMENT 1)
#
# Xs training examples (d * n)
# Ys training labels (c * n)
# gamma L2 regularization constant
# W0 the initial value of the parameters (c * d)
# alpha step size/learning rate
# num_epochs number of epochs (passes through the training set, or equivalently iterations of gradient descent) to run
# monitor_period how frequently, in terms of epochs/iterations to output the parameter vector
#
# returns a list of model parameters, one every "monitor_period" epochs
def gradient_descent(Xs, Ys, gamma, W0, alpha, num_epochs, monitor_period):
# TODO students should use their implementation from programming assignment 1
params = []
grad_range = list(range(Xs.shape[1]))
for i in range(num_epochs):
if i % monitor_period == 0:
params.append(W0)
W0 = W0 - alpha * multinomial_logreg_grad_i(Xs, Ys, grad_range, gamma, W0)
params.append(W0)
return params
# compute the gradient of the regularized multinomial logistic regression objective over the full training set
def multinomial_logreg_total_grad(Xs, Ys, gamma, W0):
    # TODO students should implement this
    # a starter solution using an average of the example gradients
    (d, n) = Xs.shape
    ret = 0
    # ----- NUMPY CODE
    y_hat = softmax(np.dot(W0, Xs), axis=0)
    del_L = np.dot(y_hat - Ys, Xs.T)
    ret = del_L + n * gamma * W0
    return ret / n
# gradient descent with nesterov momentum
#
# Xs              training examples (d * n)
# Ys              training labels (c * n)
# gamma           L2 regularization constant
# W0              the initial value of the parameters (c * d)
# alpha           step size/learning rate
# beta            momentum hyperparameter
# num_epochs      number of epochs (passes through the training set, or equivalently iterations of gradient descent) to run
# monitor_period  how frequently, in terms of epochs/iterations to output the parameter vector
#
# returns a list of model parameters, one every "monitor_period" epochs
def gd_nesterov(Xs, Ys, gamma, W0, alpha, beta, num_epochs, monitor_period):
# TODO students should implement this
params = []
v = W0
for i in range(num_epochs):
if i % monitor_period == 0:
params.append(W0)
        vPrev = v.copy()  # keep the previous iterate for the Nesterov extrapolation step
v = W0 - alpha * multinomial_logreg_grad_i(
Xs, Ys, range(Xs.shape[1]), gamma, W0
)
W0 = v + beta * (v - vPrev)
params.append(W0)
return params
# SGD: run stochastic gradient descent with minibatching and sequential sampling order (SAME AS PROGRAMMING ASSIGNMENT 2)
#
# Xs training examples (d * n)
# Ys training labels (c * n)
# gamma L2 regularization constant
# W0 the initial value of the parameters (c * d)
# alpha step size/learning rate
# B minibatch size
# num_epochs number of epochs (passes through the training set) to run
# monitor_period how frequently, in terms of batches (not epochs) to output the parameter vector
#
# returns a list of model parameters, one every "monitor_period" batches
def sgd_minibatch_sequential_scan(
Xs, Ys, gamma, W0, alpha, B, num_epochs, monitor_period
):
# TODO students should use their implementation from programming assignment 2
params = []
for t in range(num_epochs):
for j in range(Xs.shape[1] // B):
if j % monitor_period == 0:
params.append(W0)
ii = [(j * B + i) for i in range(B)]
W0 = W0 - alpha * (multinomial_logreg_grad_i(Xs, Ys, ii, gamma, W0))
params.append(W0)
return params
# SGD + Momentum: add momentum to the previous algorithm
#
# Xs training examples (d * n)
# Ys training labels (c * n)
# gamma L2 regularization constant
# W0 the initial value of the parameters (c * d)
# alpha step size/learning rate
# beta momentum hyperparameter
# B minibatch size
# num_epochs number of epochs (passes through the training set) to run
# monitor_period how frequently, in terms of batches (not epochs) to output the parameter vector
#
# returns a list of model parameters, one every "monitor_period" batches
def sgd_mss_with_momentum(
Xs, Ys, gamma, W0, alpha, beta, B, num_epochs, monitor_period
):
# TODO students should implement this
params = []
v = 0
d, n = Xs.shape
for t in range(0, num_epochs):
for i in range(n // B):
if i % monitor_period == 0:
params.append(W0)
ii = [(i * B + j) for j in range(B)]
g = multinomial_logreg_grad_i(Xs, Ys, ii, gamma, W0)
v = (beta * v) - (alpha * g)
W0 = W0 + v
params.append(W0)
return params
# Adam Optimizer
#
# Xs training examples (d * n)
# Ys training labels (c * n)
# gamma L2 regularization constant
# W0 the initial value of the parameters (c * d)
# alpha step size/learning rate
# rho1 first moment decay rate ρ1
# rho2 second moment decay rate ρ2
# B minibatch size
# eps small factor used to prevent division by zero in update step
# num_epochs number of epochs (passes through the training set) to run
# monitor_period how frequently, in terms of batches (not epochs) to output the parameter vector
#
# returns a list of model parameters, one every "monitor_period" batches
def adam(Xs, Ys, gamma, W0, alpha, rho1, rho2, B, eps, num_epochs, monitor_period):
# TODO students should implement this
params = []
d, n = Xs.shape
t = 0
s = [0 for i in range(d)]
r = [0 for i in range(d)]
for k in range(0, num_epochs):
for i in range(n // B):
if i % monitor_period == 0:
params.append(W0)
t += 1
ii = [(i * B + j) for j in range(B)]
g = (multinomial_logreg_grad_i(Xs, Ys, ii, gamma, W0))
s = rho1 * np.asarray(s) + (1 - rho1) * np.asarray(g)
r = rho2 * np.asarray(r) + (1 - rho2) * np.asarray(g) ** 2
            s_cap = np.array([i / (1 - (rho1 ** t)) for i in s])  # bias-corrected first moment
            r_cap = np.array([i / (1 - (rho2 ** t)) for i in r])  # bias-corrected second moment
            W0 = W0 - alpha * (s_cap / (np.sqrt(r_cap) + eps))    # Adam parameter update
    params.append(W0)
    return params
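# A minimal usage sketch of the optimizers above (not part of the assignment):
# all hyperparameter values here are illustrative placeholders, and `dataset` is
# assumed to be the (Xs_tr, Ys_tr, Xs_te, Ys_te) tuple produced by the loader above.
def _example_run(dataset, gamma=0.0001):
    Xs_tr, Ys_tr, Xs_te, Ys_te = dataset
    d = Xs_tr.shape[0]
    c = Ys_tr.shape[0]
    W0 = np.zeros((c, d))
    models = adam(Xs_tr, Ys_tr, gamma, W0, alpha=0.01, rho1=0.9, rho2=0.999,
                  B=600, eps=1e-5, num_epochs=10, monitor_period=10)
    # report the error of the last monitored model on the test set
    print("test error:", multinomial_logreg_error(Xs_te, Ys_te, models[-1]))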
name,
mode,
rule_type,
platform,
compiler.get_compiler_for_current_buildfile(),
)
)
passthrough_args.append(
"--build-info-build-mode=" + info.build_mode,
)
passthrough_args.append("--build-info-build-tool=buck")
if info.package_name != None:
passthrough_args.append(
"--build-info-package-name=" + info.package_name,
)
if info.package_release != None:
passthrough_args.append(
"--build-info-package-release=" + info.package_release,
)
if info.package_version != None:
passthrough_args.append(
"--build-info-package-version=" + info.package_version,
)
passthrough_args.append("--build-info-platform=" + info.platform)
passthrough_args.append("--build-info-rule-name=" + info.rule)
passthrough_args.append("--build-info-rule-type=" + info.rule_type)
build_args.extend(["--passthrough=" + a for a in passthrough_args])
# Arguments for stripping libomnibus. dbg builds should never strip.
if not build_mode.startswith("dbg"):
if strip_libpar == True:
build_args.append("--omnibus-debug-info=strip")
elif strip_libpar == "extract":
build_args.append("--omnibus-debug-info=extract")
else:
build_args.append("--omnibus-debug-info=separate")
# Set an explicit python interpreter.
if python != None:
build_args.append("--python-override=" + python)
return build_args
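# For illustration (hypothetical inputs): a non-dbg build with strip_libpar="extract"
# and python="/usr/local/bin/python3.8" would end up with, among others,
#   "--passthrough=--build-info-build-tool=buck", "--omnibus-debug-info=extract",
#   "--python-override=/usr/local/bin/python3.8"
# in the returned build_args.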
def _associated_targets_library(base_path, name, deps, visibility):
"""
    Associated targets are buck rules that need to be built when this
    target is built, but are not a code dependency, which is why we
    wrap them in a cxx_library so they can never become a code dependency.
TODO: Python just needs the concept of runtime deps if it doesn't have it.
Also, what is the actual use case for this?
"""
rule_name = name + "-build_also"
buck_platform = platform_utils.get_buck_platform_for_base_path(base_path)
fb_native.cxx_library(
name = rule_name,
visibility = visibility,
deps = deps,
default_platform = buck_platform,
defaults = {"platform": buck_platform},
)
return rule_name
def _jemalloc_malloc_conf_library(base_path, name, malloc_conf, deps, visibility):
"""
Build a rule which wraps the JEMalloc allocator and links default
configuration via the `jemalloc_conf` variable.
"""
buck_platform = platform_utils.get_buck_platform_for_base_path(base_path)
jemalloc_config_line = ",".join([
"{}:{}".format(k, v)
for k, v in sorted(malloc_conf.items())
])
src_rule_name = "__{}_jemalloc_conf_src__".format(name)
fb_native.genrule(
name = src_rule_name,
visibility = visibility,
out = "jemalloc_conf.c",
cmd = 'echo \'const char* malloc_conf = "{}";\' > "$OUT"'.format(jemalloc_config_line),
)
deps, platform_deps = src_and_dep_helpers.format_all_deps(deps)
lib_rule_name = "__{}_jemalloc_conf_lib__".format(name)
fb_native.cxx_library(
name = lib_rule_name,
visibility = visibility,
srcs = [":" + src_rule_name],
default_platform = buck_platform,
defaults = {"platform": buck_platform},
deps = deps,
platform_deps = platform_deps,
)
return target_utils.RootRuleTarget(base_path, lib_rule_name)
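# For illustration (hypothetical values): malloc_conf = {"background_thread": "true",
# "dirty_decay_ms": "500"} produces a generated source containing
#   const char* malloc_conf = "background_thread:true,dirty_decay_ms:500";
# which is compiled into the "__{name}_jemalloc_conf_lib__" helper returned above.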
def _convert_needed_coverage_spec(base_path, spec):
"""
Converts `needed_coverage` from fbcode's spec into the buck native spec
Args:
base_path: The base path for this rule; used to get fully qualified targets
spec: A tuple of (<needed percentage as int>, <target as a string>)
Returns:
A buck-compatible spec. This is a tuple of two elements if no source name
is detected in the target name (with an =) or three elements if it is
detected in the form of
(<percentage as int>, <full target as string>, <file as string>?)
"""
if len(spec) != 2:
fail((
"parameter `needed_coverage`: `{}` must have exactly 2 " +
"elements, a ratio and a target."
).format(spec))
ratio, target = spec
if "=" not in target:
return (
ratio,
src_and_dep_helpers.convert_build_target(base_path, target),
)
target, path = target.rsplit("=", 1)
return (ratio, src_and_dep_helpers.convert_build_target(base_path, target), path)
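# For illustration (hypothetical targets): with base_path "my/project",
#   (100, ":lib")           -> (100, <fully qualified //my/project:lib>)
#   (90, ":lib=foo/bar.py") -> (90, <fully qualified //my/project:lib>, "foo/bar.py")
# where the exact qualified form comes from src_and_dep_helpers.convert_build_target.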
def _should_generate_interp_rules(helper_deps):
"""
Return whether we should generate the interp helpers.
This is controlled by both the mode, the property, and buckconfig settings
Args:
helper_deps: The value of the `helper_deps` attribute on the users rule.
Should be True or False
"""
# We can only work in @mode/dev
if not config.get_build_mode().startswith("dev"):
return False
# Our current implementation of the interp helpers is costly when using
    # omnibus linking, so only generate these if explicitly set via config or TARGETS
config_setting = read_bool("python", "helpers", required = False)
if config_setting == None:
# No CLI option is set, respect the TARGETS file option.
return helper_deps
return config_setting
def _preload_deps(base_path, name, allocator, jemalloc_conf = None, visibility = None):
"""
Add C/C++ deps which need to preloaded by Python binaries.
Returns:
A list of additional dependencies (as strings) which should be added to the
python binary
"""
deps = []
sanitizer = sanitizers.get_sanitizer()
# If we're using sanitizers, add the dep on the sanitizer-specific
# support library.
if sanitizer != None:
sanitizer = sanitizers.get_short_name(sanitizer)
deps.append(
target_utils.RootRuleTarget(
"tools/build/sanitizers",
"{}-py".format(sanitizer),
),
)
# Generate sanitizer configuration even if sanitizers are not used
deps.append(
cpp_common.create_sanitizer_configuration(
base_path,
name,
enable_lsan = False,
),
)
# If we're using an allocator, and not a sanitizer, add the allocator-
# specific deps.
if allocator != None and sanitizer == None:
allocator_deps = allocators.get_allocator_deps(allocator)
if allocator.startswith("jemalloc") and jemalloc_conf != None:
conf_dep = _jemalloc_malloc_conf_library(
base_path,
name,
jemalloc_conf,
allocator_deps,
visibility,
)
allocator_deps = [conf_dep]
deps.extend(allocator_deps)
return deps
def _get_ldflags(base_path, name, fbconfig_rule_type, strip_libpar = True):
"""
Return ldflags to use when linking omnibus libraries in python binaries.
"""
# We override stripping for python binaries unless we're in debug mode
# (which doesn't get stripped by default). If either `strip_libpar`
# is set or any level of stripping is enabled via config, we do full
# stripping.
strip_mode = cpp_common.get_strip_mode(base_path, name)
if (not config.get_build_mode().startswith("dbg") and
(strip_mode != "none" or strip_libpar == True)):
strip_mode = "full"
return cpp_common.get_ldflags(
base_path,
name,
fbconfig_rule_type,
strip_mode = strip_mode,
)
def _get_package_style():
"""
Get the package_style to use for binary rules from the configuration
See https://buckbuild.com/rule/python_binary.html#package_style
"""
return read_choice(
"python",
"package_style",
("inplace", "standalone"),
"standalone",
)
def _implicit_python_library(
name,
is_test_companion,
base_module = None,
srcs = (),
versioned_srcs = (),
gen_srcs = (),
deps = (),
tests = (),
tags = (),
external_deps = (),
visibility = None,
resources = (),
cpp_deps = (),
py_flavor = "",
version_subdirs = None): # Not used for now, will be used in a subsequent diff
"""
Creates a python_library and all supporting libraries
This library may or may not be consumed as a companion library to a
python_binary, or a python_test. The attributes returned vary based on how
it will be used.
Args:
name: The name of this library
is_test_companion: Whether this library is being created and consumed
directly by a test rule
base_module: The basemodule for the library (https://buckbuild.com/rule/python_library.html#base_module)
srcs: A sequence of sources/targets to use as srcs. Note that only files
ending in .py are considered sources. All other srcs are added as
resources. Note if this is a dictionary, the key and value are swapped
            from the official buck implementation. That is, this rule expects
{<src>: <destination in the library>}
versioned_srcs: If provided, a list of tuples of
(<python version constraint string>, <srcs as above>)
These sources are then added to the versioned_srcs attribute
in the library
gen_srcs: DEPRECATED A list of srcs that come from `custom_rule`s to be
merged into the final srcs list.
deps: A sequence of dependencies for the library. These should only be python
libraries, as python's typing support assumes that dependencies also
have a companion -typing rule
tests: The targets that test this library
tags: Arbitrary metadata to attach to this library. See https://buckbuild.com/rule/python_library.html#labels
external_deps: A sequence of tuples of external dependencies
visibility: The visibility of the library
resources: A sequence of sources/targets that should be explicitly added
            as resources. Note that if a dictionary is used, the key and
value are swapped from the official buck implementation. That is,
this rule expects {<src>: <destination in the library>}
        cpp_deps: A sequence of C++ library dependencies that will be loaded at
runtime
py_flavor: The flavor of python to use. By default ("") this is cpython
version_subdirs: A sequence of tuples of
                         (<buck version constraint>, <version subdir>). This points
to the subdirectory (or "") that each version constraint
uses. This helps us rewrite things like versioned_srcs for
third-party2 targets.
Returns:
The kwargs to pass to a native.python_library rule
"""
base_path = native.package_name()
attributes = {}
attributes["name"] = name
# Normalize all the sources from the various parameters.
parsed_srcs = {} # type: Dict[str, Union[str, RuleTarget]]
parsed_srcs.update(_parse_srcs(base_path, "srcs", srcs))
parsed_srcs.update(_parse_gen_srcs(base_path, gen_srcs))
# Parse the version constraints and normalize all source paths in
# `versioned_srcs`:
parsed_versioned_srcs = [
(
python_versioning.python_version_constraint(pvc),
_parse_srcs(base_path, "versioned_srcs", vs),
)
for pvc, vs in versioned_srcs
]
# Contains a mapping of platform name to sources to use for that
# platform.
all_versioned_srcs = []
    # If we're a TP2 project, install all sources via the `versioned_srcs`
# parameter. `py_flavor` is ignored since flavored Pythons are only
# intended for use by internal projects.
if third_party.is_tp2(base_path):
if version_subdirs == None:
fail("`version_subdirs` must be specified on third-party projects")
# TP2 projects have multiple "pre-built" source dirs, so we install
# Copyright 2016 Dravetech AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Napalm driver for Metamako MOS.
Utilizes JSONRPC interface in MOS 0.17.0
Piggybacks pyeapi Node class heavily.
Read https://napalm.readthedocs.io for more information.
"""
from __future__ import print_function
from __future__ import unicode_literals
# std libs
import ast
import difflib
import pyeapi
import re
import time
import inspect
from datetime import timedelta, datetime
from distutils.version import LooseVersion
from ipaddress import IPv4Network
from pyeapi.client import Node as EapiNode
from pyeapi.eapilib import ConnectionError
import napalm.base.helpers
from napalm.base import NetworkDriver
from napalm.base.utils import string_parsers
from napalm.base.exceptions import (
ConnectionException,
CommandErrorException,
SessionLockedException,
)
from napalm_mos.constants import LLDP_CAPAB_TRANFORM_TABLE
import napalm.base.constants as c
class MOSDriver(NetworkDriver):
"""Napalm driver for Metamako MOS."""
SUPPORTED_OC_MODELS = []
_RE_UPTIME = re.compile(
r"^((?P<day>\d+)\s+days?,\s+)?"
r"(?P<hour>\d+):(?P<minute>\d+):(?P<second>\d+)",
re.VERBOSE,
)
_RE_ARP = re.compile(
r"^(?P<address>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
r"\s+\S+\s+"
r"(?P<hwAddress>([0-9A-F]{2}[:-]){5}([0-9A-F]{2}))"
r"\s+\S+\s+"
r"(?P<interface>\S+)$",
re.VERBOSE | re.IGNORECASE,
)
_RE_NTP_SERVERS = re.compile(r"^ntp server (?P<server>\S+)", re.MULTILINE)
_RE_SNMP_COMM = re.compile(
r"\s*Community\sname:\s+(?P<community>\S+)\n"
r"Community\saccess:\s+(?P<mode>\S+)"
r"(\nCommunity\ssource:\s+(?P<v4_acl>\S+))?",
re.VERBOSE,
)
_RE_IP = re.compile(r"ip address (?P<ip>[^\s]+) (?P<mask>.+)")
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
"""Constructor."""
self.device = None
self.hostname = hostname
self.username = username
self.password = password
self.timeout = timeout
self.config_session = None
self._current_config = None
self._replace_config = False
self._ssh = None
self._version = LooseVersion("0")
self.platform = "mos"
self._process_optional_args(optional_args or {})
def _process_optional_args(self, optional_args):
self.enablepwd = optional_args.pop("enable_password", "")
self.config_timeout = optional_args.pop("config_timeout", 300)
transport = optional_args.get(
"transport", optional_args.get("eos_transport", "https")
)
try:
self.transport_class = pyeapi.client.TRANSPORTS[transport]
except KeyError:
            raise ConnectionException("Unknown transport: {}".format(transport))
init_args = inspect.getfullargspec(self.transport_class.__init__)[0]
init_args.pop(0) # Remove "self"
init_args.append("enforce_verification") # Not an arg for unknown reason
filter_args = ["host", "username", "password", "timeout"]
self.eapi_kwargs = {
k: v
for k, v in optional_args.items()
if k in init_args and k not in filter_args
}
def _run_translated_commands(self, commands, **kwargs):
"""
        In 0.22.0+ some commands changed their syntax. This function translates those
        commands to their post-0.22.0 form.
"""
if self._version >= LooseVersion("0.22.0"):
            # Map of pre-0.22.0 command syntax to the 0.22.0+ syntax
translations = {
"show snmp chassis-id": "show snmp v2-mib chassis-id",
"show snmp location": "show snmp v2-mib location",
"show snmp contact": "show snmp v2-mib contact",
"show environment all": "show system environment all",
}
commands = [
i if i not in translations.keys() else translations[i] for i in commands
]
return self.device.run_commands(commands, **kwargs)
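    # For illustration (hypothetical call): on a 0.22.0+ device,
    #   self._run_translated_commands(["show snmp chassis-id"])
    # actually executes "show snmp v2-mib chassis-id"; on older releases the
    # command list is passed through unchanged.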
def open(self):
"""Implementation of NAPALM method open."""
try:
connection = self.transport_class(
host=self.hostname,
username=self.username,
                password=self.password,
timeout=self.timeout,
**self.eapi_kwargs,
)
if self.device is None:
self.device = EapiNode(connection, enablepwd=self.enablepwd)
sw_version = self.device.run_commands(["show version"])[0].get(
"softwareImageVersion", "0.0.0"
)
if LooseVersion(sw_version) < LooseVersion("0.17.9"):
raise NotImplementedError(
"MOS Software Version 0.17.9 or better required"
)
self._version = LooseVersion(sw_version)
except ConnectionError as ce:
raise ConnectionException(ce.message)
def close(self):
"""Implementation of NAPALM method close."""
if self.config_session is not None:
            # Only doing this because discard_config is broken
self.discard_config()
def is_alive(self):
return {"is_alive": True}
def get_facts(self):
"""Implementation of NAPALM method get_facts."""
commands_json = ["show version", "show interfaces status"]
commands_text = ["show hostname"]
result_json = self.device.run_commands(commands_json, encoding="json")
result_text = self.device.run_commands(commands_text, encoding="text")
version = result_json[0]
hostname = result_text[0]["output"].splitlines()[0].split(" ")[-1]
fqdn = result_text[0]["output"].splitlines()[1].split(" ")[-1]
interfaces = result_json[1]["interfaces"].keys()
interfaces = string_parsers.sorted_nicely(interfaces)
u_match = re.match(self._RE_UPTIME, version["uptime"]).groupdict()
if u_match["day"] is None:
u_match["day"] = 0
        uptime = timedelta(
            days=int(u_match["day"]),
            hours=int(u_match["hour"]),
            minutes=int(u_match["minute"]),
            seconds=int(u_match["second"]),
        ).total_seconds()
return {
"hostname": hostname,
"fqdn": fqdn,
"vendor": "Metamako",
"model": re.sub(r"^[Mm]etamako ", "", version["device"]),
"serial_number": version["serialNumber"],
"os_version": version["softwareImageVersion"],
"uptime": int(uptime),
"interface_list": interfaces,
}
def _lock(self):
if self.config_session is None:
self.config_session = "napalm_{}".format(datetime.now().microsecond)
commands = ["copy running-config flash:{}".format(self.config_session)]
self.device.run_commands(commands)
if any(k for k in self._get_sessions() if k != self.config_session):
self.device.run_commands(["delete flash:{}".format(self.config_session)])
self.config_session = None
raise SessionLockedException(
"Session already in use - session file present on flash!"
)
def _unlock(self):
if self.config_session is not None:
self.device.run_commands(["delete flash:{}".format(self.config_session)])
self.config_session = None
self._replace_config = False
def _get_sessions(self):
return [
line.split()[-1]
for line in self.device.run_commands(["dir flash:"], encoding="text")[0][
"output"
].splitlines()
if "napalm_" in line.split()[-1]
]
def _load_config(self, filename=None, config=None, replace=False):
if filename and config:
raise ValueError("Cannot simultaneously set filename and config")
self._lock()
self._candidate = ["copy running-config flash:rollback-0"]
if replace:
self._candidate.append("copy default-config running-config")
self._replace_config = True
self._candidate.append("configure terminal")
if filename is not None:
with open(filename, "r") as f:
self._candidate = f.readlines()
else:
if isinstance(config, list):
lines = config
else:
lines = config.splitlines()
for line in lines:
if line.strip() == "":
continue
if line.startswith("!"):
continue
self._candidate.append(line)
self._candidate.append("end")
if any(
"source mac" in line for line in self._candidate
) and self._version < LooseVersion("0.19.2"):
# Waiting for fixed release
raise CommandErrorException(
"Cannot set source mac in MOS versions prior to 0.19.2"
)
if any("banner motd" in line for line in self._candidate):
raise CommandErrorException("Cannot set banner via JSONRPC API")
def _wait_for_reload(self, timeout=None):
timeout = timeout or self.config_timeout
end_timeout = time.time() + timeout
while True:
time.sleep(10)
try:
self.device.run_commands(["show version"])
break
except pyeapi.eapilib.ConnectionError:
if time.time() > end_timeout:
raise
def load_merge_candidate(self, filename=None, config=None):
self._load_config(filename=filename, config=config, replace=False)
def load_replace_candidate(self, filename=None, config=None):
self._load_config(filename=filename, config=config, replace=True)
def compare_config(self):
# There's no good way to do this yet
if self._replace_config:
cur = self.get_config("running")["running"].splitlines()[4:]
return "\n".join(difflib.unified_diff(cur, self._candidate[3:]))
else:
return "\n".join(self._candidate[2:])
def discard_config(self):
if self.config_session is not None:
self._candidate = None
self._unlock()
def commit_config(self, message="", revert_in=None):
if revert_in is not None:
raise NotImplementedError(
"Commit confirm has not been implemented on this platform."
)
if message:
raise NotImplementedError(
"Commit message not implemented for this platform"
)
if self.config_session is not None and self._candidate:
if self._replace_config:
try:
self.device.run_commands(
self._candidate + ["copy running-config startup-config"]
)
except pyeapi.eapilib.ConnectionError:
self._wait_for_reload()
else:
self.device.run_commands(
self._candidate + ["copy running-config startup-config"]
)
self._unlock()
def rollback(self):
commands = [
"copy flash:rollback-0 running-config",
"copy running-config startup-config",
]
for command in commands:
self.device.run_commands(command)
def get_interfaces(self):
def _parse_mm_speed(speed):
"""Parse the Metamako speed string from 'sh int status' into an Mbit/s int"""
factormap = {"": 1e-6, "k": 1e-3, "M": 1, "G": 1e3, "T": 1e6}
match = re.match(r"^(?P<speed>\d+)(?P<unit>\D)?$", speed)
if match:
match_dict = match.groupdict("")
return int(int(match_dict["speed"]) * factormap[match_dict["unit"]])
return 0
commands = ["show interfaces status", "show interfaces description"]
output = self.device.run_commands(commands, encoding="json")
descriptions = {d["Port"]: d["Description"] for d in output[1]}
interfaces = {}
for interface, values in output[0]["interfaces"].items():
interfaces[interface] = {}
# A L1 device doesn't really have a line protocol.
# Let's say an rx signal means line protocol is up for now.
if values["rx"].startswith("up"):
interfaces[interface]["is_up"] = True
interfaces[interface]["is_enabled"] = True
else:
interfaces[interface]["is_up"] = False
if "shutdown" in values["rx"]:
interfaces[interface]["is_enabled"] = False
else:
interfaces[interface]["is_enabled"] = True
interfaces[interface]["description"] = descriptions.get(interface, "")
interfaces[interface]["last_flapped"] = 0.0
interfaces[interface]["speed"] = _parse_mm_speed(values["speed"])
interfaces[interface]["mac_address"] = ""
# L1 device has no concept of MTU
interfaces[interface]["mtu"] = -1
return interfaces
def get_interfaces_ip(self):
run = self.device.run_commands(["show running-config"], encoding="json")[0]
iface_keys = [
k
for k in run.keys()
if k.startswith("interface") and not k.startswith("interfaceAp")
]
interfaces = {}
for k in iface_keys:
for config_line in run[k]:
m = self._RE_IP.match(config_line)
if not m:
continue
ip = m.group("ip")
mask = m.group("mask")
_net = IPv4Network(f"{ip}/{mask}", strict=False)
prefix_length = _net.prefixlen
# Ma1 is reported as interfaceMa1
# Et1 as interfaceEt1, etc
iface = k.replace("interface", "").lower()
interfaces[iface] = {"ipv4": {ip: {"prefix_length": prefix_length}}}
return interfaces
def get_lldp_neighbors(self):
commands = []
commands.append("show lldp neighbor")
output = self.device.run_commands(commands, encoding="json")[0]
lldp = {}
for n in output:
# MOS Has a line for every port, regardless of neighbor
if n["Neighbor_Device"] != "" and n["Neighbor_Port"] != "":
if n["Port"] not in lldp.keys():
lldp[n["Port"]] = []
lldp[n["Port"]].append(
{"hostname": n["Neighbor_Device"], "port": n["Neighbor_Port"]}
)
return lldp
def get_interfaces_counters(self):
commands = ["show interfaces counters", "show interfaces counters errors"]
output = self.device.run_commands(commands, encoding="json")
interface_counters = {}
errors_dict = output[1]["interfaces"]
for interface, counters in output[0]["interfaces"].items():
interface_counters[interface] = {}
interface_counters[interface].update(
tx_errors=int(
errors_dict.get(interface, {}).get("tx", -1).replace(",", "")
),
rx_errors=int(
errors_dict.get(interface, {}).get("tx", -1).replace(",", "")
),
tx_discards=-1, # Metamako discards?
rx_discards=-1,
tx_octets=int(counters.get("txoctets", -1).replace(",", "")),
rx_octets=int(counters.get("rxoctets", -1).replace(",", "")),
tx_unicast_packets=int(
counters.get("txucastpkts", -1).replace(",", "")
),
rx_unicast_packets=int(
counters.get("rxucastpkts", -1).replace(",", "")
),
tx_multicast_packets=int(
counters.get("txmcastpkts", -1).replace(",", "")
),
rx_multicast_packets=int(
counters.get("rxmcastpkts", -1).replace(",", "")
),
tx_broadcast_packets=int(
counters.get("txbcastpkts", -1).replace(",", "")
),
rx_broadcast_packets=int(
counters.get("rxbcastpkts", -1).replace(",", "")
import concurrent.futures
from copy import deepcopy, copy
from functools import partial
import json
import math
import os
from os.path import join
from time import time, sleep
from pathos.multiprocessing import ProcessPool, ThreadPool
from threading import Lock
from cloudvolume import Storage
from cloudvolume.lib import Vec
import numpy as np
import scipy
import scipy.ndimage
from skimage import img_as_ubyte
from skimage.filters import gabor
from skimage.morphology import rectangle, dilation, closing, opening
from taskqueue import TaskQueue, LocalTaskQueue
import torch
from torch.nn.functional import interpolate, max_pool2d, conv2d
import torch.nn as nn
from normalizer import Normalizer
from scipy.special import binom
from temporal_regularization import create_field_bump
from training.loss import lap
from utilities.helpers import save_chunk, crop, upsample, grid_sample, \
np_downsample, invert, compose_fields, upsample_field, \
is_identity, cpc, vector_vote, get_affine_field, is_blank, \
identity_grid
from boundingbox import BoundingBox, deserialize_bbox
from pathlib import Path
from utilities.archive import ModelArchive
#from taskqueue import TaskQueue
import tasks
import tenacity
import boto3
from fcorr import get_fft_power2, get_hp_fcorr
retry = tenacity.retry(
reraise=True,
stop=tenacity.stop_after_attempt(7),
wait=tenacity.wait_full_jitter(0.5, 60.0),
)
class Aligner:
def __init__(self, threads=1, queue_name=None, task_batch_size=1,
device='cuda', dry_run=False, **kwargs):
print('Creating Aligner object')
self.distributed = (queue_name != None)
self.queue_name = queue_name
self.task_queue = None
self.sqs = None
self.queue_url = None
if queue_name:
self.task_queue = TaskQueue(queue_name=queue_name, n_threads=0)
# self.chunk_size = (1024, 1024)
self.chunk_size = (4096, 4096)
self.device = torch.device(device)
self.model_archives = {}
# self.pool = None #ThreadPool(threads)
self.threads = threads
self.task_batch_size = task_batch_size
self.dry_run = dry_run
self.eps = 1e-6
self.gpu_lock = kwargs.get('gpu_lock', None) # multiprocessing.Semaphore
##########################
# Chunking & BoundingBox #
##########################
def break_into_chunks(self, bbox, chunk_size, offset, mip, max_mip=12):
"""Break bbox into list of chunks with chunk_size, given offset for all data
Args:
bbox: BoundingBox for region to be broken into chunks
chunk_size: tuple for dimensions of chunk that bbox will be broken into;
will be set to min(chunk_size, self.chunk_size)
offset: tuple for x,y origin for the entire dataset, from which chunks
will be aligned
mip: int for MIP level at which bbox is defined
max_mip: int for the maximum MIP level at which the bbox is valid
"""
if chunk_size[0] > self.chunk_size[0] or chunk_size[1] > self.chunk_size[1]:
chunk_size = self.chunk_size
raw_x_range = bbox.x_range(mip=mip)
raw_y_range = bbox.y_range(mip=mip)
x_chunk = chunk_size[0]
y_chunk = chunk_size[1]
x_offset = offset[0]
y_offset = offset[1]
x_remainder = ((raw_x_range[0] - x_offset) % x_chunk)
y_remainder = ((raw_y_range[0] - y_offset) % y_chunk)
calign_x_range = [raw_x_range[0] - x_remainder, raw_x_range[1]]
calign_y_range = [raw_y_range[0] - y_remainder, raw_y_range[1]]
chunks = []
for xs in range(calign_x_range[0], calign_x_range[1], chunk_size[0]):
for ys in range(calign_y_range[0], calign_y_range[1], chunk_size[1]):
chunks.append(BoundingBox(xs, xs + chunk_size[0],
ys, ys + chunk_size[1],
mip=mip, max_mip=max_mip))
return chunks
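    # Example (hypothetical numbers): with chunk_size=(1024, 1024), offset=(0, 0)
    # and a bbox spanning x, y in [1500, 3500) at MIP 2, the first chunk is aligned
    # down to (1024, 1024) and chunks then tile the region in 1024-pixel steps.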
def adjust_bbox(self, bbox, dis):
padded_bbox = deepcopy(bbox)
x_range = padded_bbox.x_range(mip=0)
y_range = padded_bbox.y_range(mip=0)
new_bbox = BoundingBox(x_range[0] + dis[0], x_range[1] + dis[0],
y_range[0] + dis[1], y_range[1] + dis[1],
mip=0)
return new_bbox
##############
# IO methods #
##############
def get_model_archive(self, model_path):
"""Load a model stored in the repo with its relative path
TODO: evict old models from self.models
Args:
model_path: str for relative path to model directory
Returns:
the ModelArchive at that model_path
"""
if model_path in self.model_archives:
print('Loading model {0} from cache'.format(model_path), flush=True)
return self.model_archives[model_path]
else:
print('Adding model {0} to the cache'.format(model_path), flush=True)
path = Path(model_path)
model_name = path.stem
archive = ModelArchive(model_name)
self.model_archives[model_path] = archive
return archive
#######################
# Image IO + handlers #
#######################
def get_mask(self, cv, z, bbox, src_mip, dst_mip, valid_val, to_tensor=True):
start = time()
data = self.get_data(cv, z, bbox, src_mip=src_mip, dst_mip=dst_mip,
to_float=False, to_tensor=to_tensor, normalizer=None)
mask = data == valid_val
end = time()
diff = end - start
print('get_mask: {:.3f}'.format(diff), flush=True)
return mask
def get_image(self, cv, z, bbox, mip, to_tensor=True, normalizer=None,
dst_mip=None):
print('get_image for {0}'.format(bbox.stringify(z)), flush=True)
start = time()
if dst_mip == None:
d_mip = mip
else:
d_mip = dst_mip
image = self.get_data(cv, z, bbox, src_mip=mip, dst_mip=d_mip, to_float=True,
to_tensor=to_tensor, normalizer=normalizer)
end = time()
diff = end - start
print('get_image: {:.3f}'.format(diff), flush=True)
return image
def get_masked_image(self, image_cv, z, bbox, image_mip, mask_cv, mask_mip, mask_val,
to_tensor=True, normalizer=None):
"""Get image with mask applied
"""
start = time()
image = self.get_image(image_cv, z, bbox, image_mip,
to_tensor=True, normalizer=normalizer)
if mask_cv is not None:
mask = self.get_mask(mask_cv, z, bbox,
src_mip=mask_mip,
dst_mip=image_mip, valid_val=mask_val)
image = image.masked_fill_(mask, 0)
if not to_tensor:
image = image.cpu().numpy()
end = time()
diff = end - start
print('get_masked_image: {:.3f}'.format(diff), flush=True)
return image
def get_composite_image(self, image_cv, z_list, bbox, image_mip,
mask_cv, mask_mip, mask_val,
to_tensor=True, normalizer=None):
"""Collapse a stack of 2D image into a single 2D image, by consecutively
replacing black pixels (0) in the image of the first z_list entry with
non-black pixels from of the consecutive z_list entries images.
Args:
image_cv: MiplessCloudVolume where images are stored
z_list: list of image indices processed in the given order
bbox: BoundingBox defining data range
image_mip: int MIP level of the image data to process
mask_cv: MiplessCloudVolume where masks are stored, or None if no mask
should be used
mask_mip: int MIP level of the mask, ignored if ``mask_cv`` is None
mask_val: The mask value that specifies regions to be blackened, ignored
if ``mask_cv`` is None.
to_tensor: output will be torch.tensor
#TODO normalizer: callable function to adjust the contrast of each image
"""
# Retrieve image stack
assert len(z_list) > 0
combined = self.get_masked_image(image_cv, z_list[0], bbox, image_mip,
mask_cv, mask_mip, mask_val,
to_tensor=to_tensor, normalizer=normalizer)
for z in z_list[1:]:
tmp = self.get_masked_image(image_cv, z, bbox, image_mip,
mask_cv, mask_mip, mask_val,
to_tensor=to_tensor, normalizer=normalizer)
black_mask = combined == 0
combined[black_mask] = tmp[black_mask]
return combined
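    # Example (hypothetical arguments):
    #   combined = aligner.get_composite_image(image_cv, [z, z - 1, z - 2], bbox,
    #                                          image_mip=2, mask_cv=None,
    #                                          mask_mip=2, mask_val=0)
    # fills black holes in section z with data from the two preceding sections.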
def get_data(self, cv, z, bbox, src_mip, dst_mip, to_float=True,
to_tensor=True, normalizer=None):
"""Retrieve CloudVolume data. Returns 4D ndarray or tensor, BxCxWxH
Args:
cv_key: string to lookup CloudVolume
bbox: BoundingBox defining data range
src_mip: mip of the CloudVolume data
dst_mip: mip of the output mask (dictates whether to up/downsample)
to_float: output should be float32
to_tensor: output will be torch.tensor
normalizer: callable function to adjust the contrast of the image
Returns:
image from CloudVolume in region bbox at dst_mip, with contrast adjusted,
if normalizer is specified, and as a uint8 or float32 torch tensor or numpy,
as specified
"""
x_range = bbox.x_range(mip=src_mip)
y_range = bbox.y_range(mip=src_mip)
data = cv[src_mip][x_range[0]:x_range[1], y_range[0]:y_range[1], z]
data = np.transpose(data, (2,3,0,1))
if to_float:
data = np.divide(data, float(255.0), dtype=np.float32)
if (normalizer is not None) and (not is_blank(data)):
print('Normalizing image')
start = time()
data = torch.from_numpy(data)
data = data.to(device=self.device)
data = normalizer(data).reshape(data.shape)
end = time()
diff = end - start
print('normalizer: {:.3f}'.format(diff), flush=True)
# convert to tensor if requested, or if up/downsampling required
if to_tensor | (src_mip != dst_mip):
if isinstance(data, np.ndarray):
data = torch.from_numpy(data)
if self.device.type == 'cuda':
data = data.to(device=self.device)
if src_mip != dst_mip:
# k = 2**(src_mip - dst_mip)
size = (bbox.y_size(dst_mip), bbox.x_size(dst_mip))
if not isinstance(data, torch.cuda.ByteTensor):
data = interpolate(data, size=size, mode='bilinear')
else:
data = data.type('torch.cuda.DoubleTensor')
data = interpolate(data, size=size, mode='nearest')
data = data.type('torch.cuda.ByteTensor')
else:
data = data.type(torch.float32)
if src_mip > dst_mip:
size = (bbox.y_size(dst_mip), bbox.x_size(dst_mip))
data = interpolate(data, size=size, mode='nearest')
data = data.type(torch.ByteTensor)
elif src_mip < dst_mip:
ratio = 2**(dst_mip-src_mip)
data = max_pool2d(data, kernel_size=ratio)
data = data.type(torch.ByteTensor)
if not to_tensor:
data = data.cpu().numpy()
return data
def get_data_range(self, cv, z_range, bbox, src_mip, dst_mip, to_tensor=True):
"""Retrieve CloudVolume data. Returns 4D tensor, BxCxWxH
Args:
cv_key: string to lookup CloudVolume
bbox: BoundingBox defining data range
src_mip: mip of the CloudVolume data
dst_mip: mip of the output mask (dictates whether to up/downsample)
to_tensor: output will be torch.tensor
#TODO normalizer: callable function to adjust the contrast of the image
"""
x_range = bbox.x_range(mip=src_mip)
y_range = bbox.y_range(mip=src_mip)
data = cv[src_mip][x_range[0]:x_range[1], y_range[0]:y_range[1], z_range]
data = np.transpose(data, (2,3,0,1))
if isinstance(data, np.ndarray):
data = torch.from_numpy(data)
data = data.to(device=self.device)
if src_mip != dst_mip:
# k = 2**(src_mip - dst_mip)
size = (bbox.y_size(dst_mip), bbox.x_size(dst_mip))
if not isinstance(data, torch.cuda.ByteTensor): #TODO: handle device
data = interpolate(data, size=size, mode='bilinear')
else:
data = data.type('torch.cuda.DoubleTensor')
data = interpolate(data, size=size, mode='nearest')
data = data.type('torch.cuda.ByteTensor')
if not to_tensor:
data = data.cpu().numpy()
return data
def save_image(self, float_patch, cv, z, bbox, mip, to_uint8=True):
x_range = bbox.x_range(mip=mip)
y_range = bbox.y_range(mip=mip)
patch = np.transpose(float_patch, (2,3,0,1))
#print("----------------z is", z, "save image patch at mip", mip, "range", x_range, y_range, "range at mip0", bbox.x_range(mip=0), bbox.y_range(mip=0))
if to_uint8:
patch = (np.multiply(patch, 255)).astype(np.uint8)
        cv[mip][x_range[0]:x_range[1], y_range[0]:y_range[1], z] = patch
    recoder = {1:7, 2:6, 3:5, 4:4, 5:3, 6:2, 7:1}
for i in items_recoded:
df[i] = df[i].map(recoder).astype('float64')
    #Calculate total score as the sum of Items 1-28.
cols = ['BPS_1','BPS_2','BPS_3',
'BPS_4','BPS_5','BPS_6',
'BPS_7','BPS_8','BPS_9',
'BPS_10','BPS_11','BPS_12',
'BPS_13','BPS_14','BPS_15',
'BPS_16','BPS_17','BPS_18',
'BPS_19','BPS_20','BPS_21',
'BPS_22','BPS_23','BPS_24',
'BPS_25','BPS_26','BPS_27',
'BPS_28']
df['BPS_sum'] = df[cols].sum(axis=1)
cols_export = ['ids'] + ['BPS_sum']
df[cols_export].to_csv('%s/BP.csv' % out_dir, decimal='.', index=False)
##############################################################################
################# Derryberry Attention Control Scale #########################
##############################################################################
def run_ACS(df, out_dir):
#items to be recoded
items_recoded = ['ACS_1',
'ACS_2',
'ACS_3',
'ACS_6',
'ACS_7',
'ACS_8',
'ACS_11',
'ACS_12',
'ACS_15',
'ACS_16',
'ACS_20']
#recode items
recoder = {1:4 , 2:3, 3:2, 4:1}
for i in items_recoded:
df[i] = df[i].map(recoder).astype('float64')
    #Calculate total score as the sum of Items 1-20.
cols = ['ACS_1','ACS_2','ACS_3',
'ACS_4','ACS_5','ACS_6',
'ACS_7','ACS_8','ACS_9',
'ACS_10','ACS_11','ACS_12',
'ACS_13','ACS_14','ACS_15',
'ACS_16','ACS_17','ACS_18',
'ACS_19','ACS_20']
df['ACS_sum'] = df[cols].sum(axis=1)
cols_export = ['ids'] + ['ACS_sum']
df[cols_export].to_csv('%s/ACS.csv' % out_dir, decimal='.', index=False)
##############################################################################
############################## NEO-PI-R ######################################
##############################################################################
def run_NEOPIR(df, out_dir):
#recode reversed items
items_recoded = ['NEO_61','NEO_1','NEO_121','NEO_181','NEO_36','NEO_96','NEO_156','NEO_11',
'NEO_71','NEO_106','NEO_166','NEO_21','NEO_81','NEO_231','NEO_141','NEO_56',
'NEO_116','NEO_176','NEO_206','NEO_236','NEO_32','NEO_92','NEO_7','NEO_67',
'NEO_127','NEO_187','NEO_42','NEO_102','NEO_162','NEO_222','NEO_17','NEO_77','NEO_137',
'NEO_52','NEO_112','NEO_27', 'NEO_87','NEO_147','NEO_207','NEO_33','NEO_93','NEO_153',
'NEO_183', 'NEO_213', 'NEO_8','NEO_68','NEO_128','NEO_43','NEO_103','NEO_163','NEO_18',
'NEO_78','NEO_138','NEO_198','NEO_228','NEO_53','NEO_113','NEO_173', 'NEO_28',
'NEO_88', 'NEO_148', 'NEO_208','NEO_238' ,'NEO_4' ,'NEO_64','NEO_124','NEO_39',
'NEO_99','NEO_159','NEO_189','NEO_219', 'NEO_14','NEO_74','NEO_134','NEO_49',
'NEO_109','NEO_169','NEO_199','NEO_229','NEO_24','NEO_84','NEO_144','NEO_234',
'NEO_59','NEO_119','NEO_35','NEO_95','NEO_155','NEO_10','NEO_70','NEO_130',
'NEO_190','NEO_220','NEO_45','NEO_105','NEO_20', 'NEO_80','NEO_140','NEO_55',
'NEO_115','NEO_175','NEO_205','NEO_30','NEO_90','NEO_150']
recoder = {0:4, 1:3, 2:2, 3:1, 4:0}
for i in items_recoded:
df[i] = df[i].map(recoder).astype('float64')
    # calculate the five domain scores and all 30 facet scores as item sums
#Neuroticism
df['NEO_N'] = df[['NEO_1','NEO_31', 'NEO_61','NEO_91','NEO_121', 'NEO_151', 'NEO_181','NEO_211','NEO_6',
'NEO_36','NEO_66','NEO_96','NEO_126','NEO_156','NEO_186','NEO_216','NEO_11','NEO_41',
'NEO_71','NEO_101','NEO_131','NEO_161','NEO_191','NEO_221','NEO_16','NEO_76','NEO_106',
'NEO_136','NEO_166','NEO_196','NEO_226','NEO_21','NEO_51','NEO_81','NEO_111','NEO_141',
'NEO_171','NEO_201','NEO_231','NEO_26','NEO_56','NEO_86','NEO_116','NEO_146','NEO_176',
                      'NEO_206','NEO_236']].sum(axis=1) # item 46 excluded (faulty item)
#N1 anxiety
df['NEO_N1_anx'] = df[['NEO_1','NEO_31','NEO_61','NEO_91',
'NEO_121','NEO_151','NEO_181','NEO_211']].sum(axis=1)
#N2 angry hostility
df['NEO_N2_host'] = df[['NEO_6','NEO_36','NEO_66','NEO_96',
'NEO_126','NEO_156','NEO_186','NEO_216']].sum(axis=1)
#N3 Depression
df['NEO_N3_depr'] = df[['NEO_11','NEO_41','NEO_71','NEO_101',
'NEO_131','NEO_161','NEO_191','NEO_221']].sum(axis=1)
#N4 Self Consciousness
df['NEO_N4_selfcon'] = df[['NEO_16','NEO_76','NEO_106','NEO_136',
                               'NEO_166','NEO_196','NEO_226']].sum(axis=1) # item 46 excluded (faulty item)
#N5 Impulsiveness
df['NEO_N5_imp'] = df[['NEO_21','NEO_51','NEO_81','NEO_111',
'NEO_141','NEO_171','NEO_201','NEO_231']].sum(axis=1)
#N6 Vulnerability
df['NEO_N6_vuln'] = df[['NEO_26','NEO_56','NEO_86','NEO_116',
'NEO_146','NEO_176','NEO_206','NEO_236']].sum(axis=1)
#Extraversion
df['NEO_E'] = df[['NEO_2','NEO_32','NEO_62','NEO_92','NEO_122','NEO_152','NEO_182','NEO_212',
'NEO_7','NEO_37','NEO_67','NEO_97','NEO_127','NEO_157','NEO_187','NEO_217',
'NEO_12','NEO_42','NEO_72','NEO_102','NEO_132','NEO_162','NEO_192','NEO_222',
'NEO_17','NEO_47','NEO_77','NEO_107','NEO_137','NEO_167','NEO_197','NEO_227',
'NEO_22','NEO_52','NEO_82','NEO_112','NEO_142','NEO_172','NEO_202','NEO_232',
'NEO_27','NEO_57','NEO_87','NEO_117','NEO_147','NEO_177','NEO_207','NEO_237']].sum(axis=1)
#E1 warmth
df['NEO_E1_warm'] = df[['NEO_2','NEO_32','NEO_62',
'NEO_92','NEO_122','NEO_152','NEO_182','NEO_212']].sum(axis=1)
#E2 Gregariousness
df['NEO_E2_greg'] = df[['NEO_7','NEO_37','NEO_67',
'NEO_97','NEO_127','NEO_157','NEO_187','NEO_217']].sum(axis=1)
#N3 Assertiveness
df['NEO_E3_ass'] = df[['NEO_12','NEO_42','NEO_72',
'NEO_102','NEO_132','NEO_162','NEO_192','NEO_222']].sum(axis=1)
#N4 Activity
df['NEO_E4_act'] = df[['NEO_17','NEO_47','NEO_77',
'NEO_107','NEO_137','NEO_167','NEO_197','NEO_227']].sum(axis=1)
#N5 Excitement Seeking
df['NEO_E5_excseek'] = df[['NEO_22','NEO_52','NEO_82',
'NEO_112','NEO_142','NEO_172','NEO_202','NEO_232']].sum(axis=1)
#N6 Positive Emotions
df['NEO_E6_PosEmo'] = df[['NEO_27','NEO_57','NEO_87',
'NEO_117','NEO_147','NEO_177','NEO_207','NEO_237']].sum(axis=1)
#Openness
#item 83 missing
df['NEO_O'] = df[['NEO_3','NEO_33','NEO_63','NEO_93','NEO_123','NEO_153','NEO_183','NEO_213',
'NEO_8','NEO_38','NEO_68','NEO_98','NEO_128','NEO_158','NEO_188','NEO_218',
'NEO_13','NEO_43','NEO_73','NEO_103','NEO_133','NEO_163','NEO_193','NEO_223',
'NEO_18','NEO_48','NEO_78','NEO_108','NEO_138','NEO_168','NEO_198','NEO_228',
'NEO_23','NEO_53','NEO_113','NEO_143','NEO_173','NEO_203','NEO_233',
'NEO_28','NEO_58','NEO_88','NEO_118','NEO_148','NEO_178','NEO_208','NEO_238']].sum(axis=1)
#O1 fantasy
df['NEO_O1_fan'] = df[['NEO_3','NEO_33','NEO_63',
'NEO_93','NEO_123','NEO_153','NEO_183','NEO_213']].sum(axis=1)
#O2 aesthetics
df['NEO_O2_aest'] = df[['NEO_8','NEO_38','NEO_68',
'NEO_98','NEO_128','NEO_158','NEO_188','NEO_218']].sum(axis=1)
#O3 feelings
df['NEO_O3_feel'] = df[['NEO_13','NEO_43','NEO_73',
'NEO_103','NEO_133','NEO_163','NEO_193','NEO_223']].sum(axis=1)
#04 actions
df['NEO_O4_act'] = df[['NEO_18','NEO_48','NEO_78',
'NEO_108','NEO_138','NEO_168','NEO_198','NEO_228']].sum(axis=1)
#05 ideas
#item 83 missing
df['NEO_O5_idea'] = df[['NEO_23','NEO_53','NEO_113',
'NEO_143','NEO_173','NEO_203','NEO_233']].sum(axis=1)
#06 values
df['NEO_O6_value'] = df[['NEO_28','NEO_58','NEO_88',
'NEO_118','NEO_148','NEO_178','NEO_208','NEO_238']].sum(axis=1)
#Agreeableness
df['NEO_A'] = df[['NEO_4','NEO_34','NEO_64','NEO_94','NEO_124','NEO_154','NEO_184','NEO_214',
'NEO_9','NEO_39','NEO_69','NEO_99','NEO_129','NEO_159','NEO_189','NEO_219',
'NEO_14','NEO_44','NEO_74','NEO_104','NEO_134','NEO_164','NEO_194','NEO_224',
'NEO_19','NEO_49','NEO_79','NEO_109','NEO_139','NEO_169','NEO_199','NEO_229',
'NEO_24','NEO_54','NEO_84','NEO_114','NEO_144','NEO_174','NEO_204','NEO_234',
'NEO_29','NEO_59','NEO_89','NEO_119','NEO_149','NEO_179','NEO_209','NEO_239']].sum(axis=1)
#A1 trust
df['NEO_A1_trust'] = df[['NEO_4','NEO_34','NEO_64',
'NEO_94','NEO_124','NEO_154','NEO_184','NEO_214']].sum(axis=1)
#A2 straightforwardedness
df['NEO_A2_sf'] = df[['NEO_9','NEO_39','NEO_69',
'NEO_99','NEO_129','NEO_159','NEO_189','NEO_219']].sum(axis=1)
#A3 altruism
df['NEO_A3_altr'] = df[['NEO_14','NEO_44','NEO_74',
'NEO_104','NEO_134','NEO_164','NEO_194','NEO_224']].sum(axis=1)
#A4 compliance
df['NEO_A4_compl'] = df[['NEO_19','NEO_49','NEO_79',
'NEO_109','NEO_139','NEO_169','NEO_199','NEO_229']].sum(axis=1)
#A5 modesty
df['NEO_A5_modes'] = df[['NEO_24','NEO_54','NEO_84',
'NEO_114','NEO_144','NEO_174','NEO_204','NEO_234']].sum(axis=1)
#A6 tender_mindedness
df['NEO_A6_tenmind'] = df[['NEO_29','NEO_59','NEO_89',
'NEO_119','NEO_149','NEO_179','NEO_209','NEO_239']].sum(axis=1)
#Conscientiousness
df['NEO_C'] = df[['NEO_5','NEO_35','NEO_65','NEO_95','NEO_125','NEO_155','NEO_185','NEO_215',
'NEO_10','NEO_40','NEO_70','NEO_100','NEO_130','NEO_160','NEO_190','NEO_220',
'NEO_15','NEO_45','NEO_75','NEO_105','NEO_135','NEO_165','NEO_195','NEO_225',
'NEO_20','NEO_50','NEO_80','NEO_110','NEO_140','NEO_170','NEO_200','NEO_230',
'NEO_25','NEO_55','NEO_85','NEO_115','NEO_145','NEO_175','NEO_205','NEO_235',
'NEO_30','NEO_60','NEO_90','NEO_120','NEO_150','NEO_180','NEO_210','NEO_240']].sum(axis=1)
#C1 compentence
df['NEO_C1_comp'] = df[['NEO_5','NEO_35','NEO_65',
'NEO_95','NEO_125','NEO_155','NEO_185','NEO_215']].sum(axis=1)
#C2 order
df['NEO_C2_order'] = df[['NEO_10','NEO_40','NEO_70',
'NEO_100','NEO_130','NEO_160','NEO_190','NEO_220']].sum(axis=1)
#C3 dutifulness
df['NEO_C3_dutif'] = df[['NEO_15','NEO_45','NEO_75',
'NEO_105','NEO_135','NEO_165','NEO_195','NEO_225']].sum(axis=1)
#C4 achievement striving
df['NEO_C4_achstr'] = df[['NEO_20','NEO_50','NEO_80',
'NEO_110','NEO_140','NEO_170','NEO_200','NEO_230']].sum(axis=1)
#C5 self discipline
df['NEO_C5_selfdis'] = df[['NEO_25','NEO_55','NEO_85',
'NEO_115','NEO_145','NEO_175','NEO_205','NEO_235']].sum(axis=1)
#C6 deliberation
df['NEO_C6_deli'] = df[['NEO_30','NEO_60','NEO_90',
'NEO_120','NEO_150','NEO_180','NEO_210','NEO_240']].sum(axis=1)
summary_cols = ['NEO_N', 'NEO_N1_anx', 'NEO_N2_host',
'NEO_N3_depr', 'NEO_N4_selfcon', 'NEO_N5_imp', 'NEO_N6_vuln',
'NEO_E', 'NEO_E1_warm', 'NEO_E2_greg',
'NEO_E3_ass', 'NEO_E4_act', 'NEO_E5_excseek', 'NEO_E6_PosEmo',
'NEO_O', 'NEO_O1_fan', 'NEO_O2_aest',
'NEO_O3_feel', 'NEO_O4_act', 'NEO_O5_idea', 'NEO_O6_value',
'NEO_A', 'NEO_A1_trust', 'NEO_A2_sf',
'NEO_A3_altr', 'NEO_A4_compl', 'NEO_A5_modes', 'NEO_A6_tenmind',
'NEO_C', 'NEO_C1_comp', 'NEO_C2_order',
'NEO_C3_dutif', 'NEO_C4_achstr', 'NEO_C5_selfdis', 'NEO_C6_deli']
df[['ids'] + summary_cols].to_csv('%s/NEO-PI-R.csv' % out_dir, decimal='.', index=False)
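# Illustrative driver (hypothetical path and column layout): each run_* scorer
# expects a pandas DataFrame with an 'ids' column plus the raw item columns and
# writes one CSV of scale scores to out_dir, e.g.
#   df = pd.read_csv('lime_survey_export.csv')
#   run_NEOPIR(df.copy(), out_dir='derivatives/questionnaires')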
##############################################################################
########## PSSI - Persönlichkeitsstil- und Störungsinventar (Personality Style and Disorder Inventory) ##########
##############################################################################
def run_PSSI(df, out_dir):
cols = ['PSSI_1','PSSI_2','PSSI_3','PSSI_4','PSSI_5','PSSI_6','PSSI_7','PSSI_8','PSSI_9',
'PSSI_10','PSSI_11','PSSI_12','PSSI_13','PSSI_14','PSSI_15','PSSI_16','PSSI_17',
'PSSI_18','PSSI_19','PSSI_20','PSSI_21','PSSI_22','PSSI_23','PSSI_24','PSSI_25',
'PSSI_26','PSSI_27','PSSI_28','PSSI_29','PSSI_30','PSSI_31','PSSI_32','PSSI_33',
'PSSI_34','PSSI_35','PSSI_36','PSSI_37','PSSI_38','PSSI_39','PSSI_40','PSSI_41',
'PSSI_42','PSSI_43','PSSI_44','PSSI_45','PSSI_46','PSSI_47','PSSI_48','PSSI_49',
'PSSI_50','PSSI_51','PSSI_52','PSSI_53','PSSI_54','PSSI_55','PSSI_56','PSSI_57',
'PSSI_58','PSSI_59','PSSI_60','PSSI_61','PSSI_62','PSSI_63','PSSI_64','PSSI_65',
'PSSI_66','PSSI_67','PSSI_68','PSSI_69','PSSI_70','PSSI_71','PSSI_72','PSSI_73',
'PSSI_74','PSSI_75','PSSI_76','PSSI_77','PSSI_78','PSSI_79','PSSI_80','PSSI_81',
'PSSI_82','PSSI_83','PSSI_84','PSSI_85','PSSI_86','PSSI_87','PSSI_88','PSSI_89',
'PSSI_90','PSSI_91','PSSI_92','PSSI_93','PSSI_94','PSSI_95','PSSI_96','PSSI_97',
'PSSI_98','PSSI_99','PSSI_100','PSSI_101','PSSI_102','PSSI_103','PSSI_104','PSSI_105',
'PSSI_106','PSSI_107','PSSI_108','PSSI_109','PSSI_110','PSSI_111','PSSI_112','PSSI_113',
'PSSI_114','PSSI_115','PSSI_116','PSSI_117','PSSI_118','PSSI_119','PSSI_120','PSSI_121',
'PSSI_122','PSSI_123','PSSI_124','PSSI_125','PSSI_126','PSSI_127','PSSI_128','PSSI_129',
'PSSI_130','PSSI_131','PSSI_132','PSSI_133','PSSI_134','PSSI_135','PSSI_136','PSSI_137',
'PSSI_138','PSSI_139','PSSI_140']
#recode all items to original format (limesurvey: 1234, original = 0123)
recoder = {1:0, 2:1, 3:2, 4:3 }
for i in cols:
df[i] = df[i].map(recoder).astype('float64')
#recode reversed items
items_recoded = ['PSSI_15',
'PSSI_43',
'PSSI_71',
'PSSI_99',
'PSSI_44',
'PSSI_72',
'PSSI_86',
'PSSI_104',
'PSSI_49',
'PSSI_91',
'PSSI_105',
'PSSI_39',
'PSSI_67',
'PSSI_109',
'PSSI_137']
recoder = {0:3, 1:2, 2:1, 3:0}
for i in items_recoded:
df[i] = df[i].map(recoder).astype('float64')
    # calculate subscales as sum scores
    #PN = eigenwillig-paranoid (willful-paranoid)
df['PSSI_PN'] = df[['PSSI_1',
'PSSI_15',
'PSSI_29',
'PSSI_43',
'PSSI_57',
'PSSI_71',
'PSSI_85',
'PSSI_99',
'PSSI_113',
'PSSI_127']].sum(axis=1)
    #SZ = zurückhaltend-schizoid (reserved-schizoid)
df['PSSI_SZ'] = df[['PSSI_2',
'PSSI_16',
'PSSI_30',
'PSSI_44',
'PSSI_58',
'PSSI_72',
'PSSI_86',
'PSSI_100',
'PSSI_114',
'PSSI_128']].sum(axis=1)
    #ST = ahnungsvoll-schizotypisch (intuitive-schizotypal)
df['PSSI_ST'] = df[['PSSI_3',
'PSSI_17',
'PSSI_31',
'PSSI_45',
'PSSI_59',
'PSSI_73',
'PSSI_87',
'PSSI_101',
'PSSI_115',
'PSSI_129']].sum(axis=1)
    #BL = spontan-borderline (spontaneous-borderline)
df['PSSI_BL'] = df[['PSSI_4',
'PSSI_18',
'PSSI_32',
'PSSI_46',
'PSSI_60',
'PSSI_74',
'PSSI_88',
'PSSI_102',
'PSSI_116',
'PSSI_130']].sum(axis=1)
    #HI = liebenswürdig-histrionisch (charming-histrionic)
df['PSSI_HI'] = df[['PSSI_5',
'PSSI_19',
'PSSI_33',
'PSSI_47',
'PSSI_61',
'PSSI_75',
'PSSI_89',
'PSSI_103',
'PSSI_117',
'PSSI_131']].sum(axis=1)
    # NA = ehrgeizig-narzisstisch (ambitious-narcissistic)
df['PSSI_NA'] = df[['PSSI_6',
'PSSI_20',
'PSSI_34',
'PSSI_48',
'PSSI_62',
'PSSI_76',
'PSSI_90',
'PSSI_104',
'PSSI_118',
'PSSI_132']].sum(axis=1)
    #SU = selbstkritisch-selbstunsicher (self-critical-avoidant)
df['PSSI_SU'] = df[['PSSI_7',
'PSSI_21',
'PSSI_35',
'PSSI_49',
'PSSI_63',
'PSSI_77',
'PSSI_91',
'PSSI_105',
'PSSI_119',
'PSSI_133']].sum(axis=1)
    # AB = loyal-abhängig (loyal-dependent)
df['PSSI_AB'] = df[['PSSI_8',
'PSSI_22',
'PSSI_36',
'PSSI_50',
'PSSI_64',
'PSSI_78',
'PSSI_92',
'PSSI_106',
'PSSI_120',
'PSSI_134']].sum(axis=1)
    # ZW = sorgfältig-zwanghaft (conscientious-compulsive)
df['PSSI_ZW'] = df[['PSSI_9',
'PSSI_23',
'PSSI_37',
'PSSI_51',
'PSSI_65',
'PSSI_79',
'PSSI_93',
'PSSI_107',
'PSSI_121',
'PSSI_135']].sum(axis=1)
    #NT = kritisch-negativistisch (critical-negativistic)
df['PSSI_NT'] = df[['PSSI_10',
'PSSI_24',
'PSSI_38',
'PSSI_52',
'PSSI_66',
'PSSI_80',
'PSSI_94',
'PSSI_108',
'PSSI_122',
'PSSI_136']].sum(axis=1)
    # DP = still-depressiv (quiet-depressive)
df['PSSI_DP'] = df[['PSSI_11',
'PSSI_25',
'PSSI_39',
'PSSI_53',
'PSSI_67',
'PSSI_81',
'PSSI_95',
'PSSI_109',
'PSSI_123',
'PSSI_137']].sum(axis=1)
    #SL = hilfsbereit-selbstlos (helpful-selfless)
df['PSSI_SL'] = df[['PSSI_12',
'PSSI_26',
'PSSI_40',
'PSSI_54',
'PSSI_68',
'PSSI_82',
'PSSI_96',
'PSSI_110',
'PSSI_124',
'PSSI_138']].sum(axis=1)
    #RH = optimistisch-rhapsodisch (optimistic-rhapsodic)
df['PSSI_RH'] = df[['PSSI_13',
'PSSI_27',
'PSSI_41',
'PSSI_55',
'PSSI_69',
'PSSI_83',
'PSSI_97',
'PSSI_111',
'PSSI_125',
'PSSI_139']].sum(axis=1)
    #AS = selbstbehauptend-antisozial (self-assertive-antisocial)
df['PSSI_AS'] = df[['PSSI_14',
'PSSI_28',
'PSSI_42',
'PSSI_56',
'PSSI_70',
'PSSI_84',
'PSSI_98',
'PSSI_112',
'PSSI_126',
'PSSI_140']].sum(axis=1)
cols_export = ['ids'] + ["PSSI_PN", 'PSSI_SZ', 'PSSI_ST', 'PSSI_BL',
'PSSI_HI', 'PSSI_NA', 'PSSI_SU', 'PSSI_AB',
'PSSI_ZW', 'PSSI_NT', 'PSSI_DP', 'PSSI_SL',
'PSSI_RH', 'PSSI_AS']
df[cols_export].to_csv('%s/PSSI.csv' % out_dir, decimal='.', index=False)
##############################################################################
################################## MMI #######################################
##############################################################################
def run_MMI(df, out_dir):
#items to be recoded
cols= ['MMI_1_4_A' ,'MMI_1_4_B' ,'MMI_1_4_C' ,'MMI_1_4_D' ,'MMI_1_4_E' ,'MMI_1_4_F' ,
'MMI_1_4_G' ,'MMI_1_4_H' ,'MMI_1_4_I' ,'MMI_1_4_J' ,'MMI_1_4_K' ,'MMI_1_4_L' ,
'MMI_2_4_A' ,'MMI_2_4_B' ,'MMI_2_4_C' ,'MMI_2_4_D' ,'MMI_2_4_E' ,'MMI_2_4_F' ,
'MMI_2_4_G' ,'MMI_2_4_H' ,'MMI_2_4_I' ,'MMI_2_4_J' ,'MMI_2_4_K' ,'MMI_2_4_L' ,
'MMI_3_4_A' ,'MMI_3_4_B' ,'MMI_3_4_C' ,'MMI_3_4_D' ,'MMI_3_4_E' , 'MMI_3_4_F' ,
'MMI_3_4_G' ,'MMI_3_4_H' ,'MMI_3_4_I' ,'MMI_3_4_J' ,'MMI_3_4_K' ,'MMI_3_4_L' ,
'MMI_4_4_A' ,'MMI_4_4_B' ,'MMI_4_4_C' ,'MMI_4_4_D' ,'MMI_4_4_E' ,'MMI_4_4_F' ,
'MMI_4_4_G' ,'MMI_4_4_H' ,'MMI_4_4_I' ,'MMI_4_4_J' ,'MMI_4_4_K' ,'MMI_4_4_L' ,
'MMI_5_4_A' ,'MMI_5_4_B' ,'MMI_5_4_C' ,'MMI_5_4_D' ,'MMI_5_4_E' ,'MMI_5_4_F' ,
'MMI_5_4_G' ,'MMI_5_4_H' ,'MMI_5_4_I' ,'MMI_5_4_J' ,'MMI_5_4_K' ,'MMI_5_4_L' ,
'MMI_6_4_A' ,'MMI_6_4_B' ,'MMI_6_4_C' ,'MMI_6_4_D' ,'MMI_6_4_E' ,'MMI_6_4_F' ,
'MMI_6_4_G' ,'MMI_6_4_H' ,'MMI_6_4_I' ,'MMI_6_4_J' ,'MMI_6_4_K' ,'MMI_6_4_L' ,
'MMI_7_4_A' ,'MMI_7_4_B' ,'MMI_7_4_C' ,'MMI_7_4_D' ,'MMI_7_4_E' ,'MMI_7_4_F' ,
'MMI_7_4_G' ,'MMI_7_4_H' ,'MMI_7_4_I' ,'MMI_7_4_J' ,'MMI_7_4_K' ,'MMI_7_4_L' ,
'MMI_8_4_A' ,'MMI_8_4_B' ,'MMI_8_4_C' ,'MMI_8_4_D' ,'MMI_8_4_E' ,'MMI_8_4_F' ,
'MMI_8_4_G' ,'MMI_8_4_H' ,'MMI_8_4_I' ,'MMI_8_4_J' ,'MMI_8_4_K' ,'MMI_8_4_L' ,
'MMI_9_6_A','MMI_9_6_B' ,'MMI_9_6_C' ,'MMI_9_6_D' ,'MMI_9_6_E' ,'MMI_9_6_F' ,
'MMI_9_6_G' ,'MMI_9_6_H' ,'MMI_9_6_I' ,'MMI_9_6_J' ,'MMI_9_6_K' ,'MMI_9_6_L' ,
'MMI_10_4_A' ,'MMI_10_4_B' ,'MMI_10_4_C' ,'MMI_10_4_D' ,'MMI_10_4_E' ,'MMI_10_4_F' ,
'MMI_10_4_G' ,'MMI_10_4_H' ,'MMI_10_4_I' ,'MMI_10_4_J' ,'MMI_10_4_K' ,'MMI_10_4_L' ,
'MMI_11_4_A' ,'MMI_11_4_B' ,'MMI_11_4_C' ,'MMI_11_4_D' ,'MMI_11_4_E' ,'MMI_11_4_F' ,
'MMI_11_4_G' ,'MMI_11_4_H' ,'MMI_11_4_I' ,'MMI_11_4_J' ,'MMI_11_4_K' ,'MMI_11_4_L' ,
'MMI_12_4_A' ,'MMI_12_4_B' ,'MMI_12_4_C' ,'MMI_12_4_D' ,'MMI_12_4_E' ,'MMI_12_4_F' ,
'MMI_12_4_G' ,'MMI_12_4_H']
#recode items
recoder = {5 :'NaN', 4 :1, 3:0.66, 2:0.33, 1:0}
for i in cols:
df[i] = df[i].map(recoder).astype('float64')
    # Calculate the total score per media type as the sum of its twelve sub-items.
    # Note: medium 9 uses the column prefix 'MMI_9_6_', all other media use 'MMI_<i>_4_'.
    for i in range(1, 13):
        prefix = 'MMI_%d_6' % i if i == 9 else 'MMI_%d_4' % i
        item_cols = ['%s_%s' % (prefix, letter) for letter in 'ABCDEFGHIJKL']
        df['MMI%d' % i] = df[item_cols].sum(axis=1).astype('float64')
    df['TotalHours'] = df[['MMI_1_1', 'MMI_2_1', 'MMI_3_1', 'MMI_4_1', 'MMI_5_1', 'MMI_6_1',
                           'MMI_7_1', 'MMI_8_1', 'MMI_9_1', 'MMI_10_1', 'MMI_11_1',
                           'MMI_12_1']].sum(axis=1).astype('float64')
    # Each media type is weighted by the hours spent with that (primary) medium
    # divided by the hours spent with all media.
    for i in range(1, 13):
        weighted_col = 'MMI%dxhoursdividedbytotalhours' % i
        df[weighted_col] = df['MMI%d' % i] * df['MMI_%d_1' % i].astype('float64') / df['TotalHours']
    # The index (MMI_score) is the sum of the weighted scales.
    weighted_cols = ['MMI%dxhoursdividedbytotalhours' % i for i in range(1, 13)]
    df['MMI_score'] = df[weighted_cols].sum(axis=1)
cols_export = ['ids'] + ['MMI_score']
df[cols_export].to_csv('%s/MMI.csv' % out_dir, decimal='.', index=False)
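# --- Illustrative sketch (not part of the original scoring script) ---
# The media multitasking index computed in run_MMI is a weighted sum:
#     MMI_score = sum_i(MMI_i * hours_i) / total_hours
# The helper below (name and numbers are ours, purely hypothetical) is a toy check
# of that formula for two media types.
def _demo_mmi_weighting():
    import pandas as pd
    hours = pd.Series([2.0, 6.0])   # hours spent with two hypothetical media types
    scale = pd.Series([0.5, 1.0])   # their per-medium multitasking scores
    total_hours = hours.sum()
    # (0.5 * 2 + 1.0 * 6) / 8 = 0.875
    return float((scale * hours / total_hours).sum())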
##############################################################################
############################## BIS/BAS #######################################
##############################################################################
def run_BISBAS(df, out_dir):
#items to be recoded
    items_recoded =
('pad_xx', c_char * 100) )
plist.append( ('rhi_user25', c_float) )
plist.append( ('rhi_user26', c_float) )
plist.append( ('rhi_user27', c_float) )
plist.append( ('rhi_user28', c_float) )
plist.append( ('rhi_user29', c_float) )
plist.append( ('rhi_user30', c_float) )
plist.append( ('rhi_user31', c_float) )
plist.append( ('rhi_user32', c_float) )
plist.append( ('rhi_user33', c_float) )
plist.append( ('rhi_user34', c_float) )
plist.append( ('rhi_user35', c_float) )
plist.append( ('rhi_user36', c_float) )
plist.append( ('rhi_user37', c_float) )
plist.append( ('rhi_user38', c_float) )
plist.append( ('rhi_user39', c_float) )
plist.append( ('rhi_user40', c_float) )
plist.append( ('rhi_user41', c_float) )
plist.append( ('rhi_user42', c_float) )
plist.append( ('rhi_user43', c_float) )
plist.append( ('rhi_user44', c_float) )
plist.append( ('rhi_user45', c_float) )
plist.append( ('rhi_user46', c_float) )
plist.append( ('rhi_user47', c_float) )
plist.append( ('rhi_user48', c_float) )
elif version == 12:
plist.append( ('rhr_rh_rdbm_rev', c_float) )
plist.append( ('pad_xx', c_char * 12) )
plist.append( ('rhr_rh_scan_date', c_char * 10) )
plist.append( ('rhr_rh_scan_time', c_char * 8) )
plist.append( ('rhr_rh_logo', c_char * 10) )
plist.append( ('rhr_rh_file_contents', c_short) )
plist.append( ('pad_xx', c_char * 10) )
plist.append( ('rhr_rh_data_collect_type', c_short) )
plist.append( ('pad_xx', c_char * 6) )
plist.append( ('rhr_rh_npasses', c_short) )
plist.append( ('pad_xx', c_char * 2) )
plist.append( ('rhr_rh_nslices', c_short) )
plist.append( ('pad_xx', c_char * 10) )
plist.append( ('rhr_rh_frame_size', c_ushort) )
plist.append( ('rhr_rh_point_size', c_short) )
plist.append( ('pad_xx', c_char * 32) )
plist.append( ('rhr_rh_raw_pass_size', c_int) )
plist.append( ('pad_xx', c_char * 80) )
plist.append( ('rhr_rh_dab[0]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[0]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[1]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[1]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[2]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[2]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[3]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[3]_stop_rcv', c_short) )
plist.append( ('rhr_rh_user0', c_float) )
plist.append( ('rhr_rh_user1', c_float) )
plist.append( ('rhr_rh_user2', c_float) )
plist.append( ('rhr_rh_user3', c_float) )
plist.append( ('rhr_rh_user4', c_float) )
plist.append( ('rhr_rh_user5', c_float) )
plist.append( ('rhr_rh_user6', c_float) )
plist.append( ('rhr_rh_user7', c_float) )
plist.append( ('rhr_rh_user8', c_float) )
plist.append( ('rhr_rh_user9', c_float) )
plist.append( ('rhr_rh_user10', c_float) )
plist.append( ('rhr_rh_user11', c_float) )
plist.append( ('rhr_rh_user12', c_float) )
plist.append( ('rhr_rh_user13', c_float) )
plist.append( ('rhr_rh_user14', c_float) )
plist.append( ('rhr_rh_user15', c_float) )
plist.append( ('rhr_rh_user16', c_float) )
plist.append( ('rhr_rh_user17', c_float) )
plist.append( ('rhr_rh_user18', c_float) )
plist.append( ('rhr_rh_user19', c_float) )
plist.append( ('pad_xx', c_char * 72) )
plist.append( ('rhr_spectral_width', c_float) )
plist.append( ('rhr_csi_dims', c_short) )
plist.append( ('rhr_xcsi', c_short) )
plist.append( ('rhr_ycsi', c_short) )
plist.append( ('rhr_zcsi', c_short) )
plist.append( ('rhr_roilenx', c_float) )
plist.append( ('rhr_roileny', c_float) )
plist.append( ('rhr_roilenz', c_float) )
plist.append( ('pad_xx', c_char * 32) )
plist.append( ('rhr_rh_ps_mps_freq', c_int) )
plist.append( ('pad_xx', c_char * 560) )
plist.append( ('rhr_rh_user_usage_tag', c_uint) )
plist.append( ('pad_xx', c_char * 8) )
plist.append( ('rhr_rh_user20', c_float) )
plist.append( ('rhr_rh_user21', c_float) )
plist.append( ('rhr_rh_user22', c_float) )
plist.append( ('rhr_rh_user23', c_float) )
plist.append( ('rhr_rh_user24', c_float) )
plist.append( ('rhr_rh_user25', c_float) )
plist.append( ('rhr_rh_user26', c_float) )
plist.append( ('rhr_rh_user27', c_float) )
plist.append( ('rhr_rh_user28', c_float) )
plist.append( ('rhr_rh_user29', c_float) )
plist.append( ('rhr_rh_user30', c_float) )
plist.append( ('rhr_rh_user31', c_float) )
plist.append( ('rhr_rh_user32', c_float) )
plist.append( ('rhr_rh_user33', c_float) )
plist.append( ('rhr_rh_user34', c_float) )
plist.append( ('rhr_rh_user35', c_float) )
plist.append( ('rhr_rh_user36', c_float) )
plist.append( ('rhr_rh_user37', c_float) )
plist.append( ('rhr_rh_user38', c_float) )
plist.append( ('rhr_rh_user39', c_float) )
plist.append( ('rhr_rh_user40', c_float) )
plist.append( ('rhr_rh_user41', c_float) )
plist.append( ('rhr_rh_user42', c_float) )
plist.append( ('rhr_rh_user43', c_float) )
plist.append( ('rhr_rh_user44', c_float) )
plist.append( ('rhr_rh_user45', c_float) )
plist.append( ('rhr_rh_user46', c_float) )
plist.append( ('rhr_rh_user47', c_float) )
plist.append( ('rhr_rh_user48', c_float) )
plist.append( ('pad_xx', c_char * 352) )
plist.append( ('rhr_rdb_hdr_off_data', c_int) )
plist.append( ('pad_xx', c_char * 60088) )
plist.append( ('rhe_magstrength', c_int) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhe_ex_datetime', c_int) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhe_ex_no', c_ushort) )
plist.append( ('pad_xx', c_char * 22) )
plist.append( ('rhe_patsex', c_short) )
plist.append( ('pad_xx', c_char * 75) )
plist.append( ('rhe_reqnum', c_char * 13) )
plist.append( ('rhe_refphy', c_char * 33) )
plist.append( ('pad_xx', c_char * 105) )
plist.append( ('rhe_ex_sysid', c_char * 9) )
plist.append( ('pad_xx', c_char * 14) )
plist.append( ('rhe_hospname', c_char * 33) )
plist.append( ('rhe_patid', c_char * 13) )
plist.append( ('rhe_patname', c_char * 25) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhe_ex_verscre', c_char * 2) )
plist.append( ('pad_xx', c_char * 2) )
plist.append( ('rhe_uniq_sys_id', c_char * 16) )
plist.append( ('pad_xx', c_char * 20) )
plist.append( ('rhe_study_uid', c_char * 32) )
plist.append( ('pad_xx', c_char * 64) )
plist.append( ('rhe_patnameff', c_char * 65) )
plist.append( ('rhe_patidff', c_char * 65) )
plist.append( ('rhe_reqnumff', c_char * 17) )
plist.append( ('rhe_dateofbirth', c_char * 9) )
plist.append( ('pad_xx', c_char * 422) )
plist.append( ('rhs_position', c_int) )
plist.append( ('rhs_entry', c_int) )
plist.append( ('pad_xx', c_char * 62) )
plist.append( ('rhs_se_no', c_short) )
plist.append( ('pad_xx', c_char * 74) )
plist.append( ('rhs_se_desc', c_char * 65) )
plist.append( ('pad_xx', c_char * 18) )
plist.append( ('rhs_anref', c_char * 3) )
plist.append( ('pad_xx', c_char * 27) )
plist.append( ('rhs_series_uid', c_char * 32) )
plist.append( ('rhs_landmark_uid', c_char * 32) )
plist.append( ('pad_xx', c_char * 1573) )
plist.append( ('rhi_dfov', c_float) )
plist.append( ('pad_xx', c_char * 12) )
plist.append( ('rhi_scanspacing', c_float) )
plist.append( ('rhi_loc', c_float) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhi_nex', c_float) )
plist.append( ('pad_xx', c_char * 20) )
plist.append( ('rhi_user0', c_float) )
plist.append( ('rhi_user1', c_float) )
plist.append( ('rhi_user2', c_float) )
plist.append( ('rhi_user3', c_float) )
plist.append( ('rhi_user4', c_float) )
plist.append( ('rhi_user5', c_float) )
plist.append( ('rhi_user6', c_float) )
plist.append( ('rhi_user7', c_float) )
plist.append( ('rhi_user8', c_float) )
plist.append( ('rhi_user9', c_float) )
plist.append( ('rhi_user10', c_float) )
plist.append( ('rhi_user11', c_float) )
plist.append( ('rhi_user12', c_float) )
plist.append( ('rhi_user13', c_float) )
plist.append( ('rhi_user14', c_float) )
plist.append( ('rhi_user15', c_float) )
plist.append( ('rhi_user16', c_float) )
plist.append( ('rhi_user17', c_float) )
plist.append( ('rhi_user18', c_float) )
plist.append( ('rhi_user19', c_float) )
plist.append( ('rhi_user20', c_float) )
plist.append( ('rhi_user21', c_float) )
plist.append( ('rhi_user22', c_float) )
plist.append( ('pad_xx', c_char * 8) )
plist.append( ('rhi_user23', c_float) )
plist.append( ('rhi_user24', c_float) )
plist.append( ('pad_xx', c_char * 60) )
plist.append( ('rhi_user25', c_float) )
plist.append( ('rhi_user26', c_float) )
plist.append( ('rhi_user27', c_float) )
plist.append( ('rhi_user28', c_float) )
plist.append( ('rhi_user29', c_float) )
plist.append( ('rhi_user30', c_float) )
plist.append( ('rhi_user31', c_float) )
plist.append( ('rhi_user32', c_float) )
plist.append( ('rhi_user33', c_float) )
plist.append( ('rhi_user34', c_float) )
plist.append( ('rhi_user35', c_float) )
plist.append( ('rhi_user36', c_float) )
plist.append( ('rhi_user37', c_float) )
plist.append( ('rhi_user38', c_float) )
plist.append( ('rhi_user39', c_float) )
plist.append( ('rhi_user40', c_float) )
plist.append( ('rhi_user41', c_float) )
plist.append( ('rhi_user42', c_float) )
plist.append( ('rhi_user43', c_float) )
plist.append( ('rhi_user44', c_float) )
plist.append( ('rhi_user45', c_float) )
plist.append( ('rhi_user46', c_float) )
plist.append( ('rhi_user47', c_float) )
plist.append( ('rhi_user48', c_float) )
plist.append( ('pad_xx', c_char * 76) )
plist.append( ('rhi_ctr_R', c_float) )
plist.append( ('rhi_ctr_A', c_float) )
plist.append( ('rhi_ctr_S', c_float) )
plist.append( ('pad_xx', c_char * 12) )
plist.append( ('rhi_tlhc_R', c_float) )
plist.append( ('rhi_tlhc_A', c_float) )
plist.append( ('rhi_tlhc_S', c_float) )
plist.append( ('rhi_trhc_R', c_float) )
plist.append( ('rhi_trhc_A', c_float) )
plist.append( ('rhi_trhc_S', c_float) )
plist.append( ('rhi_brhc_R', c_float) )
plist.append( ('rhi_brhc_A', c_float) )
plist.append( ('rhi_brhc_S', c_float) )
plist.append( ('pad_xx', c_char * 36) )
plist.append( ('rhi_tr', c_int) )
plist.append( ('rhi_ti', c_int) )
plist.append( ('rhi_te', c_int) )
plist.append( ('pad_xx', c_char * 170) )
plist.append( ('rhi_numecho', c_short) )
plist.append( ('pad_xx', c_char * 36) )
plist.append( ('rhi_mr_flip', c_short) )
plist.append( ('pad_xx', c_char * 22) )
plist.append( ('rhi_ctyp', c_short) )
plist.append( ('pad_xx', c_char * 64) )
plist.append( ('rhi_freq_dir', c_short) )
plist.append( ('pad_xx', c_char * 38) )
plist.append( ('rhi_psdname', c_char * 33) )
plist.append( ('pad_xx', c_char * 84) )
plist.append( ('rhi_cname', c_char * 17) )
plist.append( ('pad_xx', c_char * 51) )
plist.append( ('rhi_image_uid', c_char * 32) )
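        # --- Illustrative note (assumption, not part of the original header map) ---
        # A (name, ctype) field list like `plist` is typically consumed by building a
        # ctypes Structure and reading the raw P-file header bytes into it, e.g.:
        #
        #     import ctypes
        #     fields = []
        #     for n, (name, ctype) in enumerate(plist):
        #         # placeholder pad fields all share the name 'pad_xx', so make them unique
        #         fields.append(('%s_%d' % (name, n) if name == 'pad_xx' else name, ctype))
        #     class Header(ctypes.LittleEndianStructure):
        #         _pack_ = 1
        #         _fields_ = fields
        #     hdr = Header()
        #     with open(pfile_path, 'rb') as f:   # pfile_path is hypothetical
        #         f.readinto(hdr)
        #
        # Packing and endianness depend on the scanner platform; this is only a sketch
        # of how the field offsets declared above could be used.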
elif version == 14:
plist.append( ('rhr_rh_rdbm_rev', c_float) )
plist.append( ('pad_xx', c_char * 12) )
plist.append( ('rhr_rh_scan_date', c_char * 10) )
plist.append( ('rhr_rh_scan_time', c_char * 8) )
plist.append( ('rhr_rh_logo', c_char * 10) )
plist.append( ('rhr_rh_file_contents', c_short) )
plist.append( ('pad_xx', c_char * 10) )
plist.append( ('rhr_rh_data_collect_type', c_short) )
plist.append( ('pad_xx', c_char * 6) )
plist.append( ('rhr_rh_npasses', c_short) )
plist.append( ('pad_xx', c_char * 2) )
plist.append( ('rhr_rh_nslices', c_short) )
plist.append( ('pad_xx', c_char * 10) )
plist.append( ('rhr_rh_frame_size', c_ushort) )
plist.append( ('rhr_rh_point_size', c_short) )
plist.append( ('pad_xx', c_char * 32) )
plist.append( ('rhr_rh_raw_pass_size', c_uint) )
plist.append( ('pad_xx', c_char * 80) )
plist.append( ('rhr_rh_dab[0]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[0]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[1]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[1]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[2]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[2]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[3]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[3]_stop_rcv', c_short) )
plist.append( ('rhr_rh_user0', c_float) )
plist.append( ('rhr_rh_user1', c_float) )
plist.append( ('rhr_rh_user2', c_float) )
plist.append( ('rhr_rh_user3', c_float) )
plist.append( ('rhr_rh_user4', c_float) )
plist.append( ('rhr_rh_user5', c_float) )
plist.append( |