import multiprocessing as mp
from copy import copy
import numpy as np
import tkinter
import pickle
import os
from itertools import accumulate
from matplotlib import pyplot as plt, lines
from casadi import Callback, nlpsol_out, nlpsol_n_out, Sparsity
from ..misc.data import Data
from ..misc.enums import PlotType, ControlType, InterpolationType
from ..misc.mapping import Mapping
from ..misc.utils import check_version
class CustomPlot:
def __init__(
self, update_function, plot_type=PlotType.PLOT, axes_idx=None, legend=(), combine_to=None, color=None, ylim=None, bounds=None,
):
"""
Initializes the plot.
:param update_function: Function to plot.
:param plot_type: Type of plot. (PLOT = 0, INTEGRATED = 1 or STEP = 2)
:param axes_idx: Index of the axis to be mapped. (integer)
:param legend: Legend of the graphs. (?)
:param combine_to: Plot in which to add the graph. ??
:param color: Color of the graphs. (?)
"""
self.function = update_function
self.type = plot_type
if axes_idx is None:
self.phase_mappings = None # Will be set later
elif isinstance(axes_idx, (tuple, list)):
self.phase_mappings = Mapping(axes_idx)
elif isinstance(axes_idx, Mapping):
self.phase_mappings = axes_idx
else:
raise RuntimeError("phase_mapping must be a list or a Mapping")
self.legend = legend
self.combine_to = combine_to
self.color = color
self.ylim = ylim
self.bounds = bounds
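# Illustrative sketch (not part of the original module): one way a CustomPlot
# could be declared, assuming an OCP problem passes states x, controls u and
# parameters p to the update function. The lambda and the "my_states" key are
# hypothetical.
#
# custom = CustomPlot(
#     lambda x, u, p: x[:2, :],            # plot the first two states
#     plot_type=PlotType.INTEGRATED,
#     axes_idx=[0, 1],
#     legend=("q0", "q1"),
#     color="tab:blue",
#     ylim=(-1, 1),
# )
# nlp["plot"]["my_states"] = custom        # later picked up by PlotOcp.__create_plots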
class PlotOcp:
def __init__(self, ocp, automatically_organize=True, adapt_graph_size_to_bounds=False):
"""Prepares the figure"""
for i in range(1, ocp.nb_phases):
if ocp.nlp[0]["nbQ"] != ocp.nlp[i]["nbQ"]:
raise RuntimeError("Graphs with nbQ different at each phase is not implemented yet")
self.ocp = ocp
self.plot_options = {
"general_options": {"use_tight_layout": False},
"non_integrated_plots": {"linestyle": "-.", "markersize": 3},
"integrated_plots": {"linestyle": "-", "markersize": 3, "linewidth": 1.1},
"bounds": {"color": "k", "linewidth": 0.4, "linestyle": "-"},
"grid": {"color": "k", "linestyle": "-", "linewidth": 0.15},
"vertical_lines": {"color": "k", "linestyle": "--", "linewidth": 1.2},
}
self.ydata = []
self.ns = 0
self.t = []
self.t_integrated = []
if isinstance(self.ocp.initial_phase_time, (int, float)):
self.tf = [self.ocp.initial_phase_time]
else:
self.tf = list(self.ocp.initial_phase_time)
self.t_idx_to_optimize = []
for i, nlp in enumerate(self.ocp.nlp):
if isinstance(nlp["tf"], self.ocp.CX):
self.t_idx_to_optimize.append(i)
self.__update_time_vector()
self.axes = {}
self.plots = []
self.plots_vertical_lines = []
self.plots_bounds = []
self.all_figures = []
self.automatically_organize = automatically_organize
self._organize_windows(len(self.ocp.nlp[0]["var_states"]) + len(self.ocp.nlp[0]["var_controls"]))
self.plot_func = {}
self.variable_sizes = []
self.adapt_graph_size_to_bounds = adapt_graph_size_to_bounds
self.__create_plots()
horz = 0
vert = 1 if len(self.all_figures) < self.nb_vertical_windows * self.nb_horizontal_windows else 0
for i, fig in enumerate(self.all_figures):
if self.automatically_organize:
try:
fig.canvas.manager.window.move(
int(vert * self.width_step), int(self.top_margin + horz * self.height_step)
)
vert += 1
if vert >= self.nb_vertical_windows:
horz += 1
vert = 0
except AttributeError:
pass
fig.canvas.draw()
if self.plot_options["general_options"]["use_tight_layout"]:
fig.tight_layout()
def __update_time_vector(self):
"""Sets x-axis array"""
self.t = []
self.t_integrated = []
last_t = 0
for phase_idx, nlp in enumerate(self.ocp.nlp):
nb_int_steps = nlp["nb_integration_steps"]
dt_ns = self.tf[phase_idx] / nlp["ns"]
time_phase_integrated = []
last_t_int = copy(last_t)
for _ in range(nlp["ns"]):
time_phase_integrated.append(np.linspace(last_t_int, last_t_int + dt_ns, nb_int_steps + 1))
last_t_int += dt_ns
self.t_integrated.append(time_phase_integrated)
self.ns += nlp["ns"] + 1
time_phase = np.linspace(last_t, last_t + self.tf[phase_idx], nlp["ns"] + 1)
last_t += self.tf[phase_idx]
self.t.append(time_phase)
def __create_plots(self):
"""Actually plots"""
variable_sizes = []
for i, nlp in enumerate(self.ocp.nlp):
variable_sizes.append({})
if "plot" in nlp:
for key in nlp["plot"]:
if isinstance(nlp["plot"][key], tuple):
nlp["plot"][key] = nlp["plot"][key][0]
if nlp["plot"][key].phase_mappings is None:
size = (
nlp["plot"][key]
.function(np.zeros((nlp["nx"], 1)), np.zeros((nlp["nu"], 1)), np.zeros((nlp["np"], 1)))
.shape[0]
)
nlp["plot"][key].phase_mappings = Mapping(range(size))
else:
size = len(nlp["plot"][key].phase_mappings.map_idx)
if key not in variable_sizes[i]:
variable_sizes[i][key] = size
else:
variable_sizes[i][key] = max(variable_sizes[i][key], size)
self.variable_sizes = variable_sizes
if not variable_sizes:
# No graph was set up in problem_type
return
self.plot_func = {}
for i, nlp in enumerate(self.ocp.nlp):
for variable in self.variable_sizes[i]:
nb = max(nlp["plot"][variable].phase_mappings.map_idx) + 1
nb_cols, nb_rows = PlotOcp._generate_windows_size(nb)
if nlp["plot"][variable].combine_to:
self.axes[variable] = self.axes[nlp["plot"][variable].combine_to]
axes = self.axes[variable][1]
elif i > 0 and variable in self.axes:
axes = self.axes[variable][1]
else:
axes = self.__add_new_axis(variable, nb, nb_rows, nb_cols)
self.axes[variable] = [nlp["plot"][variable], axes]
t = self.t[i]
if variable not in self.plot_func:
self.plot_func[variable] = [None] * self.ocp.nb_phases
self.plot_func[variable][i] = nlp["plot"][variable]
mapping = self.plot_func[variable][i].phase_mappings.map_idx
for ctr, k in enumerate(mapping):
ax = axes[k]
if k < len(self.plot_func[variable][i].legend):
axes[k].set_title(self.plot_func[variable][i].legend[k])
ax.grid(**self.plot_options["grid"])
ax.set_xlim(0, self.t[-1][-1])
if nlp["plot"][variable].ylim:
ax.set_ylim(nlp["plot"][variable].ylim)
elif self.adapt_graph_size_to_bounds and nlp["plot"][variable].bounds:
if nlp["plot"][variable].bounds.type != InterpolationType.CUSTOM:
y_min = nlp["plot"][variable].bounds.min[ctr].min()
y_max = nlp["plot"][variable].bounds.max[ctr].max()
else:
nlp["plot"][variable].bounds.check_and_adjust_dimensions(len(mapping), nlp["ns"])
y_min = min([nlp["plot"][variable].bounds.min.evaluate_at(j)[k] for j in range(nlp["ns"])])
y_max = max([nlp["plot"][variable].bounds.max.evaluate_at(j)[k] for j in range(nlp["ns"])])
y_range, _ = self.__compute_ylim(y_min, y_max, 1.25)
ax.set_ylim(y_range)
zero = np.zeros((t.shape[0], 1))
plot_type = self.plot_func[variable][i].type
if plot_type == PlotType.PLOT:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:green"
self.plots.append(
[plot_type, i, ax.plot(t, zero, color=color, zorder=0, **self.plot_options["non_integrated_plots"])[0]]
)
elif plot_type == PlotType.INTEGRATED:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:brown"
plots_integrated = []
nb_int_steps = nlp["nb_integration_steps"]
for cmp in range(nlp["ns"]):
plots_integrated.append(
ax.plot(
self.t_integrated[i][cmp],
np.zeros(nb_int_steps + 1),
color=color,
**self.plot_options["integrated_plots"],
)[0]
)
self.plots.append([plot_type, i, plots_integrated])
elif plot_type == PlotType.STEP:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:orange"
self.plots.append([plot_type, i, ax.step(t, zero, where="post", color=color, zorder=0)[0]])
else:
raise RuntimeError(f"{plot_type} is not implemented yet")
for j, ax in enumerate(axes):
intersections_time = self.find_phases_intersections()
for time in intersections_time:
self.plots_vertical_lines.append(ax.axvline(time, **self.plot_options["vertical_lines"]))
if self.axes[variable][0].bounds:
if self.axes[variable][0].bounds.type == InterpolationType.EACH_FRAME:
ns = self.axes[variable][0].bounds.min.shape[1] - 1
else:
ns = nlp["ns"]
self.axes[variable][0].bounds.check_and_adjust_dimensions(
nb_elements=len(mapping), nb_shooting=ns
)
bounds_min = np.array(
[self.axes[variable][0].bounds.min.evaluate_at(k)[j] for k in range(ns + 1)]
)
bounds_max = np.array(
[self.axes[variable][0].bounds.max.evaluate_at(k)[j] for k in range(ns + 1)]
)
if bounds_min.shape[0] == nlp["ns"]:
bounds_min = np.concatenate((bounds_min, [bounds_min[-1]]))
bounds_max = np.concatenate((bounds_max, [bounds_max[-1]]))
self.plots_bounds.append(
[ax.step(self.t[i], bounds_min, where='post', **self.plot_options["bounds"]), i]
)
self.plots_bounds.append(
[ax.step(self.t[i], bounds_max, where='post', **self.plot_options["bounds"]), i]
)
def __add_new_axis(self, variable, nb, nb_rows, nb_cols):
"""
Sets the axis of the plots.
:param variable: Name of the variable to plot. (string)
:param nb: Number of subplots to keep. (integer)
:param nb_rows: Number of rows of plots in subplots. (integer)
:param nb_cols: Number of columns of plots in subplots. (integer)
:return: axes: Axes of the plots. (instance of subplot class)
"""
if self.automatically_organize:
self.all_figures.append(plt.figure(variable, figsize=(self.width_step / 100, self.height_step / 131)))
else:
self.all_figures.append(plt.figure(variable))
axes = self.all_figures[-1].subplots(nb_rows, nb_cols)
if isinstance(axes, np.ndarray):
axes = axes.flatten()
else:
axes = [axes]
for i in range(nb, len(axes)):
axes[i].remove()
axes = axes[:nb]
idx_center = nb_rows * nb_cols - int(nb_cols / 2) - 1
if idx_center >= len(axes):
idx_center = len(axes) - 1
axes[idx_center].set_xlabel("time (s)")
self.all_figures[-1].tight_layout()
return axes
def _organize_windows(self, nb_windows):
"""
Organizes the figures esthetically on the screen.
:param nb_windows: Number of variables to plot. (integer)
"""
self.nb_vertical_windows, self.nb_horizontal_windows = PlotOcp._generate_windows_size(nb_windows)
if self.automatically_organize:
height = tkinter.Tk().winfo_screenheight()
width = tkinter.Tk().winfo_screenwidth()
self.top_margin = height / 15
self.height_step = (height - self.top_margin) / self.nb_horizontal_windows
self.width_step = width / self.nb_vertical_windows
else:
self.top_margin = None
self.height_step = None
self.width_step = None
def find_phases_intersections(self):
"""Finds the intersection between phases"""
return list(accumulate(self.tf))[:-1]
@staticmethod
def show():
plt.show()
def update_data(self, V):
"""Update of the variable V to plot (dependent axis)"""
self.ydata = []
data_states, data_controls, data_param = Data.get_data(
self.ocp, V, get_parameters=True, integrate=True, concatenate=False
)
data_param_in_dyn = np.array([data_param[key] for key in data_param if key != "time"]).squeeze()
for _ in self.ocp.nlp:
if self.t_idx_to_optimize:
for i_in_time, i_in_tf in enumerate(self.t_idx_to_optimize):
self.tf[i_in_tf] = data_param["time"][i_in_time]
self.__update_xdata()
data_states_per_phase, data_controls_per_phase = Data.get_data(self.ocp, V, integrate=True, concatenate=False)
for i, nlp in enumerate(self.ocp.nlp):
step_size = nlp["nb_integration_steps"] + 1
nb_elements = nlp["ns"] * step_size + 1
state = np.ndarray((0, nb_elements))
for s in nlp["var_states"]:
if isinstance(data_states_per_phase[s], (list, tuple)):
state = np.concatenate((state, data_states_per_phase[s][i]))
else:
state = np.concatenate((state, data_states_per_phase[s]))
control = np.ndarray((0, nlp["ns"] + 1))
for s in nlp["var_controls"]:
if isinstance(data_controls_per_phase[s], (list, tuple)):
control = np.concatenate((control, data_controls_per_phase[s][i]))
else:
control = np.concatenate((control, data_controls_per_phase[s]))
if nlp["control_type"] == ControlType.CONSTANT:
u_mod = 1
elif nlp["control_type"] == ControlType.LINEAR_CONTINUOUS:
u_mod = 2
else:
raise NotImplementedError(f"Plotting {nlp['control_type']} is not implemented yet")
for key in self.variable_sizes[i]:
if self.plot_func[key][i].type == PlotType.INTEGRATED:
all_y = []
for idx, t in enumerate(self.t_integrated[i]):
y_tp = np.empty((self.variable_sizes[i][key], len(t)))
y_tp.fill(np.nan)
y_tp[:, :] = self.plot_func[key][i].function(
state[:, step_size * idx : step_size * (idx + 1)],
control[:, idx : idx + u_mod],
data_param_in_dyn,
)
all_y.append(y_tp)
for idx in range(len(self.plot_func[key][i].phase_mappings.map_idx)):
y_tp = []
for y in all_y:
y_tp.append(y[idx, :])
self.__append_to_ydata([y_tp])
else:
y = np.empty((self.variable_sizes[i][key], len(self.t[i])))
y.fill(np.nan)
y[:, :] = self.plot_func[key][i].function(state[:, ::step_size], control, data_param_in_dyn)
self.__append_to_ydata(y)
self.__update_axes()
def __update_xdata(self):
"""Update of the time in plots (independent axis)"""
self.__update_time_vector()
for plot in self.plots:
phase_idx = plot[1]
if plot[0] == PlotType.INTEGRATED:
for cmp, p in enumerate(plot[2]):
p.set_xdata(self.t_integrated[phase_idx][cmp])
ax = plot[2][-1].axes
else:
plot[2].set_xdata(self.t[phase_idx])
ax = plot[2].axes
ax.set_xlim(0, self.t[-1][-1])
if self.plots_bounds:
for plot_bounds in self.plots_bounds:
plot_bounds[0][0].set_xdata(self.t[plot_bounds[1]])
ax = plot_bounds[0][0].axes
ax.set_xlim(0, self.t[-1][-1])
intersections_time = self.find_phases_intersections()
n = len(intersections_time)
if n > 0:
for p in range(int(len(self.plots_vertical_lines) / n)):
for i, time in enumerate(intersections_time):
self.plots_vertical_lines[p * n + i].set_xdata([time, time])
def __append_to_ydata(self, data):
import asyncio
import glob
import gzip
import hashlib
import importlib.util
import logging
import os
import re
import signal
import socket
import ssl
import sys
import threading
from email.utils import formatdate
from http.cookies import SimpleCookie
from logging.handlers import RotatingFileHandler
from traceback import format_exc, print_exc
from urllib.parse import urlparse, parse_qs
import requests
import yaml
class CIDict(dict):
def __init__(self, *args, **kwargs):
if 'query' in kwargs:
del kwargs['query']
if len(args) and type(args[0]) == dict:
for key, value in args[0].items():
if len(key) > 2 and key[-2:] != '[]' and len(value) == 1:
args[0][key] = value[0]
super().__init__(*args, **kwargs)
def __contains__(self, key):
for k in self.keys():
if key.lower() == k.lower():
return True
return False
def __getitem__(self, key):
for k, v in self.items():
if key.lower() == k.lower():
return v
raise KeyError(key)
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
class MiniFormatter(logging.Formatter):
def __init__(self):
super().__init__(fmt='[%(asctime)s] %(levelname)s: %(message)s%(peer)s', datefmt='%m-%d-%Y %I:%M:%S %p')
def format(self, record):
if not hasattr(record, 'peer'):
record.peer = ''
else:
record.peer = ' [' + record.peer + ']'
return super().format(record)
class MiniFilter(logging.Handler):
def __init__(self, min, max=None):
super().__init__()
self.min = min or logging.NOTSET
self.max = max or logging.FATAL
def filter(self, record):
return self.min <= record.levelno <= self.max
def emit(self, record):
super().emit(record)
__version__ = '0.2.0a1'
log = logging.getLogger('minipyp')
log.setLevel(logging.INFO)
stream = logging.StreamHandler()
stream.setFormatter(MiniFormatter())
log.addHandler(stream)
def _default(obj: dict, key, default):
if key not in obj:
obj[key] = default
def _except(error: str, extra: dict=None, fatal: bool=False):
log.fatal(error, extra=extra) if fatal else log.error(error, extra=extra)
raise Exception(error)
def _translate(item, keep_keys: list, keep_values: list, parent: str=None):
def fix(string: str):
return string.lower().replace(' ', '_').replace('\'', '')
if type(item) == dict:
new = {}
for key, value in item.items():
new_key = _translate(key, keep_keys, keep_values)
if parent not in keep_keys:
key = new_key
if new_key not in keep_values:
value = _translate(value, keep_keys, keep_values, parent=new_key)
new[key] = value
item = new
elif type(item) in [list, tuple]:
for i in range(len(item)):
item[i] = _translate(item[i], keep_keys, keep_values)
elif type(item) == str:
item = fix(item)
return item
def _capitalize(string: str, reset: bool=False):
if reset:
string = string.lower()
new = ''
for i in range(len(string)):
if i == 0 or string[i - 1] == '-':
new += string[i].upper()
else:
new += string[i]
return new
class Handler:
def handle(self, minipyp, request):
_except('Handler for file ' + request.file + ' has no handle() method')
class PyHandler(Handler):
def handle(self, minipyp, request):
cwd = os.getcwd()
modules = sys.modules
path = sys.path
cwd_temp = os.path.dirname(request.file)
os.chdir(cwd_temp)
if cwd_temp not in sys.path:
sys.path.append(cwd_temp)
spec = importlib.util.spec_from_file_location('page', request.file)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
result = mod.render(minipyp, request)
os.chdir(cwd)
sys.modules = modules
sys.path = path
if result is None:
return b''
elif type(result) != bytes:
charset = 'latin_1'
if 'Content-Type' in request._response_headers:
if 'charset=' in request._response_headers['Content-Type']:
charset = request._response_headers['Content-Type'].split('charset=')[1]
try:
return str(result).encode(charset.replace('-', '_')) # will this cover all encodings?
except UnicodeEncodeError:
_except('Non-' + charset + ' value returned by handler, Content-Type does not contain correct encoding')
return result
class Request:
"""Information about the client's request."""
def __init__(self, minipyp, server, bare=None, full=None):
self._status = None
self._response_headers = {}
self._response_cookies = []
self.bare = bool(bare)
if full:
# Full Request object
proto = full[0].split()
if len(proto) != 3:
_except('Failed to parse initial request line', server.extra)
self.method = proto[0] #: HTTP method (e.g. POST)
self.protocol = proto[2] #: HTTP protocol version (e.g. HTTP/1.1)
if self.protocol not in ['HTTP/1.0', 'HTTP/1.1']:
_except('Invalid protocol `' + self.protocol + '`', server.extra)
self.headers = CIDict() #: Request headers
self.cookies = CIDict()
try:
for line in full[1:]:
if line == '':
break
key, value = line.split(': ', 1)
self.headers[key] = value
if key.lower() == 'cookie':
cookies = SimpleCookie()
cookies.load(value)
for key, morsel in cookies.items():
self.cookies[key] = morsel.value
except:
_except('Headers seem to be malformed', server.extra)
uri = urlparse(proto[1])
self.scheme = uri.scheme or 'http' #: Transfer scheme (e.g. https)
try:
self.host = uri.netloc or self.headers['Host'] #: Hostname requested (e.g. localhost)
except:
_except('No host was provided in headers or request line', server.extra)
self.path = uri.path #: Path requested (e.g. /path/to/file.txt)
self.uri = uri.path + (('?' + uri.query) if len(uri.query) else '') #: Path requested, including query
self.query_string = uri.query #: Querystring (e.g. A=1&B=2)
self.query = CIDict(parse_qs(uri.query, True), query=True) #: Parsed querystring (i.e. GET params)
if '' in full:
self.body = '\n'.join(full[full.index('') + 1:]) #: Request body
self.post = CIDict(parse_qs(self.body, True), query=True) #: Parsed request body (i.e. POST params)
self.site = minipyp.get_site(self.host) #: Effective site config
self.root = self.site['root'] if self.site else minipyp._config['root'] #: Document root
self.file = os.path.join(self.root, *self.path.split('/')) #: File requested
elif bare:
# Barebones Request object (for error handling)
self.protocol = 'HTTP/1.0'
self.host = None
self.file = None
if len(bare):
args = bare[0].split()
if len(args) == 3:
try:
uri = urlparse(args[1])
if uri.netloc:
self.host = uri.netloc
self.path = uri.path
else:
self.path = uri.path
except ValueError:
pass
if args[2] in ['HTTP/1.0', 'HTTP/1.1', 'HTTP/2.0']:
self.protocol = args[2]
self.headers = {}
for line in bare[1:]:
if line == '':
break
if ': ' in line:
key, value = line.split(': ', 1)
self.headers[_capitalize(key)] = value
if not self.host and 'Host' in self.headers:
self.host = self.headers['Host']
if self.host:
self.site = minipyp.get_site(self.host)
self.root = self.site['root'] if self.site else minipyp._config['root']
if self.path:
self.file = os.path.join(self.root, *self.path.split('/'))
def set_header(self, name: str, value: str):
"""
Set a header in the response, capitalized Like-This.
:param name: header name, e.g. "X-My-Header"
:param value: header value, e.g. "My Value"
"""
self._response_headers[_capitalize(name)] = value
def set_status(self, status: str):
"""
Set the response status.
:param status: full status string (e.g. "404 Not Found")
"""
self._status = status
def set_cookie(self, name: str, value, **options):
"""
Set a cookie.
:param name: the name of the cookie
:param value: the cookie's value
:param options: additional options, e.g. expires='Jan 1...', path='/'
"""
flags = ''
for k, v in options.items():
flags += '; ' + k + '=' + v
self._response_cookies.append(name + '=' + value + flags)
def delete_cookie(self, name: str, **options):
"""
Delete a cookie. Supply the same domain, path, etc. used when creating the cookie.
:param name: the name of the cookie
:param options: the cookie's options
"""
if 'expires' in options:
raise Exception('the expires option should not be supplied when deleting a cookie')
self.set_cookie(name, '', expires='Sat, 17 Mar 2001 06:00:00 GMT', **options)
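# Illustrative sketch (not in the original source): how a page script handled by
# PyHandler might use the response helpers above. The render() signature follows
# PyHandler.handle(); header and cookie values are made up.
#
# def render(minipyp, request):
#     request.set_status('200 OK')
#     request.set_header('content-type', 'text/plain; charset=utf-8')  # stored capitalized as Content-Type
#     request.set_cookie('session', 'abc123', path='/', expires='Wed, 01 Jan 2031 00:00:00 GMT')
#     request.delete_cookie('old_session', path='/')
#     return 'Hello from minipyp'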
class Server(asyncio.Protocol):
def __init__(self, minipyp):
self.minipyp = minipyp
self._loop = asyncio.get_event_loop()
self._transport = None
self._keepalive = None
self._timeout = minipyp._config['timeout']
self._timing = None
self.peer = None
self.extra = {
'peer': 'unknown peer'
}
def connection_made(self, transport):
self.peer = transport.get_extra_info('peername')
if type(self.peer) in (list, tuple):
self.extra['peer'] = self.peer[0] + ':' + str(self.peer[1])
self._transport = transport
self._keepalive = True
if self._timeout:
self._timing = self._loop.call_later(self._timeout, self.on_timeout)
log.debug('Connected', extra=self.extra)
def connection_lost(self, e):
if e:
log.warning('Connection lost: ' + str(e), extra=self.extra)
def data_received(self, data):
lines = data.decode('utf-8').split('\r\n')
if len(lines):
try:
request = Request(self.minipyp, self, full=lines)
log.info(request.method + ' ' + request.path, extra=self.extra)
request.site = self.minipyp.get_site(request.host)
path_opts = self.minipyp.get_path(request.path, request.site)
proxy = None
if path_opts['proxy']:
proxy_p = urlparse(path_opts['proxy'])
proxy = proxy_p.scheme + '://' + proxy_p.netloc
request.uri = proxy_p.path + request.uri
if proxy:
log.info('Forwarding client to: ' + proxy + request.uri, extra=self.extra)
if 'X-Forwarded-Host' not in request.headers:
request.headers['X-Forwarded-Host'] = request.host
try:
r = requests.request(request.method, proxy + request.uri, stream=True,
headers=request.headers, data=request.body)
request.set_status(str(r.status_code) + ' ' + r.reason)
request._response_headers = r.headers
if r.headers.get('Transfer-Encoding', '') == 'chunked':
i = 0
for chunk in r.iter_content(chunk_size=None):
chunk = hex(len(chunk))[2:].encode('latin_1') + b'\r\n' + chunk + b'\r\n'
if i == 0:
self._respond(request, chunk)
else:
self._write(chunk)
i += 1
self._write(b'0\r\n\r\n')
else:
self._respond(request, r.content)
r.close()
except:
print_exc()
self._give_error(request, 502, traceback=format_exc())
else:
request.root = request.site['root'] if request.site else self.minipyp._config['root']
if request.protocol == 'HTTP/1.1':
self._keepalive = request.headers.get('Connection', 'close') != 'close'
else:
self._keepalive = request.headers.get('Connection', '') == 'Keep-Alive'
match = re.match(r'timeout=(\d+)', request.headers.get('Keep-Alive', ''))
if match:
timeout = int(match.group(1))
if timeout < self._timeout:
self._timeout = timeout
path = list(filter(None, request.path.split('?')[0].split('/')))
path = [''] + path
file = None
full_ospath = os.path.join(request.root, *path)
options = self.minipyp.get_directory(full_ospath)
for key, value in options['headers'].items():
request.set_header(key, value)
if options['public']:
for i in range(len(path)):
ospath = full_ospath if i == 0 else os.path.join(request.root, *path)
viewing_dir = i == 0 and os.path.isdir(ospath)
if viewing_dir:
matches = glob.glob(os.path.join(ospath, 'index.*'))
if len(matches):
file = matches[0]
break
if os.path.isfile(ospath):
file = ospath
break
if not os.path.isdir(ospath):
matches = glob.glob(ospath +
# Written by Dr <NAME>, Marda Science LLC
# for the USGS Coastal Change Hazards Program
#
# MIT License
#
# Copyright (c) 2021, Marda Science LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
# keras functions for early stopping and model weights saving
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
SEED = 42
np.random.seed(SEED)
AUTO = tf.data.experimental.AUTOTUNE # used in tf.data.Dataset API
tf.random.set_seed(SEED)
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
###############################################################
### MODEL ARCHITECTURES
###############################################################
# -----------------------------------
def simple_resunet(
input_shape,
kernel=(2, 2),
num_classes=1,
activation="relu",
use_batch_norm=True,
dropout=0.1,
dropout_change_per_layer=0.0,
dropout_type="standard",
use_dropout_on_upsampling=False,
filters=8,
num_layers=4,
strides=(1, 1),
):
"""
Customisable UNet architecture (Ronneberger et al. 2015 https://arxiv.org/abs/1505.04597)
input_shape: shape (x, y, num_channels)
num_classes (int): 1 for binary segmentation
activation (str): A keras.activations.Activation to use. ReLu by default.
use_batch_norm (bool): Whether to use Batch Normalisation across the channel axis between convolutions
dropout (float , 0. and 1.): dropout after the first convolutional block. 0. = no dropout
dropout_change_per_layer (float , 0. and 1.): Factor to add to the Dropout after each convolutional block
dropout_type (one of "spatial" or "standard"): Spatial is recommended by https://arxiv.org/pdf/1411.4280.pdf
use_dropout_on_upsampling (bool): Whether to use dropout in the decoder part of the network
filters (int): Convolutional filters in the initial convolutional block. Will be doubled every block
num_layers (int): Number of total layers in the encoder not including the bottleneck layer
"""
# Build U-Net model
inputs = tf.keras.layers.Input(input_shape)
x = inputs
# x = bottleneck_block(inputs, filters)
down_layers = []
for l in range(num_layers):
x = res_conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
down_layers.append(x)
x = tf.keras.layers.MaxPooling2D(kernel)(x)
dropout += dropout_change_per_layer
filters = filters * 2 # double the number of filters with each layer
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
if not use_dropout_on_upsampling:
dropout = 0.0
dropout_change_per_layer = 0.0
for conv in reversed(down_layers):
filters //= 2 # decreasing number of filters with each layer
dropout -= dropout_change_per_layer
# x = upsample(filters, kernel, strides=(2,2), padding="same")(x)#(2, 2)
x = tf.keras.layers.UpSampling2D(kernel)(x)
x = tf.keras.layers.concatenate([x, conv])
x = res_conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides,
) # (1,1))
# outputs = tf.keras.layers.Conv2D(num_classes, (1, 1), activation=output_activation)(x)
# ## classify
if num_classes == 1:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="sigmoid"
)(
x
) #
else:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="softmax"
)(
x
) # (1, 1)
model = tf.keras.models.Model(inputs=[inputs], outputs=[outputs])
return model
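# Illustrative sketch (not in the original file): building and compiling a small
# residual U-Net with simple_resunet(). Input size, class count, optimizer and
# loss are hypothetical; res_conv2d_block/conv2d_block are assumed to be defined
# elsewhere in this module.
#
# model = simple_resunet(
#     input_shape=(256, 256, 3),
#     num_classes=4,          # multiclass segmentation -> softmax output
#     filters=8,
#     num_layers=4,
# )
# model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])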
# -----------------------------------
def simple_unet(
input_shape,
kernel=(2, 2),
num_classes=1,
activation="relu",
use_batch_norm=True,
dropout=0.1,
dropout_change_per_layer=0.0,
dropout_type="standard",
use_dropout_on_upsampling=False,
filters=8,
num_layers=4,
strides=(1, 1),
):
"""
Customisable UNet architecture (Ronneberger et al. 2015 https://arxiv.org/abs/1505.04597)
input_shape: shape (x, y, num_channels)
num_classes (int): 1 for binary segmentation
activation (str): A keras.activations.Activation to use. ReLu by default.
use_batch_norm (bool): Whether to use Batch Normalisation across the channel axis between convolutions
dropout (float , 0. and 1.): dropout after the first convolutional block. 0. = no dropout
dropout_change_per_layer (float , 0. and 1.): Factor to add to the Dropout after each convolutional block
dropout_type (one of "spatial" or "standard"): Spatial is recommended by https://arxiv.org/pdf/1411.4280.pdf
use_dropout_on_upsampling (bool): Whether to use dropout in the decoder part of the network
filters (int): Convolutional filters in the initial convolutional block. Will be doubled every block
num_layers (int): Number of total layers in the encoder not including the bottleneck layer
"""
# Build U-Net model
inputs = tf.keras.layers.Input(input_shape)
x = inputs
down_layers = []
for l in range(num_layers):
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
down_layers.append(x)
# if use_pooling:
x = tf.keras.layers.MaxPooling2D(kernel)(x)
dropout += dropout_change_per_layer
filters = filters * 2 # double the number of filters with each layer
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
if not use_dropout_on_upsampling:
dropout = 0.0
dropout_change_per_layer = 0.0
for conv in reversed(down_layers):
filters //= 2 # decreasing number of filters with each layer
dropout -= dropout_change_per_layer
# x = upsample(filters, kernel, strides=(2,2), padding="same")(x)#(2, 2)
x = tf.keras.layers.UpSampling2D(kernel)(x)
x = tf.keras.layers.concatenate([x, conv])
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
)
# outputs = tf.keras.layers.Conv2D(num_classes, (1, 1), activation=output_activation)(x)
# ## classify
if num_classes == 1:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="sigmoid"
)(x)
else:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="softmax"
)(x)
model = tf.keras.models.Model(inputs=[inputs], outputs=[outputs])
return model
##========================================================================
# -----------------------------------
def simple_satunet(
input_shape,
kernel=(2, 2),
num_classes=1,
activation="relu",
use_batch_norm=True,
dropout=0.1,
dropout_change_per_layer=0.0,
dropout_type="standard",
use_dropout_on_upsampling=False,
filters=8,
num_layers=4,
strides=(1, 1),
):
"""
Customisable UNet architecture (Ronneberger et al. 2015 https://arxiv.org/abs/1505.04597)
input_shape: shape (x, y, num_channels)
num_classes (int): 1 for binary segmentation
activation (str): A keras.activations.Activation to use. ReLu by default.
use_batch_norm (bool): Whether to use Batch Normalisation across the channel axis between convolutions
dropout (float , 0. and 1.): dropout after the first convolutional block. 0. = no dropout
dropout_change_per_layer (float , 0. and 1.): Factor to add to the Dropout after each convolutional block
dropout_type (one of "spatial" or "standard"): Spatial is recommended by https://arxiv.org/pdf/1411.4280.pdf
use_dropout_on_upsampling (bool): Whether to use dropout in the decoder part of the network
filters (int): Convolutional filters in the initial convolutional block. Will be doubled every block
num_layers (int): Number of total layers in the encoder not including the bottleneck layer
"""
upconv_filters = int(1.5 * filters)
# Build U-Net model
inputs = tf.keras.layers.Input(input_shape)
x = inputs
down_layers = []
for l in range(num_layers):
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
down_layers.append(x)
x = tf.keras.layers.MaxPooling2D(kernel)(x)
dropout += dropout_change_per_layer
# filters = filters * 2 # double the number of filters with each layer
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
if not use_dropout_on_upsampling:
dropout = 0.0
dropout_change_per_layer = 0.0
for conv in reversed(down_layers):
filters //= 2 # decreasing number of filters with each layer
dropout -= dropout_change_per_layer
# x = upsample(filters, kernel, strides=(2,2), padding="same")(x)#(2, 2)
x = tf.keras.layers.UpSampling2D(kernel)(x)
x = tf.keras.layers.concatenate([x, conv])
x = conv2d_block(
inputs=x,
filters=upconv_filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
)
# outputs = tf.keras.layers.Conv2D(num_classes, (1, 1), activation=output_activation)(x)
# ## classify
if num_classes == 1:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="sigmoid"
)(x)
else:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="softmax"
)(x)
model = tf.keras.models.Model(inputs=[inputs], outputs=[outputs])
return model
##========================================================================
# -----------------------------------
def custom_resunet(
sz,
f,
nclasses=1,
kernel_size=(7, 7),
strides=2,
dropout=0.1,
dropout_change_per_layer=0.0,
dropout_type="standard",
use_dropout_on_upsampling=False,
):
"""
res_unet(sz, f, nclasses=1)
This function creates a custom residual U-Net model for image segmentation
INPUTS:
* `sz`: [tuple] size of input image
* `f`: [int] number of filters in the convolutional block
* flag: [string] if 'binary', the model will expect 2D masks and uses sigmoid. If 'multiclass', the model will expect 3D masks and uses softmax
* nclasses [int]: number of classes
dropout (float , 0. and 1.): dropout after the first convolutional block. 0. = no dropout
dropout_change_per_layer (float , 0. and 1.): Factor to add to the Dropout after each convolutional block
dropout_type (one of "spatial" or "standard"): Spatial is recommended by https://arxiv.org/pdf/1411.4280.pdf
use_dropout_on_upsampling (bool): Whether to use dropout in the decoder part of the network
filters (int): Convolutional filters in the initial convolutional block. Will be doubled every block
MOBILE_PREFIX = [
{'prefix': '1', 'en': 'USA', 'cn': '美国'},
{'prefix': '1', 'en': 'Puerto Rico', 'cn': '波多黎各'},
{'prefix': '1', 'en': 'Canada', 'cn': '加拿大'},
{'prefix': '7', 'en': 'Russia', 'cn': '俄罗斯'},
{'prefix': '7', 'en': 'Kazakhstan', 'cn': '哈萨克斯坦'},
{'prefix': '20', 'en': 'Egypt', 'cn': '埃及'},
{'prefix': '27', 'en': 'South Africa', 'cn': '南非'},
{'prefix': '30', 'en': 'Greece', 'cn': '希腊'},
{'prefix': '31', 'en': 'Netherlands', 'cn': '荷兰'},
{'prefix': '32', 'en': 'Belgium', 'cn': '比利时'},
{'prefix': '33', 'en': 'France', 'cn': '法国'},
{'prefix': '34', 'en': 'Spain', 'cn': '西班牙'},
{'prefix': '36', 'en': 'Hungary', 'cn': '匈牙利'},
{'prefix': '40', 'en': 'Romania', 'cn': '罗马尼亚'},
{'prefix': '41', 'en': 'Switzerland', 'cn': '瑞士'},
{'prefix': '43', 'en': 'Austria', 'cn': '奥地利'},
{'prefix': '44', 'en': 'United Kingdom', 'cn': '英国'},
{'prefix': '44', 'en': 'Jersey', 'cn': '泽西岛'},
{'prefix': '44', 'en': 'Isle of Man', 'cn': '马恩岛'},
{'prefix': '44', 'en': 'Guernsey', 'cn': '根西'},
{'prefix': '45', 'en': 'Denmark', 'cn': '丹麦'},
{'prefix': '46', 'en': 'Sweden', 'cn': '瑞典'},
{'prefix': '47', 'en': 'Norway', 'cn': '挪威'},
{'prefix': '48', 'en': 'Poland', 'cn': '波兰'},
{'prefix': '51', 'en': 'Peru', 'cn': '秘鲁'},
{'prefix': '52', 'en': 'Mexico', 'cn': '墨西哥'},
{'prefix': '53', 'en': 'Cuba', 'cn': '古巴'},
{'prefix': '54', 'en': 'Argentina', 'cn': '阿根廷'},
{'prefix': '55', 'en': 'Brazil', 'cn': '巴西'},
{'prefix': '56', 'en': 'Chile', 'cn': '智利'},
{'prefix': '57', 'en': 'Colombia', 'cn': '哥伦比亚'},
{'prefix': '58', 'en': 'Venezuela', 'cn': '委内瑞拉'},
{'prefix': '60', 'en': 'Malaysia', 'cn': '马来西亚'},
{'prefix': '61', 'en': 'Australia', 'cn': '澳大利亚'},
{'prefix': '62', 'en': 'Indonesia', 'cn': '印度尼西亚'},
{'prefix': '63', 'en': 'Philippines', 'cn': '菲律宾'},
{'prefix': '64', 'en': 'New Zealand', 'cn': '新西兰'},
{'prefix': '65', 'en': 'Singapore', 'cn': '新加坡'},
{'prefix': '66', 'en': 'Thailand', 'cn': '泰国'},
{'prefix': '81', 'en': 'Japan', 'cn': '日本'},
{'prefix': '82', 'en': 'Korea', 'cn': '韩国'},
{'prefix': '84', 'en': 'Vietnam', 'cn': '越南'},
{'prefix': '86', 'en': 'China', 'cn': '中国'},
{'prefix': '90', 'en': 'Turkey', 'cn': '土耳其'},
{'prefix': '91', 'en': 'India', 'cn': '印度'},
{'prefix': '92', 'en': 'Pakistan', 'cn': '巴基斯坦'},
{'prefix': '39', 'en': 'Italy', 'cn': '意大利'},
{'prefix': '93', 'en': 'Afghanistan', 'cn': '阿富汗'},
{'prefix': '94', 'en': 'Sri Lanka', 'cn': '斯里兰卡'},
{'prefix': '49', 'en': 'Germany', 'cn': '德国'},
{'prefix': '95', 'en': 'Myanmar', 'cn': '缅甸'},
{'prefix': '98', 'en': 'Iran', 'cn': '伊朗'},
{'prefix': '212', 'en': 'Morocco', 'cn': '摩洛哥'},
{'prefix': '213', 'en': 'Algeria', 'cn': '阿尔及利亚'},
{'prefix': '216', 'en': 'Tunisia', 'cn': '突尼斯'},
{'prefix': '218', 'en': 'Libya', 'cn': '利比亚'},
{'prefix': '220', 'en': 'Gambia', 'cn': '冈比亚'},
{'prefix': '221', 'en': 'Senegal', 'cn': '塞内加尔'},
{'prefix': '222', 'en': 'Mauritania', 'cn': '毛里塔尼亚'},
{'prefix': '223', 'en': 'Mali', 'cn': '马里'},
{'prefix': '224', 'en': 'Guinea', 'cn': '几内亚'},
{'prefix': '225', 'en': "Cote d'Ivoire", 'cn': '科特迪沃'},
{'prefix': '226', 'en': 'Burkina Faso', 'cn': '布基纳法索'},
{'prefix': '227', 'en': 'Niger', 'cn': '尼日尔'},
{'prefix': '228', 'en': 'Togo', 'cn': '多哥'},
{'prefix': '229', 'en': 'Benin', 'cn': '贝宁'},
{'prefix': '230', 'en': 'Mauritius', 'cn': '毛里求斯'},
{'prefix': '231', 'en': 'Liberia', 'cn': '利比里亚'},
{'prefix': '232', 'en': 'Sierra Leone', 'cn': '塞拉利昂'},
{'prefix': '233', 'en': 'Ghana', 'cn': '加纳'},
{'prefix': '234', 'en': 'Nigeria', 'cn': '尼日利亚'},
{'prefix': '235', 'en': 'Chad', 'cn': '乍得'},
{'prefix': '236', 'en': 'Central African Republic', 'cn': '中非共和国'},
{'prefix': '237', 'en': 'Cameroon', 'cn': '喀麦隆'},
{'prefix': '238', 'en': 'Cape Verde', 'cn': '佛得角'},
{'prefix': '239', 'en': 'Sao Tome and Principe', 'cn': '圣多美和普林西比'},
{'prefix': '240', 'en': 'Equatorial Guinea', 'cn': '赤道几内亚'},
{'prefix': '241', 'en': 'Gabon', 'cn': '加蓬'},
{'prefix': '242', 'en': 'Republic of the Congo', 'cn': '刚果共和国'},
{'prefix': '243', 'en': 'Democratic Republic of the Congo', 'cn': '刚果民主共和国'},
{'prefix': '244', 'en': 'Angola', 'cn': '安哥拉'},
{'prefix': '247', 'en': 'Ascension', 'cn': '阿森松岛'},
{'prefix': '248', 'en': 'Seychelles', 'cn': '塞舌尔'},
{'prefix': '249', 'en': 'Sudan', 'cn': '苏丹'},
{'prefix': '250', 'en': 'Rwanda', 'cn': '卢旺达'},
{'prefix': '251', 'en': 'Ethiopia', 'cn': '埃塞俄比亚'},
{'prefix': '253', 'en': 'Djibouti', 'cn': '吉布提'},
{'prefix': '254', 'en': 'Kenya', 'cn': '肯尼亚'},
{'prefix': '255', 'en': 'Tanzania', 'cn': '坦桑尼亚'},
{'prefix': '256', 'en': 'Uganda', 'cn': '乌干达'},
{'prefix': '257', 'en': 'Burundi', 'cn': '布隆迪'},
{'prefix': '258', 'en': 'Mozambique', 'cn': '莫桑比克'},
{'prefix': '260', 'en': 'Zambia', 'cn': '赞比亚'},
{'prefix': '261', 'en': 'Madagascar', 'cn': '马达加斯加'},
{'prefix': '262', 'en': 'Reunion', 'cn': '留尼汪'},
{'prefix': '262', 'en': 'Mayotte', 'cn': '马约特'},
{'prefix': '263', 'en': 'Zimbabwe', 'cn': '津巴布韦'},
{'prefix': '264', 'en': 'Namibia', 'cn': '纳米比亚'},
{'prefix': '265', 'en': 'Malawi', 'cn': '马拉维'},
{'prefix': '266', 'en': 'Lesotho', 'cn': '莱索托'},
{'prefix': '267', 'en': 'Botswana', 'cn': '博茨瓦纳'},
{'prefix': '268', 'en': 'Swaziland', 'cn': '斯威士兰'},
{'prefix': '269', 'en': 'Comoros', 'cn': '科摩罗'},
{'prefix': '297', 'en': 'Aruba', 'cn': '阿鲁巴'},
{'prefix': '298', 'en': 'Faroe Islands', 'cn': '法罗群岛'},
{'prefix': '299', 'en': 'Greenland', 'cn': '格陵兰'},
{'prefix': '350', 'en': 'Gibraltar', 'cn': '直布罗陀'},
{'prefix': '351', 'en': 'Portugal', 'cn': '葡萄牙'},
{'prefix': '352', 'en': 'Luxembourg', 'cn': '卢森堡'},
{'prefix': '353', 'en': 'Ireland', 'cn': '爱尔兰'},
{'prefix': '354', 'en': 'Iceland', 'cn': '冰岛'},
{'prefix': '355', 'en': 'Albania', 'cn': '阿尔巴尼亚'},
{'prefix': '356', 'en': 'Malta', 'cn': '马耳他'},
{'prefix': '357', 'en': 'Cyprus', 'cn': '塞浦路斯'},
{'prefix': '358', 'en': 'Finland', 'cn': '芬兰'},
{'prefix': '359', 'en': 'Bulgaria', 'cn': '保加利亚'},
{'prefix': '370', 'en': 'Lithuania', 'cn': '立陶宛'},
{'prefix': '371', 'en': 'Latvia', 'cn': '拉脱维亚'},
{'prefix': '372', 'en': 'Estonia', 'cn': '爱沙尼亚'},
{'prefix': '373', 'en': 'Moldova', 'cn': '摩尔多瓦'},
{'prefix': '374', 'en': 'Armenia', 'cn': '亚美尼亚'},
{'prefix': '375', 'en': 'Belarus', 'cn': '白俄罗斯'},
{'prefix': '376', 'en': 'Andorra', 'cn': '安道尔'},
{'prefix': '377', 'en': 'Monaco', 'cn': '摩纳哥'},
{'prefix': '378', 'en': 'San Marino', 'cn': '圣马力诺'},
{'prefix': '380', 'en': 'Ukraine', 'cn': '乌克兰'},
{'prefix': '381', 'en': 'Serbia', 'cn': '塞尔维亚'},
{'prefix': '382', 'en': 'Montenegro', 'cn': '黑山'},
{'prefix': '383', 'en': 'Kosovo', 'cn': '科索沃'},
{'prefix': '385', 'en': 'Croatia', 'cn': '克罗地亚'},
{'prefix': '386', 'en': 'Slovenia', 'cn': '斯洛文尼亚'},
{'prefix': '387', 'en': 'Bosnia and Herzegovina', 'cn': '波斯尼亚和黑塞哥维那'},
{'prefix': '389', 'en': 'Macedonia', 'cn': '马其顿'},
{'prefix': '420', 'en': 'Czech Republic', 'cn': '捷克共和国'},
{'prefix': '421', 'en': 'Slovakia', 'cn': '斯洛伐克'},
{'prefix': '423', 'en': 'Liechtenstein', 'cn': '列支敦士登'},
{'prefix': '501', 'en': 'Belize', 'cn': '伯利兹'},
{'prefix': '502', 'en': 'Guatemala', 'cn': '危地马拉'},
{'prefix': '503', 'en': 'El Salvador', 'cn': '萨尔瓦多'},
{'prefix': '504', 'en': 'Honduras', 'cn': '洪都拉斯'},
{'prefix': '505', 'en': 'Nicaragua', 'cn': '尼加拉瓜'},
{'prefix': '506', 'en': 'Costa Rica', 'cn': '哥斯达黎加'},
{'prefix': '507', 'en': 'Panama', 'cn': '巴拿马'},
{'prefix': '509', 'en': 'Haiti', 'cn': '海地'},
{'prefix': '590', 'en': 'Guadeloupe', 'cn': '瓜德罗普'},
{'prefix': '591', 'en': 'Bolivia', 'cn': '玻利维亚'},
{'prefix': '592', 'en': 'Guyana', 'cn': '圭亚那'},
{'prefix': '593', 'en': 'Ecuador', 'cn': '厄瓜多尔'},
{'prefix': '594', 'en': 'French Guiana', 'cn': '法属圭亚那'},
{'prefix': '595', 'en': 'Paraguay', 'cn': '巴拉圭'},
{'prefix': '596', 'en': 'Martinique', 'cn': '马提尼克'},
{'prefix': '597', 'en': 'Suriname', 'cn': '苏里南'},
{'prefix': '598', 'en': 'Uruguay', 'cn': '乌拉圭'},
{'prefix': '599', 'en': 'Netherlands Antilles', 'cn': '荷属安的列斯'},
{'prefix': '670', 'en': 'Timor Leste', 'cn': '东帝汶'},
{'prefix': '673', 'en': 'Brunei', 'cn': '文莱'},
{'prefix': '675', 'en': 'Papua New Guinea', 'cn': '巴布亚新几内亚'},
{'prefix': '676', 'en': 'Tonga', 'cn': '汤加'},
{'prefix': '678', 'en': 'Vanuatu', 'cn': '瓦努阿图'},
{'prefix': '679', 'en': 'Fiji', 'cn': '斐济'},
{'prefix': '682', 'en': 'Cook Islands', 'cn': '库克群岛'},
{'prefix': '684', 'en': 'Samoa Eastern', 'cn': '萨摩亚东部'},
{'prefix': '685', 'en': 'Samoa Western', 'cn': '萨摩亚西部'},
{'prefix': '687', 'en': 'New Caledonia', 'cn': '新喀里多尼亚'},
{'prefix': '689', 'en': 'French Polynesia', 'cn': '法属波利尼西亚'},
{'prefix': '852', 'en': 'Hong Kong', 'cn': '香港'},
{'prefix': '853', 'en': 'Macao', 'cn': '澳门'},
{'prefix': '855', 'en': 'Cambodia', 'cn': '柬埔寨'},
{'prefix': '856', 'en': 'Laos', 'cn': '老挝'},
{'prefix': '880', 'en': 'Bangladesh', 'cn': '孟加拉国'},
{'prefix': '886', 'en': 'Taiwan', 'cn': '台湾'},
{'prefix': '960', 'en': 'Maldives', 'cn': '马尔代夫'},
{'prefix': '961', 'en': 'Lebanon', 'cn': '黎巴嫩'},
{'prefix': '962', 'en': 'Jordan', 'cn': '约旦'},
{'prefix': '963', 'en': 'Syria', 'cn': '叙利亚'},
{'prefix': '964', 'en': 'Iraq', 'cn': '伊拉克'},
{'prefix': '965', 'en': 'Kuwait', 'cn': '科威特'},
{'prefix': '966', 'en': 'Saudi Arabia', 'cn': '沙特阿拉伯'},
{'prefix': '967', 'en': 'Yemen', 'cn': '也门'},
{'prefix': '968', 'en': 'Oman', 'cn': '阿曼'},
{'prefix': '970', 'en': 'Palestinian', 'cn': '巴勒斯坦'},
{'prefix': '971', 'en': 'United Arab Emirates', 'cn': '阿拉伯联合酋长国'},
{'prefix': '972', 'en': 'Israel', 'cn': '以色列'},
{'prefix': '973', 'en': 'Bahrain', 'cn': '巴林'},
{'prefix': '974', 'en': 'Qatar', 'cn': '卡塔尔'},
{'prefix': '975', 'en': 'Bhutan', 'cn': '不丹'},
{'prefix': '976', 'en': 'Mongolia', 'cn': '蒙古'},
{'prefix': '977', 'en': 'Nepal', 'cn': '尼泊尔'},
{'prefix': '992', 'en': 'Tajikistan', 'cn': '塔吉克斯坦'},
{'prefix': '993', 'en': 'Turkmenistan', 'cn': '土库曼斯坦'},
{'prefix': '994', 'en': 'Azerbaijan', 'cn': '阿塞拜疆'},
{'prefix': '995', 'en': 'Georgia', 'cn': '格鲁吉亚'},
{'prefix': '996', 'en': 'Kyrgyzstan', 'cn': '吉尔吉斯斯坦'},
{'prefix': '998', 'en': 'Uzbekistan', 'cn': '乌兹别克斯坦'},
{'prefix': '1242', 'en': 'Bahamas', 'cn': '巴哈马'},
{'prefix': '1246', 'en': 'Barbados', 'cn': '巴巴多斯'},
{'prefix': '1264', 'en': 'Anguilla', 'cn': '安圭拉'},
{'prefix': '1268', 'en': 'Antigua and Barbuda', 'cn': '安提瓜和巴布达'},
{'prefix': '1340', 'en': 'Virgin Islands', 'cn': '维尔京群岛'},
{'prefix': '1345', 'en': 'Cayman Islands', 'cn': '开曼群岛'},
{'prefix': '1441', 'en': 'Bermuda', 'cn': '百慕大'},
{'prefix': '1473', 'en': 'Grenada', 'cn': '格林纳达'},
{'prefix': '1649', 'en': 'Turks and Caicos Islands', 'cn': '特克斯和凯科斯群岛'},
{'prefix': '1664', 'en': 'Montserrat', 'cn': '蒙特塞拉特'},
{'prefix': '1671', 'en': 'Guam', 'cn': '关岛'},
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# The implementation largely follows the design in PyTorch's `torch.distributions`
#
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (<NAME>)
# Copyright (c) 2011-2012 NEC Laboratories America (<NAME>)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from jax import device_put, lax
from jax.dtypes import canonicalize_dtype
from jax.nn import softmax, softplus
import jax.numpy as jnp
import jax.random as random
from jax.scipy.special import expit, gammaln, logsumexp, xlog1py, xlogy
from numpyro.distributions import constraints
from numpyro.distributions.distribution import Distribution
from numpyro.distributions.util import (
binary_cross_entropy_with_logits,
binomial,
categorical,
clamp_probs,
get_dtype,
lazy_property,
multinomial,
promote_shapes,
sum_rightmost,
validate_sample
)
from numpyro.util import not_jax_tracer
def _to_probs_bernoulli(logits):
return 1 / (1 + jnp.exp(-logits))
def _to_logits_bernoulli(probs):
ps_clamped = clamp_probs(probs)
return jnp.log(ps_clamped) - jnp.log1p(-ps_clamped)
def _to_probs_multinom(logits):
return softmax(logits, axis=-1)
def _to_logits_multinom(probs):
minval = jnp.finfo(get_dtype(probs)).min
return jnp.clip(jnp.log(probs), a_min=minval)
class BernoulliProbs(Distribution):
arg_constraints = {'probs': constraints.unit_interval}
support = constraints.boolean
has_enumerate_support = True
is_discrete = True
def __init__(self, probs, validate_args=None):
self.probs = probs
super(BernoulliProbs, self).__init__(batch_shape=jnp.shape(self.probs), validate_args=validate_args)
def sample(self, key, sample_shape=()):
return random.bernoulli(key, self.probs, shape=sample_shape + self.batch_shape)
@validate_sample
def log_prob(self, value):
return xlogy(value, self.probs) + xlog1py(1 - value, -self.probs)
@property
def mean(self):
return self.probs
@property
def variance(self):
return self.probs * (1 - self.probs)
def enumerate_support(self, expand=True):
values = jnp.arange(2).reshape((-1,) + (1,) * len(self.batch_shape))
if expand:
values = jnp.broadcast_to(values, values.shape[:1] + self.batch_shape)
return values
class BernoulliLogits(Distribution):
arg_constraints = {'logits': constraints.real}
support = constraints.boolean
has_enumerate_support = True
is_discrete = True
def __init__(self, logits=None, validate_args=None):
self.logits = logits
super(BernoulliLogits, self).__init__(batch_shape=jnp.shape(self.logits), validate_args=validate_args)
def sample(self, key, sample_shape=()):
return random.bernoulli(key, self.probs, shape=sample_shape + self.batch_shape)
@validate_sample
def log_prob(self, value):
return -binary_cross_entropy_with_logits(self.logits, value)
@lazy_property
def probs(self):
return _to_probs_bernoulli(self.logits)
@property
def mean(self):
return self.probs
@property
def variance(self):
return self.probs * (1 - self.probs)
def enumerate_support(self, expand=True):
values = jnp.arange(2).reshape((-1,) + (1,) * len(self.batch_shape))
if expand:
values = jnp.broadcast_to(values, values.shape[:1] + self.batch_shape)
return values
def Bernoulli(probs=None, logits=None, validate_args=None):
if probs is not None:
return BernoulliProbs(probs, validate_args=validate_args)
elif logits is not None:
return BernoulliLogits(logits, validate_args=validate_args)
else:
raise ValueError('One of `probs` or `logits` must be specified.')
class BinomialProbs(Distribution):
arg_constraints = {'probs': constraints.unit_interval,
'total_count': constraints.nonnegative_integer}
has_enumerate_support = True
is_discrete = True
def __init__(self, probs, total_count=1, validate_args=None):
self.probs, self.total_count = promote_shapes(probs, total_count)
batch_shape = lax.broadcast_shapes(jnp.shape(probs), jnp.shape(total_count))
super(BinomialProbs, self).__init__(batch_shape=batch_shape, validate_args=validate_args)
def sample(self, key, sample_shape=()):
return binomial(key, self.probs, n=self.total_count, shape=sample_shape + self.batch_shape)
@validate_sample
def log_prob(self, value):
log_factorial_n = gammaln(self.total_count + 1)
log_factorial_k = gammaln(value + 1)
log_factorial_nmk = gammaln(self.total_count - value + 1)
return (log_factorial_n - log_factorial_k - log_factorial_nmk +
xlogy(value, self.probs) + xlog1py(self.total_count - value, -self.probs))
@property
def mean(self):
return jnp.broadcast_to(self.total_count * self.probs, self.batch_shape)
@property
def variance(self):
return jnp.broadcast_to(self.total_count * self.probs * (1 - self.probs), self.batch_shape)
@property
def support(self):
return constraints.integer_interval(0, self.total_count)
def enumerate_support(self, expand=True):
if not_jax_tracer(self.total_count):
total_count = np.amax(self.total_count)
# NB: the error can't be raised if inhomogeneous issue happens when tracing
if np.amin(self.total_count) != total_count:
raise NotImplementedError("Inhomogeneous total count not supported"
" by `enumerate_support`.")
else:
total_count = jnp.amax(self.total_count)
values = jnp.arange(total_count + 1).reshape((-1,) + (1,) * len(self.batch_shape))
if expand:
values = jnp.broadcast_to(values, values.shape[:1] + self.batch_shape)
return values
class BinomialLogits(Distribution):
arg_constraints = {'logits': constraints.real,
'total_count': constraints.nonnegative_integer}
has_enumerate_support = True
is_discrete = True
enumerate_support = BinomialProbs.enumerate_support
def __init__(self, logits, total_count=1, validate_args=None):
self.logits, self.total_count = promote_shapes(logits, total_count)
batch_shape = lax.broadcast_shapes(jnp.shape(logits), jnp.shape(total_count))
super(BinomialLogits, self).__init__(batch_shape=batch_shape, validate_args=validate_args)
def sample(self, key, sample_shape=()):
return binomial(key, self.probs, n=self.total_count, shape=sample_shape + self.batch_shape)
@validate_sample
def log_prob(self, value):
log_factorial_n = gammaln(self.total_count + 1)
log_factorial_k = gammaln(value + 1)
log_factorial_nmk = gammaln(self.total_count - value + 1)
normalize_term = (self.total_count * jnp.clip(self.logits, 0) +
xlog1py(self.total_count, jnp.exp(-jnp.abs(self.logits))) -
log_factorial_n)
return value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
@lazy_property
def probs(self):
return _to_probs_bernoulli(self.logits)
@property
def mean(self):
return jnp.broadcast_to(self.total_count * self.probs, self.batch_shape)
@property
def variance(self):
return jnp.broadcast_to(self.total_count * self.probs * (1 - self.probs), self.batch_shape)
@property
def support(self):
return constraints.integer_interval(0, self.total_count)
def Binomial(total_count=1, probs=None, logits=None, validate_args=None):
if probs is not None:
return BinomialProbs(probs, total_count, validate_args=validate_args)
elif logits is not None:
return BinomialLogits(logits, total_count, validate_args=validate_args)
else:
raise ValueError('One of `probs` or `logits` must be specified.')
class CategoricalProbs(Distribution):
arg_constraints = {'probs': constraints.simplex}
has_enumerate_support = True
is_discrete = True
def __init__(self, probs, validate_args=None):
if jnp.ndim(probs) < 1:
raise ValueError("`probs` parameter must be at least one-dimensional.")
self.probs = probs
super(CategoricalProbs, self).__init__(batch_shape=jnp.shape(self.probs)[:-1],
validate_args=validate_args)
def sample(self, key, sample_shape=()):
return categorical(key, self.probs, shape=sample_shape + self.batch_shape)
@validate_sample
def log_prob(self, value):
batch_shape = lax.broadcast_shapes(jnp.shape(value), self.batch_shape)
value = jnp.expand_dims(value, axis=-1)
value = jnp.broadcast_to(value, batch_shape + (1,))
logits = _to_logits_multinom(self.probs)
log_pmf = jnp.broadcast_to(logits, batch_shape + jnp.shape(logits)[-1:])
return jnp.take_along_axis(log_pmf, value, axis=-1)[..., 0]
@property
def mean(self):
return jnp.full(self.batch_shape, jnp.nan, dtype=get_dtype(self.probs))
@property
def variance(self):
return jnp.full(self.batch_shape, jnp.nan, dtype=get_dtype(self.probs))
@property
def support(self):
return constraints.integer_interval(0, jnp.shape(self.probs)[-1] - 1)
def enumerate_support(self, expand=True):
values = jnp.arange(self.probs.shape[-1]).reshape((-1,) + (1,) * len(self.batch_shape))
if expand:
values = jnp.broadcast_to(values, values.shape[:1] + self.batch_shape)
return values
class CategoricalLogits(Distribution):
arg_constraints = {'logits': constraints.real_vector}
has_enumerate_support = True
is_discrete = True
def __init__(self, logits, validate_args=None):
if jnp.ndim(logits) < 1:
raise ValueError("`logits` parameter must be at least one-dimensional.")
self.logits = logits
super(CategoricalLogits, self).__init__(batch_shape=jnp.shape(logits)[:-1],
validate_args=validate_args)
def sample(self, key, sample_shape=()):
return random.categorical(key, self.logits, shape=sample_shape + self.batch_shape)
@validate_sample
def log_prob(self, value):
batch_shape = lax.broadcast_shapes(jnp.shape(value), self.batch_shape)
value = jnp.expand_dims(value, -1)
value = jnp.broadcast_to(value, batch_shape + (1,))
log_pmf = self.logits - logsumexp(self.logits, axis=-1, keepdims=True)
log_pmf = jnp.broadcast_to(log_pmf, batch_shape + jnp.shape(log_pmf)[-1:])
return jnp.take_along_axis(log_pmf, value, -1)[..., 0]
@lazy_property
def probs(self):
return _to_probs_multinom(self.logits)
@property
def mean(self):
return jnp.full(self.batch_shape, jnp.nan, dtype=get_dtype(self.logits))
@property
def variance(self):
return jnp.full(self.batch_shape, jnp.nan, dtype=get_dtype(self.logits))
@property
def support(self):
return constraints.integer_interval(0, jnp.shape(self.logits)[-1] - 1)
def enumerate_support(self, expand=True):
values = jnp.arange(self.logits.shape[-1]).reshape((-1,) + (1,) * len(self.batch_shape))
if expand:
values = jnp.broadcast_to(values, values.shape[:1] + self.batch_shape)
return values
def Categorical(probs=None, logits=None, validate_args=None):
if probs is not None:
return CategoricalProbs(probs, validate_args=validate_args)
elif logits is not None:
return CategoricalLogits(logits, validate_args=validate_args)
else:
raise ValueError('One of `probs` or `logits` must be specified.')
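# Illustrative sketch (not library code): the two parametrizations above agree.
# CategoricalLogits.log_prob normalizes with logsumexp, which is the log of the
# softmax normalizer used to turn logits into probabilities.  Assumes numpy and
# scipy are available; the helper name and values are hypothetical.
def _categorical_parametrizations_agree(logits=(0.1, 1.2, -0.5), category=1):
    import numpy as np
    from scipy.special import logsumexp, softmax
    logits = np.asarray(logits, dtype=float)
    probs = softmax(logits)                              # probs parametrization
    log_p_from_logits = logits[category] - logsumexp(logits)
    log_p_from_probs = np.log(probs[category])
    return np.allclose(log_p_from_logits, log_p_from_probs)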
class Delta(Distribution):
arg_constraints = {'v': constraints.real, 'log_density': constraints.real}
support = constraints.real
is_discrete = True
def __init__(self, v=0., log_density=0., event_dim=0, validate_args=None, value=None):
if value is not None:
v = value
warnings.warn("`value` argument has been deprecated in favor of `v` argument.",
FutureWarning)
if event_dim > jnp.ndim(v):
raise ValueError('Expected event_dim <= v.dim(), actual {} vs {}'
.format(event_dim, jnp.ndim(v)))
batch_dim = jnp.ndim(v) - event_dim
batch_shape = jnp.shape(v)[:batch_dim]
event_shape = jnp.shape(v)[batch_dim:]
self.v = lax.convert_element_type(v, canonicalize_dtype(jnp.float64))
# NB: following Pyro implementation, log_density should be broadcasted to batch_shape
self.log_density = promote_shapes(log_density, shape=batch_shape)[0]
super(Delta, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def sample(self, key, sample_shape=()):
shape = sample_shape + self.batch_shape + self.event_shape
return jnp.broadcast_to(device_put(self.v), shape)
@validate_sample
def log_prob(self, value):
log_prob = jnp.log(value == self.v)
log_prob = sum_rightmost(log_prob, len(self.event_shape))
return log_prob + self.log_density
@property
def mean(self):
return self.v
@property
def variance(self):
return jnp.zeros(self.batch_shape + self.event_shape)
def tree_flatten(self):
return (self.v, self.log_density), self.event_dim
@classmethod
def tree_unflatten(cls, aux_data, params):
return cls(*params, event_dim=aux_data)
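# Illustrative sketch (not library code): Delta.log_prob above is log(1{value == v})
# plus log_density, i.e. log_density at the support point and -inf everywhere else.
# Plain-numpy illustration; assumes numpy is available and the values are made up.
def _delta_log_prob_sketch(value, v=2.0, log_density=0.0):
    import numpy as np
    with np.errstate(divide="ignore"):  # log(0) -> -inf for points off the support
        return np.log(np.asarray(value) == v) + log_density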
class OrderedLogistic(CategoricalProbs):
"""
A categorical distribution with ordered outcomes.
**References:**
1. *Stan Functions Reference, v2.20 section 12.6*,
Stan Development Team
:param numpy.ndarray predictor: prediction in real domain; typically this is output
of a linear model.
:param numpy.ndarray cutpoints: positions in real domain to separate categories.
"""
arg_constraints = {'predictor': constraints.real,
'cutpoints': constraints.ordered_vector}
def __init__(self, predictor, cutpoints, validate_args=None):
if jnp.ndim(predictor) == 0:
predictor, = promote_shapes(predictor, shape=(1,))
else:
predictor = predictor[..., None]
predictor, self.cutpoints = promote_shapes(predictor, cutpoints)
self.predictor = predictor[..., 0]
cumulative_probs = expit(cutpoints - predictor)
# add two boundary points 0 and 1
        pad_width = [(0, 0)] * (jnp.ndim(cumulative_probs) - 1) + [(1, 1)]
        cumulative_probs = jnp.pad(cumulative_probs, pad_width, constant_values=(0, 1))
        # successive differences of the padded CDF give the per-category probabilities
        probs = jnp.diff(cumulative_probs)
        super(OrderedLogistic, self).__init__(probs, validate_args=validate_args)
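# Illustrative sketch (not library code): with ordered cutpoints c_1 < ... < c_{K-1}
# and predictor eta, P(Y <= k) = sigmoid(c_k - eta); padding the cumulative values
# with 0 and 1 and taking differences yields the K category probabilities, as in
# __init__ above.  Plain-numpy sketch; assumes numpy/scipy, values are made up.
def _ordered_logistic_probs_sketch(predictor=0.4, cutpoints=(-1.0, 0.5, 2.0)):
    import numpy as np
    from scipy.special import expit as _expit
    cumulative = _expit(np.asarray(cutpoints) - predictor)   # P(Y <= k), increasing
    cumulative = np.concatenate(([0.0], cumulative, [1.0]))  # boundary points 0 and 1
    probs = np.diff(cumulative)                              # P(Y = k), sums to 1
    return probs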
import concurrent.futures
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
from textwrap import indent
from typing import Dict, Iterable, List, Mapping, Optional, Sequence, Set
import pandas as pd
import typer
from more_itertools import chunked
from pydantic.error_wrappers import ValidationError
from cgr_gwas_qc import parsers, validators, yaml
from cgr_gwas_qc.config import config_to_yaml
from cgr_gwas_qc.exceptions import GwasQcValidationError, SampleSheetNullRowError
from cgr_gwas_qc.models.config import Config, ReferenceFiles, UserFiles
from cgr_gwas_qc.reporting import CASE_CONTROL_DTYPE, SEX_DTYPE
app = typer.Typer(add_completion=False)
@dataclass(frozen=True)
class ProblemFile:
Sample_ID: str
reason: str
file_type: Optional[str] = None
filename: Optional[str] = None
@app.command()
def main(
config_file: Path = typer.Option(
"config.yml", help="Path to the configuration file.", exists=True, readable=True
),
no_reference_check: bool = typer.Option(
False,
"--no-reference-files-check",
help="Skip checks of reference files. " "Not suggested.",
),
no_user_files_check: bool = typer.Option(
False, "--no-user-files-check", help="Skip checks of user files. " "Not suggested."
),
no_update_config: bool = typer.Option(
False, "--no-update-config", help="Do not update the config file. " "Not suggested."
),
threads: int = typer.Option(
4, "--threads", "-j", help="Number of theads to use when checking user files."
),
cluster_group_size: int = typer.Option(
        1000, help="The number of samples to group together when running in cluster mode."
),
):
"""Check all input files to make sure they are readable and complete.
    **Included Checks**. ``cgr pre-flight`` first checks the ``config.yml`` file and makes sure all config settings are valid.
It then reads the sample sheet (or LIMS manifest file) and checks that all columns defined in the config are present.
These include ``workflow_params.subject_id_column``, ``workflow_params.expected_sex_column``, and ``workflow_params.case_control_column``.
Next it checks all reference files (BPM, VCF, TBI) and makes sure that they exist, are readable, and complete.
Finally, it will search for all IDAT and GTC files if ``user_files.idat_pattern`` and ``user_files.gtc_pattern`` are defined.
Again, it makes sure all files exist, are readable, and are complete.
**File Updates**.
This step also updates the ``config.yml`` file with the ``num_samples`` (from the sample sheet) and the ``num_snps`` (from the BPM file).
**Creates ``cgr_sample_sheet.csv``**.
    Finally, this step creates a normalized version of the sample sheet (or LIMS manifest).
    This includes all of the columns in the sample sheet as well as the following added columns (a toy example follows this function):
- ``Group_By_Subject_ID``: a column with the subject ID to use for subject level qc.
- ``expected_sex``: copies the expected sex column from the config.
- ``case_control``: copies the case control column from the config.
- ``is_internal_control``: a flag indicating if the sample is a CGR internal control (i.e., sVALID-001).
- ``is_user_exclusion``: a flag indicating if the sample was marked to be excluded in the config.
- ``is_missing_idats``: a flag indicating if the sample was missing an IDAT file.
- ``is_missing_gtc``: a flag indicating if the sample was missing its GTC file.
- ``is_sample_exclusion``: a flag indicating if the sample had missing IDAT or GTC files.
- ``num_samples_per_subject``: The number of samples per subject.
    - ``replicate_ids``: A concatenated list of Sample_IDs from each subject.
- ``cluster_group``: Group names used when running on a cluster in the form of ``cgroup#``.
You will almost always run::
$ cgr pre-flight --threads 4
"""
config = check_config(config_file)
ss = check_sample_sheet(
config.sample_sheet,
config.workflow_params.subject_id_column,
config.workflow_params.expected_sex_column,
config.workflow_params.case_control_column,
)
if not no_reference_check:
check_reference_files(config.reference_files)
problem_samples = (
check_user_files(config.user_files, ss, threads) if not no_user_files_check else set()
)
if config.Sample_IDs_to_remove:
# Remove IDs flagged in the config
problem_samples |= {
ProblemFile(Sample_ID, "UserExclusion") for Sample_ID in config.Sample_IDs_to_remove
}
# Create a parsed version of the sample sheet with some custom columns
typer.secho("Saving Updated Sample Sheet to (cgr_sample_sheet.csv)", fg=typer.colors.GREEN)
update_sample_sheet(
ss,
config.workflow_params.subject_id_column,
config.workflow_params.expected_sex_column,
config.workflow_params.case_control_column,
problem_samples,
cluster_group_size,
).to_csv("cgr_sample_sheet.csv", index=False)
# Update the config file with some dynamic settings
if not no_update_config:
# Update the config file with problem samples and re-calculate values
typer.secho("Saving Updated Config to (config.yml)", fg=typer.colors.GREEN)
update_config_file(config, ss)
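# Illustrative sketch only (never called): a toy example of the kind of table written
# to ``cgr_sample_sheet.csv`` by ``update_sample_sheet`` above.  The column names come
# from the docstring of ``main``; all values below are made up.
def _toy_cgr_sample_sheet() -> pd.DataFrame:
    return pd.DataFrame(
        [
            {
                "Sample_ID": "S0001",
                "Group_By_Subject_ID": "SUBJ-01",
                "expected_sex": "F",
                "case_control": "Case",
                "is_internal_control": False,
                "is_user_exclusion": False,
                "is_missing_idats": False,
                "is_missing_gtc": False,
                "is_sample_exclusion": False,
                "num_samples_per_subject": 2,
                "replicate_ids": "S0001|S0002",
                "cluster_group": "cgroup1",
            }
        ]
    )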
def check_config(filename: Path) -> Config:
try:
data = yaml.load(filename)
config = Config.parse_obj(data)
except Exception as err:
if isinstance(err, OSError):
msg = err.args[1]
elif isinstance(err, ValidationError):
msg = str(err.args[0][0].exc).replace("\n", " ")
else:
msg = err.args[0]
typer.secho(f"Config ERROR: ({filename.as_posix()})\n\t{msg}\n", fg=typer.colors.RED)
typer.secho("Exiting... Cannot continue without a valid config file.", fg=typer.colors.RED)
raise SystemExit
typer.secho(f"Config OK ({filename.as_posix()})", fg=typer.colors.GREEN)
return config
def check_reference_files(reference_files: ReferenceFiles):
bpm_file = reference_files.illumina_manifest_file
try:
validators.bpm.validate(bpm_file)
typer.secho(f"BPM OK ({bpm_file})", fg=typer.colors.GREEN)
except Exception as err:
msg = err.args[0] if len(err.args) == 1 else err.args[1]
typer.secho(f"BPM ERROR: {msg} ({bpm_file})", fg=typer.colors.RED)
vcf_file = reference_files.thousand_genome_vcf
try:
validators.bgzip.validate(vcf_file)
typer.secho(f"VCF OK ({vcf_file})", fg=typer.colors.GREEN)
except Exception as err:
msg = err.args[0] if len(err.args) == 1 else err.args[1]
typer.secho(f"VCF ERROR: {msg} ({vcf_file})", fg=typer.colors.RED)
tbi_file = reference_files.thousand_genome_tbi
try:
validators.bgzip.validate(tbi_file)
typer.secho(f"VCF.TBI OK ({tbi_file})", fg=typer.colors.GREEN)
except Exception as err:
msg = err.args[0] if len(err.args) == 1 else err.args[1]
typer.secho(f"VCF.TBI ERROR: {msg} ({tbi_file})", fg=typer.colors.RED)
def check_sample_sheet(
filename: Path, subject_id_column: str, expected_sex_column: str, case_control_column: str
) -> pd.DataFrame:
try:
if parsers.sample_sheet.is_sample_manifest(filename):
            # User provided a CGR-like manifest file
validators.sample_sheet.validate_manifest(
filename, subject_id_column, expected_sex_column, case_control_column
)
df = parsers.sample_sheet.SampleManifest(filename).data
else:
# User provided a plain CSV file
validators.sample_sheet.validate_sample_sheet(
filename, subject_id_column, expected_sex_column, case_control_column
)
df = pd.read_csv(filename)
except SampleSheetNullRowError:
typer.secho(
f"Sample Sheet WARNING: Contains Empty Rows ({filename.as_posix()})",
fg=typer.colors.YELLOW,
)
except Exception as err:
if isinstance(err, FileNotFoundError):
msg = f"FileNotFound ({filename.as_posix()})"
else:
msg = err.args[1]
typer.secho(f"Sample Sheet ERROR: {msg}", fg=typer.colors.RED)
typer.secho("Exiting... Cannot continue without a valid sample sheet.", fg=typer.colors.RED)
raise SystemExit
typer.secho(f"Sample Sheet OK ({filename.as_posix()})", fg=typer.colors.GREEN)
return df
def _filter_list(problems: Iterable[ProblemFile], file_type: str) -> Dict[str, List[str]]:
res = defaultdict(list)
for problem in problems:
if problem.filename and problem.file_type == file_type:
res[problem.reason].append(problem.filename)
return res
def _pretty_print_paths(data: Mapping[str, Sequence[str]]) -> str:
"""For each exception output a list of files nicely."""
output = ""
for k, v in data.items():
output += f" {k}:\n"
files = "\n".join(sorted(v))
output += f"{indent(files, ' - ')}\n"
return output
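# Illustrative example (never called) of the formatting produced by
# ``_pretty_print_paths`` above; the reasons and file names are made up.
def _pretty_print_paths_example() -> str:
    toy = {"FileNotFound": ["b.gtc", "a.gtc"], "Permissions": ["c.gtc"]}
    # Returns:
    #   FileNotFound:
    #   - a.gtc
    #   - b.gtc
    #   Permissions:
    #   - c.gtc
    return _pretty_print_paths(toy)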
def _pretty_print_user_problems(problems: Iterable[ProblemFile]):
if idat_red := _filter_list(problems, "idat_red"):
typer.secho(
"IDAT RED ERROR: There was a problem with these files:\n{}".format(
_pretty_print_paths(idat_red)
),
fg=typer.colors.RED,
)
else:
typer.secho("IDAT RED Files OK.", fg=typer.colors.GREEN)
if idat_green := _filter_list(problems, "idat_green"):
typer.secho(
"IDAT GREEN ERROR: There was a problem with these files:\n{}".format(
_pretty_print_paths(idat_green)
),
fg=typer.colors.RED,
)
else:
typer.secho("IDAT GREEN Files OK.", fg=typer.colors.GREEN)
if gtc := _filter_list(problems, "gtc"):
typer.secho(
"GTC ERROR: There was a problem with these files:\n{}".format(_pretty_print_paths(gtc)),
fg=typer.colors.RED,
)
else:
typer.secho("GTC Files OK.", fg=typer.colors.GREEN)
def _check_idat(filename: str, color: str, sample_id: str) -> Optional[ProblemFile]:
try:
validators.idat.validate(Path(filename))
except FileNotFoundError:
return ProblemFile(sample_id, "FileNotFound", f"idat_{color}", filename)
except PermissionError:
return ProblemFile(sample_id, "Permissions", f"idat_{color}", filename)
except GwasQcValidationError as err:
return ProblemFile(sample_id, err.args[0], f"idat_{color}", filename)
return None # No problems
def _check_gtc(filename: str, sample_id: str) -> Optional[ProblemFile]:
try:
validators.gtc.validate(Path(filename))
except FileNotFoundError:
return ProblemFile(sample_id, "FileNotFound", "gtc", filename)
except PermissionError:
return ProblemFile(sample_id, "Permissions", "gtc", filename)
except GwasQcValidationError as err:
return ProblemFile(sample_id, err.args[0], "gtc", filename)
return None # No problems
def _check_user_files(user_files: UserFiles, record: Mapping) -> Set[Optional[ProblemFile]]:
"""Check IDAT and GTC files for a given sample."""
sample_id = record["Sample_ID"]
problems = set()
if user_files.idat_pattern:
red_file = user_files.idat_pattern.red.format(**record)
problems.add(_check_idat(red_file, "red", sample_id))
green_file = user_files.idat_pattern.green.format(**record)
problems.add(_check_idat(green_file, "green", sample_id))
if user_files.gtc_pattern:
gtc_file = user_files.gtc_pattern.format(**record)
problems.add(_check_gtc(gtc_file, sample_id))
return problems
def check_user_files(user_files: UserFiles, ss: pd.DataFrame, threads: int) -> Set[ProblemFile]:
"""Check user files (IDAT, GTC) using multiple threads.
There can be tens of thousands of user files to process here. To speed
things up a bit we use parallel processing. This function spins up
``threads`` processes and runs user files checks for each sample.
Returns:
Sample_IDs that had a problem with either their IDAT or GTC files.
"""
typer.secho("Checking user files for {:,} samples.".format(ss.shape[0]))
with concurrent.futures.ProcessPoolExecutor(threads) as executer:
futures = {
executer.submit(_check_user_files, user_files, record.to_dict())
for _, record in ss.iterrows()
}
with typer.progressbar(length=len(futures), label="Progress:", show_pos=True) as bar:
problem_user_files = set()
for future in concurrent.futures.as_completed(futures):
results = future.result()
problem_user_files |= {problem for problem in results if problem}
bar.update(1)
_pretty_print_user_problems(problem_user_files)
return problem_user_files
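# Minimal sketch (not project code) of the fan-out pattern used in ``check_user_files``
# above: submit one job per item to a process pool and collect results as they
# complete.  The worker and inputs below are hypothetical stand-ins.
def _square(x):
    # stand-in for the per-sample check; must live at module level so it can be pickled
    return x * x


def _process_pool_sketch(items=range(8), threads=2):
    results = []
    with concurrent.futures.ProcessPoolExecutor(threads) as executor:
        futures = {executor.submit(_square, item) for item in items}
        for future in concurrent.futures.as_completed(futures):
            results.append(future.result())
    return sorted(results)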
def update_config_file(config: Config, ss: pd.DataFrame):
try:
if config.num_snps == 0:
config.num_snps = parsers.illumina.BeadPoolManifest(
config.reference_files.illumina_manifest_file
).num_loci
except Exception:
typer.secho(
" - Problem parsing the illumina manifest file, could not update 'config.num_snp'.",
fg=typer.colors.YELLOW,
)
if config.num_samples == 0:
config.num_samples = ss.shape[0]
config_to_yaml(config)
def _add_group_by_column(df: pd.DataFrame, subject_id_column: str = "Group_By"):
"""Select which column in the sample sheet to use for subject grouping.
This function adds the column `Group_By_Subject_ID` to the sample
sheet object. The sample sheet contains multiple columns with subject
level information. The user defines which column to use in the config
    ``config.workflow_params.subject_id_column``.
    Recently, Q1 2021, we started adding a
# Validation work (March 9th).
# The general purpose of this script is to reproduce, with ImmunoPepper, the result
# published in the Cancer Cell paper.
# Link: (https://www.sciencedirect.com/science/article/pii/S1535610818303064)
# validated 2019/03/07
import gzip
import pickle
import argparse
import sys
import os
import numpy as np
import pandas as pd
from immunopepper.constant import NOT_EXIST
from functools import reduce
def find_diff_coord(imm_gene_coord_dict,cancercell_gene_coord_dict):
def fuzzy_comp_str2str(query_coord,another_coord):
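        # NB: this compares the *sum* of coordinate differences, so two coordinate
        # tuples are treated as equal whenever their differences cancel out.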
return np.sum(np.array(query_coord)-np.array(another_coord)) == 0
def fuzzy_comp_str2list(query_coord,another_coord_list):
if query_coord[2] == NOT_EXIST or len(another_coord_list) == 0:
return ('',False)
possible_match_id_list = np.where(np.array(another_coord_list)[:,0]==query_coord[0])[0]
for possible_match_id in possible_match_id_list:
coord_str_tuple = another_coord_list[possible_match_id]
if coord_str_tuple[3] != NOT_EXIST:
match = fuzzy_comp_str2str(query_coord,coord_str_tuple)
if match:
return (coord_str_tuple,True)
return ('',False)
unique_imm_coord_dict = {}
for i,gene_name in enumerate(imm_gene_coord_dict):
if i % 1000 == 0:
print(i)
imm_coord_set = imm_gene_coord_dict[gene_name]
if gene_name not in cancercell_gene_coord_dict:
cancercell_coord_set = {}
else:
cancercell_coord_set = cancercell_gene_coord_dict[gene_name]
for coord,vertex_id in imm_coord_set:
similar_result,found = fuzzy_comp_str2list(coord,cancercell_coord_set)
if not found:
gene_name_coord_str = gene_name+'_'+'_'.join([str(icoord) for icoord in coord])+'_'+vertex_id
new_or_append_value_to_dict_key(unique_imm_coord_dict,gene_name,gene_name_coord_str)
return unique_imm_coord_dict
def get_cancercell_ref_junction_dict(cancercell_junction_file):
gt_lines = open(cancercell_junction_file,'r').readlines()
ref_junction_dict = {} # (gene_name,coord_str_tuple) |-> peptide
cancercell_gene_coord_dict = {} # gene_name |-> coord_str_tuple
i = 0
while i < len(gt_lines)-1:
headline = gt_lines[i].strip()
items = headline.split('_')
gene_name = items[2]
coord_str_tuple = tuple([int(coord)-1 if a%2 ==0 else int(coord) for a, coord in enumerate(items[-4:])]) # remove coord correction
i += 1
peptide = gt_lines[i].strip()
ref_junction_dict[(gene_name,coord_str_tuple)] = peptide
new_or_append_value_to_dict_key(cancercell_gene_coord_dict,gene_name,coord_str_tuple)
i += 1
return ref_junction_dict,cancercell_gene_coord_dict
def get_cancercell_kmer_dict(cancercell_result_file):
def get_kmer_list(_str, k):
if len(_str) < k:
return [_str]
else:
kmer_list = [_str[i:i + k] for i in range(0, max(0, len(_str) - k + 1))]
return kmer_list
cancercell_pep_lines = open(cancercell_result_file, 'r').readlines()
i = 0
cancercell_kmer_dict = {}
print("Parsing cancercell kmer result file. Need around 1 minute.")
while i < len(cancercell_pep_lines):
line = cancercell_pep_lines[i]
gene_name = line.strip().split('_')[2]
i += 1
pep = cancercell_pep_lines[i].strip()
kmer_list = get_kmer_list(pep, 9)
new_dict = {kmer: gene_name for kmer in kmer_list}
cancercell_kmer_dict = merge_two_dicts(cancercell_kmer_dict, new_dict)
i += 1
return cancercell_kmer_dict
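# Illustrative example (never called) of the 9-mer decomposition done by
# ``get_kmer_list`` inside ``get_cancercell_kmer_dict`` above; the peptide is made up.
def _kmer_example():
    peptide = "MKTAYIAKQR"          # length 10
    k = 9
    kmers = [peptide[i:i + k] for i in range(0, max(0, len(peptide) - k + 1))]
    # kmers == ["MKTAYIAKQ", "KTAYIAKQR"]
    return kmers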
def get_immunopepper_meta_dict(meta_file):
"""
    Why do we have with_coord and without_coord keys?
    It's due to a current mismatch: ImmunoPepper's peptide.fa only has gene_name+vertex_id
    while Matthias's peptide.fa has gene_name+vertex.
    To explain the differences in the reference peptide output,
    we need to build the with_coord dict.
    However, when we trace back the flag tuples of the additional kmers to explain them,
    we need the without_coord dict.
    In summary: to generate the reference comparison result we need the with_coord dict;
    for validating other samples and mutation types we only need the without_coord dict.
"""
meta_df = pd.read_csv(meta_file,sep='\t')
meta_flag_dict_key_with_coord = {} # (geneName_fourCoords_vertexId) |-> flag_tuple
meta_flag_dict_key_without_coord = {}
    imm_gene_coord_dict = {} # gene_name |->List[(coord_str_tuple,vertex_id)], since one gene_name can have multiple lines
for i in range(len(meta_df)):
gene_name = meta_df['gene_name'][i]
stop_flag = int(meta_df['has_stop_codon'][i])
isolated_flag = int(meta_df['is_isolated'][i])
som_variant_comb_num = int(len(meta_df['variant_comb'][i].split(';')) > 1)
exon_coord = meta_df['exons_coor'][i]
coord_str_tuple = tuple([int(coord) if coord != NOT_EXIST else NOT_EXIST for coord in exon_coord.split(';')])
vertex_id = meta_df['vertex_idx'][i]
key_with_coord = gene_name + '_' + '_'.join(exon_coord.split(';')) + '_' + vertex_id
key_without_coord = gene_name+'_'+vertex_id.split(',')[0]+'_'+vertex_id.split(',')[1]
start_v1 = coord_str_tuple[0]
stop_v1 = coord_str_tuple[1]
is_less_than3_flag = int((int(stop_v1) - int(start_v1)) < 3)
flag_tuple = (stop_flag, isolated_flag, is_less_than3_flag,som_variant_comb_num)
meta_flag_dict_key_with_coord = new_or_append_value_to_dict_key(meta_flag_dict_key_with_coord,key_with_coord,flag_tuple)
meta_flag_dict_key_without_coord = new_or_append_value_to_dict_key(meta_flag_dict_key_without_coord,key_without_coord,flag_tuple)
imm_gene_coord_dict = new_or_append_value_to_dict_key(imm_gene_coord_dict,gene_name,(coord_str_tuple, vertex_id))
return meta_flag_dict_key_with_coord, meta_flag_dict_key_without_coord, imm_gene_coord_dict
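# Illustrative example (never called) of the two key styles built above, for a
# hypothetical metadata row; the gene name, coordinates and vertex ids are made up.
def _meta_key_example():
    gene_name = "GENE1"
    exon_coord = "100;200;300;400"
    vertex_id = "3,4"
    key_with_coord = gene_name + '_' + '_'.join(exon_coord.split(';')) + '_' + vertex_id
    key_without_coord = gene_name + '_' + vertex_id.split(',')[0] + '_' + vertex_id.split(',')[1]
    # key_with_coord    == "GENE1_100_200_300_400_3,4"
    # key_without_coord == "GENE1_3_4"
    return key_with_coord, key_without_coord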
def create_reference_cause_data():
"""
Generate reference cause dictionary.
    It takes some time to generate, so we store the
    required dictionaries as pickles for subsequent use.
Implement a comprehensive comparison between ImmunoPepper's reference_junction
and CancerCell's reference_junction. We want to see how many outputs only
    exist in Immunopepper (denoted as additional) and how many outputs only exist
    in CancerCell (denoted as missing).
"""
# load and parse immunopepper result
imm_ref_meta_file = '/cluster/work/grlab/projects/TCGA/immunopepper_rerun/TCGA-13-1489/ref_metadata.tsv.gz'
ref_meta_flag_dict_with_coord, ref_meta_flag_dict_without_coord, imm_gene_coord_dict = get_immunopepper_meta_dict(
imm_ref_meta_file)
# load and parse cancercell result
cancercell_junction_file ='/cluster/work/grlab/projects/TCGA/PanCanAtlas/peptides_neoantigen/' \
'analysis_pancan/ccell_rerun_2018/output/peptides.clean/split/REFERENCE.cj.fa'
ref_junction_dict, cancercell_gene_coord_dict = get_cancercell_ref_junction_dict(cancercell_junction_file)
# get the additional kmer dict (only appear in immunopepper result)
additional_imm_coord_dict = find_diff_coord(imm_gene_coord_dict, cancercell_gene_coord_dict)
# remove isolated case and only keep coord_str_tuple for further use
imm_gene_coord_dict_without_vid = {
gene_name: [coord_vid[0] for coord_vid in filter((lambda x: x[0][2] != NOT_EXIST), coord_vid_tuple_list)] for
gene_name, coord_vid_tuple_list in list(imm_gene_coord_dict.items())}
# get the missing kmer dict (only appear in cancercell paper result)
missing_cancercell_gene_coord_dict = {
gene_name: list(filter((lambda x: x in imm_gene_coord_dict_without_vid[gene_name]), coord_tuple_list)) for
gene_name, coord_tuple_list in list(cancercell_gene_coord_dict.items())}
# we know some of the missing kmers are due to extrapolation
# which means there are 3 identical coords and only one coord is different.
# We mark as True for those cases that can be explained by this.
unique_cancercell_gene_explain_dict = {
gene_name: [3 in np.sum((np.array(coord_tuple) - np.array(imm_gene_coord_dict_without_vid[gene_name])) == 0, axis=1)
for coord_tuple in coord_tuple_list] for gene_name, coord_tuple_list in
list(missing_cancercell_gene_coord_dict.items())}
missing_explain_num = np.sum([sum(item) for item in list(unique_cancercell_gene_explain_dict.values())]) # 65360/65360
# do a simple analysis of the result
additional_junc_pair_num = np.sum([len(item) for item in list(additional_imm_coord_dict.values())]) # 1103270
missing_junc_pair_num = np.sum([len(item) for item in list(missing_cancercell_gene_coord_dict.values())]) # 65360
total_junc_pair_num = np.sum([len(item) for item in list(imm_gene_coord_dict.values())]) # 1622126
total_cancercell_junc_pair_num = np.sum([len(item) for item in list(cancercell_gene_coord_dict.values())]) # 525539
print("{} additional junc pair and {} miss junc pair in {} immuno total junc pairs"
" and {} cancercell junc pairs. {} miss kmer are caused by extrapolation".format(additional_junc_pair_num,
missing_junc_pair_num,
total_junc_pair_num,
total_cancercell_junc_pair_num,
missing_explain_num))
# try to explain the additional output
additional_flag_list = []
additional_gene_name_list = []
for gene_name, additional_junc_coord_list in list(additional_imm_coord_dict.items()):
additional_flag_list.extend(
[reduce((lambda x, y: np.logical_or(x, y)), ref_meta_flag_dict_with_coord[additional_junc_coord]) for additional_junc_coord in
additional_junc_coord_list])
for additional_junc_coord in additional_junc_coord_list:
items = additional_junc_coord.split('_')
vertex_id = items[-1].split(',')
gene_name = items[0]
gene_name_str = '_'.join((gene_name, vertex_id[0], vertex_id[1]))
additional_gene_name_list.append(gene_name_str)
flag_explain_result = np.sum(np.array(additional_flag_list), axis=0)
print("stop codon constributes to {}, isolated contributes to {}, short vertices contributes to {}".format(
flag_explain_result[0], flag_explain_result[1], flag_explain_result[2])) # 826039, 199362, 3224
print("{} can not be explained by the three".format(sum(np.sum(np.array(additional_flag_list), axis=1) == 0))) # 204994
# load and parse reference junction kmer for further use
# It can be used to explain some missing kmers.
imm_ref_junction_file = '/cluster/work/grlab/projects/TCGA/immunopepper_rerun/TCGA-13-1489/ref_junction_kmer.txt'
ref_kmer_dict = {line.split('\t')[0]:line.split('\t')[1] for line in open(imm_ref_junction_file,'r') if line.strip().split('\t')[-1] == 'True'}
ref_kmer_set = set(ref_kmer_dict.keys())
    with open('ref_cause_dict.pkl', 'wb') as f:  # same file name that is checked and loaded below
pickle.dump((ref_kmer_set, imm_gene_coord_dict, cancercell_gene_coord_dict, additional_imm_coord_dict,
additional_gene_name_list, missing_cancercell_gene_coord_dict), f)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--samples", help="the sample names(can be string or ), can specify more than one sample", required=False, default='TCGA-13-1489')
parser.add_argument("--mutation_mode", help="specify the mutation mdoe", required=False, default='germline')
if len(argv) < 2:
parser.print_help()
sys.exit(1)
pargs = parser.parse_args(argv)
return pargs
def new_or_append_value_to_dict_key(_dict,key,value):
if key in _dict:
_dict[key].append(value)
else:
_dict[key] = [value]
return _dict
def merge_two_dicts(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
arg = parse_arguments(sys.argv[1:])
sample_name = arg.samples
mutation_mode = arg.mutation_mode
immunopepper_file = '/cluster/work/grlab/projects/TCGA/immunopepper_rerun/{}/{}_junction_kmer.txt'.format(sample_name,mutation_mode)
cancercell_kmer_file = '/cluster/work/grlab/projects/TCGA/PanCanAtlas/peptides_neoantigen/analysis_pancan/' \
'ccell_rerun_2018/output/peptides.clean/split/cj_kmers/{}.cj.{}.cj_kmers_9.fa'.format(sample_name,mutation_mode)
mutation_meta_gz_file = '/cluster/work/grlab/projects/TCGA/immunopepper_rerun/{}/{}_metadata.tsv.gz'.format(sample_name,mutation_mode)
if mutation_mode == 'somatic_and_germline':
cancercell_kmer_file = '/cluster/work/grlab/projects/TCGA/PanCanAtlas/peptides_neoantigen/analysis_pancan/' \
'ccell_rerun_2018/output/peptides.clean/split/cj_kmers/{}.cj.{}.cj_kmers_9.fa'.format(
sample_name, 'germline_somatic')
aux_germ_immunopepper_file = '/cluster/work/grlab/projects/TCGA/immunopepper_rerun/{}/{}_junction_kmer.txt'.format(
sample_name, 'germline')
aux_germ_immunopepper_dict = {line.split('\t')[0]:line.split('\t')[1] for line in open(aux_germ_immunopepper_file,'r') if line.strip().split('\t')[-1] == 'True'}
aux_germ_meta_gz_file = '/cluster/work/grlab/projects/TCGA/immunopepper_rerun/{}/{}_metadata.tsv.gz'.format(sample_name,'germline')
_, aux_mut_meta_flag_dict_without_coord, _ = get_immunopepper_meta_dict(aux_germ_meta_gz_file)
else:
    # In Immunopepper's implementation, in the somatic_and_germline mode (germline_somatic in Immunopepper),
    # the reference peptide is the germline-applied peptide, while in Matthias's output the reference peptide is just the reference.
    # Therefore, quite a lot of kmers are already included in 'germline_junction_kmer' and not included in
    # 'somatic_germline_junction_kmer'. In Matthias's output, those germline kmers are also included in 'germline_somatic_cj_kmers'.
    # To compare the two, we need the auxiliary dict generated from germline mode.
aux_germ_immunopepper_dict = {}
aux_mut_meta_flag_dict_without_coord = {}
########
# Part 0: load some auxiliary functions and data
########
if not os.path.exists('ref_cause_dict.pkl'):
print("Reference cause dictionary does not exist. Begin generating and might take 20 minutes...\n")
create_reference_cause_data()
f = open('ref_cause_dict.pkl','rb')
ref_kmer_set,imm_gene_coord_dict,cancercell_gene_coord_dict,additional_imm_coord_dict,additional_gene_name_list,missing_cancercell_gene_coord_dict = pickle.load(f)
# get immunopepper junction metadata dict
_,mut_meta_flag_dict_without_coord,_ = get_immunopepper_meta_dict(mutation_meta_gz_file)
mut_meta_flag_dict_without_coord = merge_two_dicts(mut_meta_flag_dict_without_coord,aux_mut_meta_flag_dict_without_coord)
# get cancercell kmer dict
# kmer: -> gene_name
cancercell_kmer_dict = get_cancercell_kmer_dict(cancercell_kmer_file)
cancercell_kmer_set = set(cancercell_kmer_dict.keys())
# get immunopepper kmer dict, only focus on the cross junction part
# kmer: -> gene_name
immunopepper_dict = {line.split('\t')[0]:line.split('\t')[1] for line in open(immunopepper_file,'r') if line.strip().split('\t')[-1] == 'True'}
immunopepper_dict = merge_two_dicts(immunopepper_dict,aux_germ_immunopepper_dict)
########
# Part 1: get kmer dict
########
# get all kmer returned by immunopepper
immunopepper_kmer = set(immunopepper_dict.keys())
common_kmer_set = cancercell_kmer_set.intersection(immunopepper_kmer)
additional_kmer_list = list(immunopepper_kmer.difference(cancercell_kmer_set))
missing_kmer_list = list(cancercell_kmer_set.difference(immunopepper_kmer))
num_common_kmer = len(common_kmer_set)
num_additional_kmer = len(additional_kmer_list)
num_missing_kmer = len(missing_kmer_list)
s_summary = ">>>>>>>>Validation Start\n\nComparison overview. (additional kmer means imm subtracts mat, miss kmer means mat subtracts imm).\n" \
">> {} common kmers, {} additional kmers and {} miss kmers".format(num_common_kmer,num_additional_kmer,num_missing_kmer)
print(s_summary)
########
# Part 2: Explain missing kmers
########
# case 1: Mutations occur at the extrapolation part,
# so the whole peptide is kept although it is the same as the reference peptide near the cross-junction place.
# these missing kmers
OoOO000.join(timeout=0.1)
ii1Ii11I.append(sum(OoOO000.result))
print ((sum(ii1Ii11I) / (timeit.default_timer() - i1I1iI1iIi111i)) / 1000 / 1000) * 8
del OoOO000
if 1 - 1: II111iiii
OOooooO0Oo = Queue(6)
OO = threading.Thread(target=I1I1I, args=(OOooooO0Oo, files))
iIiIIi1 = threading.Thread(target=O0O, args=(OOooooO0Oo, len(files)))
i1I1iI1iIi111i = timeit.default_timer()
OO.start()
iIiIIi1.start()
while OO.isAlive():
OO.join(timeout=0.1)
while iIiIIi1.isAlive():
iIiIIi1.join(timeout=0.1)
return (sum(ii1Ii11I) / (timeit.default_timer() - i1I1iI1iIi111i))
if 7 - 7: OOO0O - Oo0Ooo - oooO0oo0oOOOO + OOO0O
if 26 - 26: II1Ii1iI1i
class I11iiI1i1(threading.Thread):
if 47 - 47: o0oOoO00o - II1Ii1iI1i.II111iiii + OoooooooOO.i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * II1Ii1iI1i / Oo0Ooo / II1Ii1iI1i
def __init__(self, url, start, size):
self.url = url
oO0 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
O0OO0O = oO0 * (int(round(int(size) / 36.0)))
self.data = ('content1=%s' % O0OO0O[0: int(size) - 9]).encode()
del O0OO0O
self.result = None
self.starttime = start
threading.Thread.__init__(self)
if 81 - 81: oooO0oo0oOOOO.o0oOOo0O0Ooo % O0 / I1IiiI - oooO0oo0oOOOO
def run(self):
try:
if ((timeit.default_timer() - self.starttime) <= 10 and
not Oo.isSet()):
O0OOo00oo0oOo = O0ooo0O0oo0(self.url, data=self.data)
OoOo0o = urlopen(O0OOo00oo0oOo)
OoOo0o.read(11)
OoOo0o.close()
self.result = len(self.data)
else:
self.result = 0
except IOError:
self.result = 0
if 43 - 43: i11iIiiIii + Oo0Ooo * II111iiii * o0ooo * O0
if 64 - 64: ooO0oo0oO0 % iIii1I11I1II1 * oooO0oo0oOOOO
def o0iI11I1II(url, sizes, quiet=False):
if 40 - 40: iIii1I11I1II1 / OoOoOO00 % I1ii11iIi11i + II111iiii
if 27 - 27: II111iiii * OoOoOO00 * iIii1I11I1II1
i1I1iI1iIi111i = timeit.default_timer()
if 86 - 86: OoO0O00 * ooO0oo0oO0.o0oOoO00o
def I1I1I(q, sizes):
for iI in sizes:
OoOO000 = I11iiI1i1(url, i1I1iI1iIi111i, iI)
OoOO000.start()
q.put(OoOO000, True)
if not quiet and not Oo.isSet():
sys.stdout.write('.')
sys.stdout.flush()
if 90 - 90: o0ooo % II1Ii1iI1i - iIii1I11I1II1 - iIii1I11I1II1 / i11iIiiIii % I1ii11iIi11i
ii1Ii11I = []
if 37 - 37: oooO0oo0oOOOO - I1IiiI.i111I * II1Ii1iI1i - o0oOoO00o
def O0O(q, total_sizes):
while len(ii1Ii11I) < total_sizes:
OoOO000 = q.get(True)
while OoOO000.isAlive():
OoOO000.join(timeout=0.1)
ii1Ii11I.append(OoOO000.result)
print ((sum(ii1Ii11I) / (timeit.default_timer() - i1I1iI1iIi111i)) / 1000 / 1000) * 8
del OoOO000
if 8 - 8: OoO0O00 - I1IiiI % II1Ii1iI1i * OoooooooOO - OoO0O00 * o0ooo
OOooooO0Oo = Queue(6)
OO = threading.Thread(target=I1I1I, args=(OOooooO0Oo, sizes))
iIiIIi1 = threading.Thread(target=O0O, args=(OOooooO0Oo, len(sizes)))
i1I1iI1iIi111i = timeit.default_timer()
OO.start()
iIiIIi1.start()
while OO.isAlive():
OO.join(timeout=0.1)
while iIiIIi1.isAlive():
iIiIIi1.join(timeout=0.1)
return (sum(ii1Ii11I) / (timeit.default_timer() - i1I1iI1iIi111i))
if 6 - 6: OoooooooOO
if 17 - 17: I1IiiI % o0ooo
def OOOoo(dom, tagName):
O0oO = dom.getElementsByTagName(tagName)[0]
if 73 - 73: I1ii11iIi11i * i11iIiiIii % oooO0oo0oOOOO.I1ii11iIi11i
if 66 - 66: oooO0oo0oOOOO + oooO0oo0oOOOO + OOO0O / o0oOoO00o + ooO0oo0oO0
if 30 - 30: O0
if 44 - 44: oooO0oo0oOOOO / i111I / i111I
if 87 - 87: Oo0Ooo.I1IiiI - II111iiii + O0 / Oo0Ooo / oooO0oo0oOOOO
if 25 - 25: I1IiiI.I1IiiI - OoOoOO00 % OoOoOO00 - i11iIiiIii / o0ooo
return dict(list(O0oO.attributes.items()))
if 51 - 51: Oo0Ooo / OoOoOO00.ooO0oo0oO0 * o0oOOo0O0Ooo + OoO0O00 * I11iii11IIi
if 73 - 73: OoO0O00 + OoooooooOO - O0 - II1Ii1iI1i - II111iiii
def O0Oo0oOOoooOOOOo():
if 62 - 62: OOO0O
if 74 - 74: o0oOoO00o + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % ooO0oo0oO0
if 98 - 98: i111I % i11iIiiIii % OOO0O + II1Ii1iI1i
O0OOo00oo0oOo = O0ooo0O0oo0('https://www.speedtest.net/speedtest-config.php')
O0OOO = Ii(O0OOo00oo0oOo)
if O0OOO is False:
Ii11iI1i('Could not retrieve speedtest.net configuration')
sys.exit(1)
OOoOO0o0o0 = []
while 1:
OOoOO0o0o0.append(O0OOO.read(10240))
if len(OOoOO0o0o0[- 1]) == 0:
break
if int(O0OOO.code) != 200:
return None
O0OOO.close()
try:
try:
ii1I1 = ET.fromstring(''.encode().join(OOoOO0o0o0))
OooooOOoo0 = {
'client': ii1I1.find('client').attrib,
'times': ii1I1.find('times').attrib,
'download': ii1I1.find('download').attrib,
'upload': ii1I1.find('upload').attrib}
except Exception, iii:
xbmc.log('Exception for ET: ' + str(iii), level=xbmc.LOGDEBUG)
ii1I1 = DOM.parseString(''.join(OOoOO0o0o0))
OooooOOoo0 = {
'client': OOOoo(ii1I1, 'client'),
'times': OOOoo(ii1I1, 'times'),
'download': OOOoo(ii1I1, 'download'),
'upload': OOOoo(ii1I1, 'upload')}
except SyntaxError:
Ii11iI1i('Failed to parse speedtest.net configuration')
sys.exit(1)
del ii1I1
del OOoOO0o0o0
return OooooOOoo0
if 35 - 35: i111I % ooO0oo0oO0 - oooO0oo0oOOOO
if 20 - 20: i1IIi - OOO0O
def i1iI(client, all=False):
if 94 - 94: iIii1I11I1II1 / Oo0Ooo % o0oOoO00o * o0oOoO00o * II111iiii
if 29 - 29: OoO0O00 + OoOoOO00 / o0oOOo0O0Ooo / ooO0oo0oO0 * iIii1I11I1II1
if 62 - 62: ooO0oo0oO0 / oooO0oo0oOOOO - OoO0O00.i111I
if 11 - 11: I1ii11iIi11i.OoO0O00 * I11iii11IIi * OoooooooOO + OOO0O
IiII111i1i11 = [
'https://www.speedtest.net/speedtest-servers-static.php',
'http://c.speedtest.net/speedtest-servers-static.php',
]
i111iIi1i1II1 = {}
for oooO in IiII111i1i11:
try:
O0OOo00oo0oOo = O0ooo0O0oo0(oooO)
O0OOO = Ii(O0OOo00oo0oOo)
if O0OOO is False:
raise I1II1III11iii
i1I1i111Ii = []
while 1:
i1I1i111Ii.append(O0OOO.read(10240))
if len(i1I1i111Ii[- 1]) == 0:
break
if int(O0OOO.code) != 200:
O0OOO.close()
raise I1II1III11iii
O0OOO.close()
try:
try:
ii1I1 = ET.fromstring(''.encode().join(i1I1i111Ii))
ooo = ii1I1.getiterator('server')
except Exception, iii:
xbmc.log('Exception for ET: ' + str(iii), level=xbmc.LOGDEBUG)
ii1I1 = DOM.parseString(''.join(i1I1i111Ii))
ooo = ii1I1.getElementsByTagName('server')
except SyntaxError:
raise I1II1III11iii
for i1i1iI1iiiI in ooo:
try:
Ooo0oOooo0 = i1i1iI1iiiI.attrib
except AttributeError:
Ooo0oOooo0 = dict(list(i1i1iI1iiiI.attributes.items()))
OOoOO0oo0ooO = iIIIIii1([float(client['lat']),
float(client['lon'])],
[float(Ooo0oOooo0.get('lat')),
float(Ooo0oOooo0.get('lon'))])
Ooo0oOooo0['d'] = OOoOO0oo0ooO
if OOoOO0oo0ooO not in i111iIi1i1II1:
i111iIi1i1II1[OOoOO0oo0ooO] = [Ooo0oOooo0]
else:
i111iIi1i1II1[OOoOO0oo0ooO].append(Ooo0oOooo0)
del ii1I1
del i1I1i111Ii
del ooo
except I1II1III11iii:
continue
if 61 - 61: OoOoOO00 - ooO0oo0oO0 - i1IIi
if 25 - 25: O0 * i111I + I1ii11iIi11i.o0oOOo0O0Ooo.o0oOOo0O0Ooo
if i111iIi1i1II1:
break
if 58 - 58: I1IiiI
if not i111iIi1i1II1:
Ii11iI1i('Failed to retrieve list of speedtest.net servers')
sys.exit(1)
if 53 - 53: i1IIi
o0OOOoO0 = []
for OOoOO0oo0ooO in sorted(i111iIi1i1II1.keys()):
for o0OoOo00o0o in i111iIi1i1II1[OOoOO0oo0ooO]:
o0OOOoO0.append(o0OoOo00o0o)
if len(o0OOOoO0) == 5 and not all:
break
else:
continue
break
if 41 - 41: OOO0O % OoO0O00 - Oo0Ooo * o0ooo * Oo0Ooo
del i111iIi1i1II1
return o0OOOoO0
if 69 - 69: ooO0oo0oO0 - OoooooooOO + o0oOOo0O0Ooo - i111I
if 23 - 23: i11iIiiIii
def II1iIi11(servers):
if 12 - 12: II1Ii1iI1i + i11iIiiIii * iIii1I11I1II1 / I1ii11iIi11i.i111I
if 5 - 5: i1IIi + I11iii11IIi / o0oOOo0O0Ooo.o0oOoO00o / i111I
if 32 - 32: I1IiiI % iIii1I11I1II1 / i1IIi - I1IiiI
if 7 - 7: o0ooo * OoO0O00 - OOO0O + ooO0oo0oO0 * I1IiiI % OoO0O00
iI1i111I1Ii = {}
for i1i1iI1iiiI in servers:
i11i1ii1I = []
oooO = '%s/latency.txt' % os.path.dirname(i1i1iI1iiiI['url'])
o0OO0o0o00o = urlparse(oooO)
for OOOO in range(0, 3):
try:
if o0OO0o0o00o[0] == 'https':
oOo0 = HTTPSConnection(o0OO0o0o00o[1])
else:
oOo0 = HTTPConnection(o0OO0o0o00o[1])
OOOoOO = {'User-Agent': o00ooooO0oO}
i1I1iI1iIi111i = timeit.default_timer()
oOo0.request("GET", o0OO0o0o00o[2], headers=OOOoOO)
I11IIIi = oOo0.getresponse()
iIIiiI1II1i11 = (timeit.default_timer() - i1I1iI1iIi111i)
except (HTTPError, URLError, socket.error):
i11i1ii1I.append(3600)
continue
o0o0 = I11IIIi.read(9)
if int(I11IIIi.status) == 200 and o0o0 == 'test=test'.encode():
i11i1ii1I.append(iIIiiI1II1i11)
else:
i11i1ii1I.append(3600)
oOo0.close()
IIii1111 = round((sum(i11i1ii1I) / 6) * 1000, 3)
iI1i111I1Ii[IIii1111] = i1i1iI1iiiI
I1iI = sorted(iI1i111I1Ii.keys())[0]
IIIIiIiIi1 = iI1i111I1Ii[I1iI]
IIIIiIiIi1['latency'] = I1iI
if 2 - 2: o0oOoO00o % iIii1I11I1II1 * iIii1I11I1II1.o0oOOo0O0Ooo / o0oOoO00o
return IIIIiIiIi1
if 27 - 27: OoO0O00 + OOO0O - i1IIi
if 69 - 69: I11iii11IIi - O0 % I1ii11iIi11i + i11iIiiIii.OoOoOO00 / OoO0O00
def OoOoo00Ooo00(signum, frame):
if 57 - 57: o0ooo
if 32 - 32: II1Ii1iI1i - Oo0Ooo % OoooooooOO.o0oOoO00o / I11iii11IIi + I1IiiI
if 76 - 76: OOO0O
if 73 - 73: O0 * o0oOoO00o + II1Ii1iI1i + OOO0O
global Oo
Oo.set()
raise SystemExit('\nCancelling...')
if 40 - 40: II111iiii.OoOoOO00 * o0ooo + ooO0oo0oO0 + ooO0oo0oO0
if 9 - 9: i111I % OoooooooOO.oooO0oo0oOOOO % i111I
if 32 - 32: i11iIiiIii
if 31 - 31: iIii1I11I1II1 / OoO0O00 / I1ii11iIi11i
if 41 - 41: Oo0Ooo
if 10 - 10: Oo0Ooo / Oo0Ooo / o0ooo.o0ooo
def OOoo(list=False, mini=None, server=None, share=False, simple=False, src=None, timeout=10, units=('bit', 8),
version=False):
iIIiiiI = xbmcgui.DialogProgress()
oo0 = [' ', ' ', ' ']
iIIiiiI.create(Iii1ii1II11i + ' - Powered by SpeedTest.net', oo0[0], oo0[1], oo0[2])
iIIiiiI.update(0, oo0[0], oo0[1], oo0[2])
if 34 - 34: I1IiiI % o0oOoO00o + OOO0O * iIii1I11I1II1
if 33 - 33: I1IiiI / OOO0O * ooO0oo0oO0 / I1ii11iIi11i + Oo0Ooo / o0oOoO00o
if 40 - 40: I1ii11iIi11i
global Oo, oOoOo00oOo
Oo = threading.Event()
if 60 - 60: I1ii11iIi11i % OoOoOO00 * OoO0O00 % II111iiii
if 70 - 70: OoO0O00 % oooO0oo0oOOOO + ooO0oo0oO0 / II1Ii1iI1i % O0
if 100 - 100: o0oOOo0O0Ooo + ooO0oo0oO0 * o0oOOo0O0Ooo
oOOo0OOOo00O = (
'Command line interface for testing internet bandwidth using '
'speedtest.net.\n'
'------------------------------------------------------------'
'--------------\n'
'https://github.com/sivel/speedtest-cli')
if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - II1Ii1iI1i + I1ii11iIi11i
if 51 - 51: iIii1I11I1II1.OOO0O + iIii1I11I1II1
if 95 - 95: I1IiiI
if version:
version()
if 46 - 46: OoOoOO00 + OoO0O00
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to extract size information for chrome, executed by buildbot.
When this is run, the current directory (cwd) should be the outer build
directory (e.g., chrome-release/build/).
For a list of command-line options, call this script with '--help'.
"""
import errno
import glob
import json
import platform
import optparse
import os
import re
import stat
import subprocess
import sys
import tempfile
from slave import build_directory
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..', '..'))
# Add Catapult to the path so we can import the chartjson-histogramset
# conversion.
sys.path.append(os.path.join(SRC_DIR, 'third_party', 'catapult', 'tracing'))
from tracing.value import convert_chart_json
class ResultsCollector(object):
def __init__(self):
self.results = {}
def add_result(self, name, identifier, value, units):
assert name not in self.results
self.results[name] = {
'identifier': identifier,
'value': int(value),
'units': units
}
# Legacy printing, previously used for parsing the text logs.
print 'RESULT %s: %s= %s %s' % (name, identifier, value, units)
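# Illustrative sketch (not part of the build, never called): collecting one measurement
# emits the legacy line "RESULT chrome: chrome= 12345 bytes" and stores the same data
# in ``results``.  The numbers are made up.
def _example_results_collector():
  collector = ResultsCollector()
  collector.add_result('chrome', 'chrome', 12345, 'bytes')
  return collector.results  # {'chrome': {'identifier': 'chrome', 'value': 12345, 'units': 'bytes'}}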
def get_size(filename):
return os.stat(filename)[stat.ST_SIZE]
def get_linux_stripped_size(filename):
EU_STRIP_NAME = 'eu-strip'
# Assumes |filename| is in out/Release
# build/linux/bin/eu-strip'
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(filename)))
eu_strip_path = os.path.join(src_dir, 'build', 'linux', 'bin', EU_STRIP_NAME)
if (platform.architecture()[0] == '64bit' or
not os.path.exists(eu_strip_path)):
eu_strip_path = EU_STRIP_NAME
with tempfile.NamedTemporaryFile() as stripped_file:
strip_cmd = [eu_strip_path, '-o', stripped_file.name, filename]
result = 0
result, _ = run_process(result, strip_cmd)
if result != 0:
return (result, 0)
return (result, get_size(stripped_file.name))
def run_process(result, command):
p = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout = p.communicate()[0]
if p.returncode != 0:
print 'ERROR from command "%s": %d' % (' '.join(command), p.returncode)
if result == 0:
result = p.returncode
return result, stdout
def main_mac(options, args, results_collector):
"""Print appropriate size information about built Mac targets.
Returns the first non-zero exit status of any command it executes,
or zero on success.
"""
build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
target_dir = os.path.join(build_dir, options.target)
size_path = 'size'
# If there's a hermetic download of Xcode, directly invoke 'size' from it. The
# hermetic xcode binaries aren't a full Xcode install, so we can't modify
# DEVELOPER_DIR.
hermetic_size_path = os.path.join(
SRC_DIR, 'build', 'mac_files', 'xcode_binaries', 'Contents',
'Developer', 'Toolchains', 'XcodeDefault.xctoolchain', 'usr', 'bin',
'size')
if os.path.exists(hermetic_size_path):
size_path = hermetic_size_path
result = 0
# Work with either build type.
base_names = ('Chromium', 'Google Chrome')
for base_name in base_names:
app_bundle = base_name + '.app'
framework_name = base_name + ' Framework'
framework_bundle = framework_name + '.framework'
framework_dsym_bundle = framework_name + '.dSYM'
framework_unstripped_name = framework_name + '.unstripped'
chromium_app_dir = os.path.join(target_dir, app_bundle)
chromium_executable = os.path.join(chromium_app_dir,
'Contents', 'MacOS', base_name)
chromium_framework_dir = os.path.join(target_dir, framework_bundle)
chromium_framework_executable = os.path.join(chromium_framework_dir,
framework_name)
chromium_framework_dsym_dir = os.path.join(target_dir,
framework_dsym_bundle)
chromium_framework_dsym = os.path.join(chromium_framework_dsym_dir,
'Contents', 'Resources', 'DWARF',
framework_name)
chromium_framework_unstripped = os.path.join(target_dir,
framework_unstripped_name)
if os.path.exists(chromium_executable):
print_dict = {
# Remove spaces in the names so any downstream processing is less
# likely to choke.
'app_name' : re.sub(r'\s', '', base_name),
'app_bundle' : re.sub(r'\s', '', app_bundle),
'framework_name' : re.sub(r'\s', '', framework_name),
'framework_bundle' : re.sub(r'\s', '', framework_bundle),
'app_size' : get_size(chromium_executable),
'framework_size' : get_size(chromium_framework_executable),
'framework_dsym_name' : re.sub(r'\s', '', framework_name) + 'Dsym',
'framework_dsym_size' : get_size(chromium_framework_dsym),
}
# Collect the segment info out of the App
result, stdout = run_process(result, [size_path, chromium_executable])
print_dict['app_text'], print_dict['app_data'], print_dict['app_objc'] = \
re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
# Collect the segment info out of the Framework
result, stdout = run_process(result, [size_path,
chromium_framework_executable])
print_dict['framework_text'], print_dict['framework_data'], \
print_dict['framework_objc'] = \
re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
# Collect the whole size of the App bundle on disk (include the framework)
result, stdout = run_process(result, ['du', '-s', '-k', chromium_app_dir])
du_s = re.search(r'(\d+)', stdout).group(1)
print_dict['app_bundle_size'] = (int(du_s) * 1024)
results_collector.add_result(
print_dict['app_name'], print_dict['app_name'],
print_dict['app_size'], 'bytes')
results_collector.add_result(
'%s-__TEXT' % print_dict['app_name'], '__TEXT',
print_dict['app_text'], 'bytes')
results_collector.add_result(
'%s-__DATA' % print_dict['app_name'], '__DATA',
print_dict['app_data'], 'bytes')
results_collector.add_result(
'%s-__OBJC' % print_dict['app_name'], '__OBJC',
print_dict['app_objc'], 'bytes')
results_collector.add_result(
print_dict['framework_name'], print_dict['framework_name'],
print_dict['framework_size'], 'bytes')
results_collector.add_result(
'%s-__TEXT' % print_dict['framework_name'], '__TEXT',
print_dict['framework_text'], 'bytes')
results_collector.add_result(
'%s-__DATA' % print_dict['framework_name'], '__DATA',
print_dict['framework_data'], 'bytes')
results_collector.add_result(
'%s-__OBJC' % print_dict['framework_name'], '__OBJC',
print_dict['framework_objc'], 'bytes')
results_collector.add_result(
print_dict['app_bundle'], print_dict['app_bundle'],
print_dict['app_bundle_size'], 'bytes')
results_collector.add_result(
print_dict['framework_dsym_name'], print_dict['framework_dsym_name'],
print_dict['framework_dsym_size'], 'bytes')
# Found a match, don't check the other base_names.
return result
# If no base_names matched, fail script.
return 66
def check_linux_binary(target_dir, binary_name, options, results_collector):
"""Collect appropriate size information about the built Linux binary given.
Returns a tuple (result, sizes). result is the first non-zero exit
status of any command it executes, or zero on success. sizes is a list
of tuples (name, identifier, totals_identifier, value, units).
The printed line looks like:
name: identifier= value units
When this same data is used for totals across all the binaries, then
totals_identifier is the identifier to use, or '' to just use identifier.
"""
binary_file = os.path.join(target_dir, binary_name)
if not os.path.exists(binary_file):
# Don't print anything for missing files.
return 0, []
result = 0
sizes = []
sizes.append((binary_name, binary_name, 'size',
get_size(binary_file), 'bytes'))
result, stripped_size = get_linux_stripped_size(binary_file)
sizes.append((binary_name + '-stripped', 'stripped', 'stripped',
stripped_size, 'bytes'))
result, stdout = run_process(result, ['size', binary_file])
text, data, bss = re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
sizes += [
(binary_name + '-text', 'text', '', text, 'bytes'),
(binary_name + '-data', 'data', '', data, 'bytes'),
(binary_name + '-bss', 'bss', '', bss, 'bytes'),
]
# Determine if the binary has the DT_TEXTREL marker.
result, stdout = run_process(result, ['readelf', '-Wd', binary_file])
if re.search(r'\bTEXTREL\b', stdout) is None:
# Nope, so the count is zero.
count = 0
else:
# There are some, so count them.
result, stdout = run_process(result, ['eu-findtextrel', binary_file])
count = stdout.count('\n')
sizes.append((binary_name + '-textrel', 'textrel', '', count, 'relocs'))
return result, sizes
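# Illustrative note: each entry in ``sizes`` above, e.g.
#   ('chrome-text', 'text', '', 123, 'bytes')
# is later passed to results_collector.add_result(name, identifier, value, units),
# which prints the legacy line "RESULT chrome-text: text= 123 bytes".  An empty
# totals_identifier means the per-binary identifier is also used when summing totals
# across binaries (see main_linux below).  The numbers here are made up.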
def main_linux(options, args, results_collector):
"""Print appropriate size information about built Linux targets.
Returns the first non-zero exit status of any command it executes,
or zero on success.
"""
build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
target_dir = os.path.join(build_dir, options.target)
binaries = [
'chrome',
'nacl_helper',
'nacl_helper_bootstrap',
'libffmpegsumo.so',
'libgcflashplayer.so',
'libppGoogleNaClPluginChrome.so',
]
result = 0
totals = {}
for binary in binaries:
this_result, this_sizes = check_linux_binary(target_dir, binary, options,
results_collector)
if result == 0:
result = this_result
for name, identifier, totals_id, value, units in this_sizes:
results_collector.add_result(name, identifier, value, units)
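      # NB: due to precedence this parses as ((totals_id or identifier), units),
      # so ``totals`` is keyed by (identifier, units) tuples.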
totals_id = totals_id or identifier, units
totals[totals_id] = totals.get(totals_id, 0) + int(value)
files = [
'nacl_irt_x86_64.nexe',
'resources.pak',
]
for filename in files:
path = os.path.join(target_dir, filename)
try:
size = get_size(path)
except OSError, e:
if e.errno == errno.ENOENT:
continue # Don't print anything for missing files.
raise
results_collector.add_result(filename, filename, size, 'bytes')
totals['size', 'bytes'] += size
# TODO(mcgrathr): This should all be refactored so the mac and win flavors
# also deliver data structures rather than printing, and the logic for
# the printing and the summing totals is shared across all three flavors.
for (identifier, units), value in sorted(totals.iteritems()):
results_collector.add_result(
'totals-%s' % identifier, identifier, value, units)
return result
def check_android_binaries(binaries, target_dir, options, results_collector,
binaries_to_print=None):
"""Common method for printing size information for Android targets.
Prints size information for each element of binaries in target_dir.
If binaries_to_print is specified, the name of each binary from
binaries is replaced with corresponding element of binaries_to_print
in output. Returns the first non-zero exit status of any command it
executes, or zero on success.
"""
result = 0
if not binaries_to_print:
binaries_to_print = binaries
for (binary, binary_to_print) in zip(binaries, binaries_to_print):
this_result, this_sizes = check_linux_binary(target_dir, binary, options,
results_collector)
if result == 0:
result = this_result
for name, identifier, _, value, units in this_sizes:
name = name.replace('/', '_').replace(binary, binary_to_print)
identifier = identifier.replace(binary, binary_to_print)
results_collector.add_result(name, identifier, value, units)
return result
def main_android(options, args, results_collector):
"""Print appropriate size information about built Android targets.
Returns the first non-zero exit status of any command it executes,
or zero on success.
"""
target_dir = os.path.join(build_directory.GetBuildOutputDirectory(SRC_DIR),
options.target)
binaries = [
'chrome_public_apk/libs/armeabi-v7a/libchrome.so',
'lib/libchrome.so',
'libchrome.so',
]
return check_android_binaries(binaries, target_dir, options,
results_collector)
def main_android_webview(options, args, results_collector):
"""Print appropriate size information about Android WebViewChromium targets.
Returns the first non-zero exit status of any command it executes,
or zero on success.
"""
target_dir = os.path.join(build_directory.GetBuildOutputDirectory(SRC_DIR),
options.target)
binaries = ['lib/libwebviewchromium.so',
'libwebviewchromium.so']
return check_android_binaries(binaries, target_dir, options,
results_collector)
def main_android_cronet(options, args, results_collector):
"""Print appropriate size information about Android Cronet targets.
Returns the first non-zero exit status of any command it executes,
or zero on success.
"""
target_dir = os.path.join(build_directory.GetBuildOutputDirectory(SRC_DIR),
options.target)
# Use version in binary file name, but not in printed output.
binaries_with_paths = glob.glob(os.path.join(target_dir,'libcronet.*.so'))
num_binaries = len(binaries_with_paths)
  assert num_binaries == 1, "Got %d binaries" % num_binaries
def set_Z_lims(self, lower=None, upper=None, extra_percent=0.0):
if lower is None:
lower = self.alt_limits[0]
if upper is None:
upper = self.alt_limits[1]
lower -= (upper-lower)*extra_percent
upper += (upper-lower)*extra_percent
self.coordinate_system.set_plotZ(lower, upper)
self.AltVsEw_axes.set_ylim([lower, upper])
self.NsVsAlt_axes.set_xlim([lower, upper])
# self.ancillary_axes.set_ylim( self.alt_limits )
def set_Zt_lims(self, lower=None, upper=None, extra_percent=0.0):
if lower is None:
lower = self.alt_limits[0]
if upper is None:
upper = self.alt_limits[1]
lower -= (upper-lower)*extra_percent
upper += (upper-lower)*extra_percent
self.coordinate_system.set_plotZt(lower, upper)
self.AltVsEw_axes.set_ylim([lower, upper])
self.NsVsAlt_axes.set_xlim([lower, upper])
# def set_X_lims(self, lower=None, upper=None, extra_percent=0.0):
    # print('set lims deprecated')
# if lower is None:
# lower = self.X_limits[0]
# if upper is None:
# upper = self.X_limits[1]
#
# lower -= (upper-lower)*extra_percent
# upper += (upper-lower)*extra_percent
#
# self.coordinate_system.set_plotX(lower, upper)
#
# self.NsVsEw_axes.set_xlim( [lower, upper] )
# self.AltVsEw_axes.set_xlim( [lower, upper])
#
# def set_Y_lims(self, lower=None, upper=None, extra_percent=0.0):
    # print('set lims deprecated')
# if lower is None:
# lower = self.Y_limits[0]
# if upper is None:
# upper = self.Y_limits[1]
#
# lower -= (upper-lower)*extra_percent
# upper += (upper-lower)*extra_percent
#
# self.coordinate_system.set_plotY(lower, upper)
#
# self.NsVsAlt_axes.set_ylim( [lower, upper] )
# self.NsVsEw_axes.set_ylim( [lower, upper] )
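    # The three setters below update the plotted coordinate window.  When
    # ``rebalance_XY`` is set, the other horizontal axis is rescaled through the
    # coordinate_system's rebalance_* helpers (using the current window extent of
    # the NsVsEw axes) so that, per the comment in these methods, the horizontal
    # aspect ratio stays at 1.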
def set_just_X_lims(self, lower=None, upper=None, extra_percent=0.0):
if lower is None:
lower = self.X_limits[0]
if upper is None:
upper = self.X_limits[1]
lower -= (upper-lower)*extra_percent
upper += (upper-lower)*extra_percent
self.coordinate_system.set_plotX(lower, upper)
        if self.rebalance_XY: ### if this is true, scale the X and Y axes so that the aspect ratio is 1
            bbox = self.NsVsEw_axes.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted())
            self.window_width = bbox.width
            self.window_height = bbox.height
self.coordinate_system.rebalance_Y(self.window_width, self.window_height)
Ylims = self.coordinate_system.get_plotY()
self.NsVsAlt_axes.set_ylim( Ylims )
self.NsVsEw_axes.set_ylim( Ylims )
self.NsVsEw_axes.set_xlim( [lower, upper] )
self.AltVsEw_axes.set_xlim( [lower, upper])
def set_just_Y_lims(self, lower=None, upper=None, extra_percent=0.0):
if lower is None:
lower = self.Y_limits[0]
if upper is None:
upper = self.Y_limits[1]
lower -= (upper-lower)*extra_percent
upper += (upper-lower)*extra_percent
self.coordinate_system.set_plotY(lower, upper)
        if self.rebalance_XY: ### if this is true, scale the X and Y axes so that the aspect ratio is 1
            bbox = self.NsVsEw_axes.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted())
            self.window_width = bbox.width
            self.window_height = bbox.height
self.coordinate_system.rebalance_X(self.window_width, self.window_height)
Xlims = self.coordinate_system.get_plotX()
self.NsVsEw_axes.set_xlim( Xlims )
self.AltVsEw_axes.set_xlim( Xlims)
self.NsVsAlt_axes.set_ylim( [lower, upper] )
self.NsVsEw_axes.set_ylim( [lower, upper] )
def set_XY_lims(self, lowerX=None, upperX=None, lowerY=None, upperY=None,extra_percent=0.0):
if lowerX is None:
lowerX = self.X_limits[0]
if upperX is None:
upperX = self.X_limits[1]
if lowerY is None:
lowerY = self.Y_limits[0]
if upperY is None:
upperY = self.Y_limits[1]
lowerX -= (upperX-lowerX)*extra_percent
upperX += (upperX-lowerX)*extra_percent
lowerY -= (upperY-lowerY)*extra_percent
upperY += (upperY-lowerY)*extra_percent
self.coordinate_system.set_plotX(lowerX, upperX)
self.coordinate_system.set_plotY(lowerY, upperY)
        if self.rebalance_XY: ### if this is true, scale the X and Y axes so that the aspect ratio is 1
            bbox = self.NsVsEw_axes.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted())
            self.window_width = bbox.width
            self.window_height = bbox.height
self.coordinate_system.rebalance_XY(self.window_width, self.window_height)
lowerX, upperX = self.coordinate_system.get_plotX()
lowerY, upperY = self.coordinate_system.get_plotY()
self.NsVsEw_axes.set_xlim( [lowerX, upperX] )
self.AltVsEw_axes.set_xlim( [lowerX, upperX])
self.NsVsAlt_axes.set_ylim( [lowerY, upperY] )
self.NsVsEw_axes.set_ylim( [lowerY, upperY] )
def replot_data(self):
### quick hacks ###
self.AltVsT_axes.cla()
self.AltVsEw_axes.cla()
self.NsVsEw_axes.cla()
self.NsVsAlt_axes.cla()
self.ancillary_axes.cla()
self.ancillary_axes.set_axis_off() ## probably should test if this needs to be on at some point
self.AltVsT_axes.set_xlabel(self.coordinate_system.t_label, fontsize=self.axis_label_size)
self.AltVsT_axes.set_ylabel(self.coordinate_system.z_label, fontsize=self.axis_label_size)
self.AltVsEw_axes.set_ylabel(self.coordinate_system.zt_label, fontsize=self.axis_label_size)
self.NsVsEw_axes.set_xlabel(self.coordinate_system.x_label, fontsize=self.axis_label_size)
self.NsVsEw_axes.set_ylabel(self.coordinate_system.y_label, fontsize=self.axis_label_size)
self.NsVsAlt_axes.set_xlabel(self.coordinate_system.zt_label, fontsize=self.axis_label_size)
#### create button press/release events
self.key_press = self.fig.canvas.mpl_connect('key_press_event', self.key_press_event)
self.key_release = self.fig.canvas.mpl_connect('key_release_event', self.key_release_event)
self.button_press = self.fig.canvas.mpl_connect('button_press_event', self.button_press_event) ##mouse button
self.button_release = self.fig.canvas.mpl_connect('button_release_event', self.button_release_event) ##mouse button
print()
for DS in self.data_sets:
DS.plot( self.AltVsT_axes, self.AltVsEw_axes, self.NsVsEw_axes, self.NsVsAlt_axes, self.ancillary_axes, self.coordinate_system )
#### set limits ####
X, Y, Z, Zt, T = self.coordinate_system.get_limits_plotCoords()
self.NsVsEw_axes.set_xlim( X )
self.AltVsEw_axes.set_xlim( X )
self.NsVsAlt_axes.set_ylim( Y )
self.NsVsEw_axes.set_ylim( Y )
self.AltVsT_axes.set_ylim( Zt )
self.AltVsEw_axes.set_ylim( Z )
self.NsVsAlt_axes.set_xlim( Z )
# self.ancillary_axes.set_ylim( self.alt_limits )
self.AltVsT_axes.set_xlim( T )
#### create selectors on plots
# self.TAlt_selector_rect = None
self.TAlt_selector_rect = RectangleSelector(self.AltVsT_axes, self.TAlt_selector, useblit=False,
rectprops=dict(alpha=0.5, facecolor='red'), button=1, state_modifier_keys={'move':'', 'clear':'', 'square':'', 'center':''})
# self.XAlt_selector_rect = None
self.XAlt_selector_rect = RectangleSelector(self.AltVsEw_axes, self.XAlt_selector, useblit=False,
rectprops=dict(alpha=0.5, facecolor='red'), button=1, state_modifier_keys={'move':'', 'clear':'', 'square':'', 'center':''})
# self.XY_selector_rect = None
self.XY_selector_rect = RectangleSelector(self.NsVsEw_axes, self.XY_selector, useblit=False,
rectprops=dict(alpha=0.5, facecolor='red'), button=1, state_modifier_keys={'move':'', 'clear':'', 'square':'', 'center':''})
# self.YAlt_selector_rect = None
self.YAlt_selector_rect = RectangleSelector(self.NsVsAlt_axes, self.AltY_selector, useblit=False,
rectprops=dict(alpha=0.5, facecolor='red'), button=1, state_modifier_keys={'move':'', 'clear':'', 'square':'', 'center':''})
    #### callbacks for various events
def TAlt_selector(self, eclick, erelease):
minT = min(eclick.xdata, erelease.xdata)
maxT = max(eclick.xdata, erelease.xdata)
minAlt = min(eclick.ydata, erelease.ydata)
maxAlt = max(eclick.ydata, erelease.ydata)
if minT == maxT or minAlt==maxAlt:
self.mouse_move = False
return
self.mouse_move = True
self.previous_view_states.append( self.previous_view_state( self.coordinate_system.get_limits_plotCoords() ) )
if len(self.previous_view_states) > self.previous_depth:
N = len(self.previous_view_states) - self.previous_depth
self.previous_view_states = self.previous_view_states[N:]
if self.z_button_pressed: ## then we zoom out,
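            ## Zoom-out works by reflecting the selected box about the current limits:
            ## each selected edge's distance from the view edge is pushed outward by the
            ## same amount, so the view expands instead of shrinking.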
Ztlims = self.coordinate_system.get_plotZt()
Tlims = self.coordinate_system.get_plotT()
minT = 2.0*Tlims[0] - minT
maxT = 2.0*Tlims[1] - maxT
minAlt = 2.0*Ztlims[0] - minAlt
maxAlt = 2.0*Ztlims[1] - maxAlt
self.set_T_lims(minT, maxT)
self.set_Zt_lims(minAlt, maxAlt)
self.replot_data()
self.draw()
def XAlt_selector(self, eclick, erelease):
minX = min(eclick.xdata, erelease.xdata)
maxX = max(eclick.xdata, erelease.xdata)
minA = min(eclick.ydata, erelease.ydata)
maxA = max(eclick.ydata, erelease.ydata)
if minA==maxA or minX==maxX:
self.mouse_move = False
return
self.mouse_move = True
self.previous_view_states.append( self.previous_view_state( self.coordinate_system.get_limits_plotCoords() ) )
if len(self.previous_view_states) > self.previous_depth:
N = len(self.previous_view_states) - self.previous_depth
self.previous_view_states = self.previous_view_states[N:]
if self.z_button_pressed:
Xlims = self.coordinate_system.get_plotX()
Zlims = self.coordinate_system.get_plotZ()
minA = 2.0*Zlims[0] - minA
maxA = 2.0*Zlims[1] - maxA
minX = 2.0*Xlims[0] - minX
maxX = 2.0*Xlims[1] - maxX
self.set_Z_lims(minA, maxA)
self.set_just_X_lims(minX, maxX)
self.replot_data()
self.draw()
def AltY_selector(self, eclick, erelease):
minA = min(eclick.xdata, erelease.xdata)
maxA = max(eclick.xdata, erelease.xdata)
minY = min(eclick.ydata, erelease.ydata)
maxY = max(eclick.ydata, erelease.ydata)
if minA==maxA or minY==maxY:
self.mouse_move = False
return
self.mouse_move = True
self.previous_view_states.append( self.previous_view_state( self.coordinate_system.get_limits_plotCoords() ) )
if len(self.previous_view_states) > self.previous_depth:
N = len(self.previous_view_states) - self.previous_depth
self.previous_view_states = self.previous_view_states[N:]
if self.z_button_pressed:
Ylims = self.coordinate_system.get_plotY()
Zlims = self.coordinate_system.get_plotZ()
minA = 2.0*Zlims[0] - minA
maxA = 2.0*Zlims[1] - maxA
minY = 2.0*Ylims[0] - minY
maxY = 2.0*Ylims[1] - maxY
self.set_Z_lims(minA, maxA)
self.set_just_Y_lims(minY, maxY)
self.replot_data()
self.draw()
def XY_selector(self, eclick, erelease):
minX = min(eclick.xdata, erelease.xdata)
maxX = max(eclick.xdata, erelease.xdata)
minY = min(eclick.ydata, erelease.ydata)
maxY = max(eclick.ydata, erelease.ydata)
if minX==maxX or minY==maxY:
self.mouse_move = False
else:
self.mouse_move = True
self.previous_view_states.append( self.previous_view_state( self.coordinate_system.get_limits_plotCoords() ) )
if len(self.previous_view_states) > self.previous_depth:
N = len(self.previous_view_states) - self.previous_depth
self.previous_view_states = self.previous_view_states[N:]
if self.z_button_pressed:
Xlims = self.coordinate_system.get_plotX()
Ylims = self.coordinate_system.get_plotY()
minX = 2.0*Xlims[0] - minX
maxX = 2.0*Xlims[1] - maxX
minY = 2.0*Ylims[0] - minY
maxY = 2.0*Ylims[1] - maxY
self.set_XY_lims(minX, maxX, minY, maxY)
self.replot_data()
self.draw()
def key_press_event(self, event):
self.key_press_callback( event )
# print "key press:", event.key
if event.key == 'z':
self.z_button_pressed = True
elif event.key == 'c':
print(event.inaxes)
print(event.xdata)
print(event.ydata)
def key_release_event(self, event):
# print "key release:", event.key
if event.key == 'z':
self.z_button_pressed = False
def button_press_event(self, event):
# print "mouse pressed:", event
if event.button == 2 and len(self.previous_view_states)>0: ##middle mouse, back up
previous_state = self.previous_view_states.pop(-1)
X, Y, Z, Zt, T = previous_state.limits
self.set_XY_lims(X[0], X[1], Y[0], Y[1])
self.set_Z_lims(Z[0], Z[1])
self.set_Zt_lims(Zt[0], Zt[1])
self.set_T_lims(T[0], T[1])
self.replot_data()
self.draw()
elif event.button == 3: ##right mouse, record location for drag
self.right_mouse_button_location = [event.xdata, event.ydata]
self.right_button_axis = event.inaxes
def button_release_event(self, event):
if event.button == 1:
pass
# if len(self.data_sets)>0 and self.data_sets[0].name == "arrow" and not self.mouse_move:
# print("mouse moved:", self.mouse_move)
# C_X, C_Y, C_Z, C_T = self.coordinate_system.transform( [self.data_sets[0].XYZT[0]], [self.data_sets[0].XYZT[1]], [self.data_sets[0].XYZT[2]], [self.data_sets[0].XYZT[3]] )
# if event.inaxes is self.AltVsT_axes:
# C_T[0] = event.xdata
# C_Z[0] = event.ydata
# elif event.inaxes is self.NsVsEw_axes:
# C_X[0] = event.xdata
# C_Y[0] = event.ydata
# elif event.inaxes is self.AltVsEw_axes:
# C_X[0] = event.xdata
# C_Z[0] = event.ydata
# elif event.inaxes is self.NsVsAlt_axes:
# C_Z[0] = event.xdata
# C_Y[0] = event.ydata
#
# minX, minY, minZ, minT = self.coordinate_system.invert( C_X, C_Y, C_Z, C_T )
# self.data_sets[0].XYZT[0] = minX[0]
# self.data_sets[0].XYZT[1] = minY[0]
# self.data_sets[0].XYZT[2] = minZ[0]
# self.data_sets[0].XYZT[3] = minT[0]
# self.data_sets[0].set_arrow()
# self.replot_data()
# self.draw()
elif event.button == 3: ##drag
if event.inaxes != self.right_button_axis: return
deltaX = self.right_mouse_button_location[0] - event.xdata
deltaY = self.right_mouse_button_location[1] - event.ydata
lims = self.coordinate_system.get_limits_plotCoords()
self.previous_view_states.append( self.previous_view_state( lims ) )
Xlims, Ylims, Zlims, Ztlims, Tlims = lims
if event.inaxes is self.AltVsT_axes:
self.set_T_lims( Tlims[0] + deltaX, Tlims[1] + deltaX)
                self.set_Zt_lims( Ztlims[0] + deltaY, Ztlims[1] + deltaY)
# This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these actions:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
# This is a simple example for a custom action which utters "Hello World!"
import re
import io
import ast
import requests
import numpy as np
import pandas as pd
import random
import multiprocessing
import threading, queue
from decimal import Decimal
from ttictoc import tic, toc
from typing import Any, Text, Dict, List, Union, Optional
from rasa_sdk import Action, Tracker
from rasa_sdk import FormValidationAction
from rasa_sdk.events import SlotSet, FollowupAction
from rasa_sdk.types import DomainDict
from rasa_sdk.executor import CollectingDispatcher
import warnings
from statistics import mean
from os import path, getenv
from datetime import datetime
import matplotlib.pyplot as plt
from botocore.exceptions import ClientError
from boto3.exceptions import S3UploadFailedError
import boto3
from sqlalchemy import create_engine
import sqlalchemy.types as sql_types
DB_AWS_ACCESS_KEY_ID = getenv('DB_AWS_ACCESS_KEY_ID')
DB_AWS_SECRET_ACCESS_KEY = getenv('DB_AWS_SECRET_ACCESS_KEY')
DB_AWS_BUCKET = 'journeypic'
# ------------------------------------------------------------------
def connect_to_server(params_dict, logging_func=print, debug=False):
connit = params_dict['connit_type'] + '://' + params_dict['connit_user'] + ':' \
+ params_dict['connit_pass'] + '@' \
+ params_dict['connit_host'] + ':' \
+ params_dict['connit_port'] + '/' \
+ params_dict['connit_db']
if debug:
logging_func(connit)
sql_engine = create_engine(connit, echo=False)
try:
sql_engine.connect()
logging_func("Connected Successfully")
except Exception as e:
logging_func("Error connecting to SQL server!\n\n%s\n" % str(e))
raise (e)
return sql_engine
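# Hypothetical usage sketch (keys mirror the sql_params dict used in load_db below;
# the values here are placeholders, not real credentials):
# engine = connect_to_server({'connit_type': 'postgresql', 'connit_user': 'user',
#                             'connit_pass': 'secret', 'connit_host': 'localhost',
#                             'connit_port': '5432', 'connit_db': 'postgres'})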
def read_table(sql_engine, sql_query, logging_func=print, debug=False):
df = pd.read_sql(sql_query, sql_engine)
if debug:
        match1 = re.search('FROM (.*) ORDER', sql_query)
        match2 = re.search('FROM (.*) LIMIT', sql_query)
table_name = "Data"
if match1:
table_name = match1.group(1)
elif match2:
table_name = match2.group(1)
logging_func('\n%s %s:' % (table_name, str(df.shape)))
logging_func(df.head().to_string())
return df
# ------------------------------------------------------------------
def res_timer(res, tracker):
timer_state = tracker.get_slot('timer_state') if tracker.get_slot('timer_state') else 'n/a'
if timer_state == 'on':
res += '\nElapsed time: %.2f sec' % toc()
return res
# ------------------------------------------------------------------
def res_error(res, tracker, e):
timer_state = tracker.get_slot('timer_state') if tracker.get_slot('timer_state') else 'n/a'
if timer_state == 'on':
res += '\nERROR: %s' % e
return res
# ------------------------------------------------------------------
def simpleQuestionAnswer(tracker, entity, db_dict, user_intent=""):
lut_df = db_dict['lut']
custom_df = db_dict['nutrients_qna']
feature = lut_df['Entity'][entity]
try:
if feature in custom_df.index:
res = custom_df.loc[feature][user_intent]
else:
res = custom_df[[str(s) in feature for s in custom_df.index.tolist()]][user_intent][0]
if 'slot#' in res:
res_list = res.split(' ')
for k, el in enumerate(res_list):
if 'slot#' in el:
res_list[k] = str(tracker.get_slot(el.split('#')[1]))
res = ' '.join(res_list)
        res_list = re.findall(r'\{.*?\}', res)
for match in res_list:
res = res.replace(match, str(eval(match[1:-1])))
except:
res = "אין לי מושג, מצטער!"
return res
def checkPrecentinres(title, x):
precent_position = None
listNumbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
if 'אחוז' in title:
precent_position = title.find('אחוז')
if '%' in title:
precent_position = title.find('%')
if precent_position is not None:
if title[precent_position - 2] == '0' and title[precent_position - 3] not in listNumbers:
title = title[:title.find(x)]
title += x
return title
# ------------------------------------------------------------------
def upload_file_to_s3(local_file, s3_folder, s3_file, aws_access_key_id, aws_secret_access_key, aws_bucket,
debug_en=False):
""" upload a given file to given location on Amazon-S3 """
success = True
HTTP_OK = 200
# Connect to Amazon-S3 client:
s3_client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
# Make a new directory on S3 (if not already exists):
if s3_folder + '/' in [x['Key'] for x in s3_client.list_objects(Bucket=aws_bucket)['Contents']]:
pass
elif not debug_en:
res = s3_client.put_object(Bucket=aws_bucket, Key='%s/' % s3_folder)
success = res['ResponseMetadata']['HTTPStatusCode'] == HTTP_OK
if not success:
return success, ""
# Upload local_file to S3:
x = 3
if not debug_en:
try:
if path.exists(local_file):
s3_client.upload_file(local_file, aws_bucket, path.join(s3_folder, s3_file))
s3_client.put_object_acl(ACL='public-read', Bucket=aws_bucket, Key=path.join(s3_folder, s3_file))
except (ClientError, S3UploadFailedError) as e:
            success = False
return success, "https://%s.s3.eu-central-1.amazonaws.com/%s/%s" % (aws_bucket, s3_folder, s3_file)
# ------------------------------------------------------------------
def donut_generator(names, sizes, radius=0.7, textstr_title='',
colors=None, figname="image.png"):
CARBS_GRAMS_CALOIRES = 4
PROTEIN_GRAMS_CALOIRES = 4
FAT_GRAMS_CALOIRES = 9
if colors is None:
colors = []
my_circle = plt.Circle((0, 0), radius, color='white')
fig, ax = plt.subplots()
labels = [':' + k1 + '\nםרג ' + str(round(k2, 2)) for k1, k2 in zip(names, sizes)]
if colors:
ax.pie(sizes, colors=colors)
else:
ax.pie(sizes)
plt.legend(bbox_to_anchor=(1.0, 0.88), fontsize=18, labels=labels)
p = plt.gcf()
p.gca().add_artist(my_circle)
if textstr_title:
ax.text(0.34, 1.05, textstr_title, transform=ax.transAxes, weight='bold',
fontsize=30, verticalalignment='center_baseline')
sizes[0] *= PROTEIN_GRAMS_CALOIRES
sizes[1] *= CARBS_GRAMS_CALOIRES
sizes[2] *= FAT_GRAMS_CALOIRES
sum2 = round(sum(sizes), 2)
textstr_center1 = str(sum2)
textstr_center2 = 'קלוריות'[::-1]
ax.text(0.39, 0.56, textstr_center1, transform=ax.transAxes, weight='bold',
fontsize=24, verticalalignment='center_baseline')
ax.text(0.37, 0.44, textstr_center2, transform=ax.transAxes,
fontsize=18, verticalalignment='center_baseline')
if figname:
fig.patch.set_facecolor('white')
fig.savefig(figname, bbox_inches='tight', facecolor='white')
else:
plt.show()
# ------------------------------------------------------------------
def donut_generator_wrapper(title, data):
names = [x[::-1] for x in list(data.keys())]
sizes = list(data.values())
colors = ['darkorange', 'lightgreen', 'blue']
textstr_title = title[::-1]
figname = "donut_image1.png"
donut_generator(names=names,
sizes=sizes,
radius=0.7,
textstr_title=textstr_title,
colors=colors,
figname=figname)
return figname
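# Hypothetical usage sketch: data is expected to hold exactly three name->grams entries,
# ordered protein, carbs, fat (the English keys below are illustrative stand-ins for the
# Hebrew labels used by the bot):
# figname = donut_generator_wrapper('Example meal', {'protein': 20.0, 'carbs': 45.0, 'fat': 10.0})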
# ------------------------------------------------------------------
def iniliatize_Diagram(title, data):
unique_filename = lambda fname: "%s_%s%s" % (path.splitext(fname)[0],
datetime.now().strftime("%m%d%Y_%H%M%S"),
path.splitext(fname)[1])
figname = donut_generator_wrapper(title, data)
res, figure_url = upload_file_to_s3(local_file=figname,
s3_folder="auto_generated",
s3_file=unique_filename(figname),
aws_access_key_id=DB_AWS_ACCESS_KEY_ID,
aws_secret_access_key=DB_AWS_SECRET_ACCESS_KEY,
aws_bucket=DB_AWS_BUCKET)
return figure_url
# ------------------------------------------------------------------
def activate_load_db(name, table, dic):
dic[name] = load_db(table)
def get_tables(bits):
table_dict = {'0x1': 'tzameret', '0x2': 'lut', '0x4': 'nutrients_qna',
'0x8': 'food_qna', '0x10': 'common_food',
'0x20': 'food_ranges', '0x40': 'micro_nutrients',
'0x80': 'food_units', '0x100': 'bloodtest_vals',
'0x200': 'food_units_aliases', '0x400': 'food_units_features',
'0x800': 'subs_tags_alias', '0x1000': 'Weights_and_measures'}
scale = 16
bits_binary = bin(int(bits, scale))[2:].zfill(len(bits) * 4)
numbers_zero = ''
numbers = []
for digit in reversed(bits_binary):
if digit != '1':
numbers_zero += digit
else:
numbers.append('1' + numbers_zero)
numbers_zero += '0'
for i, value in enumerate(numbers):
        numbers[i] = hex(int(value, 2))
manager = multiprocessing.Manager()
db_dict = manager.dict()
jobs = []
for value in numbers:
        # Load each selected table in its own worker process
p = multiprocessing.Process(target=activate_load_db, args=(table_dict[value], value, db_dict))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
return db_dict
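# Hypothetical usage sketch: each set bit of the hex bitmap selects one table from
# table_dict, and every selected table is loaded in its own process. For example:
# db_dict = get_tables('0x6')   # loads 'lut' (0x2) and 'nutrients_qna' (0x4)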
def load_db(db_bitmap, read_databse_en=True):
# available_tables_df=read_database
sql_params = {'connit_type': 'postgresql',
'connit_user': 'newtrds',
'connit_pass': '<PASSWORD>!',
'connit_host': 'newt-tzameret-db.c1ub7aqk5fah.eu-central-1.rds.amazonaws.com',
'connit_port': '5432',
'connit_db': 'postgres',
'max_records': 1000}
sql_engine = connect_to_server(sql_params)
if db_bitmap == '0x1':
tzameret = read_table(sql_engine, "SELECT * FROM tzameret_entity")
return tzameret
# "Zameret_hebrew_features" - entities aliases
if db_bitmap == '0x2':
lut = read_table(sql_engine, "SELECT * FROM rasa_lut_entity")
lut = lut.set_index('Entity Alias')
return lut
# "Zameret_hebrew_features" - nutrients_questions
if db_bitmap == '0x4':
nutrients_qna = read_table(sql_engine, "SELECT * FROM rasa_nutrients_qna_entity")
nutrients_qna = nutrients_qna.set_index('Entity')
return nutrients_qna
# "Zameret_hebrew_features" - Food questions
if db_bitmap == '0x8':
food_qna = read_table(sql_engine, "SELECT * FROM rasa_food_qna_entity")
food_qna = food_qna.set_index('nutrition_density')
return food_qna
# "Zameret_hebrew_features" - List of common foods
if db_bitmap == '0x10':
common_food = read_table(sql_engine, "SELECT * FROM common_food_entity")
common_food = common_food.set_index('common_name')
return common_food
# "Newt Machine Readable" - FoodItemRanges
if db_bitmap == '0x20':
food_ranges = read_table(sql_engine, "SELECT * FROM food_ranges_entity")
food_ranges = food_ranges.set_index('Nutrient')
return food_ranges
# "Newt Machine Readable" - MicroNutrients
if db_bitmap == '0x40':
micro_nutrients = read_table(sql_engine, "SELECT * FROM micronutrients_entity")
return micro_nutrients
# "Newt Machine Readable" - MicroNutrients
if db_bitmap == '0x80':
food_units = read_table(sql_engine, "SELECT * FROM food_units_entity")
return food_units
# "Newt Machine Readable" - BloodTestValues
if db_bitmap == '0x100':
bloodtest_vals = read_table(sql_engine, "SELECT * FROM bloodtest_vals_entity")
return bloodtest_vals
# "Zameret_hebrew_features" - Weight aliases
if db_bitmap == '0x200':
food_units_aliases = read_table(sql_engine, "SELECT * FROM food_units_aliases_entity")
return food_units_aliases
# "Zameret_hebrew_features" - For Noa
if db_bitmap == '0x400':
food_units_features_df = read_table(sql_engine, "SELECT * FROM tzameret_newt_entity")
food_units_features = food_units_features_df.dropna(axis=0, how='all')
food_units_features = food_units_features.rename({'Primary_SN': 'smlmitzrach'},
axis=1)
return food_units_features
# "Zameret_hebrew_features" - subs_tags_alias
if db_bitmap == '0x800':
subs_tags_alias = read_table(sql_engine, "SELECT * FROM subs_tags_aliases_entity")
subs_tags_alias = subs_tags_alias.set_index('Entity Alias').fillna(0)
return subs_tags_alias
if db_bitmap == '0x1000':
Weights_and_measures = read_table(sql_engine, "SELECT * FROM weights_measures")
return Weights_and_measures
def load_db_googleSheet(db_bitmap):
db_dict = {}
# "Zameret food list 22_JAN_2020"
if (db_bitmap & 0x1) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=84892416"
s = requests.get(url).content
db_dict['tzameret'] = pd.read_csv(io.StringIO(s.decode('utf-8'))).fillna(0)
# "Zameret_hebrew_features" - entities aliases
if (db_bitmap & 0x2) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1805881936"
s = requests.get(url).content
db_dict['lut'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Entity Alias"],
usecols=["Entity Alias", "Entity", "Units",
"Entity name", "RDA name",
"action_simple_question",
"action_nutrition_howmanyxiny_x",
"action_nutrition_howmanyxiny_y",
"action_nutrition_is_food_healthy",
"action_nutrition_is_food_recommended",
"action_nutrition_what_is_healthier_x",
"action_nutrition_what_is_healthier_y",
"action_nutrition_get_rda",
"action_nutrition_bloodtest_generic",
"action_nutrition_bloodtest_value",
"action_nutrition_food_substitute",
"action_nutrition_compare_foods",
"action_nutrition_howmanyxyinz"]).fillna(0)
# "Zameret_hebrew_features" - nutrients_questions
if (db_bitmap & 0x4) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1706335378"
s = requests.get(url).content
db_dict['nutrients_qna'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Entity"]).fillna(0)
# "Zameret_hebrew_features" - Food questions
if (db_bitmap & 0x8) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=1099284657"
s = requests.get(url).content
db_dict['food_qna'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["nutrition_density"],
usecols=["nutrition_density", "energy_density",
"description_density"]).fillna(0)
# "Zameret_hebrew_features" - List of common foods
if (db_bitmap & 0x10) > 0:
url = "https://docs.google.com/spreadsheets/d/1VvXmu5l58XwcDDtqz0bkHIl_dC92x3eeVdZo2uni794/export?format=csv&gid=495295419"
s = requests.get(url).content
db_dict['common_food'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["common_name"],
usecols=["common_name", "shmmitzrach", "smlmitzrach"]).fillna(0)
# "Newt Machine Readable" - FoodItemRanges
if (db_bitmap & 0x20) > 0:
url = "https://docs.google.com/spreadsheets/d/1IPTflCe6shaP-FBAuXWSFCX5hSuAo7bMGczNMTSTYY0/export?format=csv&gid=885087351"
s = requests.get(url).content
db_dict['food_ranges'] = pd.read_csv(io.StringIO(s.decode('utf-8')),
header=0,
index_col=["Nutrient"],
usecols=["Nutrient", "Medium - threshold per 100gr",
"High - threshold per 100gr",
"good_or_bad", "tzameret_name", "hebrew_name"]).fillna(0)
# "Newt Machine Readable" - MicroNutrients
    if (db_bitmap & 0x40) > 0:
that you give in the "x" local variable of this method.
"""
def getReluActivation(self, x):
if (x > 0):
return x
else:
return 0
"""
    getReluActivationDerivative(x="the instant independent value from which you want to know the derivative of the dependent ReLU value/result")
    This method calculates and returns the derivative of the ReLU function at the
    instant independent value that you give in the "x" local variable of this
    method.
"""
def getReluActivationDerivative(self, x):
if (x > 0):
return 1
else:
return 0
"""
getTanhActivation(x="the instant independent value from which you want to know the dependent Hyperbolic Tangent (Tanh) value/result")
This method calculates and returns the Hyperbolic Tangent (Tanh) function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
def getTanhActivation(self, x):
import math
a = math.exp(x)
b = math.exp(-x)
return ((a-b)/(a+b))
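        # Note: this is mathematically equivalent to math.tanh(x); math.tanh is also
        # numerically safer for large |x|, where math.exp(x) can overflow.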
"""
getReluActivation(x="the instant independent value from which you want to know the dependent Sigmoid value/result")
This method calculates and returns the Sigmoid function value of the
instant independent value that you give in the "x" local variable of this
method.
"""
def getSigmoidActivation(self, x):
import math
return (1/(1+math.exp(-x)))
"""
getRaiseToTheSecondPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getRaiseToTheSecondPowerActivation(self, x):
return x*x
"""
getRaiseToTheSecondPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
    This method calculates and returns the derivative of the Exponentiation
    function value of the instant independent value that you give in the "x"
    local variable of this method.
"""
def getRaiseToTheSecondPowerDerivative(self, x):
return 2*x
"""
getRaiseToTheThirdPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getRaiseToTheThirdPowerActivation(self, x):
return x*x*x
"""
getRaiseToTheThirdPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
    This method calculates and returns the derivative of the Exponentiation
    function value of the instant independent value that you give in the "x"
    local variable of this method.
"""
def getRaiseToTheThirdPowerDerivative(self, x):
return 3*x*x
"""
getRaiseToTheFourthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getRaiseToTheFourthPowerActivation(self, x):
return x*x*x*x
"""
getRaiseToTheFourthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
    This method calculates and returns the derivative of the Exponentiation
    function value of the instant independent value that you give in the "x"
    local variable of this method.
"""
def getRaiseToTheFourthPowerDerivative(self, x):
return 4*x*x*x
"""
getRaiseToTheFifthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getRaiseToTheFifthPowerActivation(self, x):
return x*x*x*x*x
"""
getRaiseToTheFifthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
    This method calculates and returns the derivative of the Exponentiation
    function value of the instant independent value that you give in the "x"
    local variable of this method.
"""
def getRaiseToTheFifthPowerDerivative(self, x):
return 5*x*x*x*x
"""
getRaiseToTheSixthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getRaiseToTheSixthPowerActivation(self, x):
return x*x*x*x*x*x
"""
getRaiseToTheSixthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
    This method calculates and returns the derivative of the Exponentiation
    function value of the instant independent value that you give in the "x"
    local variable of this method.
"""
def getRaiseToTheSixthPowerDerivative(self, x):
return 6*x*x*x*x*x
"""
getExponentialActivation(x="the instant independent value from which you want to know the dependent Exponential-Euler value/result")
This method calculates and returns the Exponential-Euler function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
def getExponentialActivation(self, x):
import math
return math.exp(x)
"""
getExponentialDerivative(x="the instant independent value from which you want to know the dependent Exponential-Euler value/result")
    This method calculates and returns the derivative of the Exponential-Euler
    function value of the instant independent value that you give in the "x"
    local variable of this method.
"""
def getExponentialDerivative(self, x):
import math
return math.exp(x)
"""
getSingleArtificialNeuron(activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The available options are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
learningRate="the rate at which you want your neuron to learn (remember that 1=100% learning rate or normal learning rate)",
numberOfEpochs="The number of times you want your neuron to train itself",
stopTrainingIfAcurracy="define the % value that you want the neuron to stop training itself if such accuracy value is surpassed",
isCustomizedInitialWeights="set to True if you will define a customized innitial weight vector for each neuron. False if you want them to be generated randomly",
firstMatrix_w="If you set the input argument of this method isCustomizedInitialWeights to True, then assign here the customized innitial weight vectors you desire for each neuron",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method creates a single Artificial Neuron and, within this method,
such neuron trains itself to learn to predict the input values that it was
    given to study by comparing them with the expected output values.
When the neuron finishes its learning process, this method will return the
modeling results.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
modelingResults = dL.getSingleArtificialNeuron(activationFunction='none', learningRate=0.001, numberOfEpochs=100000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
RESULT OF CODE:
modelCoefficients =
[[28.235246103419946],
[1.12749544645359],
[-1.7353168202914326],
[0.7285727543658252]]
accuracyFromTraining =
95.06995458954695
predictedData =
[[28.868494779855514],
[32.80418405006583],
[25.89997715314427],
[38.25484973427189],
[16.295874460357858],
[26.67205741761012],
[27.198762118476985],
[26.859066716794352],
[31.50391014224514],
[26.42881371215305],
[38.14632853395502],
[30.297502725191123],
[26.929105800646223]]
coefficientDistribution =
"
Coefficients distribution is as follows:
modelCoefficients =
[
[Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM]
]
"
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getSingleArtificialNeuron(self, activationFunction='sigmoid', learningRate=1, numberOfEpochs=1000, stopTrainingIfAcurracy=95, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=True):
if ((activationFunction!='none') and (activationFunction!='sigmoid') and (activationFunction!='relu') and (activationFunction!='tanh') and (activationFunction!='raiseTo2ndPower') and (activationFunction!='raiseTo3rdPower') and (activationFunction!='raiseTo4thPower') and (activationFunction!='raiseTo5thPower') and (activationFunction!='raiseTo6thPower') and (activationFunction!='exponential')):
raise Exception('ERROR: The selected Activation Function does not exist or has not been programmed in this method yet.')
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
# from . import MortrackML_Library as mSL
# import math
import random
numberOfIndependentRows= len(self.x_samplesList)
numberOfIndependentVariables = len(self.x_samplesList[0])
matrix_x = []
for row in range(0, numberOfIndependentRows):
temporalRow = []
temporalRow.append(1)
for column in range(0, numberOfIndependentVariables):
temporalRow.append(self.x_samplesList[row][column])
matrix_x.append(temporalRow)
matrix_y = self.y_samplesList
        # We initialize the weight vector with random values from -1 up to +1
# Repository: ViriginaWangluyao/test
''' Utils for io, language, connectivity graphs etc '''
import os
import sys
import re
sys.path.append('build')
import MatterSim
import string
import json
import time
import math
from collections import Counter, defaultdict
import numpy as np
import networkx as nx
from param import args
# padding, unknown word, end of sentence
base_vocab = ['<PAD>', '<UNK>', '<EOS>']
padding_idx = base_vocab.index('<PAD>')
def load_nav_graphs(scans):
''' Load connectivity graph for each scan '''
def distance(pose1, pose2):
''' Euclidean distance between two graph poses '''
return ((pose1['pose'][3]-pose2['pose'][3])**2\
+ (pose1['pose'][7]-pose2['pose'][7])**2\
+ (pose1['pose'][11]-pose2['pose'][11])**2)**0.5
graphs = {}
for scan in scans:
with open('connectivity/%s_connectivity.json' % scan) as f:
G = nx.Graph()
positions = {}
data = json.load(f)
for i,item in enumerate(data):
if item['included']:
for j,conn in enumerate(item['unobstructed']):
if conn and data[j]['included']:
positions[item['image_id']] = np.array([item['pose'][3],
item['pose'][7], item['pose'][11]]);
assert data[j]['unobstructed'][i], 'Graph should be undirected'
G.add_edge(item['image_id'],data[j]['image_id'],weight=distance(item,data[j]))
nx.set_node_attributes(G, values=positions, name='position')
graphs[scan] = G
return graphs
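# Hypothetical usage sketch ('ZMojNkEp431' is a Matterport scan id that also appears
# further below; it assumes the corresponding connectivity/*.json file is present):
# graphs = load_nav_graphs(['ZMojNkEp431'])
# distances = dict(nx.all_pairs_dijkstra_path_length(graphs['ZMojNkEp431']))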
def load_datasets(splits):
"""
    :param splits: A list of splits.
        If a split is given as "something@5000", a random subset of 5000 items is drawn from that split's data.
:return:
"""
import random
data = []
old_state = random.getstate()
for split in splits:
# It only needs some part of the dataset?
components = split.split("@")
number = -1
if len(components) > 1:
split, number = components[0], int(components[1])
# Load Json
# if split in ['train', 'val_seen', 'val_unseen', 'test',
# 'val_unseen_half1', 'val_unseen_half2', 'val_seen_half1', 'val_seen_half2']: # Add two halves for sanity check
if "/" not in split:
with open('tasks/R2R/data/R2R_%s.json' % split) as f:
new_data = json.load(f)
else:
with open(split) as f:
new_data = json.load(f)
# Partition
if number > 0:
random.seed(0) # Make the data deterministic, additive
random.shuffle(new_data)
new_data = new_data[:number]
# Join
data += new_data
random.setstate(old_state) # Recover the state of the random generator
return data
class Tokenizer(object):
''' Class to tokenize and encode a sentence. '''
SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)') # Split on any non-alphanumeric character
def __init__(self, vocab=None, encoding_length=20):
self.encoding_length = encoding_length
self.vocab = vocab
self.word_to_index = {}
self.index_to_word = {}
if vocab:
for i,word in enumerate(vocab):
self.word_to_index[word] = i
new_w2i = defaultdict(lambda: self.word_to_index['<UNK>'])
new_w2i.update(self.word_to_index)
self.word_to_index = new_w2i
for key, value in self.word_to_index.items():
self.index_to_word[value] = key
old = self.vocab_size()
self.add_word('<BOS>')
assert self.vocab_size() == old+1
print("OLD_VOCAB_SIZE", old)
print("VOCAB_SIZE", self.vocab_size())
print("VOACB", len(vocab))
def finalize(self):
"""
This is used for debug
"""
self.word_to_index = dict(self.word_to_index) # To avoid using mis-typing tokens
def add_word(self, word):
assert word not in self.word_to_index
        self.word_to_index[word] = self.vocab_size()  # vocab_size() is the index of the new word
self.index_to_word[self.vocab_size()] = word
@staticmethod
def split_sentence(sentence):
''' Break sentence into a list of words and punctuation '''
toks = []
for word in [s.strip().lower() for s in Tokenizer.SENTENCE_SPLIT_REGEX.split(sentence.strip()) if len(s.strip()) > 0]:
# Break up any words containing punctuation only, e.g. '!?', unless it is multiple full stops e.g. '..'
if all(c in string.punctuation for c in word) and not all(c in '.' for c in word):
toks += list(word)
else:
toks.append(word)
return toks
def vocab_size(self):
return len(self.index_to_word)
def encode_sentence(self, sentence, max_length=None):
if max_length is None:
max_length = self.encoding_length
if len(self.word_to_index) == 0:
sys.exit('Tokenizer has no vocab')
encoding = [self.word_to_index['<BOS>']]
for word in self.split_sentence(sentence):
encoding.append(self.word_to_index[word]) # Default Dict
encoding.append(self.word_to_index['<EOS>'])
if len(encoding) <= 2:
return None
#assert len(encoding) > 2
if len(encoding) < max_length:
encoding += [self.word_to_index['<PAD>']] * (max_length-len(encoding)) # Padding
elif len(encoding) > max_length:
encoding[max_length - 1] = self.word_to_index['<EOS>'] # Cut the length with EOS
return np.array(encoding[:max_length])
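    # Hypothetical usage sketch ('vocab.txt' is a placeholder path; unknown words fall
    # back to <UNK> through the defaultdict built in __init__):
    # tok = Tokenizer(vocab=read_vocab('vocab.txt'), encoding_length=20)
    # ids = tok.encode_sentence("walk past the sofa and stop")
    # print(tok.decode_sentence(ids))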
def decode_sentence(self, encoding, length=None):
sentence = []
if length is not None:
encoding = encoding[:length]
for ix in encoding:
if ix == self.word_to_index['<PAD>']:
break
else:
sentence.append(self.index_to_word[ix])
return " ".join(sentence)
def shrink(self, inst):
"""
:param inst: The id inst
:return: Remove the potential <BOS> and <EOS>
If no <EOS> return empty list
"""
if len(inst) == 0:
return inst
end = np.argmax(np.array(inst) == self.word_to_index['<EOS>']) # If no <EOS>, return empty string
if len(inst) > 1 and inst[0] == self.word_to_index['<BOS>']:
start = 1
else:
start = 0
# print(inst, start, end)
return inst[start: end]
def build_vocab(splits=['train'], min_count=5, start_vocab=base_vocab):
''' Build a vocab, starting with base vocab containing a few useful tokens. '''
count = Counter()
t = Tokenizer()
data = load_datasets(splits)
for item in data:
for instr in item['instructions']:
count.update(t.split_sentence(instr))
vocab = list(start_vocab)
for word,num in count.most_common():
if num >= min_count:
vocab.append(word)
else:
break
return vocab
def write_vocab(vocab, path):
print('Writing vocab of size %d to %s' % (len(vocab),path))
with open(path, 'w') as f:
for word in vocab:
f.write("%s\n" % word)
def read_vocab(path):
with open(path) as f:
vocab = [word.strip() for word in f.readlines()]
return vocab
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def read_img_features(feature_store):
import csv
import base64
from tqdm import tqdm
print("Start loading the image feature")
start = time.time()
if "detectfeat" in args.features:
views = int(args.features[10:])
else:
views = 36
args.views = views
tsv_fieldnames = ['scanId', 'viewpointId', 'image_w', 'image_h', 'vfov', 'features']
features = {}
with open(feature_store, "r") as tsv_in_file: # Open the tsv file.
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=tsv_fieldnames)
for item in reader:
long_id = item['scanId'] + "_" + item['viewpointId']
features[long_id] = np.frombuffer(base64.decodestring(item['features'].encode('ascii')),
dtype=np.float32).reshape((views, -1)) # Feature of long_id is (36, 2048)
print("Finish Loading the image feature from %s in %0.4f seconds" % (feature_store, time.time() - start))
return features
def read_candidates(candidates_store):
import csv
import base64
from collections import defaultdict
print("Start loading the candidate feature")
start = time.time()
TSV_FIELDNAMES = ['scanId', 'viewpointId', 'heading', 'elevation', 'next', 'pointId', 'idx', 'feature']
candidates = defaultdict(lambda: list())
items = 0
with open(candidates_store, "r") as tsv_in_file: # Open the tsv file.
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=TSV_FIELDNAMES)
for item in reader:
long_id = item['scanId'] + "_" + item['viewpointId']
candidates[long_id].append(
{'heading': float(item['heading']),
'elevation': float(item['elevation']),
'scanId': item['scanId'],
'viewpointId': item['next'],
'pointId': int(item['pointId']),
                     'idx': int(item['idx']) + 1,  # Because of a bug in the precompute code, the +1 here is important
'feature': np.frombuffer(
base64.decodestring(item['feature'].encode('ascii')),
dtype=np.float32)
}
)
items += 1
for long_id in candidates:
assert (len(candidates[long_id])) != 0
assert sum(len(candidate) for candidate in candidates.values()) == items
# candidate = candidates[long_id]
# print(candidate)
print("Finish Loading the candidates from %s in %0.4f seconds" % (candidates_store, time.time() - start))
candidates = dict(candidates)
return candidates
def add_exploration(paths):
explore = json.load(open("tasks/R2R/data/exploration.json", 'r'))
inst2explore = {path['instr_id']: path['trajectory'] for path in explore}
for path in paths:
path['trajectory'] = inst2explore[path['instr_id']] + path['trajectory']
return paths
def angle_feature(heading, elevation):
import math
# twopi = math.pi * 2
# heading = (heading + twopi) % twopi # From 0 ~ 2pi
# It will be the same
return np.array([math.sin(heading), math.cos(heading),
math.sin(elevation), math.cos(elevation)] * (args.angle_feat_size // 4),
dtype=np.float32)
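# Quick sanity note: with args.angle_feat_size == 4 this returns exactly
# [sin(heading), cos(heading), sin(elevation), cos(elevation)]; larger (multiple-of-4)
# sizes simply tile that 4-value pattern.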
def new_simulator():
import MatterSim
# Simulator image parameters
WIDTH = 640
HEIGHT = 480
VFOV = 60
sim = MatterSim.Simulator()
sim.setRenderingEnabled(False)
sim.setCameraResolution(WIDTH, HEIGHT)
sim.setCameraVFOV(math.radians(VFOV))
sim.setDiscretizedViewingAngles(True)
sim.init()
return sim
def get_point_angle_feature(baseViewId=0):
sim = new_simulator()
feature = np.empty((36, args.angle_feat_size), np.float32)
base_heading = (baseViewId % 12) * math.radians(30)
for ix in range(36):
if ix == 0:
sim.newEpisode('ZMojNkEp431', '2f4d90acd4024c269fb0efe49a8ac540', 0, math.radians(-30))
elif ix % 12 == 0:
sim.makeAction(0, 1.0, 1.0)
else:
sim.makeAction(0, 1.0, 0)
state = sim.getState()
assert state.viewIndex == ix
heading = state.heading - base_heading
feature[ix, :] = angle_feature(heading, state.elevation)
return feature
def get_all_point_angle_feature():
return [get_point_angle_feature(baseViewId) for baseViewId in range(36)]
def add_idx(inst):
toks = Tokenizer.split_sentence(inst)
return " ".join([str(idx)+tok for idx, tok in enumerate(toks)])
import signal
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.kill_now = True
from collections import OrderedDict
class Timer:
def __init__(self):
self.cul = OrderedDict()
self.start = {}
self.iter = 0
def reset(self):
self.cul = OrderedDict()
self.start = {}
self.iter = 0
def tic(self, key):
self.start[key] = time.time()
def toc(self, key):
delta = time.time() - self.start[key]
if key not in self.cul:
self.cul[key] = delta
else:
self.cul[key] += delta
def step(self):
self.iter += 1
def show(self):
total = sum(self.cul.values())
for key in self.cul:
print("%s, total time %0.2f, avg time %0.2f, part of %0.2f" %
(key, self.cul[key], self.cul[key]*1./self.iter, self.cul[key]*1./total))
print(total / self.iter)
stop_word_list = [
",", ".", "and", "?", "!"
]
def stop_words_location(inst, mask=False):
toks = Tokenizer.split_sentence(inst)
sws = [i for i, tok in enumerate(toks) if tok in stop_word_list] # The index of the stop words
    if len(sws)
import warnings
from functools import partial
import onnx
from onnx import numpy_helper
import tensorflow as tf
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
import numpy as np
from tensorflow.python.ops.image_ops_impl import ResizeMethodV1
class Operations:
def make_op(self, op_type, inputs, attrs):
# print(op_type)
# print([i.shape for i in inputs])
# print(attrs)
# print()
return getattr(self, 'op_' + op_type.lower())(*inputs, **attrs)
class DataFormat: pass
class OnnxTensor(DataFormat): pass
class OnnxConstant(OnnxTensor): pass
class InterleavedImageBatch(DataFormat): pass
class OptimizationMissingWarning(Warning): pass
def ensure_data_format(tensor, format):
if issubclass(tensor.data_format, format):
return tensor
elif tensor.data_format is OnnxConstant and format is InterleavedImageBatch:
assert len(tensor.shape) == 4
out = tensor.transpose([0, 2, 3, 1])
out.data_format = InterleavedImageBatch
return out
elif tensor.data_format is OnnxTensor and format is InterleavedImageBatch:
assert len(tensor.shape) == 4
n, c, h, w = tensor.shape
if h == w == 1 or c == 1:
out = tf.reshape(tensor, [n, h, w, c])
else:
out = tf.transpose(tensor, [0, 2, 3, 1])
warnings.warn("Transpose inserted. Please report at https://github.com/AxisCommunications/onnx-to-keras/issues", OptimizationMissingWarning)
out.data_format = InterleavedImageBatch
return out
elif tensor.data_format is InterleavedImageBatch and format is OnnxTensor:
assert len(tensor.shape) == 4
n, h, w, c = tensor.shape
if h == w == 1 or c == 1:
out = tf.reshape(tensor, [n, c, h, w])
else:
out = tf.transpose(tensor, [0, 3, 1, 2])
warnings.warn("Transpose inserted. Please report at https://github.com/AxisCommunications/onnx-to-keras/issues", OptimizationMissingWarning)
out.data_format = OnnxTensor
return out
else:
raise NotImplementedError
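# Illustrative sketch of the conversion above: a (1, 3, 224, 224) OnnxTensor becomes a
# (1, 224, 224, 3) InterleavedImageBatch via tf.transpose(x, [0, 2, 3, 1]); the transpose
# is skipped (a cheap reshape is used instead) whenever h == w == 1 or c == 1.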
def compatible_data_format(format1, format2):
return issubclass(format1, format2) or issubclass(format2, format1)
def ensure_compatible_data_format(a, b):
if compatible_data_format(a.data_format, b.data_format):
return a, b
if b.data_format is OnnxConstant:
return a, ensure_data_format(b, a.data_format)
return ensure_data_format(a, b.data_format), b
class Constant(np.ndarray):
data_format = OnnxConstant
class TfKerasOperations(Operations):
keras = tf.keras
def parse_attr(self, a):
if a.type == onnx.AttributeProto.INT:
return a.i
elif a.type == onnx.AttributeProto.INTS:
return tuple(a.ints)
elif a.type == onnx.AttributeProto.FLOAT:
return a.f
elif a.type == onnx.AttributeProto.STRING:
return a.s
elif a.type == onnx.AttributeProto.TENSOR:
return self.make_constant(numpy_helper.to_array(a.t))
else:
raise NotImplementedError
def make_constant(self, x):
return np.asarray(x).view(Constant)
def make_input(self, shape, dtype, name=None):
dtype = tf.as_dtype(dtype)
# XXX: Assumes all inputs are image batches that we want to transpose
assert len(shape) == 4
tensor = tf.keras.layers.Input((shape[2], shape[3], shape[1]), shape[0], name, dtype)
tensor.data_format = InterleavedImageBatch
return tensor
def op_conv(self, x, weights, bias=None, kernel_shape=None, strides=None, pads=None, dilations=None, group=None):
# Torch: (out_channels, in_channels, kH, kW)
weights = ensure_data_format(weights, OnnxConstant) # XXX Assumes no ops on weights
if len(kernel_shape) == 2:
x = ensure_data_format(x, InterleavedImageBatch)
assert kernel_shape == weights.shape[2:4]
if group == 1:
# Tf; filter_height, filter_width, in_channels, out_channels
weights = weights.transpose(2, 3, 1, 0)
filters = weights.shape[3]
ConvClass = self.keras.layers.Conv2D
elif group == x.shape[3]:
# Tf; filter_height, filter_width, out_channels, in_channels
weights = weights.transpose(2, 3, 0, 1)
filters = weights.shape[2]
def ConvClass(filters, kernel_size, strides, dilation_rate, padding,
kernel_initializer, use_bias=True, bias_initializer='zeros'):
return self.keras.layers.DepthwiseConv2D(kernel_size, strides, dilation_rate=dilation_rate,
padding=padding, use_bias=use_bias,
bias_initializer=bias_initializer,
depthwise_initializer=kernel_initializer)
else:
raise NotImplementedError
if pads == (0,0,0,0):
padding = 'valid'
elif (kernel_shape[0] == kernel_shape[1] and pads[0] == pads[1] == pads[2] == pads[3] and
pads[0] * 2 + 1 == kernel_shape[0] and strides == (1, 1) and dilations == (1, 1)):
padding = 'same'
elif (kernel_shape == (3, 3) and pads == (1,1,1,1) and strides == (2,2) and dilations == (1, 1) and
x.shape[1] % 2 == 1 and x.shape[2] % 2 == 1):
padding = 'same'
else:
# ((top_pad, bottom_pad), (left_pad, right_pad))
pad = self.keras.layers.ZeroPadding2D(((pads[0], pads[2]), (pads[1], pads[3])))
x = pad(x)
padding = 'valid'
if bias is None:
conv = ConvClass(filters, kernel_shape, strides,
dilation_rate=dilations, padding=padding,
kernel_initializer='zeros', use_bias=False)
out = conv(x)
conv.set_weights([weights.view(np.ndarray)])
else:
bias = ensure_data_format(bias, OnnxConstant) # XXX Assumes no ops on weights
conv = ConvClass(filters, kernel_shape, strides,
dilation_rate=dilations, padding=padding,
kernel_initializer='zeros', bias_initializer='zeros')
out = conv(x)
conv.set_weights([weights.view(np.ndarray), bias.view(np.ndarray)])
out.data_format = InterleavedImageBatch
return [out]
else:
raise NotImplementedError
def op_relu(self, x):
out = self.keras.layers.ReLU()(x)
out.data_format = x.data_format
return [out]
def op_leakyrelu(self, x, alpha):
out = self.keras.layers.LeakyReLU(alpha=alpha)(x)
out.data_format = x.data_format
return [out]
def op_sigmoid(self, x):
out = self.keras.activations.sigmoid(x)
out.data_format = x.data_format
return [out]
def op_softmax(self, x, axis):
out = self.keras.activations.softmax(x, axis=axis)
out.data_format = x.data_format
return [out]
def op_prelu(self, x, alpha):
alpha = ensure_data_format(alpha, OnnxConstant) # XXX Assumes no ops on alpha
if len(alpha) == 1:
shared = list(range(1, len(x.shape)))
alpha = alpha.reshape((1,) * (len(x.shape) - 1))
elif len(alpha) == x.shape[-1]:
shared = list(range(1, len(x.shape) - 1))
else:
raise NotImplementedError
alpha_initializer = self.keras.initializers.Constant(alpha.view(np.ndarray))
out = self.keras.layers.PReLU(shared_axes=shared, alpha_initializer=alpha_initializer)(x)
out.data_format = x.data_format
return [out]
def op_maxpool(self, x, kernel_shape, pads, strides, ceil_mode=0):
assert ceil_mode == 0
if len(kernel_shape) == 2:
x = ensure_data_format(x, InterleavedImageBatch)
if pads == (0, 0, 0, 0):
padding = 'valid'
else:
# ((top_pad, bottom_pad), (left_pad, right_pad))
pad = self.keras.layers.ZeroPadding2D(((pads[0], pads[2]), (pads[1], pads[3])))
x = pad(x)
padding = 'valid'
out = self.keras.layers.MaxPool2D(kernel_shape, strides, padding)(x)
out.data_format = InterleavedImageBatch
return [out]
else:
raise NotImplementedError
def op_concat(self, *tensors, axis):
if all(t.data_format is InterleavedImageBatch for t in tensors):
axis = (0, 3, 1, 2)[axis]
out = self.keras.layers.Concatenate(axis)(list(tensors))
out.data_format = InterleavedImageBatch
elif all(t.data_format is OnnxConstant for t in tensors):
out = self.make_constant(np.concatenate(tensors, axis))
else:
raise NotImplementedError
return [out]
def op_convtranspose(self, x, weights, bias=None, kernel_shape=None, strides=None, pads=None, dilations=None,
group=None, output_padding=(0, 0)):
assert kernel_shape is not None
assert strides is not None
assert pads is not None
assert dilations is not None
assert group is not None
weights = ensure_data_format(weights, OnnxConstant) # XXX Assumes no ops on weights
if bias is None:
use_bias = False
bias_initializer = None
else:
bias = ensure_data_format(bias, OnnxConstant) # XXX Assumes no ops on weights
use_bias = True
if len(kernel_shape) == 2:
x = ensure_data_format(x, InterleavedImageBatch)
assert kernel_shape == weights.shape[2:4]
_, h_in, w_in, _ = x.shape
h_out = (h_in - 1) * strides[0] - 2 * pads[0] + dilations[0] * (kernel_shape[0] - 1) + 1 + output_padding[0]
            w_out = (w_in - 1) * strides[1] - 2 * pads[1] + dilations[1] * (kernel_shape[1] - 1) + 1 + output_padding[1]
if pads == (0,0,0,0):
padding = 'valid'
elif h_out == strides[0] * h_in and w_out == strides[1] * w_in and output_padding==(0,0):
padding = 'same'
output_padding = None # output_padding overrides the padding argument in keras
else:
raise NotImplementedError
# Tf; filter_height, filter_width, out_channels, in_channels
# Torch: (in_channels, out_channels, kH, kW)
weights = weights.transpose(2, 3, 1, 0)
filters = weights.shape[2]
if group == 1:
conv = self.keras.layers.Conv2DTranspose(filters, kernel_shape, strides,
dilation_rate=dilations, padding=padding,
kernel_initializer='zeros',
use_bias=use_bias, bias_initializer='zeros',
output_padding=output_padding)
out = conv(x)
if use_bias:
conv.set_weights([weights.view(np.ndarray), bias.view(np.ndarray)])
else:
conv.set_weights([weights.view(np.ndarray)])
else:
splits = tf.split(x, group, axis=-1)
convolved_splits = []
n = weights.shape[3] // group
assert group * n == weights.shape[3]
for i, split in enumerate(splits):
conv = self.keras.layers.Conv2DTranspose(filters, kernel_shape, strides,
dilation_rate=dilations, padding=padding,
kernel_initializer='zeros',
use_bias=use_bias, bias_initializer='zeros',
output_padding=output_padding)
convolved_splits.append(conv(split))
grouped_weights = weights[:, :, :, i*n:(i+1)*n]
if use_bias:
grouped_bias = bias[i*n:(i+1)*n]
conv.set_weights([grouped_weights.view(np.ndarray), grouped_bias.view(np.ndarray)])
else:
conv.set_weights([grouped_weights.view(np.ndarray)])
out = tf.concat(convolved_splits, -1)
assert out.shape[1] == h_out
assert out.shape[2] == w_out
out.data_format = InterleavedImageBatch
return [out]
else:
raise NotImplementedError
def op_batchnormalization(self, x, weight, bias, running_mean, running_var, momentum, epsilon):
if len(x.shape) != 4:
raise NotImplementedError
norm = self.keras.layers.BatchNormalization(momentum=momentum, epsilon=epsilon)
out = norm(x)
norm.set_weights([weight.view(np.ndarray), bias.view(np.ndarray),
running_mean.view(np.ndarray), running_var.view(np.ndarray)])
out.data_format = x.data_format
return [out]
def op_unsqueeze(self, x, axes):
x = ensure_data_format(x, OnnxTensor)
out = x
if isinstance(x, Constant):
for ax in sorted(axes):
out = np.expand_dims(out, ax).view(Constant)
out.data_format = x.data_format
else:
for ax in sorted(axes):
out = self.keras.backend.expand_dims(out, ax)
out.data_format = OnnxTensor
return [out]
def op_clip(self, x, min, max):
if min == 0:
out = self.keras.layers.ReLU(max)(x)
else:
out = self.keras.backend.clip(x, min, max)
out.data_format = x.data_format
return [out]
def op_add(self, x1, x2):
x1, x2 = ensure_compatible_data_format(x1, x2)
out = self.keras.layers.Add()([x1, x2])
out.data_format = x1.data_format
return [out]
def op_sub(self, x1, x2):
x1, x2 = ensure_compatible_data_format(x1, x2)
out = self.keras.layers.Subtract()([x1, x2])
out.data_format = x1.data_format
return [out]
def op_reducemean(self, x, axes, keepdims):
x = ensure_data_format(x, InterleavedImageBatch)
if axes == (2, 3) and keepdims == 0:
out = self.keras.layers.GlobalAveragePooling2D()(x)
out.data_format = OnnxTensor
else:
raise NotImplementedError
return [out]
def op_gemm(self, x, weights, bias, beta, transB, alpha):
x = ensure_data_format(x, OnnxTensor)
if beta == 1.0 and transB == 1 and alpha == 1.0:
out = self.keras.layers.Dense(weights.shape[0], kernel_initializer='zeros',
bias_initializer='zeros',
weights=[weights.view(np.ndarray).T, bias.view(np.ndarray)])(x)
out.data_format = OnnxTensor
else:
raise NotImplementedError
return [out]
def op_pad(self, x, pads, mode, value=0.0):
x = ensure_data_format(x, InterleavedImageBatch)
if mode == b'constant' and len(pads) == 8:
assert len(x.shape) * 2 == len(pads)
if pads[0] == pads[1] == pads[4] == pads[5] == 0:
# ((top_pad, bottom_pad), (left_pad, right_pad))
# File: Model.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
import torch.nn as nn
from block_zoo import *
import copy
import logging
import ujson as json
from utils.exceptions import ConfigurationError, LayerUndefinedError, LayerConfigUndefinedError
from queue import Queue
from utils.common_utils import transform_tensors2params, transfer_to_gpu
from block_zoo.Embedding import *
EMBED_LAYER_NAME = 'Embedding'
EMBED_LAYER_ID = 'embedding'
def get_conf(layer_id, layer_name, input_layer_ids, all_layer_configs, model_input_ids, use_gpu,
conf_dict=None, shared_conf=None, succeed_embedding_flag=False, output_layer_flag=False,
target_num=None, fixed_lengths=None):
""" get layer configuration
Args
layer_id: layer identifier
layer_name: name of layer such as BiLSTM
input_layer_ids (list): the inputs of current layer
all_layer_configs (dict): records the conf class of each layer.
model_input_ids (set): the inputs of the model, e.g. ['query', 'passage']
use_gpu:
conf_dict:
shared_conf: if fixed_lengths is not None, the output_dim of shared_conf should be corrected!
succeed_embedding_flag:
output_layer_flag:
target_num: used to infer the dimension of the output space if someone declares a dimension of -1
fixed_lengths
Returns:
configuration class corresponding to the layer
"""
if shared_conf:
conf = copy.deepcopy(shared_conf)
else:
try:
conf_dict['use_gpu'] = use_gpu
# for classification tasks, we usually add a Linear layer to project the output to dimension of number of classes. If we don't know the #classes, we can use '-1' instead and we would calculate the number of classes from the corpus.
if layer_name == 'Linear':
if isinstance(conf_dict['hidden_dim'], list) and conf_dict['hidden_dim'][-1] == -1:
assert output_layer_flag is True, "Only in the last layer, hidden_dim == -1 is allowed!"
assert target_num is not None, "Number of targets should be given!"
conf_dict['hidden_dim'][-1] = target_num
elif isinstance(conf_dict['hidden_dim'], int) and conf_dict['hidden_dim'] == -1:
assert output_layer_flag is True, "Only in the last layer, hidden_dim == -1 is allowed!"
assert target_num is not None, "Number of targets should be given!"
conf_dict['hidden_dim'] = target_num
conf = eval(layer_name + "Conf")(**conf_dict)
except NameError as e:
raise LayerConfigUndefinedError("\"%sConf\" has not been defined" % layer_name)
# verify the rank consistence of joint layers
if layer_name == EMBED_LAYER_NAME:
# the embedding layer
pass
else:
# make sure all the inputs to current layer exist
for input_layer_id in input_layer_ids:
if not (input_layer_id in all_layer_configs or input_layer_id in model_input_ids):
raise ConfigurationError("The input %s of layer %s does not exist. Please define it before "
"defining layer %s!" % (input_layer_id, layer_id, layer_id))
former_output_ranks = [all_layer_configs[input_layer_id].output_rank if input_layer_id in all_layer_configs else all_layer_configs[EMBED_LAYER_ID].output_rank for input_layer_id in input_layer_ids]
# inference input_dim
conf.input_dims = [all_layer_configs[input_layer_id].output_dim if input_layer_id in all_layer_configs else all_layer_configs[EMBED_LAYER_ID].output_dim for input_layer_id in input_layer_ids]
# If the inputs come from embedding layer and fixed_lengths exist, set the length to input_dims
if len(input_layer_ids) == 1 and input_layer_ids[0] in model_input_ids and fixed_lengths:
conf.input_dims[0][1] = fixed_lengths[input_layer_ids[0]]
# check and verify input ranks
if conf.num_of_inputs > 0:
if conf.num_of_inputs != len(input_layer_ids):
raise ConfigurationError("%s only accept %d inputs but you feed %d inputs to it!" % \
(layer_name, conf.num_of_inputs, len(input_layer_ids)))
elif conf.num_of_inputs == -1:
conf.num_of_inputs = len(input_layer_ids)
if isinstance(conf.input_ranks, list):
conf.input_ranks = conf.input_ranks * conf.num_of_inputs
else:
logging.warning("[For developer of %s] The input_ranks attribute should be a list!" % (layer_name))
conf.input_ranks = [conf.input_ranks] * conf.num_of_inputs
for input_rank, former_output_rank in zip(conf.input_ranks, former_output_ranks):
if input_rank != -1 and input_rank != former_output_rank:
raise ConfigurationError("Input ranks of %s are inconsistent with former layers" % layer_id)
conf.input_ranks = copy.deepcopy(former_output_ranks)
# inference and verification inside the layer
conf.inference() # update some attributes which relies on input dimension or something else
conf.verify() # verify if the configuration is legal
logging.debug('Layer id: %s; name: %s; input_dims: %s; input_ranks: %s; output_dim: %s; output_rank: %s' % (layer_id, layer_name, conf.input_dims if layer_id != 'embedding' else 'None', conf.input_ranks, conf.output_dim, conf.output_rank))
return conf
def get_layer(layer_name, conf):
"""
Args:
layer_name:
conf: configuration class
Returns:
specific layer
"""
try:
layer = eval(layer_name)(conf)
except NameError as e:
raise Exception("%s; Layer \"%s\" has not been defined" % (str(e), layer_name))
return layer
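# Minimal usage sketch for get_conf/get_layer (hypothetical layer ids; a BiLSTM layer and
# BiLSTMConf are assumed to exist in block_zoo, and the conf values are illustrative only):
#
#   conf = get_conf('query_encoder', 'BiLSTM', ['query'], all_layer_configs,
#                   model_input_ids={'query'}, use_gpu=False,
#                   conf_dict={'hidden_dim': 128, 'dropout': 0.2})
#   layer = get_layer('BiLSTM', conf)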
class Model(nn.Module):
def __init__(self, conf, problem, vocab_info, use_gpu):
"""
Args:
inputs: ['string1', 'string2']
layer_archs: The layers must produce tensors with similar shapes. The layers may be nested.
[
{
'layer': Layer name,
'conf': {xxxx}
},
[
{
'layer': Layer name,
'conf': {},
},
{
'layer': Layer name,
'conf': {},
}
]
]
vocab_info:
{
'word': {
'vocab_size': xxx,
'init_weights': np matrix
}
'postag': {
'vocab_size': xxx,
'init_weights': None
}
}
"""
super(Model, self).__init__()
inputs = conf.object_inputs_names
layer_archs = conf.architecture
target_num = problem.output_target_num()
# correct the real fixed length if begin/end of sentence are added
if conf.fixed_lengths:
fixed_lengths_corrected = copy.deepcopy(conf.fixed_lengths)
for seq in fixed_lengths_corrected:
if problem.with_bos_eos:
fixed_lengths_corrected[seq] += 2
else:
fixed_lengths_corrected = None
self.use_gpu = use_gpu
all_layer_configs = dict()
self.layers = nn.ModuleDict()
self.layer_inputs = dict()
self.layer_dependencies = dict()
self.layer_dependencies[EMBED_LAYER_ID] = set()
# change output_layer_id to list for support multi_output
self.output_layer_id = []
for layer_index, layer_arch in enumerate(layer_archs):
output_layer_flag = True if 'output_layer_flag' in layer_arch and layer_arch['output_layer_flag'] is True else False
succeed_embedding_flag = True if layer_index > 0 and 'inputs' in layer_arch and \
[input in inputs for input in layer_arch['inputs']].count(True) == len(layer_arch['inputs']) else False
if output_layer_flag:
self.output_layer_id.append(layer_arch['layer_id'])
# if hasattr(self, 'output_layer_id'):
# raise ConfigurationError("There should be only one output!")
# else:
# self.output_layer_id = layer_arch['layer_id']
if layer_index == 0:
# embedding layer
emb_conf = copy.deepcopy(vocab_info)
for input_cluster in emb_conf:
emb_conf[input_cluster]['dim'] = layer_arch['conf'][input_cluster]['dim']
emb_conf[input_cluster]['fix_weight'] = layer_arch['conf'][input_cluster].get('fix_weight', False)
all_layer_configs[EMBED_LAYER_ID] = get_conf(EMBED_LAYER_ID, layer_arch['layer'],
None, all_layer_configs, inputs, self.use_gpu, conf_dict={'conf': emb_conf},
shared_conf=None, succeed_embedding_flag=False, output_layer_flag=output_layer_flag,
target_num=target_num, fixed_lengths=fixed_lengths_corrected)
self.add_layer(EMBED_LAYER_ID, get_layer(layer_arch['layer'], all_layer_configs[EMBED_LAYER_ID]))
else:
if layer_arch['layer'] in self.layers and not 'conf' in layer_arch:
# reuse formerly defined layers (share the same parameters)
logging.debug("Layer id: %s; Sharing configuration with layer %s" % (layer_arch['layer_id'], layer_arch['layer']))
conf_dict = None
shared_conf = all_layer_configs[layer_arch['layer']]
else:
conf_dict = layer_arch['conf']
shared_conf = None
# if the layer is EncoderDecoder, inference the vocab size
if layer_arch['layer'] == 'EncoderDecoder':
layer_arch['conf']['decoder_conf']['decoder_vocab_size'] = target_num
all_layer_configs[layer_arch['layer_id']] = get_conf(layer_arch['layer_id'], layer_arch['layer'],
layer_arch['inputs'], all_layer_configs, inputs, self.use_gpu, conf_dict=conf_dict,
shared_conf=shared_conf, succeed_embedding_flag=succeed_embedding_flag,
output_layer_flag=output_layer_flag, target_num=target_num,
fixed_lengths=fixed_lengths_corrected)
if layer_arch['layer'] in self.layers and not 'conf' in layer_arch:
self.add_layer(layer_arch['layer_id'], self.layers[layer_arch['layer']])
else:
self.add_layer(layer_arch['layer_id'], get_layer(layer_arch['layer'], all_layer_configs[layer_arch['layer_id']]))
self.layer_inputs[layer_arch['layer_id']] = layer_arch['inputs']
# register dependencies, except embeddings
cur_layer_depend = set()
for layer_depend_id in layer_arch['inputs']:
if not layer_depend_id in inputs:
cur_layer_depend.add(layer_depend_id)
self.add_dependency(layer_arch['layer_id'], cur_layer_depend)
logging.debug("Layer dependencies: %s" % repr(self.layer_dependencies))
if not self.output_layer_id:
raise ConfigurationError("Please define an output layer")
self.layer_topological_sequence = self.get_topological_sequence()
def add_layer(self, layer_id, layer):
""" register a layer
Args:
layer_id:
layer:
Returns:
"""
if layer_id in self.layers:
raise ConfigurationError("The layer id %s is not unique!")
else:
self.layers[layer_id] = layer
def add_dependency(self, layer_id, depend_layer_id):
""" add the layers have to be proceed before layer_id
Args:
layer_id:
depend_layer_id:
Returns:
"""
if not layer_id in self.layer_dependencies:
self.layer_dependencies[layer_id] = set()
if isinstance(depend_layer_id, int):
self.layer_dependencies[layer_id].add(depend_layer_id)
else:
self.layer_dependencies[layer_id] |= set(depend_layer_id)
def remove_dependency(self, depend_layer_id):
""" remove dependencies on layer_id
Args:
layer_id:
Returns:
"""
for layer_id in self.layer_dependencies:
self.layer_dependencies[layer_id].remove(depend_layer_id)
def get_topological_sequence(self):
""" get topological sequence of nodes in the model
Returns:
"""
total_layer_ids = Queue()
for layer_id in self.layers.keys():
if layer_id != EMBED_LAYER_ID:
total_layer_ids.put(layer_id)
topological_list = []
circular_cnt = 0 # used for checking if there is at least one legal topological sorting
while not total_layer_ids.empty():
layer_id = total_layer_ids.get()
if len(self.layer_dependencies[layer_id]) == 0:
for layer_id2 in self.layer_dependencies:
if layer_id in self.layer_dependencies[layer_id2]:
self.layer_dependencies[layer_id2].remove(layer_id)
circular_cnt = 0
topological_list.append(layer_id)
else:
total_layer_ids.put(layer_id)
circular_cnt += 1
if circular_cnt >= total_layer_ids.qsize():
rest_layers = []
while not total_layer_ids.empty():
rest_layers.append(total_layer_ids.get())
raise ConfigurationError("The model architecture is illegal because there is a circular dependency "
"or there are some isolated layers. The layers can not be resolved: [%s]" % (", ".join(rest_layers)))
logging.debug("Topological sequence of nodes: %s" % (",".join(topological_list)))
return topological_list
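# Example of the ordering produced above (hypothetical layer ids): with dependencies
# {'encoder': set(), 'matcher': {'encoder'}, 'output': {'matcher'}}, the queue-based
# elimination yields ['encoder', 'matcher', 'output']. A circular dependency such as
# {'a': {'b'}, 'b': {'a'}} makes circular_cnt reach the queue size and raises
# ConfigurationError.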
def forward(self, inputs_desc, length_desc, *param_list):
"""
Args:
with the help of transform_tensors2params(inputs_desc, length_desc, param_list), we can get the below inputs and lengths
inputs: dict.
{
"string1":{
'word': word ids, [batch size, seq len]
'postag': postag ids,[batch size, seq len]
...
}
"string2":{
'word': word ids,[batch size, seq len]
'postag': postag ids,[batch size, seq len]
...
}
}
lengths: dict.
{
"string1": [...]
"string2": [...]
}
Returns:
"""
inputs, lengths = transform_tensors2params(inputs_desc, length_desc, param_list)
representation = dict()
representation[EMBED_LAYER_ID] = dict()
repre_lengths = dict()
repre_lengths[EMBED_LAYER_ID] = dict()
for input in inputs:
representation[input] = self.layers[EMBED_LAYER_ID](inputs[input], use_gpu=self.is_cuda())
if self.use_gpu:
repre_lengths[input] = transfer_to_gpu(lengths[input])
else:
repre_lengths[input] = lengths[input]
for layer_id in self.layer_topological_sequence:
#logging.debug("To proces layer %s" % layer_id)
input_params = []
for input_layer_id in self.layer_inputs[layer_id]:
input_params.append(representation[input_layer_id])
input_params.append(repre_lengths[input_layer_id])
false,
"name": "name",
"type_key": "String"
}
],
"given_name": null,
"key": "Shape.241ac489ffa5f718db6444bae7849fb86a62e441",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.32aa7ec6e7407e8a502d0a6094909a9365103a8e": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"field_aliases": {
"solids": "ops"
},
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"in_process\\": {}}",
"description": null,
"is_required": false,
"name": "execution",
"type_key": "Selector.fd22b7b986baf6998a8c16e63e78f44dd5e3f78f"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "loggers",
"type_key": "Shape.ebeaf4550c200fb540f2e1f3f2110debd8c4157c"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"io_manager\\": {}}",
"description": null,
"is_required": false,
"name": "resources",
"type_key": "<KEY>"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"noop_solid\\": {}}",
"description": null,
"is_required": false,
"name": "solids",
"type_key": "Shape.ba913521099bed4314e25592059869c8f3a3c96e"
}
],
"given_name": null,
"key": "Shape.32aa7ec6e7407e8a502d0a6094909a9365103a8e",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.3baab16166bacfaf4705811e64d356112fd733cb": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"log_level\\": \\"INFO\\", \\"name\\": \\"dagster\\"}",
"description": null,
"is_required": false,
"name": "config",
"type_key": "Shape.241ac489ffa5f718db6444bae7849fb86a62e441"
}
],
"given_name": null,
"key": "Shape.3baab16166bacfaf4705811e64d356112fd733cb",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.41de0e2d7b75524510155d0bdab8723c6feced3b": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "result",
"type_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742"
}
],
"given_name": null,
"key": "Shape.41de0e2d7b75524510155d0bdab8723c6feced3b",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.45a8f1f21db73ecbfa5b4e07b9aedc1835cef1ef": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": "Explicit modules to preload in the forkserver.",
"is_required": false,
"name": "preload_modules",
"type_key": "Array.String"
}
],
"given_name": null,
"key": "Shape.45a8f1f21db73ecbfa5b4e07b9aedc1835cef1ef",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.4b53b73df342381d0d05c5f36183dc99cb9676e2": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "path",
"type_key": "String"
}
],
"given_name": null,
"key": "<KEY>",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.69ff9be621991cc7961ea5e667d43edaac9d2339": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"field_aliases": {
"solids": "ops"
},
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "config",
"type_key": "Any"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "outputs",
"type_key": "Array.Shape.41de0e2d7b75524510155d0bdab8723c6feced3b"
}
],
"given_name": null,
"key": "Shape.69ff9be621991cc7961ea5e667d43edaac9d2339",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.743e47901855cb245064dd633e217bfcb49a11a7": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "config",
"type_key": "Any"
}
],
"given_name": null,
"key": "<KEY>",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.979b3d2fece4f3eb92e90f2ec9fb4c85efe9ea5c": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "marker_to_close",
"type_key": "String"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"enabled\\": {}}",
"description": null,
"is_required": false,
"name": "retries",
"type_key": "Selector.1bfb167aea90780aa679597800c71bd8c65ed0b2"
}
],
"given_name": null,
"key": "Shape.979b3d2fece4f3eb92e90f2ec9fb4c85efe9ea5c",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.ba913521099bed4314e25592059869c8f3a3c96e": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"field_aliases": {
"solids": "ops"
},
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{}",
"description": null,
"is_required": false,
"name": "noop_solid",
"type_key": "Shape.69ff9be621991cc7961ea5e667d43edaac9d2339"
}
],
"given_name": null,
"key": "Shape.ba913521099bed4314e25592059869c8f3a3c96e",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.ca5906d9a0377218b4ee7d940ad55957afa73d1b": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"retries\\": {\\"enabled\\": {}}}",
"description": null,
"is_required": false,
"name": "config",
"type_key": "Shape.979b3d2fece4f3eb92e90f2ec9fb4c85efe9ea5c"
}
],
"given_name": null,
"key": "Shape.ca5906d9a0377218b4ee7d940ad55957afa73d1b",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.da39a3ee5e6b4b0d3255bfef95601890afd80709": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [],
"given_name": null,
"key": "Shape.da39a3ee5e6b4b0d3255bfef95601890afd80709",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.e248cccc2d2206bf427e9bc9c2d22833f2aeb6d4": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "0",
"description": null,
"is_required": false,
"name": "max_concurrent",
"type_key": "Int"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"enabled\\": {}}",
"description": null,
"is_required": false,
"name": "retries",
"type_key": "Selector.1bfb167aea90780aa679597800c71bd8c65ed0b2"
},
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": "Select how subprocesses are created. Defaults to spawn.\\nWhen forkserver is selected, set_forkserver_preload will be called with either:\\n* the preload_modules list if provided by config\\n* the module containing the Job if it was loaded from a module\\n* dagster\\nhttps://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods",
"is_required": false,
"name": "start_method",
"type_key": "Selector.0f5471adc2ad814d1c9fd94e2fa73c07217dea47"
}
],
"given_name": null,
"key": "<KEY>",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"Shape.ebeaf4550c200fb540f2e1f3f2110debd8c4157c": {
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "console",
"type_key": "Shape.3baab16166bacfaf4705811e64d356112fd733cb"
}
],
"given_name": null,
"key": "Shape.ebeaf4550c200fb540f2e1f3f2110debd8c4157c",
"kind": {
"__enum__": "ConfigTypeKind.STRICT_SHAPE"
},
"scalar_kind": null,
"type_param_keys": null
},
"String": {
"__class__": "ConfigTypeSnap",
"description": "",
"enum_values": null,
"fields": null,
"given_name": "String",
"key": "String",
"kind": {
"__enum__": "ConfigTypeKind.SCALAR"
},
"scalar_kind": {
"__enum__": "ConfigScalarKind.STRING"
},
"type_param_keys": null
}
}
},
"dagster_type_namespace_snapshot": {
"__class__": "DagsterTypeNamespaceSnapshot",
"all_dagster_type_snaps_by_key": {
"Any": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "Any",
"is_builtin": true,
"key": "Any",
"kind": {
"__enum__": "DagsterTypeKind.ANY"
},
"loader_schema_key": "Selector.f2fe6dfdc60a1947a8f8e7cd377a012b47065bc4",
"materializer_schema_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"name": "Any",
"type_param_keys": []
},
"Bool": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "Bool",
"is_builtin": true,
"key": "Bool",
"kind": {
"__enum__": "DagsterTypeKind.SCALAR"
},
"loader_schema_key": "ScalarUnion.Bool-Selector.be5d518b39e86a43c5f2eecaf538c1f6c7711b59",
"materializer_schema_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"name": "Bool",
"type_param_keys": []
},
"Float": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "Float",
"is_builtin": true,
"key": "Float",
"kind": {
"__enum__": "DagsterTypeKind.SCALAR"
},
"loader_schema_key": "ScalarUnion.Float-Selector.d00a37e3807d37c9f69cc62997c4a5f4a176e5c3",
"materializer_schema_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"name": "Float",
"type_param_keys": []
},
"Int": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "Int",
"is_builtin": true,
"key": "Int",
"kind": {
"__enum__": "DagsterTypeKind.SCALAR"
},
"loader_schema_key": "ScalarUnion.Int-Selector.a9799b971d12ace70a2d8803c883c863417d0725",
"materializer_schema_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"name": "Int",
"type_param_keys": []
},
"Nothing": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "Nothing",
"is_builtin": true,
"key": "Nothing",
"kind": {
"__enum__": "DagsterTypeKind.NOTHING"
},
"loader_schema_key": null,
"materializer_schema_key": null,
"name": "Nothing",
"type_param_keys": []
},
"String": {
"__class__": "DagsterTypeSnap",
"description": null,
"display_name": "String",
"is_builtin": true,
"key": "String",
"kind": {
"__enum__": "DagsterTypeKind.SCALAR"
},
"loader_schema_key": "ScalarUnion.String-Selector.e04723c9d9937e3ab21206435b22247cfbe58269",
"materializer_schema_key": "Selector.e52fa3afbe531d9522fae1206f3ae9d248775742",
"name": "String",
"type_param_keys": []
}
}
},
"dep_structure_snapshot": {
"__class__": "DependencyStructureSnapshot",
"solid_invocation_snaps": [
{
"__class__": "SolidInvocationSnap",
"input_dep_snaps": [],
"is_dynamic_mapped": false,
"solid_def_name": "noop_solid",
"solid_name": "noop_solid",
"tags": {}
}
]
},
"description": null,
"graph_def_name": "noop_pipeline",
"lineage_snapshot": null,
"mode_def_snaps": [
{
"__class__": "ModeDefSnap",
"description": null,
"logger_def_snaps": [
{
"__class__": "LoggerDefSnap",
"config_field_snap": {
"__class__": "ConfigFieldSnap",
"default_provided": true,
"default_value_as_json_str": "{\\"log_level\\": \\"INFO\\", \\"name\\": \\"dagster\\"}",
"description": null,
"is_required": false,
"name": "config",
"type_key": "Shape.241ac489ffa5f718db6444bae7849fb86a62e441"
},
"description": "The default colored console logger.",
"name": "console"
}
],
"name": "default",
"resource_def_snaps": [
{
"__class__": "ResourceDefSnap",
"config_field_snap": {
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "config",
"type_key": "Any"
},
"description": null,
"name": "io_manager"
}
],
"root_config_key": "Shape.32aa7ec6e7407e8a502d0a6094909a9365103a8e"
}
],
"name": "noop_pipeline",
"solid_definitions_snapshot": {
"__class__": "SolidDefinitionsSnapshot",
"composite_solid_def_snaps": [],
"solid_def_snaps": [
{
"__class__": "SolidDefSnap",
"config_field_snap": {
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": false,
"name": "config",
"type_key": "Any"
},
"description": null,
"input_def_snaps": [],
"name": "noop_solid",
"output_def_snaps": [
{
"__class__": "OutputDefSnap",
"dagster_type_key": "Any",
"description": null,
"is_dynamic": false,
"is_required": true,
"name": "result"
}
],
"required_resource_keys": [],
"tags": {}
}
]
},
"tags": {}
}'''
snapshots['test_multi_type_config_array_dict_fields[Permissive] 1'] = '''{
"__class__": "ConfigTypeSnap",
"description": "List of Array.Permissive.1f37a068c7c51aba23e9c41475c78eebc4e58471",
"enum_values": null,
"fields": null,
"given_name": null,
"key": "Array.Permissive.1f37a068c7c51aba23e9c41475c78eebc4e58471",
"kind": {
"__enum__": "ConfigTypeKind.ARRAY"
},
"scalar_kind": null,
"type_param_keys": [
"Permissive.1f37a068c7c51aba23e9c41475c78eebc4e58471"
]
}'''
snapshots['test_multi_type_config_array_dict_fields[Selector] 1'] = '''{
"__class__": "ConfigTypeSnap",
"description": "List of Array.Selector.1f37a068c7c51aba23e9c41475c78eebc4e58471",
"enum_values": null,
"fields": null,
"given_name": null,
"key": "Array.Selector.1f37a068c7c51aba23e9c41475c78eebc4e58471",
"kind": {
"__enum__": "ConfigTypeKind.ARRAY"
},
"scalar_kind": null,
"type_param_keys": [
"Selector.1f37a068c7c51aba23e9c41475c78eebc4e58471"
]
}'''
snapshots['test_multi_type_config_array_dict_fields[Shape] 1'] = '''{
"__class__": "ConfigTypeSnap",
"description": "List of Array.Shape.1f37a068c7c51aba23e9c41475c78eebc4e58471",
"enum_values": null,
"fields": null,
"given_name": null,
"key": "Array.Shape.1f37a068c7c51aba23e9c41475c78eebc4e58471",
"kind": {
"__enum__": "ConfigTypeKind.ARRAY"
},
"scalar_kind": null,
"type_param_keys": [
"Shape.1f37a068c7c51aba23e9c41475c78eebc4e58471"
]
}'''
snapshots['test_multi_type_config_array_map 1'] = '''{
"__class__": "ConfigTypeSnap",
"description": "List of Array.Map.String.Int",
"enum_values": null,
"fields": null,
"given_name": null,
"key": "Array.Map.String.Int",
"kind": {
"__enum__": "ConfigTypeKind.ARRAY"
},
"scalar_kind": null,
"type_param_keys": [
"Map.String.Int"
]
}'''
snapshots['test_multi_type_config_nested_dicts[nested_dict_types0] 1'] = '''{
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "foo",
"type_key": "Permissive.c1ae6abf6c3c9e951eeefe4fde820cafc053ee40"
}
],
"given_name": null,
"key": "Selector.cb18f2a8fc9fa17668d8f4fd6b44c86c30c56774",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
}'''
snapshots['test_multi_type_config_nested_dicts[nested_dict_types1] 1'] = '''{
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
{
"__class__": "ConfigFieldSnap",
"default_provided": false,
"default_value_as_json_str": null,
"description": null,
"is_required": true,
"name": "foo",
"type_key": "Shape.9bbda63934c371bf9be9a1cbb6fff9f5ee0be828"
}
],
"given_name": null,
"key": "Selector.b188a7737a2fecf0fca8cf94d331be517176dddf",
"kind": {
"__enum__": "ConfigTypeKind.SELECTOR"
},
"scalar_kind": null,
"type_param_keys": null
}'''
snapshots['test_multi_type_config_nested_dicts[nested_dict_types2] 1'] = '''{
"__class__": "ConfigTypeSnap",
"description": null,
"enum_values": null,
"fields": [
height_y > 0.7:
vr_z_offset = 0.01
if hmd_height + curr_offset[2] + vr_z_offset <= self.vr_settings.height_bounds[1]:
self.set_vr_offset([curr_offset[0], curr_offset[1], curr_offset[2] + vr_z_offset])
# Update haptics for body and hands
if self.main_vr_robot:
vr_body_id = self.main_vr_robot.links["body"].get_body_id()
vr_hands = [
("left_controller", self.main_vr_robot.links["left_hand"]),
("right_controller", self.main_vr_robot.links["right_hand"]),
]
# Check for body haptics
wall_ids = [x.get_body_id() for x in self.scene.objects_by_category["walls"]]
for c_info in p.getContactPoints(vr_body_id):
if wall_ids and (c_info[1] in wall_ids or c_info[2] in wall_ids):
for controller in ["left_controller", "right_controller"]:
is_valid, _, _ = self.get_data_for_vr_device(controller)
if is_valid:
# Use 90% strength for body to warn user of collision with wall
self.trigger_haptic_pulse(controller, 0.9)
# Check for hand haptics
for hand_device, hand_obj in vr_hands:
is_valid, _, _ = self.get_data_for_vr_device(hand_device)
if is_valid:
if len(p.getContactPoints(hand_obj.get_body_id())) > 0 or (
hasattr(hand_obj, "object_in_hand") and hand_obj.object_in_hand
):
# Only use 30% strength for normal collisions, to help add realism to the experience
self.trigger_haptic_pulse(hand_device, 0.3)
def register_main_vr_robot(self, vr_robot):
"""
Register the robot representing the VR user.
"""
self.main_vr_robot = vr_robot
def gen_vr_data(self):
"""
Generates a VrData object containing all of the data required to describe the VR system in the current frame.
This data is used to power the BehaviorRobot each frame.
"""
v = dict()
for device in VR_DEVICES:
is_valid, trans, rot = self.get_data_for_vr_device(device)
device_data = [is_valid, trans.tolist(), rot.tolist()]
device_data.extend(self.get_device_coordinate_system(device))
v[device] = device_data
if device in VR_CONTROLLERS:
v["{}_button".format(device)] = self.get_button_data_for_controller(device)
# Store final rotations of hands, with model rotation applied
for hand in ["right", "left"]:
# Base rotation quaternion
base_rot = self.main_vr_robot.links["{}_hand".format(hand)].base_rot
# Raw rotation of controller
controller_rot = v["{}_controller".format(hand)][2]
# Use dummy translation to calculate the final rotation
final_rot = p.multiplyTransforms([0, 0, 0], controller_rot, [0, 0, 0], base_rot)[1]
v["{}_controller".format(hand)].append(final_rot)
is_valid, torso_trans, torso_rot = self.get_data_for_vr_tracker(self.vr_settings.torso_tracker_serial)
v["torso_tracker"] = [is_valid, torso_trans, torso_rot]
v["eye_data"] = self.get_eye_tracking_data()
v["event_data"] = self.get_vr_events()
reset_actions = []
for controller in VR_CONTROLLERS:
reset_actions.append(self.query_vr_event(controller, "reset_agent"))
v["reset_actions"] = reset_actions
v["vr_positions"] = [self.get_vr_pos().tolist(), list(self.get_vr_offset())]
return VrData(v)
def gen_vr_robot_action(self):
"""
Generates an action for the BehaviorRobot to perform based on VrData collected this frame.
Action space (all non-normalized values that will be clipped if they are too large)
* See BehaviorRobot.py for details on the clipping thresholds for
Body:
- 6DOF pose delta - relative to body frame from previous frame
Eye:
- 6DOF pose delta - relative to body frame (where the body will be after applying this frame's action)
Left hand, right hand (in that order):
- 6DOF pose delta - relative to body frame (same as above)
- Trigger fraction delta
- Action reset value
Total size: 28
"""
# Actions are stored as 1D numpy array
action = np.zeros((28,))
# Get VrData for the current frame
v = self.gen_vr_data()
# Update body action space
hmd_is_valid, hmd_pos, hmd_orn, hmd_r = v.query("hmd")[:4]
torso_is_valid, torso_pos, torso_orn = v.query("torso_tracker")
vr_body = self.main_vr_robot.links["body"]
prev_body_pos, prev_body_orn = vr_body.get_position_orientation()
inv_prev_body_pos, inv_prev_body_orn = p.invertTransform(prev_body_pos, prev_body_orn)
if self.vr_settings.using_tracked_body:
if torso_is_valid:
des_body_pos, des_body_orn = torso_pos, torso_orn
else:
des_body_pos, des_body_orn = prev_body_pos, prev_body_orn
else:
if hmd_is_valid:
des_body_pos, des_body_orn = hmd_pos, p.getQuaternionFromEuler([0, 0, calc_z_rot_from_right(hmd_r)])
else:
des_body_pos, des_body_orn = prev_body_pos, prev_body_orn
body_delta_pos, body_delta_orn = p.multiplyTransforms(
inv_prev_body_pos, inv_prev_body_orn, des_body_pos, des_body_orn
)
action[:3] = np.array(body_delta_pos)
action[3:6] = np.array(p.getEulerFromQuaternion(body_delta_orn))
# Get new body position so we can calculate correct relative transforms for other VR objects
clipped_body_delta_pos, clipped_body_delta_orn = vr_body.clip_delta_pos_orn(action[:3], action[3:6])
clipped_body_delta_orn = p.getQuaternionFromEuler(clipped_body_delta_orn)
new_body_pos, new_body_orn = p.multiplyTransforms(
prev_body_pos, prev_body_orn, clipped_body_delta_pos, clipped_body_delta_orn
)
# Also calculate its inverse for further local transform calculations
inv_new_body_pos, inv_new_body_orn = p.invertTransform(new_body_pos, new_body_orn)
# Update action space for other VR objects
body_relative_parts = ["right", "left", "eye"]
for part_name in body_relative_parts:
vr_part = (
self.main_vr_robot.links[part_name]
if part_name == "eye"
else self.main_vr_robot.links["{}_hand".format(part_name)]
)
# Process local transform adjustments
prev_world_pos, prev_world_orn = vr_part.get_position_orientation()
prev_local_pos, prev_local_orn = vr_part.get_local_position_orientation()
_, inv_prev_local_orn = p.invertTransform(prev_local_pos, prev_local_orn)
if part_name == "eye":
valid, world_pos, world_orn = hmd_is_valid, hmd_pos, hmd_orn
else:
valid, world_pos, _ = v.query("{}_controller".format(part_name))[:3]
# Need rotation of the model so it will appear aligned with the physical controller in VR
world_orn = v.query("{}_controller".format(part_name))[6]
# Keep in same world position as last frame if controller/tracker data is not valid
if not valid:
world_pos, world_orn = prev_world_pos, prev_world_orn
# Get desired local position and orientation transforms
des_local_pos, des_local_orn = p.multiplyTransforms(
inv_new_body_pos, inv_new_body_orn, world_pos, world_orn
)
# Get the delta local orientation in the reference frame of the body
_, delta_local_orn = p.multiplyTransforms(
[0, 0, 0],
des_local_orn,
[0, 0, 0],
inv_prev_local_orn,
)
delta_local_orn = p.getEulerFromQuaternion(delta_local_orn)
# Get the delta local position in the reference frame of the body
delta_local_pos = np.array(des_local_pos) - np.array(prev_local_pos)
if part_name == "eye":
action[6:9] = np.array(delta_local_pos)
action[9:12] = np.array(delta_local_orn)
elif part_name == "left":
action[12:15] = np.array(delta_local_pos)
action[15:18] = np.array(delta_local_orn)
else:
action[20:23] = np.array(delta_local_pos)
action[23:26] = np.array(delta_local_orn)
# Process trigger fraction and reset for controllers
if part_name in ["right", "left"]:
prev_trig_frac = vr_part.trigger_fraction
if valid:
trig_frac = v.query("{}_controller_button".format(part_name))[0]
delta_trig_frac = trig_frac - prev_trig_frac
else:
delta_trig_frac = 0.0
if part_name == "left":
action[18] = delta_trig_frac
else:
action[26] = delta_trig_frac
# If we reset, action is 1, otherwise 0
reset_action = v.query("reset_actions")[0] if part_name == "left" else v.query("reset_actions")[1]
reset_action_val = 1.0 if reset_action else 0.0
if part_name == "left":
action[19] = reset_action_val
else:
action[27] = reset_action_val
return action
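# Index layout of the 28-dim action assembled above:
#   [0:6]   body delta pos (xyz) + delta orn (euler)
#   [6:12]  eye delta pos + delta orn (relative to the new body frame)
#   [12:18] left hand delta pos + delta orn, [18] left trigger delta, [19] left reset
#   [20:26] right hand delta pos + delta orn, [26] right trigger delta, [27] right reset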
def sync_vr_compositor(self):
"""
Sync VR compositor.
"""
self.renderer.vr_compositor_update()
def perform_vr_start_pos_move(self):
"""
Sets the VR position on the first step iteration where the hmd tracking is valid. Not to be confused
with self.set_vr_start_pos, which simply records the desired start position before the simulator starts running.
"""
# Update VR start position if it is not None and the hmd is valid
# This will keep checking until we can successfully set the start position
if self.vr_start_pos:
hmd_is_valid, _, _, _ = self.renderer.vrsys.getDataForVRDevice("hmd")
if hmd_is_valid:
offset_to_start = np.array(self.vr_start_pos) - self.get_hmd_world_pos()
if self.vr_height_offset is not None:
offset_to_start[2] = self.vr_height_offset
self.set_vr_offset(offset_to_start)
self.vr_start_pos = None
def fix_eye_tracking_value(self):
"""
Calculates and fixes eye tracking data to its value during step(). This is necessary, since multiple
calls to get eye tracking data return different results, due to the SRAnipal multithreaded loop that
runs in parallel to the iGibson main thread
"""
self.eye_tracking_data = self.renderer.vrsys.getEyeTrackingData()
def poll_vr_events(self):
"""
Returns VR event data as list of lists.
List is empty if all events are invalid. Components of a single event:
controller: 0 (left_controller), 1 (right_controller)
button_idx: any valid idx in EVRButtonId enum in openvr.h header file
press: 0 (unpress), 1 (press)
"""
self.vr_event_data = self.renderer.vrsys.pollVREvents()
# Enforce store_first_button_press_per_frame option, if user has enabled it
if self.vr_settings.store_only_first_event_per_button:
temp_event_data = []
# Make sure we only store the first (button, press) combo of each type
event_set = set()
for ev_data in self.vr_event_data:
controller, button_idx, _ = ev_data
key = (controller, button_idx)
if key not in event_set:
temp_event_data.append(ev_data)
event_set.add(key)
self.vr_event_data = temp_event_data[:]
return self.vr_event_data
def get_vr_events(self):
"""
Returns the VR events processed by the simulator
"""
return self.vr_event_data
def get_button_for_action(self, action):
"""
Returns (button, state) tuple corresponding to an action
:param action: an action name listed in "action_button_map" dictionary for the current device in the vr_config.yml
"""
return (
None
if action not in self.vr_settings.action_button_map
else tuple(self.vr_settings.action_button_map[action])
)
def query_vr_event(self, controller, action):
"""
Queries system for a VR event, and returns true if that event happened this frame
:param controller: device to query for - can be left_controller or right_controller
:param action: an action name listed in "action_button_map" dictionary for the current device in the vr_config.yml
"""
# Return false if any of input parameters are invalid
if (
controller not in ["left_controller", "right_controller"]
or action not in self.vr_settings.action_button_map.keys()
):
return False
# Search through event list to try to find desired event
controller_id = 0 if controller == "left_controller" else 1
button_idx, press_id = self.vr_settings.action_button_map[action]
for ev_data in self.vr_event_data:
if controller_id == ev_data[0] and button_idx == ev_data[1] and press_id == ev_data[2]:
return True
# Return false if event was not found this frame
return False
def get_data_for_vr_device(self, device_name):
"""
Call this after step - returns all VR device data for a specific device
Returns is_valid (indicating validity of data), translation and | |
# Repo: JacobParrott/OccultationProfiler, file: FindDopplerMain.py
import numpy as np
import spiceypy as spice
import spiceypy.utils.support_types as stypes
import pandas as pd
from os import *
import matplotlib.pyplot as plt
import time as timer
from scipy import constants
from tqdm import tqdm
import math
#Import custom modules
#import main
import atmosphere
import swiftmain
#THE CONTROL TIME = 636491202
#EXPERIMENT mro->ODYSSEY = 24269137.689745
#your egress value because you are an idiot = 241895765.6228938
def ElectricCall(time):
process_id = getpid()
print("Process ID:", process_id)
epoch = 636491202.20059
target = '-143'
obs = '-41'
#need a function that can interpolate between times at 10 Hz, then just sub those positions into the next function
#MEX TGO positions to be created in this function with 10 Hz interpolation
samplingfrequency =1
referencedirection = [0,0,0]
result = np.zeros([samplingfrequency])
overshoot = np.zeros([samplingfrequency])
#636491202.20059
#MEX,TGO, Distance = ephemerides(636491202,time, samplingfrequency)
#mex = MEX[:,0] ; tgo = TGO[0]
for i in range(samplingfrequency):
[tgo1, _] = spice.spkpos('MARS', epoch - time, 'IAU_MARS', 'NONE', target)
[mex1, _] = spice.spkpos('MARS', epoch - time, 'IAU_MARS', 'NONE', obs)
dis = mex1-tgo1
Distance = np.linalg.norm(dis)
#print(f"Progess:{(i/samplingfrequency)*100} %")
#mex = MEX[:,i]+0 ; tgo = TGO[:,i]+0
mex = mex1 ; tgo = tgo1
initialangle,xyzpoints= producegeometrymeter(mex,tgo) #make 2D for huge speed improvements.
Bending, S , referencedirection = flatbending(xyzpoints,initialangle, mex,tgo, referencedirection)
result = S
#result[i] = np.stack((ElectricDistance, Distance), axis = 0)
return S
def TangentPointAltitude(time):
epoch = 636491202.20059
target = '-143'
obs = '-41'
alt = np.zeros(len(time)+1)
for i in time:
[tgo, _] = spice.spkpos('MARS', epoch - i, 'IAU_MARS', 'NONE', target)
[mex, _] = spice.spkpos('MARS', epoch - i, 'IAU_MARS', 'NONE', obs)
[states,_] = spice.spkezr(target, epoch-i, 'IAU_MARS', 'NONE', obs)
sc2scvector = states[0:3]
displacement = np.linalg.norm(sc2scvector)
sc2scunitvector = np.true_divide(sc2scvector, displacement)
marsrad = spice.bodvrd('MARS', 'RADII', 3)
_,alt[i] = spice.npedln(marsrad[1][0], marsrad[1][1], marsrad[1][2], tgo, sc2scunitvector)
return alt *1000
def GeoCall(time):
epoch = 636491202.20059
target = '-143'
obs = '-41'
process_id = getpid()
print("Process ID:", process_id)
[tgo1, _] = spice.spkpos('MARS', epoch - time, 'IAU_MARS', 'NONE', target)
[mex1, _] = spice.spkpos('MARS', epoch - time, 'IAU_MARS', 'NONE', obs)
dis = mex1-tgo1
result = np.linalg.norm(dis)
return result
#the smallest time quantum in SPICE is 1 second. To measure a Doppler shift we need faster than this.
# To sidestep this limitation we interpolate 10 positions between seconds
def ephemerides(et,when, samplingfrequency):
Distance = np.zeros([samplingfrequency])
target = '-143' # NAIF code for TGO (matches the other helpers in this module); needed because spkpos below references it
#Find the locations of MEX & TGO at epoch and epoch +1 second
time = et+when # here is when u have set the order of the
TGO = np.zeros((3,samplingfrequency)) ; MEX = np.zeros((3,samplingfrequency))
[tgo1, _] = spice.spkpos('MARS', time-samplingfrequency, 'IAU_MARS', 'NONE', target) ; [tgo2, _] = spice.spkpos('MARS', time, 'IAU_MARS', 'NONE', target)
[mex1, _] = spice.spkpos('MARS', time-samplingfrequency, 'IAU_MARS', 'NONE', '-41') ; [mex2, _] = spice.spkpos('MARS', time, 'IAU_MARS', 'NONE', '-41')
# [dis, _] = spice.spkpos('-143', time+1, 'IAU_MARS', 'NONE', '-41')
# Distance = np.linalg.norm(dis)
#find ten positions between the two epochs for both MEX & TGO
delta_tgo = (tgo2 - tgo1)/samplingfrequency ; delta_mex = (mex2-mex1) /samplingfrequency
for i in range(samplingfrequency):
MEX[:,i] = mex1 + (delta_mex *i)
TGO[:,i] = tgo1 + (delta_tgo *i)
#[dis, _] = spice.spkpos('-143', time+(i/samplingfrequency), 'IAU_MARS', 'NONE', '-41')
dis = MEX[:,i]-TGO[:,i]
Distance[i] = np.linalg.norm(dis)
return MEX, TGO, Distance
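# Usage sketch for ephemerides (illustrative values; assumes the relevant SPICE kernels are
# already furnished): sample interpolated MEX/TGO positions and their separation up to
# epoch + when.
#
#   MEX, TGO, Distance = ephemerides(636491202.20059, 0, 10)
#   # MEX and TGO have shape (3, 10); Distance has shape (10,)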
def producegeometrymeter(MEX,TGO):
#maybe completely thin this out, you know this is mostly pointless, what does it actually make
class SpiceVariables:
obs = '-41' # NAIF code for MEX '-74'
target = '-143'# NAIF code for TGO ['EARTH'/'SUN'/ a groundstation etc] 'MARS ODYSSEY'
obsfrm = 'IAU_MARS'
abcorr = 'NONE'
crdsys = 'LATITUDINAL'
coord = 'LATITUDE'
stepsz = 1.0 # Check every [300] seconds if there is an occultation
MAXILV = 100000 #Max number of occultations that can be returned by gfoclt
bshape = 'POINT'
fshape = 'DSK/UNPRIORITIZED'
front = 'MARS'
fframe = 'IAU_MARS'
TFMT = 'YYYY-MM-DD HR:MN:SC' # Format that Cosmographia understands
sv = SpiceVariables()
#THIS COULD BE REMOVED
# [TGO, _] = spice.spkpos(sv.front, et-when, sv.fframe, 'NONE', sv.target)
# [MEX, _] = spice.spkpos(sv.front, et-when, sv.fframe, 'NONE', sv.obs)
TGO = TGO +0 ; MEX = MEX +0 #force to be non-strided
dist = math.floor(spice.vdist(TGO,MEX))
angleseparation = (spice.vsep(MEX, TGO)) # angle taken a mars center
initialangle = (spice.vsep(-MEX, (TGO-MEX))) * (180/math.pi)# angle taken at mars-MEX-tgo, that points to tgo. needed for the bending functions original starting angle
#script needs to work via periods of ray and not meters. [totalperiods is the main iterable, not meters]
sc2sc = TGO - MEX
norm = np.linalg.norm(sc2sc)
unitsc2sc = sc2sc/norm #this needs to shrink if the repeatable expands
points = np.empty([3,dist])
sza = np.empty([1,dist])
angleprogression = np.empty([1,dist])
xyzpoints = np.zeros([3,dist])
marsrad = spice.bodvrd(sv.front, 'RADII', 3)
flatteningcoefficient = ( marsrad[1][0] - marsrad[1][2] ) / marsrad[1][0]
equatorialradii = marsrad[1][0]
# find direction of sun, it will not change much during the occultation. so only calc it once
#[SUN, _] = spice.spkpos(sv.front, et, sv.fframe, 'NONE', 'SUN')
for i in range(dist):
xyzpoint = MEX + (i * unitsc2sc) #move along ray, 1000 wavelength distance at a time (685 m). but unitsc2sc is in km...
xyzpoints[:,i] = xyzpoint
#sza[0,i] = spice.vsep(SUN,xyzpoint)
angleprogression[0,i] = (spice.vsep( xyzpoint, MEX)) * (180 / math.pi)
points[:,i] = spice.recgeo(xyzpoint, equatorialradii,flatteningcoefficient)
points[0,i] = (points[0,i] * (-180 / math.pi))
points[1,i] = (points[1,i] * (-180 / math.pi))
# ray = np.concatenate((points,sza), axis=0) # important for when sza is included
#plt.plot(angleprogression[0,:], ray[2,:])
#plt.show()
# ray is in lat/lon/alt + sza and xyzpoints is cartesian, both describe the same thing
return initialangle,xyzpoints
def flatbending(xyzpoints,initialangle, MEX,TGO, referencedirection):
class SpiceVariables:
obs = '-74' # NAIF code for MEX
target = 'MARS ODYSSEY'# NAIF code for TGO ['EARTH'/'SUN'/ a groundstation etc]
obsfrm = 'IAU_MARS'
abcorr = 'NONE'
crdsys = 'LATITUDINAL'
coord = 'LATITUDE'
stepsz = 100.0 # Check every 300 seconds if there is an occultation
MAXILV = 100000 #Max number of occultations that can be returned by gfoclt
bshape = 'POINT'
fshape = 'DSK/UNPRIORITIZED'
front = 'MARS'
fframe = 'IAU_MARS'
TFMT = 'YYYY-MM-DD HR:MN:SC' # Format that Cosmographia understands
sv = SpiceVariables()
#form a coordinate system where tgo is @ y=0 and x= (5000 +norm), Mars's barycenter being @ [5000,0]
subgroupsize = 1
#initialise non-global variables
miniray = np.zeros(subgroupsize)
raystep = np.zeros((2,100000000))# create a large array to populate and then shrink later
barry2mex = np.linalg.norm(MEX)
barry2tgo = np.linalg.norm(TGO)
#find the martian geometry so you can reliably find the altitude of a point
marsrad = spice.bodvrd(sv.front, 'RADII', 3)
flatteningcoefficient = ( marsrad[1][0] - marsrad[1][2] ) / marsrad[1][0]
equatorialradii = marsrad[1][0]
TGO = TGO +0 ; MEX = MEX +0 #force to be non-strided
_,_, MEXalt = spice.recgeo(MEX, equatorialradii,flatteningcoefficient)
_,_, TGOalt = spice.recgeo(TGO, equatorialradii,flatteningcoefficient)
#the position of MEX is found by assuming that it will be somewhere over the relative horizon from TGO
# (meaning over θ = 90°), finding the angle between MEX and TGO's negative vector will give the coords of MEX
MexRelativeElevation = spice.vsep(-TGO, MEX) #radians
mex_y = barry2mex * np.sin(MexRelativeElevation)
mex_x = barry2mex * np.cos(MexRelativeElevation)
mex = np.array([0-mex_x, mex_y])
tgo = np.array([0+barry2tgo, 0])
barry = np.array([0, 0])
#to plot the non-refracted propagation, we must convert the 3d xyzpoints to 2d, we do this the same way we found the x&y for MEX
# ,using the norm distance and sep from -TGO
length = np.size(xyzpoints,1)
UnrefractedDistance = np.linalg.norm(xyzpoints[:,0]- xyzpoints[:,-1]) #in km
UnrefractedRay = np.zeros([2,length])
for i in range(length): #conversion to 2D
point = xyzpoints[:,i] + 0 #need to put vector into temp variable as spice cant handle strided array inputs
angle = spice.vsep(-TGO, point)
norm = np.linalg.norm(point)
point_x = norm * np.cos(angle)
point_y = norm * np.sin(angle)
UnrefractedRay[0,i] = 0 - point_x
UnrefractedRay[1,i] = point_y
#this will produce an angle that is likely not going to be exactly on
#the original propagation path, you compare to this if there is a drifting error, as both this and the resultant refracted ray
# have the same bias error. THIS ANGLE IS BENDING ANTICLOCKWISE IN THIS FRAME (BENDING UPWARDS)
initialtheta = -(spice.vsep(MEX-TGO, MEX))
nicetohave = np.degrees(initialtheta)
#THIS NEEDS TO VARY IF THERE IS AN OVERSHOOT
unit | |
# File: LSDPlottingTools/LSDMap_GDALIO.py
## LSDMap_GDALIO.py
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## These functions are tools to deal with rasters
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## SMM
## 26/07/2014
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#from __future__ import absolute_import, division, print_function, unicode_literals
from __future__ import absolute_import, division, print_function
import osgeo.gdal as gdal
import osgeo.gdal_array as gdal_array
import numpy as np
from osgeo import osr
from osgeo import ogr
import os
from os.path import exists
from osgeo.gdalconst import GA_ReadOnly
#==============================================================================
def getNoDataValue(rasterfn):
"""This gets the nodata value from the raster
Args:
rasterfn (str): The filename (with path and extension) of the raster
Returns:
float: nodatavalue; the nodata value
Author: SMM
"""
raster = gdal.Open(rasterfn)
band = raster.GetRasterBand(1)
return band.GetNoDataValue()
#==============================================================================
#==============================================================================
def setNoDataValue(rasterfn, nodata_value):
"""This sets the nodata value of the raster
Args:
rasterfn (str): The filename (with path and extension) of the raster
nodata_value (float): The nodata value to write to band 1
Returns:
None
Author: SMM
"""
raster = gdal.Open(rasterfn, gdal.GA_Update)
band = raster.GetRasterBand(1)
return band.SetNoDataValue(nodata_value)
#==============================================================================
#==============================================================================
def GetUTMMaxMin(FileName):
"""This gets the minimum and maximum UTM values.
*WARNING* it assumes raster is already projected into UTM, and is in ENVI format! It reads from an ENVI header file.
Args:
FileName (str): The filename (with path and extension) of the raster
Returns:
float: The cell size in metres
float: The X minimum (easting) in metres
float: The X maximum (easting) in metres
float: The Y minimum (northing) in metres
float: The Y maximum (northing) in metres
Author: SMM
"""
if exists(FileName) is False:
raise Exception('[Errno 2] No such file or directory: \'' + FileName + '\'')
NDV, xsize, ysize, GeoT, Projection, DataType = GetGeoInfo(FileName)
CellSize = GeoT[1]
XMin = GeoT[0]
XMax = XMin+CellSize*xsize
YMax = GeoT[3]
YMin = YMax-CellSize*ysize
return CellSize,XMin,XMax,YMin,YMax
#==============================================================================
#==============================================================================
# Gets the pixel area, assumes units are projected
#==============================================================================
def GetPixelArea(FileName):
"""Gets the area in m^2 of the pixels
Args:
rasterfn (str): The filename (with path and extension) of the raster
Returns:
float: Pixel_area (float): The area of each pixel
Author: SMM
"""
if exists(FileName) is False:
raise Exception('[Errno 2] No such file or directory: \'' + FileName + '\'')
NDV, xsize, ysize, GeoT, Projection, DataType = GetGeoInfo(FileName)
CellSize = GeoT[1]
return CellSize*CellSize
#==============================================================================
#==============================================================================
# this takes rows and columns of minium and maximum values and converts them
# to UTM
def GetUTMMaxMinFromRowsCol(FileName,x_max_col,x_min_col,y_max_row,y_min_row):
"""This gets the minimum and maximum UTM values but you give it the row and column numbers.
Note:
This assumes raster is already projected into UTM, and is in ENVI format! It reads from an ENVI header file.
Args:
FileName (str): The filename (with path and extension) of the raster
x_max_col (int): The column to use as the maximum
x_min_col (int): The column to use as the minimum
y_max_row (int): The row to use as the maximum
y_min_row (int): The row to use as the minimum
Returns:
float: The X maximum (easting) in metres
float: The X minimum (easting) in metres
float: The Y maximum (northing) in metres
float: The Y minimum (northing) in metres
Author: SMM
"""
if exists(FileName) is False:
raise Exception('[Errno 2] No such file or directory: \'' + FileName + '\'')
NDV, xsize, ysize, GeoT, Projection, DataType = GetGeoInfo(FileName)
CellSize = GeoT[1]
XMin = GeoT[0]
YMax = GeoT[3]
YMin = YMax-CellSize*ysize
xmax_UTM = XMin+x_max_col*CellSize
xmin_UTM = XMin+x_min_col*CellSize
# need to be careful with the ymax_UTM since the rows go from the top
# but the header index is to bottom corner
print("yll: "+str(YMin)+" and nrows: " +str(ysize) + " dx: "+str(CellSize))
ymax_from_bottom = ysize-y_min_row
ymin_from_bottom = ysize-y_max_row
ymax_UTM = YMin+ymax_from_bottom*CellSize
ymin_UTM = YMin+ymin_from_bottom*CellSize
return xmax_UTM,xmin_UTM,ymax_UTM,ymin_UTM
#==============================================================================
#==============================================================================
# This gets the x and y vectors of the data
#==============================================================================
def GetLocationVectors(FileName):
"""This gets a vector of the x and y locations of the coordinates
Note:
This assumes raster is already projected into UTM, and is in ENVI format! It reads from an ENVI header file.
Args:
FileName (str): The filename (with path and extension) of the raster.
Return:
float: A vector of the x locations (eastings)
float: A vector of the y locations (northings)
Author: SMM
"""
NDV, xsize, ysize, GeoT, Projection, DataType = GetGeoInfo(FileName)
CellSize,XMin,XMax,YMin,YMax = GetUTMMaxMin(FileName)
x_vec = np.arange(XMin,XMax,CellSize)
y_vec = np.arange(YMin,YMax,CellSize)
return x_vec,y_vec
#==============================================================================
#==============================================================================
# This gets the extent of the raster
def GetRasterExtent(FileName):
"""This gets a vector of the minimums and maximums of the coordinates
Note:
This assumes raster is already projected into UTM, and is in ENVI format! It reads from an ENVI header file.
Args:
FileName (str): The filename (with path and extension) of the raster.
Return:
float: A vector that contains
* extent[0]: XMin
* extent[1]: XMax
* extent[2]: YMin
* extent[3]: YMax
Author: SMM
"""
CellSize,XMin,XMax,YMin,YMax = GetUTMMaxMin(FileName)
extent = [XMin,XMax,YMin,YMax]
return extent
#==============================================================================
# Function to read the original file's projection:
def GetGeoInfo(FileName):
"""This gets information from the raster file using gdal
Args:
FileName (str): The filename (with path and extension) of the raster.
Return:
float: A vector that contains:
* NDV: the nodata values
* xsize: cellsize in x direction
* ysize: cellsize in y direction
* GeoT: the tranform (a string)
* Projection: the Projection (a string)
* DataType: The type of data (an int explaing the bits of each data element)
Author: SMM
"""
if exists(FileName) is False:
raise Exception('[Errno 2] No such file or directory: \'' + FileName + '\'')
SourceDS = gdal.Open(FileName, gdal.GA_ReadOnly)
if SourceDS == None:
raise Exception("Unable to read the data file")
NDV = SourceDS.GetRasterBand(1).GetNoDataValue()
xsize = SourceDS.RasterXSize
ysize = SourceDS.RasterYSize
GeoT = SourceDS.GetGeoTransform()
Projection = osr.SpatialReference()
Projection.ImportFromWkt(SourceDS.GetProjectionRef())
DataType = SourceDS.GetRasterBand(1).DataType
DataType = gdal.GetDataTypeName(DataType)
return NDV, xsize, ysize, GeoT, Projection, DataType
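# Usage sketch for GetGeoInfo ('dem.bil' is an illustrative filename, not part of this module):
#
#   NDV, xsize, ysize, GeoT, Projection, DataType = GetGeoInfo('dem.bil')
#   print("nodata: %s, size: %d x %d, cellsize: %f" % (NDV, xsize, ysize, GeoT[1]))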
#==============================================================================
#==============================================================================
# This gets the UTM zone, if it exists
def GetUTMEPSG(FileName):
"""Uses GDAL to get the EPSG string from the raster.
Args:
FileName (str): The filename (with path and extension) of the raster.
Return:
str: The EPSG string
Author: SMM
"""
if exists(FileName) is False:
raise Exception('[Errno 2] No such file or directory: \'' + FileName + '\'')
# see if the file exists and get the dataset
SourceDS = gdal.Open(FileName, gdal.GA_ReadOnly)
if SourceDS is None:
raise Exception("Unable to read the data file")
EPSG_string = 'NULL'
# get the projection
print("Let me get that projection for you")
prj=SourceDS.GetProjection()
srs=osr.SpatialReference(wkt=prj)
if srs.IsProjected():
#print("Trying projcs")
#print(str(srs.GetAttrValue(str('PROJCS'),0)))
print(srs.GetAttrValue(str('projcs')))
proj_str = srs.GetAttrValue(str('projcs'))
print("The projection string is: "+proj_str)
print(proj_str)
if proj_str is not None:
# extract the UTM information
if "UTM Zone" in proj_str:
first_split = proj_str.split(',')
first_half = first_split[0]
second_half = first_split[1]
if "Northern" in second_half:
N_or_S = "N"
else:
N_or_S = "S"
second_split = first_half.split(' ')
zone = second_split[2]
else:
proj_split = proj_str.split('_')
zone = proj_split[-1]
N_or_S = zone[-1]
zone = zone[:-1]
# adding some logic for zones < 10
if len(zone) < 2:
zone = '0'+zone
EPSG_string = 'epsg:'
if N_or_S == 'S':
EPSG_string = EPSG_string+'327'+zone
else:
EPSG_string = EPSG_string+'326'+zone
print("The EPSG string is: "+EPSG_string)
else:
raise Exception("This is not a projected coordinate system!")
print(EPSG_string)
return EPSG_string
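# Example of the string logic above (hypothetical projection strings, not from the source):
#   "WGS_1984_UTM_Zone_44N" -> zone = "44", N_or_S = "N"                -> "epsg:32644"
#   "WGS_1984_UTM_Zone_7S"  -> zone = "7", padded to "07", N_or_S = "S" -> "epsg:32707"
# i.e. northern-hemisphere zones map to 326xx EPSG codes and southern ones to 327xx codes.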
#==============================================================================
# Function to read the original file's projection:
def GetNPixelsInRaster(FileName):
"""This gets the total number of pixels in the raster
Args:
FileName (str): The filename (with path and extension) of the raster.
Return:
int: The total number of pixels
Author: SMM
"""
NDV, xsize, ysize, GeoT, Projection, DataType = GetGeoInfo(FileName)
return xsize*ysize
#==============================================================================
#==============================================================================
# Function to read the original file's projection:
def CheckNoData(FileName):
"""This looks through the head file of an ENVI raster and if it doesn't find the nodata line it rewrites the file to include the nodata line.
Args:
FileName (str): The filename (with path and extension) of the raster.
Return:
int: The total number of pixels (although what it is really doing is updating the header file. The return is just to check if it is working and yes I know this is stupid. )
Author: SMM
"""
if exists(FileName) is False:
raise Exception('[Errno 2] No such file or directory: \'' + FileName + '\'')
# read the file, and check if there is a no data value
SourceDS = gdal.Open(FileName, gdal.GA_ReadOnly)
if SourceDS is None:
raise Exception("Unable to read the data file")
NoDataValue = SourceDS.GetRasterBand(1).GetNoDataValue()
print("In the check nodata routine. Nodata is: ")
print(NoDataValue)
if NoDataValue is None:
print("This raster does not have a nodata value. Updating the header file")
header_name = FileName[:-4]
def parfor_insert_dels(parfor, curr_dead_set):
    """Insert dels in a parfor given the set of currently dead variables. Returns
    the variables for which del was inserted.
    """
blocks = wrap_parfor_blocks(parfor)
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap)
dead_map = compute_dead_maps(cfg, blocks, live_map, usedefs.defmap)
# treat loop variables and size variables as live
loop_vars = {
l.start.name for l in parfor.loop_nests if isinstance(
l.start, ir.Var)}
loop_vars |= {
l.stop.name for l in parfor.loop_nests if isinstance(
l.stop, ir.Var)}
loop_vars |= {
l.step.name for l in parfor.loop_nests if isinstance(
l.step, ir.Var)}
loop_vars |= {l.index_variable.name for l in parfor.loop_nests}
# for var_list in parfor.array_analysis.array_size_vars.values():
# loop_vars |= {v.name for v in var_list if isinstance(v, ir.Var)}
dead_set = set()
for label in blocks.keys():
# only kill vars that are actually dead at the parfor's block
dead_map.internal[label] &= curr_dead_set
dead_map.internal[label] -= loop_vars
dead_set |= dead_map.internal[label]
dead_map.escaping[label] &= curr_dead_set
dead_map.escaping[label] -= loop_vars
dead_set |= dead_map.escaping[label]
# dummy class to replace func_ir. _patch_var_dels only accesses blocks
class DummyFuncIR(object):
def __init__(self, blocks):
self.blocks = blocks
post_proc = postproc.PostProcessor(DummyFuncIR(blocks))
post_proc._patch_var_dels(dead_map.internal, dead_map.escaping)
unwrap_parfor_blocks(parfor)
return dead_set | loop_vars
postproc.ir_extension_insert_dels[Parfor] = parfor_insert_dels
# reorder statements to maximize fusion
def maximize_fusion(func_ir, blocks):
call_table, _ = get_call_table(blocks)
for block in blocks.values():
order_changed = True
while order_changed:
order_changed = False
i = 0
while i < len(block.body) - 2:
stmt = block.body[i]
next_stmt = block.body[i + 1]
# swap only parfors with non-parfors
# don't reorder calls with side effects (e.g. file close)
# only read-read dependencies are OK
# make sure there is no write-write, write-read dependencies
if (isinstance(
stmt, Parfor) and not isinstance(
next_stmt, Parfor) and not isinstance(
next_stmt, ir.Print)
and (not isinstance(next_stmt, ir.Assign)
or has_no_side_effect(
next_stmt.value, set(), call_table)
or guard(is_assert_equiv, func_ir, next_stmt.value))):
stmt_accesses = {v.name for v in stmt.list_vars()}
stmt_writes = get_parfor_writes(stmt)
next_accesses = {v.name for v in next_stmt.list_vars()}
next_writes = get_stmt_writes(next_stmt)
if len((stmt_writes & next_accesses)
| (next_writes & stmt_accesses)) == 0:
block.body[i] = next_stmt
block.body[i + 1] = stmt
order_changed = True
i += 1
return
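# Sketch of the reordering this loop performs (hypothetical IR, not from the source):
#   before:  parfor_1;  c = a + 1;  parfor_2     (parfor_1 and parfor_2 are not adjacent)
#   after:   c = a + 1;  parfor_1;  parfor_2     (the side-effect-free, independent assignment
#                                                 is moved above parfor_1, so a later pass can
#                                                 try to fuse the now-adjacent parfors)
# The swap is only done when there is no write-write or write-read overlap between the parfor
# and the statement being moved past it.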
def is_assert_equiv(func_ir, expr):
func_name, mod_name = find_callname(func_ir, expr)
return func_name == 'assert_equiv'
def get_parfor_writes(parfor):
assert isinstance(parfor, Parfor)
writes = set()
blocks = parfor.loop_body.copy()
blocks[-1] = parfor.init_block
for block in blocks.values():
for stmt in block.body:
writes.update(get_stmt_writes(stmt))
if isinstance(stmt, Parfor):
writes.update(get_parfor_writes(stmt))
return writes
def try_fuse(equiv_set, parfor1, parfor2):
"""try to fuse parfors and return a fused parfor, otherwise return None
"""
dprint("try_fuse trying to fuse \n", parfor1, "\n", parfor2)
# fusion of parfors with different dimensions not supported yet
if len(parfor1.loop_nests) != len(parfor2.loop_nests):
dprint("try_fuse parfors number of dimensions mismatch")
return None
ndims = len(parfor1.loop_nests)
# all loops should be equal length
def is_equiv(x, y):
return x == y or equiv_set.is_equiv(x, y)
for i in range(ndims):
nest1 = parfor1.loop_nests[i]
nest2 = parfor2.loop_nests[i]
if not (is_equiv(nest1.start, nest2.start) and
is_equiv(nest1.stop, nest2.stop) and
is_equiv(nest1.step, nest2.step)):
dprint("try_fuse parfor dimension correlation mismatch", i)
return None
# TODO: make sure parfor1's reduction output is not used in parfor2
# only data parallel loops
if has_cross_iter_dep(parfor1) or has_cross_iter_dep(parfor2):
dprint("try_fuse parfor cross iteration dependency found")
return None
# make sure parfor2's init block isn't using any output of parfor1
parfor1_body_usedefs = compute_use_defs(parfor1.loop_body)
parfor1_body_vardefs = set()
for defs in parfor1_body_usedefs.defmap.values():
parfor1_body_vardefs |= defs
init2_uses = compute_use_defs({0: parfor2.init_block}).usemap[0]
if not parfor1_body_vardefs.isdisjoint(init2_uses):
dprint("try_fuse parfor2 init block depends on parfor1 body")
return None
return fuse_parfors_inner(parfor1, parfor2)
def fuse_parfors_inner(parfor1, parfor2):
# fuse parfor2 into parfor1
# append parfor2's init block on parfor1's
parfor1.init_block.body.extend(parfor2.init_block.body)
# append parfor2's first block to parfor1's last block
parfor2_first_label = min(parfor2.loop_body.keys())
parfor2_first_block = parfor2.loop_body[parfor2_first_label].body
parfor1_first_label = min(parfor1.loop_body.keys())
parfor1_last_label = max(parfor1.loop_body.keys())
parfor1.loop_body[parfor1_last_label].body.extend(parfor2_first_block)
# add parfor2 body blocks to parfor1's except first
parfor1.loop_body.update(parfor2.loop_body)
parfor1.loop_body.pop(parfor2_first_label)
# replace parfor2 indices with parfor1's
ndims = len(parfor1.loop_nests)
index_dict = {parfor2.index_var.name: parfor1.index_var}
for i in range(ndims):
index_dict[parfor2.loop_nests[i].index_variable.name] = parfor1.loop_nests[
i].index_variable
replace_vars(parfor1.loop_body, index_dict)
# re-order labels from min to max
blocks = wrap_parfor_blocks(parfor1, entry_label=parfor1_first_label)
blocks = rename_labels(blocks)
unwrap_parfor_blocks(parfor1, blocks)
nameset = set(x.name for x in index_dict.values())
remove_duplicate_definitions(parfor1.loop_body, nameset)
parfor1.patterns.extend(parfor2.patterns)
if config.DEBUG_ARRAY_OPT_STATS:
print('Parallel for-loop #{} is fused into for-loop #{}.'.format(
parfor2.id, parfor1.id))
return parfor1
def remove_duplicate_definitions(blocks, nameset):
"""Remove duplicated definition for variables in the given nameset, which
is often a result of parfor fusion.
"""
for label, block in blocks.items():
body = block.body
new_body = []
defined = set()
for inst in body:
if isinstance(inst, ir.Assign):
name = inst.target.name
if name in nameset:
if name in defined:
continue
defined.add(name)
new_body.append(inst)
block.body = new_body
return
def has_cross_iter_dep(parfor):
# we conservatively assume there is a cross-iteration dependency when
# the parfor index is used in any expression, since the expression could
# be used for indexing arrays
# TODO: make it more accurate using ud-chains
indices = {l.index_variable for l in parfor.loop_nests}
for b in parfor.loop_body.values():
for stmt in b.body:
# GetItem/SetItem nodes are fine since they can't have expressions inside
# and only simple indices are possible
if isinstance(stmt, (ir.SetItem, ir.StaticSetItem)):
continue
# tuples are immutable so no expression on parfor possible
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
op = stmt.value.op
if op in ['build_tuple', 'getitem', 'static_getitem']:
continue
# other statements can have potential violations
if not indices.isdisjoint(stmt.list_vars()):
dprint("has_cross_iter_dep found", indices, stmt)
return True
return False
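# Illustrative sketch of what this conservative check flags (hypothetical loop body, i is the parfor index):
#   A[i] = B[i]     -> fine: the index only appears in setitem/getitem/build_tuple nodes
#   t = i - 1       -> flagged: the index feeds an arbitrary expression, which could later be
#   A[i] = B[t]        used to index another iteration's element (effectively B[i-1])
# so the function returns True for the second pattern even though some such loops would in
# fact be safe to run in parallel.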
def dprint(*s):
if config.DEBUG_ARRAY_OPT == 1:
print(*s)
def get_parfor_pattern_vars(parfor):
""" get the variables used in parfor pattern information
"""
out = set()
# currently, only stencil pattern has variables
for pattern in parfor.patterns:
if pattern[0] == 'stencil':
left_lengths = pattern[1][0]
right_lengths = pattern[1][1]
for v in left_lengths+right_lengths:
if isinstance(v, ir.Var):
out.add(v.name)
return out
def remove_dead_parfor(parfor, lives, arg_aliases, alias_map, typemap):
""" remove dead code inside parfor including get/sets
"""
# remove dead get/sets in last block
# FIXME: I think that "in the last block" is not sufficient in general. We might need to
# remove from any block.
last_label = max(parfor.loop_body.keys())
last_block = parfor.loop_body[last_label]
# save array values set to replace getitems
saved_values = {}
new_body = []
for stmt in last_block.body:
if (isinstance(stmt, ir.SetItem) and stmt.index.name ==
parfor.index_var.name and stmt.target.name not in lives):
saved_values[stmt.target.name] = stmt.value
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
rhs = stmt.value
if rhs.op == 'getitem' and isinstance(rhs.index, ir.Var):
if rhs.index.name == parfor.index_var.name:
# replace getitem if value saved
stmt.value = saved_values.get(rhs.value.name, rhs)
new_body.append(stmt)
last_block.body = new_body
alias_set = set(alias_map.keys())
# after getitem replacement, remove extra setitems
new_body = []
in_lives = copy.copy(lives)
for stmt in reversed(last_block.body):
# aliases of lives are also live for setitems
alias_lives = in_lives & alias_set
for v in alias_lives:
in_lives |= alias_map[v]
if (isinstance(stmt, ir.SetItem) and stmt.index.name ==
parfor.index_var.name and stmt.target.name not in in_lives):
continue
in_lives |= {v.name for v in stmt.list_vars()}
new_body.append(stmt)
new_body.reverse()
last_block.body = new_body
# process parfor body recursively
remove_dead_parfor_recursive(
parfor, lives, arg_aliases, alias_map, typemap)
# remove parfor if empty
is_empty = len(parfor.init_block.body) == 0
for block in parfor.loop_body.values():
is_empty &= len(block.body) == 0
if is_empty:
return None
return parfor
ir_utils.remove_dead_extensions[Parfor] = remove_dead_parfor
def remove_dead_parfor_recursive(parfor, lives, arg_aliases, alias_map, typemap):
"""create a dummy function from parfor and call remove dead recursively
"""
blocks = parfor.loop_body.copy() # shallow copy is enough
first_body_block = min(blocks.keys())
assert first_body_block > 0 # we are using 0 for init block here
last_label = max(blocks.keys())
return_label = last_label + 1
loc = blocks[last_label].loc
scope = blocks[last_label].scope
blocks[return_label] = ir.Block(scope, loc)
# add dummy jump in init_block for CFG to work
blocks[0] = parfor.init_block
blocks[0].body.append(ir.Jump(first_body_block, loc))
# add lives in a dummy return to last block to avoid their removal
tuple_var = ir.Var(scope, mk_unique_var("$tuple_var"), loc)
# dummy type for tuple_var
typemap[tuple_var.name] = types.containers.UniTuple(
types.intp, 2)
live_vars = [ir.Var(scope, v, loc) for v in lives]
tuple_call = ir.Expr.build_tuple(live_vars, loc)
blocks[return_label].body.append(ir.Assign(tuple_call, tuple_var, loc))
blocks[return_label].body.append(ir.Return(tuple_var, loc))
branch = ir.Branch(0, first_body_block, return_label, loc)
blocks[last_label].body.append(branch)
# args var including aliases is ok
remove_dead(blocks, arg_aliases, typemap, alias_map, arg_aliases)
typemap.pop(tuple_var.name) # remove dummy tuple type
blocks[0].body.pop() # remove dummy jump
blocks[last_label].body.pop() # remove branch
return
def find_potential_aliases_parfor(parfor, args, typemap, alias_map, arg_aliases):
blocks = wrap_parfor_blocks(parfor)
ir_utils.find_potential_aliases(
blocks, args, typemap, alias_map, arg_aliases)
unwrap_parfor_blocks(parfor)
return
ir_utils.alias_analysis_extensions[Parfor] = find_potential_aliases_parfor
def wrap_parfor_blocks(parfor, entry_label = None):
"""wrap parfor blocks for analysis/optimization like CFG"""
blocks = parfor.loop_body.copy() # shallow copy is enough
if entry_label is None:
entry_label = min(blocks.keys())
assert entry_label > 0  # we are using 0 for the init block here
# Levels above the query level in the identifier must each supply their unique key,
# so make sure the unique keyword is present
if attr[level][0] not in identifier:
raise InvalidIdentifier(
f"The Identifier is missing a unique key for " f"the '{level}' level"
)
def clear(session):
"""Delete all entries from the database.
Parameters
----------
session : sqlalchemy.orm.session.Session
The session we are using to clear the database.
"""
for instance in session.query(Instance).all():
session.delete(instance)
session.commit()
def create(db_location, echo=False):
"""Create a new database at `db_location` if one doesn't already exist.
Parameters
----------
db_location : str
The location of the database.
echo : bool, optional
Turn the sqlalchemy logging on (default ``False``).
"""
engine = create_engine(db_location, echo=echo)
# Create the tables (won't recreate tables already present)
Base.metadata.create_all(engine)
return engine
def remove_instance(instance_uid, session):
"""Remove a SOP Instance from the database.
Parameters
----------
instance_uid : pydicom.uid.UID
The (0008,0018) *SOP Instance UID* of the SOP Instance to be removed
from the database.
session : sqlalchemy.orm.session.Session
The session to use when querying the database for the instance.
"""
matches = (
session.query(Instance).filter(Instance.sop_instance_uid == instance_uid).all()
)
if matches:
session.delete(matches[0])
session.commit()
def search(model, identifier, session):
"""Search the database.
Optional keys are not supported.
Parameters
----------
model : pydicom.uid.UID
The Query/Retrieve Information Model. Supported models are:
- *Patient Root Query Retrieve Information Model* for C-FIND, C-GET
and C-MOVE
- *Study Root Query Retrieve Information Model* for C-FIND, C-GET and
C-MOVE
identifier : pydicom.dataset.Dataset
The Query/Retrieve request's *Identifier* dataset.
session : sqlalchemy.orm.session.Session
The session we are using to query the database.
Returns
-------
list of Instance
The matching database Instances.
Raises
------
ValueError
If the `identifier` is invalid.
"""
if model not in _STUDY_ROOT and model not in _PATIENT_ROOT:
raise ValueError(f"Unknown information model '{model.name}'")
# Remove all optional keys, after this only unique/required will remain
for elem in identifier:
kw = elem.keyword
if kw != "QueryRetrieveLevel" and kw not in _ATTRIBUTES:
delattr(identifier, kw)
if model in _C_GET or model in _C_MOVE:
# Part 4, C.2.2.1.2: remove required keys from C-GET/C-MOVE
for kw, value in _ATTRIBUTES.items():
if value[1] == "R" and kw in identifier:
delattr(identifier, kw)
return _search_qr(model, identifier, session)
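# Rough illustration of the filtering above (hypothetical C-GET identifier; the exact attribute
# classifications depend on the _ATTRIBUTES table defined elsewhere in this module):
#   QueryRetrieveLevel = "SERIES"   -> always kept
#   PatientID (unique "U" key)      -> kept
#   StudyDate (required "R" key)    -> stripped, since required keys are removed for
#                                      C-GET/C-MOVE (Part 4, C.2.2.1.2)
#   any optional key                -> stripped earlier, because it is not in _ATTRIBUTES
# Only the surviving keys reach _search_qr().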
def _search_qr(model, identifier, session):
"""Search the database using a Query/Retrieve *Identifier* query.
Parameters
----------
model : pydicom.uid.UID
Either *Patient Root Query Retrieve Information Model* or *Study Root
Query Retrieve Information Model* for C-FIND, C-GET or C-MOVE.
identifier : pydicom.dataset.Dataset
The request's *Identifier* dataset.
session : sqlalchemy.orm.session.Session
The session we are using to query the database.
Returns
-------
list of db.Instance
The Instances that match the query.
"""
# Will raise InvalidIdentifier if check failed
_check_identifier(identifier, model)
if model in _PATIENT_ROOT:
attr = _PATIENT_ROOT[model]
else:
attr = _STUDY_ROOT[model]
# Hierarchical search method: C.4.1.3.1.1
query = None
for level, keywords in attr.items():
# Keywords at current level that are in the identifier
keywords = [kw for kw in keywords if kw in identifier]
# Create query dataset for only the current level and run it
ds = Dataset()
[setattr(ds, kw, getattr(identifier, kw)) for kw in keywords]
query = build_query(ds, session, query)
if level == identifier.QueryRetrieveLevel:
break
return query.all()
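# Sketch of the hierarchical narrowing above (hypothetical Study Root C-FIND query):
#   identifier: QueryRetrieveLevel = "SERIES", StudyInstanceUID = "1.2.3", Modality = "CT"
#   pass 1 (STUDY level):  query = build_query(Dataset with StudyInstanceUID)        -> filters rows
#   pass 2 (SERIES level): query = build_query(Dataset with Modality, previous query) -> narrows them
#   the loop stops because level == identifier.QueryRetrieveLevel, then query.all() is returned.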
def _search_range(elem, session, query=None):
"""Perform a range search for DA, DT and TM elements with '-' in them.
Parameters
----------
elem : pydicom.dataelem.DataElement
The attribute to perform the search with.
session : sqlalchemy.orm.session.Session
The session we are using to query the database.
query : sqlalchemy.orm.query.Query, optional
An existing query within which this search should be performed. If
not used then all the Instances in the database will be searched
(default).
Returns
-------
sqlalchemy.orm.query.Query
The resulting query.
"""
# range matching
# <date1> - <date2>: matches any date within the range, inclusive
# - <date2>: match all dates prior to and including <date2>
# <date1> -: match all dates after and including <date1>
# <time>: if Timezone Offset From UTC is included, values are in the specified timezone
# date: 20060705-20060707 + time: 1000-1800 matches July 5, 10 am to
# July 7, 6 pm.
start, end = elem.value.split("-")
attr = getattr(Instance, _TRANSLATION[elem.keyword])
if not query:
query = session.query(Instance)
if start and end:
return query.filter(attr >= start, attr <= end)
elif start and not end:
return query.filter(attr >= start)
elif not start and end:
return query.filter(attr <= end)
raise ValueError("Invalid attribute value for range matching")
def _search_single_value(elem, session, query=None):
"""Perform a search using single value matching.
Single value matching shall be performed if the value of an Attribute is
non-zero length and the VR is not SQ and:
* the VR is AE, CS, LO, LT, PN, SH, ST, UC, UR or UT and contains no wild
card characters, or
* the VR is DA, TM or DT and contains a single value with no "-", or
* any other VR
Parameters
----------
elem : pydicom.dataelem.DataElement
The attribute to perform the search with.
session : sqlalchemy.orm.session.Session
The session we are using to query the database.
query : sqlalchemy.orm.query.Query, optional
An existing query within which this search should be performed. If
not used then all the Instances in the database will be searched
(default).
Returns
-------
sqlalchemy.orm.query.Query
The resulting query.
"""
attr = getattr(Instance, _TRANSLATION[elem.keyword])
if elem.VR == "PN":
value = str(elem.value)
else:
value = elem.value
if not query:
query = session.query(Instance)
return query.filter(attr == value)
def _search_uid_list(elem, session, query=None):
"""Search using an element containing a list of UIDs.
A match against any of the UIDs is considered a positive result.
Parameters
----------
elem : pydicom.dataelem.DataElement
The attribute to perform the search with.
session : sqlalchemy.orm.session.Session
The session we are using to query the database.
query : sqlalchemy.orm.query.Query, optional
An existing query within which this search should be performed. If
not used then all the Instances in the database will be searched
(default).
Returns
-------
sqlalchemy.orm.query.Query
The resulting query.
"""
if not elem.value:
return _search_universal(elem, session, query)
attr = getattr(Instance, _TRANSLATION[elem.keyword])
if not query:
query = session.query(Instance)
return query.filter(attr.in_(elem.value))
def _search_universal(elem, session, query=None):
"""Perform a universal search for empty elements.
Parameters
----------
elem : pydicom.dataelem.DataElement
The attribute to perform the search with.
session : sqlalchemy.orm.session.Session
The session we are using to query the database.
query : sqlalchemy.orm.query.Query, optional
An existing query within which this search should be performed. If
not used then all the Instances in the database will be searched
(default).
Returns
-------
sqlalchemy.orm.query.Query
The resulting query.
"""
# If the value is zero length then all entities shall match
if not query:
query = session.query(Instance)
return query
def _search_wildcard(elem, session, query=None):
"""Perform a wildcard search.
Parameters
----------
elem : pydicom.dataelem.DataElement
The attribute to perform the search with.
session : sqlalchemy.orm.session.Session
The session we are using to query the database.
query : sqlalchemy.orm.query.Query, optional
An existing query within which this search should be performed. If
not used then all the Instances in the database will be searched
(default).
Returns
-------
sqlalchemy.orm.query.Query
The resulting query.
"""
# Contains '*' or '?', case-sensitive if not PN
# '*' shall match any sequence of characters (incl. zero length)
# '?' shall match any single character
attr = getattr(Instance, _TRANSLATION[elem.keyword])
if elem.VR == "PN":
value = str(elem.value)
else:
value = elem.value
if value is None or value == "":
value = "*"
value = value.replace("*", "%")
value = value.replace("?", "_")
if not query:
query = session.query(Instance)
return query.filter(attr.like(value))
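# Illustrative sketch of the wildcard translation (hypothetical values):
#   PatientName = "CITIZEN*" -> attr LIKE "CITIZEN%"
#   PatientID   = "PAT?1"    -> attr LIKE "PAT_1"
# Note that "%" and "_" occurring literally in the value are not escaped here, so they would
# also act as SQL wildcards.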
# Database table setup stuff
Base = declarative_base()
class Image(Base):
__tablename__ = "image"
# (0008,0018) SOP Instance UID | VR UI, VM 1, U
sop_instance_uid = Column(String(64), primary_key=True)
# (0020,0013) Instance Number | VR IS, VM 1, R
instance_number = Column(Integer)
class Instance(Base):
__tablename__ = "instance"
# Absolute path to the stored SOP Instance
filename = Column(String)
# Transfer Syntax UID of the SOP Instance
transfer_syntax_uid = Column(String(64))
sop_class_uid = Column(String(64))
patient_id = Column(String, ForeignKey("patient.patient_id"))
patient_name = Column(String, ForeignKey("patient.patient_name"))
study_instance_uid = Column(String, ForeignKey("study.study_instance_uid"))
study_date = Column(String, ForeignKey("study.study_date"))
study_time = Column(String, ForeignKey("study.study_time"))
accession_number = Column(String, ForeignKey("study.accession_number"))
study_id = Column(String, ForeignKey("study.study_id"))
series_instance_uid = Column(String, ForeignKey("series.series_instance_uid"))
modality = Column(String, ForeignKey("series.modality"))
series_number = Column(String, ForeignKey("series.series_number"))
sop_instance_uid = Column(
String,
ForeignKey("image.sop_instance_uid"),
primary_key=True,
)
instance_number = Column(String, ForeignKey("image.instance_number"))
import asyncio
import sys
import time
import traceback
from aiotfm.packet import Packet
from aiotfm.get_keys import get_keys
from aiotfm.connection import Connection
from aiotfm.player import Profile, Player
from aiotfm.tribe import Tribe
from aiotfm.message import Message, Whisper, Channel, ChannelMessage
from aiotfm.shop import Shop
from aiotfm.inventory import Inventory, InventoryItem, Trade
from aiotfm.room import Room
from aiotfm.locale import Locale
from aiotfm.errors import *
class Client:
"""Represents a client that connects to Transformice.
Two arguments can be passed to the :class:`Client`.
.. _event loop: https://docs.python.org/3/library/asyncio-eventloops.html
Parameters
----------
community: Optional[:class:`int`]
Defines the community of the client. Defaults to 0 (EN community).
loop: Optional[event loop]
The `event loop`_ to use for asynchronous operations. If ``None`` is passed (the default),
the event loop used will be ``asyncio.get_event_loop()``.
Attributes
----------
username: Optional[:class:`str`]
The bot's username received from the server. Might be None if the bot didn't log in yet.
room: Optional[:class:`aiotfm.room.Room`]
The bot's room. Might be None if the bot didn't log in yet or couldn't join any room yet.
trade: Optional[:class:`aiotfm.inventory.Trade`]
The current trade that's going on (i.e: both traders accepted it).
trades: :class:`list`[:class:`aiotfm.inventory.Trade`]
All the trades that the bot participates in. Most of them might be invitations only.
inventory: Optional[:class:`aiotfm.inventory.Inventory`]
The bot's inventory. Might be None if the bot didn't log in yet or it didn't receive anything.
locale: :class:`aiotfm.locale.Locale`
The bot's locale (translations).
"""
LOG_UNHANDLED_PACKETS = False
def __init__(self, community=0, loop=None):
self.loop = loop or asyncio.get_event_loop()
self.main = Connection('main', self, self.loop)
self.bulle = None
self._waiters = {}
self.room = None
self.trade = None
self.trades = []
self.inventory = None
self.username = None
self.locale = Locale()
self.community = community # EN
self.cp_fingerprint = 0
self._channels = []
async def received_data(self, data, connection):
"""|coro|
Dispatches the received data.
:param data: :class:`bytes` the received data.
:param connection: :class:`aiotfm.connection.Connection` the connection that received the data.
"""
self.dispatch('raw_socket', connection, Packet(data))
try:
await self.handle_packet(connection, Packet(data))
except Exception:
traceback.print_exc()
async def handle_packet(self, connection:Connection, packet:Packet):
"""|coro|
Handles the known packets and dispatches events.
Subclasses should handle only the unhandled packets from this method.
Example: ::
class Bot(aiotfm.Client):
async def handle_packet(self, conn, packet):
handled = await super().handle_packet(conn, packet.copy())
if not handled:
# Handle here the unhandled packets.
pass
:param connection: :class:`aiotfm.connection.Connection` the connection that received the packet.
:param packet: :class:`aiotfm.Packet` the packet.
:return: True if the packet got handled, False otherwise.
"""
CCC = packet.readCode()
if CCC==(1, 1): # Old packets
data = packet.readBytes().split(b'\x01')
oldCCC = tuple(data.pop(0)[:2])
self.dispatch('old_packet', connection, oldCCC, data)
return await self.handle_old_packet(connection, oldCCC, data)
elif CCC==(5, 21): # Joined room
room = self.room = Room(private=not packet.readBool(), name=packet.readUTF())
self.dispatch('joined_room', room)
elif CCC==(6, 6): # Room message
player_id = packet.read32()
username = packet.readUTF()
commu = packet.read8()
message = packet.readUTF()
self.dispatch('room_message', Message(Player(username, pid=player_id), message, commu, self))
elif CCC==(6, 20): # Server message
packet.readBool() # if False then the message will appear in the #Server channel
t_key = packet.readUTF()
t_args = [packet.readUTF() for i in range(packet.read8())]
self.dispatch('server_message', self.locale[t_key], *t_args)
elif CCC==(8, 5): # Show emoji
player = self.room.get_player(pid=packet.read32())
emoji = packet.read8()
self.dispatch('emoji', player, emoji)
elif CCC==(8, 16): # Profile
self.dispatch('profile', Profile(packet))
elif CCC==(8, 20): # Shop
self.dispatch('shop', Shop(packet))
elif CCC==(8, 22): # Skills
skills = {}
for i in range(packet.read8()):
key, value = packet.read8(), packet.read8()
skills[key] = value
self.dispatch('skills', skills)
elif CCC==(16, 2): # Tribe invitation received
author = packet.readUTF()
tribe = packet.readUTF()
self.dispatch('tribe_inv', author, tribe)
elif CCC==(26, 2): # Logged in successfully
player_id = packet.read32()
self.username = username = packet.readUTF()
played_time = packet.read32()
community = packet.read8()
pid = packet.read32()
self.dispatch('logged', player_id, username, played_time, community, pid)
elif CCC==(26, 3): # Handshake OK
online_players = packet.read32() # online players
connection.fingerprint = packet.read8()
community = packet.readUTF() # community
country = packet.readUTF() # country
self.authkey = packet.read32()
self.loop.create_task(self._heartbeat_loop())
await connection.send(Packet.new(8,2).write8(self.community).write8(0))
os_info = Packet.new(28,17).writeString('en').writeString('Linux')
os_info.writeString('LNX 29,0,0,140').write8(0)
await connection.send(os_info)
self.dispatch('login_ready', online_players, community, country)
elif CCC==(26, 12): # Login result
self.dispatch('login_result', packet.read8(), packet.readUTF(), packet.readUTF())
elif CCC==(26, 25): # Ping
self.dispatch('ping')
elif CCC==(28, 6): # Server ping
await connection.send(Packet.new(28, 6).write8(packet.read8()))
elif CCC==(29, 6): # Lua logs
self.dispatch('lua_log', packet.readUTF())
elif CCC==(31, 1): # Inventory data
self.inventory = Inventory.from_packet(packet)
self.inventory.client = self
self.dispatch('inventory_update', self.inventory)
elif CCC==(31, 2): # Update inventory item
id = packet.read16()
quantity = packet.read8()
if id in self.inventory.items:
item = self.inventory.items[id]
previous = item.quantity
item.quantity = quantity
self.dispatch('item_update', item, previous)
else:
item = InventoryItem(id=id, quantity=quantity)
self.inventory.items[item.id] = item
self.dispatch('new_item', item)
elif CCC==(31, 5): # Trade invite
player = self.room.get_player(pid=packet.read32())
trade = Trade(player, self)
self.trades.append(trade)
trade.alive = True
trade.on_invite = True
self.dispatch('trade_invite', trade)
elif CCC==(31, 6): # Trade error
name = packet.readUTF()
error = packet.read8()
if name == "":
if self.trade._other.username == name:
self.trade._close()
self.dispatch('trade_error', self.trade, error)
self.dispatch('trade_close', self.trade)
else:
for trade in self.trades:
if trade._other.username == name:
trade._close()
self.dispatch('trade_error', trade, error)
self.dispatch('trade_close', trade)
break
elif CCC==(31, 7): # Trade start
player = self.room.get_player(pid=packet.read32())
player.trade.on_invite = False
player.trade.alive = True
if self.trade is not None:
trade = self.trade
self.trade._close()
self.dispatch('trade_close', trade)
self.trade = player.trade
self.dispatch('trade_start', self.trade)
elif CCC==(31, 8): # Trade items
me = packet.readBool()
id = packet.read16()
adding = packet.readBool()
quantity = packet.read8()
quantity = (1 if adding else -1) * quantity
items = self.trade.items_me if me else self.trade.items_other
if id in items:
items[id] += quantity
else:
items[id] = quantity
if items[id] == 0:
del items[id]
self.trade.locked_me = False
self.trade.locked_other = False
self.dispatch('trade_item_change', self.trade, self if me else self.trade._other, id, quantity, items[id] if id in items else 0)
elif CCC==(31, 9): # Trade lock
if packet.readBool():
self.trade.locked_me = packet.readBool()
self.dispatch('trade_lock', self.trade, self, self.trade.locked_me)
else:
self.trade.locked_other = packet.readBool()
self.dispatch('trade_lock', self.trade, self.trade._other, self.trade.locked_other)
elif CCC==(31, 10): # Trade complete
trade = self.trade
self.trade._close()
self.dispatch('trade_complete', trade)
elif CCC==(44, 1): # Bulle switching
bulle_id = packet.read32()
bulle_ip = packet.readString().decode()
if self.bulle is not None:
self.bulle.close()
self.bulle = Connection('bulle', self, self.loop)
await self.bulle.connect(bulle_ip, self.main.address[1])
await self.bulle.send(Packet.new(*CCC).write32(bulle_id))
elif CCC==(44, 22): # Fingerprint offset changed
connection.fingerprint = packet.read8()
elif CCC==(60, 3): # Community platform
TC = packet.read16()
self.dispatch('raw_cp', TC, packet.copy(True))
if TC==3: # Connected to the community platform
self.dispatch('ready')
elif TC==55: # Channel join result
result = packet.read8()
self.dispatch('channel_joined_result', result)
elif TC==57: # Channel leave result
result = packet.read8()
self.dispatch('channel_leaved_result', result)
elif TC==59: # Channel /who result
idSequence = packet.read32()
result = packet.read8()
players = [Player(packet.readUTF()) for _ in range(packet.read16())]
self.dispatch('channel_who', idSequence, players)
elif TC==62: # Joined a channel
name = packet.readUTF()
if name in self._channels:
channel = [c for c in self._channels if c==name][0]
else:
channel = Channel(name, self)
self._channels.append(channel)
self.dispatch('channel_joined', channel)
elif TC==63: # Quit a channel
name = packet.readUTF()
if name in self._channels:
self._channels.remove(name)
self.dispatch('channel_closed', name)
elif TC==64: # Channel message
author, community = packet.readUTF(), packet.read32()
channel_name, message = packet.readUTF(), packet.readUTF()
channel = self.get_channel(channel_name)
if channel is None:
channel = Channel(channel_name, self)
self._channels.append(channel)
self.dispatch('channel_message', ChannelMessage(author, community, message, channel))
elif TC==65: # Tribe message
author, message = packet.readUTF(), packet.readUTF()
self.dispatch('tribe_message', author, message)
elif TC==66: # Whisper
author, commu, receiver, message = Player(packet.readUTF()), packet.read32(), Player(packet.readUTF()), packet.readUTF()
self.dispatch('whisper', Whisper(author, commu, receiver, message, self))
elif TC==88: # tribe member connected
self.dispatch('member_connected', packet.readUTF())
elif TC==90: # tribe member disconnected
self.dispatch('member_disconnected', packet.readUTF())
else:
if self.LOG_UNHANDLED_PACKETS:
print(CCC, TC, bytes(packet.buffer)[4:])
return False
elif CCC==(100, 67): # New inventory item
slot = packet.read8()
id = packet.read16()
quantity = packet.read8()
item = InventoryItem(id=id, quantity=quantity, slot=None if slot == 0 else slot)
self.inventory[id] = item
self.dispatch('new_item', item)
elif CCC==(144, 1): # Set player list
before = self.room.players
self.room.players = []
for player in range(packet.read16()):
self.room.players.append(Player.from_packet(packet))
for player in before:
if player.trade is not None:
after = self.room.get_player(pid=player.pid)
if after is not None:
player.trade._update_player(after)
else:
trade = player.trade
player.trade._close()
self.dispatch('trade_close', trade)
self.dispatch('bulk_player_update', before, self.room.players)
elif CCC==(144, 2): # Add a player
after = Player.from_packet(packet)
before = self.room.get_player(pid=after.pid)
self.room.players.append(after)
if before is None:
self.dispatch('player_join', after)
else:
self.room.players.remove(before)
if before.trade is not None:
before.trade._update_player(after)
self.dispatch('player_update', before, after)
else:
if self.LOG_UNHANDLED_PACKETS:
print(CCC, bytes(packet.buffer)[2:])
return False
return True
async def handle_old_packet(self, connection:Connection, oldCCC:tuple, data:list):
"""|coro|
Handles the known packets from the old protocol and dispatches events.
Subclasses should handle only the unhandled packets from this method.
Example: ::
class Bot(aiotfm.Client):
async def handle_old_packet(self, conn, oldCCC, data):
handled = await super().handle_old_packet(conn, data.copy())
if not handled:
# Handle here the unhandled packets.
pass
:param connection: :class:`aiotfm.connection.Connection` the connection that received the packet.
:param oldCCC: :class:`tuple` the packet identifiers on the old protocol.
:param data: :class:`list` the packet data.
:return: True if the packet got handled, False otherwise.
"""
if oldCCC==(8, 7): # Remove a player
player = self.room.get_player(pid=int(data[0]))
if player is not None:
self.room.players.remove(player)
if player.trade is not None:
trade = player.trade
player.trade._close()
self.dispatch('trade_close', trade)
self.dispatch('player_remove', player)
else:
if self.LOG_UNHANDLED_PACKETS:
print("[OLD]", oldCCC, data)
return False
return True
async def _heartbeat_loop(self):
"""|coro|
Send a packet every fifteen seconds to stay connected to the game.
"""
last_heartbeat = 0
while self.main.open:
if self.loop.time()-last_heartbeat>=15:
t = time.perf_counter()
await self.main.send(Packet.new(26, 26))
if self.bulle is not None and self.bulle.open:
await self.bulle.send(Packet.new(26, 26))
self.dispatch('heartbeat', (time.perf_counter()-t)*1000)
last_heartbeat = self.loop.time()
await asyncio.sleep(.5)
def get_channel(self, name):
if name is None:
return None
for channel in self._channels:
if channel.name==name:
return channel
def event(self, coro):
"""A decorator that registers an event.
More about events [here](Events.md).
"""
name = coro.__name__
if not name.startswith('on_'):
raise InvalidEvent("'{}' isn't a valid event name.".format(name))
if not asyncio.iscoroutinefunction(coro):
raise InvalidEvent("Couldn't register a non-coroutine function for the event {}.".format(name))
setattr(self, name, coro)
return coro
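# Minimal usage sketch (assumes a `bot = Client()` instance created elsewhere; the handler name
# must start with 'on_' and be a coroutine function, as enforced above):
# @bot.event
# async def on_ready():
#     print('Connected to the community platform.')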
def wait_for(self, event, condition=None, timeout=None, stopPropagation=False):
"""Wait for an event.
Example: ::
@client.event
async def on_room_message(author, message):
if message=='id':
await client.sendCommand('profile '+author)
profile = await client.wait_for('on_profile', lambda p: p.username==author)
await client.sendRoomMessage('Your id: {}'.format(profile.id))
:param event: :class:`str` the event name.
:param condition: Optional[:class:`function`] A predicate to check what to wait for. The arguments must meet the parameters of the event being waited for.
:param timeout: Optional[:class:`int`] the number of seconds before asyncio.TimeoutError is raised
:return: :class:`asyncio.Future` a future that you must await.
"""
event = event.lower()
future = self.loop.create_future()
if condition is None:
def condition(*a):
return True
if event not in self._waiters:
self._waiters[event] = []
self._waiters[event].append((condition, future, stopPropagation))
return asyncio.wait_for(future, timeout, loop=self.loop)
async def _run_event(self, coro, event_name, *args, **kwargs):
"""|coro|
Runs an event and handle the error if any.
:param coro: a coroutine function.
:param event_name: :class:`str` the event's name.
:param args: arguments to pass to the coro.
:param kwargs: keyword arguments to pass to the coro.
"""
try:
await coro(*args, **kwargs)
except asyncio.CancelledError:
pass
except Exception as e:
if hasattr(self, 'on_error'):
try:
await self.on_error(event_name, e, *args, **kwargs)
except asyncio.CancelledError:
pass
def dispatch(self, event, *args, **kwargs):
"""Dispatches events
:param event: :class:`str` event's name. (without 'on_')
:param args: arguments to pass to the coro.
:param kwargs: keyword arguments to pass to the coro.
"""
method = 'on_' + event
if method in self._waiters:
to_remove = []
group1, group2, feat1, feat2 = params
interactions = []
# Favorable cation-nucleophile interaction
if feat1.name == "Nucleophile" and feat2.name in CATIONS:
dipole_grp, dipole_type = group1, "Nucleophile"
ion_grp, ion_type = group2, "Cation"
elif feat1.name in CATIONS and feat2.name == "Nucleophile":
dipole_grp, dipole_type = group2, "Nucleophile"
ion_grp, ion_type = group1, "Cation"
# Favorable anion-electrophile interaction
elif feat1.name == "Electrophile" and feat2.name in ANIONS:
dipole_grp, dipole_type = group1, "Electrophile"
ion_grp, ion_type = group2, "Anion"
elif feat1.name in ANIONS and feat2.name == "Electrophile":
dipole_grp, dipole_type = group2, "Electrophile"
ion_grp, ion_type = group1, "Anion"
# Unfavorable anion-nucleophile interaction
elif feat1.name == "Nucleophile" and feat2.name in ANIONS:
dipole_grp, dipole_type = group1, "Nucleophile"
ion_grp, ion_type = group2, "Anion"
elif feat1.name in ANIONS and feat2.name == "Nucleophile":
dipole_grp, dipole_type = group2, "Nucleophile"
ion_grp, ion_type = group1, "Anion"
# Unfavorable cation-electrophile interaction
elif feat1.name == "Electrophile" and feat2.name in CATIONS:
dipole_grp, dipole_type = group1, "Electrophile"
ion_grp, ion_type = group2, "Cation"
elif feat1.name in CATIONS and feat2.name == "Electrophile":
dipole_grp, dipole_type = group2, "Electrophile"
ion_grp, ion_type = group1, "Cation"
else:
logger.warning("Ion-dipole interactions require a dipole and an ion group. However, the informed groups "
"have the features %s and %s." % (group1.feature_names, group2.feature_names))
return []
# A nucleophile may have only 1 atom (water oxygen).
part_charged_atm = dipole_grp.atoms[0]
# If a nucleophile has 2 atoms, it will select the partially negative atom based on the electronegativity.
if len(dipole_grp.atoms) == 2 and dipole_type == "Nucleophile":
part_charged_atm = dipole_grp.atoms[0] if (dipole_grp.atoms[0].electronegativity
> dipole_grp.atoms[1].electronegativity) else dipole_grp.atoms[1]
# If an electrophile has two atoms, it will select the partially positive atom based on the electronegativity.
elif len(dipole_grp.atoms) == 2 and dipole_type == "Electrophile":
part_charged_atm = dipole_grp.atoms[0] if (dipole_grp.atoms[0].electronegativity
< dipole_grp.atoms[1].electronegativity) else dipole_grp.atoms[1]
# Distance between the ion and the dipole.
id_dist = im.euclidean_distance(part_charged_atm.coord, ion_grp.centroid)
if (self.is_within_boundary(id_dist, "boundary_cutoff", le)
and self.is_within_boundary(id_dist, "max_id_dist_ion_multipole_inter", le)):
idy_angle = -1
if len(dipole_grp.atoms) == 2:
# Model: I ... D-Y, where I is the ion, D the dipole atom of interest (the electrophile or nucleophile),
# and Y is its counterpart.
y_atm = dipole_grp.atoms[1] if dipole_grp.atoms[0] == part_charged_atm else dipole_grp.atoms[0]
di_vect = ion_grp.centroid - part_charged_atm.coord
dy_vect = y_atm.coord - part_charged_atm.coord
idy_angle = im.angle(di_vect, dy_vect)
# Dipoles containing only one atom are allowed to pass without checking the angle IDY.
if len(dipole_grp.atoms) == 1 or self.is_within_boundary(idy_angle, "min_idy_ang_ion_multipole_inter", ge):
dipole_nb_coords = [nbi.coord for nbi in part_charged_atm.neighbors_info if nbi.atomic_num != 1]
params = {}
if len(dipole_nb_coords) > 1:
dipole_normal = im.calc_normal(dipole_nb_coords + [part_charged_atm.coord])
disp_angle = im.to_quad1(im.angle(dipole_normal, di_vect))
if self.is_within_boundary(disp_angle, "max_disp_ang_ion_multipole_inter", le):
params = {"id_dist_ion_multipole_inter": id_dist,
"idy_ang_ion_multipole_inter": idy_angle,
"disp_ang_ion_multipole_inter": disp_angle}
else:
params = {"id_dist_ion_multipole_inter": id_dist,
"idy_ang_ion_multipole_inter": idy_angle,
"disp_ang_ion_multipole_inter": -1}
if params:
if dipole_type == "Nucleophile" and ion_type == "Cation":
inter = InteractionType(dipole_grp, ion_grp, "Cation-nucleophile", params=params)
interactions.append(inter)
elif dipole_type == "Nucleophile" and ion_type == "Anion":
inter = InteractionType(dipole_grp, ion_grp, "Unfavorable anion-nucleophile", params=params)
interactions.append(inter)
elif dipole_type == "Electrophile" and ion_type == "Anion":
inter = InteractionType(ion_grp, dipole_grp, "Anion-electrophile", params=params)
interactions.append(inter)
elif dipole_type == "Electrophile" and ion_type == "Cation":
inter = InteractionType(ion_grp, dipole_grp, "Unfavorable cation-electrophile", params=params)
interactions.append(inter)
return interactions
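# Illustrative sketch of the dipole-atom selection above (hypothetical groups, not from the source):
#   nucleophile C=O (carbonyl): O is more electronegative than C, so O is taken as the partially
#       negative atom that points at a cation;
#   electrophile C=O seen from the carbon side: C is less electronegative, so C is taken as the
#       partially positive atom that an anion approaches.
# The I...D distance, the I-D-Y angle and the displacement from the dipole plane are then checked
# against the "ion_multipole" boundaries before an InteractionType is created.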
@staticmethod
def calc_multipolar(self, params):
"""Default method to calculate favorable and unfavorable dipole-dipole interactions.
Parameters
----------
params : tuple of (:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.features.ChemicalFeature`,\
:class:`~luna.mol.features.ChemicalFeature`)
The tuple follows the order (:math:`A`, :math:`B`, :math:`A_f`, :math:`B_f`), where
:math:`A` and :math:`B` are two :class:`~luna.mol.groups.AtomGroup` objects, and
:math:`A_f` and :math:`B_f` are their features (:class:`~luna.mol.features.ChemicalFeature` objects), respectively.
Returns
-------
: list
"""
if not self.add_non_cov:
return []
group1, group2, feat1, feat2 = params
interactions = []
if len(group1.atoms) not in (1, 2) or len(group2.atoms) not in (1, 2):
logger.warning("A dipole group should have 1 (for cases when the atom has only hydrogens bonded to it) or 2 atoms. "
"However, the informed groups '%s' and '%s' have %d and %d atoms, respectively."
% (group1, group2, len(group1.atoms), len(group2.atoms)))
return []
# The reference dipole will always be the second one, i.e., one of its atom will be the center in the angle NEY.
#
# Favorable interactions: in these cases, the Dipole 1 will always be the nucleophile and the Dipole 2 the
# electrophile in order to represent the nucleophile attack, i.e., the angles calculated using the dipole 2 as reference
# represent how the nucleophile approaches the electrophile.
if feat1.name == "Nucleophile" and feat2.name == "Electrophile":
dipole_grp1, dipole_type1 = group1, feat1.name
dipole_grp2, dipole_type2 = group2, feat2.name
elif feat2.name == "Nucleophile" and feat1.name == "Electrophile":
dipole_grp1, dipole_type1 = group2, feat2.name
dipole_grp2, dipole_type2 = group1, feat1.name
# Unfavorable interactions: in these cases, the reference dipole will depend on the number of atoms in the dipoles.
# With dipoles containing 1 atom, it takes a generous approach by ignoring angles and accepting everything.
# With dipoles containing two atoms, it requires that at least one of the angles fits the rules to be accepted.
elif feat1.name == feat2.name and (feat1.name == "Nucleophile" or feat1.name == "Electrophile"):
# If only one group contains 1 atom, use it as the dipole 2 because it is used as the reference to calculate
# the NEY angle. Since we take a generous approach, with one atom no angle will be calculated and the interaction
# will be accepted.
if len(group1.atoms) == 1 and len(group2.atoms) == 2:
dipole_grp1, dipole_type1 = group2, feat2.name
dipole_grp2, dipole_type2 = group1, feat1.name
# All the other number combinations ([2,1], [1,1], [2, 2]) come here.
else:
dipole_grp1, dipole_type1 = group1, feat1.name
dipole_grp2, dipole_type2 = group2, feat2.name
else:
logger.warning("Multipolar interactions require a nucleophile and an electrophile group. "
"However, the informed groups have the features %s and %s." % (group1.feature_names, group2.feature_names))
return []
# Ignore dipoles containing at least one common atom, which can happen to covalently bound dipoles.
# An example of it is the C-S-C substructure that contains two dipoles.
if any(atm in group2.atoms for atm in group1.atoms):
return []
# Atom 1 => Dipole 1
#
# A nucleophile may have only 1 atom (water oxygen).
dipole_atm1 = dipole_grp1.atoms[0]
# If it has 2 atoms, it will select the nucleophilic atom based on the electronegativity.
if len(dipole_grp1.atoms) == 2 and dipole_type1 == "Nucleophile":
dipole_atm1 = dipole_grp1.atoms[0] if (dipole_grp1.atoms[0].electronegativity
> dipole_grp1.atoms[1].electronegativity) else dipole_grp1.atoms[1]
# Or, if it is an electrophile, it will select the electrophilic (partially positive) atom based on the electronegativity.
elif len(dipole_grp1.atoms) == 2 and dipole_type1 == "Electrophile":
dipole_atm1 = dipole_grp1.atoms[0] if (dipole_grp1.atoms[0].electronegativity
< dipole_grp1.atoms[1].electronegativity) else dipole_grp1.atoms[1]
# Atom 2 => Dipole 2
#
# An electrophile may have only 1 atom. E.g.: NH4, although by default we consider it as an ion.
dipole_atm2 = dipole_grp2.atoms[0]
# If it has 2 atoms, it will select the nucleophilic atom based on the electronegativity.
if len(dipole_grp2.atoms) == 2 and dipole_type2 == "Nucleophile":
dipole_atm2 = dipole_grp2.atoms[0] if (dipole_grp2.atoms[0].electronegativity
> dipole_grp2.atoms[1].electronegativity) else dipole_grp2.atoms[1]
# Or, if it is an electrophile, it will select the electrophilic (partially positive) atom based on the electronegativity.
elif len(dipole_grp2.atoms) == 2 and dipole_type2 == "Electrophile":
dipole_atm2 = dipole_grp2.atoms[0] if (dipole_grp2.atoms[0].electronegativity
< dipole_grp2.atoms[1].electronegativity) else dipole_grp2.atoms[1]
# Model for favorable interactions: A-N ... E-Y
# Model for unfavorable interactions: A-N ... N-A, Y-E ... E-Y.
#
# Although there are two different models for unfavorable interactions, the method for them are equal to the
# favorable interaction. So, from now on, we will deal with them as if it was the first model.
#
# Distance between the nucleophile and electrophile.
ne_dist = im.euclidean_distance(dipole_atm1.coord, dipole_atm2.coord)
if (self.is_within_boundary(ne_dist, "boundary_cutoff", le)
and self.is_within_boundary(ne_dist, "max_ne_dist_multipolar_inter", le)):
# No angle can be calculated if the electrophile (dipole 2) has only one atom.
if len(dipole_grp2.atoms) == 1:
params = {"ne_dist_multipolar_inter": ne_dist,
"ney_ang_multipolar_inter": -1,
"disp_ang_multipolar_inter": -1,
"an_ey_ang_multipolar_inter": -1}
inter_type = ("Multipolar" if not dipole_type1 == dipole_type2
else "Unfavorable %s-%s" % (dipole_type1.lower(), dipole_type2.lower()))
inter = InteractionType(dipole_grp1, dipole_grp2, inter_type, directional=True, params=params)
interactions.append(inter)
else:
dipole1 = (dipole_grp1, dipole_atm1, dipole_type1)
dipole2 = (dipole_grp2, dipole_atm2, dipole_type2)
combinations = [(dipole1, dipole2)]
# For unfavorable interactions, it is necessary to evaluate each combination of dipoles. So, it can
# produce two interactions.
if | |
# -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2018 by XESS Corporation / <NAME> / <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Author information.
__author__ = '<NAME>'
__webpage__ = 'https://github.com/hildogjr/'
__company__ = 'University of Campinas - Brazil'
# Libraries.
import json
import requests
import re
import sys
import os
import copy
from collections import OrderedDict
if sys.version_info[0] < 3:
from urllib import quote_plus
else:
from urllib.parse import quote_plus
# KiCost definitions.
from ..global_vars import DEFAULT_CURRENCY, DEBUG_OVERVIEW, ERR_SCRAPE, KiCostError, W_NOINFO, NO_PRICE
from .. import DistData
# Distributors definitions.
from .distributor import distributor_class
# Uncomment for debug
# Use `debug('x + 1')` for instance.
# def debug(expression):
# frame = sys._getframe(1)
# distributor_class.logger.info(expression, '=', repr(eval(expression, frame.f_globals, frame.f_locals)))
MAX_PARTS_PER_QUERY = 20 # Maximum number of parts in a single query.
# Information to return from PartInfo KitSpace server.
QUERY_AVAIABLE_CURRENCIES = ['GBP', 'EUR', 'USD']
# DEFAULT_CURRENCY
QUERY_ANSWER = '''
mpn{manufacturer, part},
datasheet,
description,
specs{key, value},
offers(from: {DISTRIBUTORS}){
product_url,
sku {vendor, part},
description,
moq,
in_stock_quantity,
prices{''' + ','.join(QUERY_AVAIABLE_CURRENCIES) + '''}
}
'''
# Information not used: type,specs{key, name, value},image {url, credit_string, credit_url},stock_location
QUERY_ANSWER = re.sub(r'[\s\n]', '', QUERY_ANSWER)
QUERY_PART = 'query ($input: MpnInput!) { part(mpn: $input) {' + QUERY_ANSWER + '} }'
QUERY_MATCH = 'query ($input: [MpnOrSku]!){ match(parts: $input) {' + QUERY_ANSWER + '} }'
QUERY_SEARCH = 'query ($input: String!){ search(term: $input) {' + QUERY_ANSWER + '} }'
QUERY_URL = 'https://dev-partinfo.kitspace.org/graphql'
__all__ = ['api_partinfo_kitspace']
class api_partinfo_kitspace(distributor_class):
name = 'KitSpace'
type = 'api'
enabled = True
url = 'https://kitspace.org/' # Web site API information.
API_DISTRIBUTORS = ['digikey', 'farnell', 'mouser', 'newark', 'rs', 'arrow', 'tme', 'lcsc']
DIST_TRANSLATION = { # Distributor translation.
'Digikey': 'digikey',
'Farnell': 'farnell',
'Mouser': 'mouser',
'Newark': 'newark',
'RS': 'rs',
'TME': 'tme',
'Arrow Electronics': 'arrow',
'LCSC': 'lcsc',
}
# Dict to translate KiCost field names into KitSpace distributor names
KICOST2KITSPACE_DIST = {v: k for k, v in DIST_TRANSLATION.items()}
@staticmethod
def init_dist_dict():
if api_partinfo_kitspace.enabled:
distributor_class.add_distributors(api_partinfo_kitspace.API_DISTRIBUTORS)
@staticmethod
def query(query_parts, distributors, query_type=QUERY_MATCH):
'''Send query to server and return results.'''
distributors = [api_partinfo_kitspace.KICOST2KITSPACE_DIST[d] for d in distributors]
# Allow changing the URL for debug purposes
try:
url = os.environ['KICOST_KITSPACE_URL']
except KeyError:
url = QUERY_URL
# Sort the distributors to create a reproducible query
query_type = re.sub(r'\{DISTRIBUTORS\}', '["' + '","'.join(sorted(distributors)) + '"]', query_type)
# r = requests.post(url, {"query": QUERY_SEARCH, "variables": variables}) #TODO future use for ISSUE #17
variables = '{"input":[' + ','.join(query_parts) + ']}'
# Remove all spaces, even inside the manf#
# SET comment: this is how the code always worked. Octopart (used by KitSpace) ignores spaces inside manf# codes.
variables = variables.replace(' ', '')
# Do the query using POST
data = 'query={}&variables={}'.format(quote_plus(query_type), quote_plus(variables))
distributor_class.log_request(url, data)
data = OrderedDict()
data["query"] = query_type
data["variables"] = variables
response = requests.post(url, data)
distributor_class.log_response(response)
if response.status_code == requests.codes['ok']: # 200
results = json.loads(response.text)
return results
elif response.status_code == requests.codes['not_found']: # 404
raise KiCostError('Kitspace server not found. Check your internet connection.', ERR_SCRAPE)
elif response.status_code == requests.codes['request_timeout']: # 408
raise KiCostError('KitSpace is not responding.', ERR_SCRAPE)
elif response.status_code == requests.codes['bad_request']: # 400
raise KiCostError('Bad request to Kitspace server, probably due to an incorrect string '
'format. Check your `manf#` codes and contact the support team.', ERR_SCRAPE)
elif response.status_code == requests.codes['gateway_timeout']: # 504
raise KiCostError('One of the internal Kitspace services may be experiencing problems. Contact the Kitspace support.', ERR_SCRAPE)
else:
raise KiCostError('Kitspace error: ' + str(response.status_code), ERR_SCRAPE)
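# Rough sketch of what gets POSTed above (hypothetical part number, shortened for readability;
# the exact shape of each `query_parts` entry is built by the caller, not by this method):
#   query:     'query ($input: [MpnOrSku]!){ match(parts: $input) { mpn{...}, offers(...){...} } }'
#   variables: '{"input":[{"mpn":{"manufacturer":"","part":"CC0603KRX7R9BB104"}}]}'
# i.e. `query_parts` must already be JSON fragments; this method only joins them, strips spaces
# and wraps them in the GraphQL "variables" payload.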
@staticmethod
def get_spec(data, item, default=None):
'''Get the `value` field of the entry in `data['specs']` whose `key` field matches `item`.
Used to get information from the JSON response.'''
for d in data['specs']:
if d['key'] == item:
value = d['value']
return value if value is not None else default
return default
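# Hedged usage sketch (illustration only); the spec entries below are made up:
#
#   >>> data = {'specs': [{'key': 'lifecycle_status', 'value': 'Active'}]}
#   >>> api_partinfo_kitspace.get_spec(data, 'lifecycle_status', 'unknown')
#   'Active'
#   >>> api_partinfo_kitspace.get_spec(data, 'case_package', 'unknown')
#   'unknown'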
@staticmethod
def get_part_info(query, parts, distributors, currency, distributors_wanted):
'''Query PartInfo for quantity/price info and place it into the parts list.
`distributors_wanted` is the list of distributors we want for each query.
`distributors` is the list of all distributors we want, in general.
This difference is because some queries are for a specific distributor.
'''
# Translate from PartInfo distributor names to the names used internally by kicost.
dist_xlate = api_partinfo_kitspace.DIST_TRANSLATION
results = api_partinfo_kitspace.query(query, distributors)
# Loop through the response to the query and enter info into the parts list.
for part_query, part, dist_want, result in zip(query, parts, distributors_wanted, results['data']['match']):
if not result:
distributor_class.logger.warning(W_NOINFO+'No information found for parts \'{}\' query `{}`'.format(part.refs, str(part_query)))
continue
# Get the information of the part.
part.datasheet = result.get('datasheet')
part.lifecycle = api_partinfo_kitspace.get_spec(result, 'lifecycle_status', 'active').lower()
# Misc data collected, currently not used inside KiCost
part.update_specs({sp['key']: (sp['key'], sp['value']) for sp in result['specs'] if sp['value']})
# Loop through the offers from various dists for this particular part.
for offer in result['offers']:
# Get the distributor who made the offer and add their
# price/qty info to the parts list if its one of the accepted distributors.
dist = dist_xlate.get(offer['sku']['vendor'], '')
if dist not in dist_want:
# Not interested in this distributor
continue
# Get the DistData for this distributor
dd = part.dd.get(dist, DistData())
# This will happen if there are not enough entries in the price/qty list.
# As a stop-gap measure, just assign infinity to the part increment.
# A better alternative may be to examine the packaging field of the offer.
part_qty_increment = float("inf")
# Get pricing information from this distributor.
dist_currency = {cur: pri for cur, pri in offer['prices'].items() if pri}
if not dist_currency:
# Sometimes the API returns a minimum purchase of 0 and an invalid `price_tiers`.
distributor_class.logger.warning(NO_PRICE+'No price information found for parts \'{}\' query `{}`'.
format(part.refs, str(part_query)))
else:
prices = None
# Get the price tiers prioritizing:
# 1) The asked currency by KiCost user;
# 2) The default currency given by `DEFAULT_CURRENCY` in root `global_vars.py`;
# 3) The first non-null tier
if currency in dist_currency:
prices = dist_currency[currency]
dd.currency = currency
elif DEFAULT_CURRENCY in dist_currency:
prices = dist_currency[DEFAULT_CURRENCY]
dd.currency = DEFAULT_CURRENCY
else:
dd.currency, prices = next(iter(dist_currency.items()))
price_tiers = {qty: float(price) for qty, price in prices}
# Combine price lists for multiple offers from the same distributor
# to build a complete list of cut-tape and reeled components.
dd.price_tiers.update(price_tiers)
# Compute the quantity increment between the lowest two prices.
# This will be used to distinguish the cut-tape from the reeled components.
if len(price_tiers) > 1:
part_break_qtys = sorted(price_tiers.keys())
part_qty_increment = part_break_qtys[1] - part_break_qtys[0]
# Select the part SKU, web page, and available quantity.
# Each distributor can have different stock codes for the same part in different
# quantities / delivery package styles: cut-tape, reel, ...
# Therefore we select and overwrite a previous selection if one of the
# following conditions is met:
# 1. We don't have a selection for this part from this distributor yet.
# 2. The MOQ is smaller than for the current selection.
# 3. The part_qty_increment for this offer is smaller than that of the existing selection.
# (we prefer cut-tape style packaging over reels)
# 4. For DigiKey, we can't use part_qty_increment to distinguish between
# reel and cut-tape, so we need to look at the actual DigiKey part number.
# This procedure is made by the definition `distributors_info[dist]['ignore_cat#_re']`
# at the distributor profile.
dist_part_num = offer.get('sku', {}).get('part', '')
qty_avail
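# --- Illustrative sketch (not part of the original KiCost module) -------------
# Standalone rendition of the currency-priority rule used above: prefer the
# currency requested by the user, then DEFAULT_CURRENCY, then the first
# non-empty price list returned by the API. Names below are placeholders.
def _pick_price_tiers(dist_currency, currency, default_currency='USD'):
    if currency in dist_currency:
        return currency, dist_currency[currency]
    if default_currency in dist_currency:
        return default_currency, dist_currency[default_currency]
    return next(iter(dist_currency.items()))
# Example: _pick_price_tiers({'EUR': [[1, '0.35'], [10, '0.30']]}, 'USD')
# returns ('EUR', [[1, '0.35'], [10, '0.30']]).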
admx_policies:
admx_policies.append(policy_item)
log.trace("Search complete: %s seconds", time.time() - start_time)
if return_not_configured:
log.trace("Gathering non configured policies")
start_time = time.time()
not_configured_policies = ALL_CLASS_POLICY_XPATH(
admx_policy_definitions, registry_class=policy_class
)
for policy_item in admx_policies:
if policy_item in not_configured_policies:
not_configured_policies.remove(policy_item)
for not_configured_policy in not_configured_policies:
not_configured_policy_namespace = not_configured_policy.nsmap[
not_configured_policy.prefix
]
if not_configured_policy_namespace not in policy_vals:
policy_vals[not_configured_policy_namespace] = {}
policy_vals[not_configured_policy_namespace][
not_configured_policy.attrib["name"]
] = "Not Configured"
if return_full_policy_names:
if not_configured_policy_namespace not in full_names:
full_names[not_configured_policy_namespace] = {}
full_names[not_configured_policy_namespace][
not_configured_policy.attrib["name"]
] = _getFullPolicyName(
policy_item=not_configured_policy,
policy_name=not_configured_policy.attrib["name"],
return_full_policy_names=return_full_policy_names,
adml_language=adml_language,
)
log.trace(
"building hierarchy for non-configured item %s",
not_configured_policy.attrib["name"],
)
if not_configured_policy_namespace not in hierarchy:
hierarchy[not_configured_policy_namespace] = {}
hierarchy[not_configured_policy_namespace][
not_configured_policy.attrib["name"]
] = _build_parent_list(
policy_definition=not_configured_policy,
return_full_policy_names=return_full_policy_names,
adml_language=adml_language,
)
log.trace("Gathering complete: %s seconds", time.time() - start_time)
log.trace("Examining %s policies...", len(admx_policies))
start_time = time.time()
for admx_policy in admx_policies:
this_valuename = None
this_policy_setting = "Not Configured"
element_only_enabled_disabled = True
explicit_enable_disable_value_setting = False
if "key" in admx_policy.attrib:
this_key = admx_policy.attrib["key"]
else:
log.error(
'policy item %s does not have the required "key" attribute',
admx_policy.attrib,
)
break
if "valueName" in admx_policy.attrib:
this_valuename = admx_policy.attrib["valueName"]
if "name" in admx_policy.attrib:
this_policyname = admx_policy.attrib["name"]
else:
log.error(
'policy item %s does not have the required "name" attribute',
admx_policy.attrib,
)
break
this_policynamespace = admx_policy.nsmap[admx_policy.prefix]
if (
ENABLED_VALUE_XPATH(admx_policy)
and this_policy_setting == "Not Configured"
):
# some policies have a disabled list but not an enabled list
# added this to address those issues
if DISABLED_LIST_XPATH(admx_policy) or DISABLED_VALUE_XPATH(
admx_policy
):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkValueItemParent(
admx_policy,
this_policyname,
this_key,
this_valuename,
ENABLED_VALUE_XPATH,
policy_file_data,
):
this_policy_setting = "Enabled"
log.trace(
"%s is enabled by detected ENABLED_VALUE_XPATH", this_policyname
)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][
this_policyname
] = this_policy_setting
if (
DISABLED_VALUE_XPATH(admx_policy)
and this_policy_setting == "Not Configured"
):
# some policies have a disabled list but not an enabled list
# added this to address those issues
if ENABLED_LIST_XPATH(admx_policy) or ENABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkValueItemParent(
admx_policy,
this_policyname,
this_key,
this_valuename,
DISABLED_VALUE_XPATH,
policy_file_data,
):
this_policy_setting = "Disabled"
log.trace(
"%s is disabled by detected DISABLED_VALUE_XPATH",
this_policyname,
)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][
this_policyname
] = this_policy_setting
if (
ENABLED_LIST_XPATH(admx_policy)
and this_policy_setting == "Not Configured"
):
if DISABLED_LIST_XPATH(admx_policy) or DISABLED_VALUE_XPATH(
admx_policy
):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkListItem(
admx_policy,
this_policyname,
this_key,
ENABLED_LIST_XPATH,
policy_file_data,
):
this_policy_setting = "Enabled"
log.trace(
"%s is enabled by detected ENABLED_LIST_XPATH", this_policyname
)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][
this_policyname
] = this_policy_setting
if (
DISABLED_LIST_XPATH(admx_policy)
and this_policy_setting == "Not Configured"
):
if ENABLED_LIST_XPATH(admx_policy) or ENABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkListItem(
admx_policy,
this_policyname,
this_key,
DISABLED_LIST_XPATH,
policy_file_data,
):
this_policy_setting = "Disabled"
log.trace(
"%s is disabled by detected DISABLED_LIST_XPATH",
this_policyname,
)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][
this_policyname
] = this_policy_setting
if not explicit_enable_disable_value_setting and this_valuename:
# the policy has a key/valuename but no explicit enabled/Disabled
# Value or List
# these seem to default to a REG_DWORD 1 = "Enabled" **del. = "Disabled"
if _regexSearchRegPolData(
re.escape(
_buildKnownDataSearchString(
this_key, this_valuename, "REG_DWORD", "1"
)
),
policy_file_data,
):
this_policy_setting = "Enabled"
log.trace(
"%s is enabled by no explicit enable/disable list or value",
this_policyname,
)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][
this_policyname
] = this_policy_setting
elif _regexSearchRegPolData(
re.escape(
_buildKnownDataSearchString(
this_key,
this_valuename,
"REG_DWORD",
None,
check_deleted=True,
)
),
policy_file_data,
):
this_policy_setting = "Disabled"
log.trace(
"%s is disabled by no explicit enable/disable list or value",
this_policyname,
)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][
this_policyname
] = this_policy_setting
if ELEMENTS_XPATH(admx_policy):
if element_only_enabled_disabled or this_policy_setting == "Enabled":
# TODO does this need to be modified based on the 'required' attribute?
required_elements = {}
configured_elements = {}
policy_disabled_elements = 0
for elements_item in ELEMENTS_XPATH(admx_policy):
for child_item in elements_item:
this_element_name = _getFullPolicyName(
policy_item=child_item,
policy_name=child_item.attrib["id"],
return_full_policy_names=return_full_policy_names,
adml_language=adml_language,
)
required_elements[this_element_name] = None
child_key = child_item.attrib.get("key", this_key)
child_valuename = child_item.attrib.get(
"valueName", this_valuename
)
if etree.QName(child_item).localname == "boolean":
# https://msdn.microsoft.com/en-us/library/dn605978(v=vs.85).aspx
if child_item:
if (
TRUE_VALUE_XPATH(child_item)
and this_element_name not in configured_elements
):
if _checkValueItemParent(
child_item,
this_policyname,
child_key,
child_valuename,
TRUE_VALUE_XPATH,
policy_file_data,
):
configured_elements[
this_element_name
] = True
log.trace(
"element %s is configured true",
child_item.attrib["id"],
)
if (
FALSE_VALUE_XPATH(child_item)
and this_element_name not in configured_elements
):
if _checkValueItemParent(
child_item,
this_policyname,
child_key,
child_valuename,
FALSE_VALUE_XPATH,
policy_file_data,
):
configured_elements[
this_element_name
] = False
policy_disabled_elements = (
policy_disabled_elements + 1
)
log.trace(
"element %s is configured false",
child_item.attrib["id"],
)
# WARNING - no standard ADMX files use true/falseList
# so this hasn't actually been tested
if (
TRUE_LIST_XPATH(child_item)
and this_element_name not in configured_elements
):
log.trace("checking trueList")
if _checkListItem(
child_item,
this_policyname,
this_key,
TRUE_LIST_XPATH,
policy_file_data,
):
configured_elements[
this_element_name
] = True
log.trace(
"element %s is configured true",
child_item.attrib["id"],
)
if (
FALSE_LIST_XPATH(child_item)
and this_element_name not in configured_elements
):
log.trace("checking falseList")
if _checkListItem(
child_item,
this_policyname,
this_key,
FALSE_LIST_XPATH,
policy_file_data,
):
configured_elements[
this_element_name
] = False
policy_disabled_elements = (
policy_disabled_elements + 1
)
log.trace(
"element %s is configured false",
child_item.attrib["id"],
)
else:
if _regexSearchRegPolData(
re.escape(
_processValueItem(
child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=True,
)
),
policy_file_data,
):
configured_elements[this_element_name] = False
policy_disabled_elements = (
policy_disabled_elements + 1
)
log.trace(
"element %s is configured false",
child_item.attrib["id"],
)
elif _regexSearchRegPolData(
re.escape(
_processValueItem(
child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=False,
)
),
policy_file_data,
):
configured_elements[this_element_name] = True
log.trace(
"element %s is configured true",
child_item.attrib["id"],
)
elif (
etree.QName(child_item).localname == "decimal"
or etree.QName(child_item).localname == "text"
or etree.QName(child_item).localname == "longDecimal"
or etree.QName(child_item).localname == "multiText"
):
# https://msdn.microsoft.com/en-us/library/dn605987(v=vs.85).aspx
if _regexSearchRegPolData(
re.escape(
_processValueItem(
child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=True,
)
),
policy_file_data,
):
configured_elements[this_element_name] = "Disabled"
policy_disabled_elements = (
policy_disabled_elements + 1
)
log.trace(
"element %s is disabled",
child_item.attrib["id"],
)
elif _regexSearchRegPolData(
re.escape(
_processValueItem(
child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=False,
)
),
policy_file_data,
):
configured_value = _getDataFromRegPolData(
_processValueItem(
child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=False,
),
policy_file_data,
)
configured_elements[
this_element_name
] = configured_value
log.trace(
"element %s is enabled, value == %s",
child_item.attrib["id"],
configured_value,
)
elif etree.QName(child_item).localname == "enum":
if _regexSearchRegPolData(
re.escape(
_processValueItem(
child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=True,
)
),
policy_file_data,
):
log.trace(
"enum element %s is disabled",
child_item.attrib["id"],
)
configured_elements[this_element_name] = "Disabled"
policy_disabled_elements = (
policy_disabled_elements + 1
)
else:
for enum_item in child_item:
if _checkValueItemParent(
enum_item,
child_item.attrib["id"],
child_key,
child_valuename,
VALUE_XPATH,
policy_file_data,
):
if VALUE_LIST_XPATH(enum_item):
log.trace("enum item has a valueList")
if _checkListItem(
enum_item,
this_policyname,
child_key,
VALUE_LIST_XPATH,
policy_file_data,
):
log.trace(
"all valueList items exist in"
" file"
)
configured_elements[
this_element_name
] = _getAdmlDisplayName(
adml_policy_resources,
enum_item.attrib["displayName"],
)
break
else:
configured_elements[
this_element_name
] = _getAdmlDisplayName(
adml_policy_resources,
enum_item.attrib["displayName"],
)
break
elif etree.QName(child_item).localname == "list":
return_value_name = False
if (
"explicitValue" in child_item.attrib
and child_item.attrib["explicitValue"].lower()
== "true"
):
log.trace(
"explicitValue list, we will return value names"
)
return_value_name = True
regex_str = [
r"(?!\*",
r"\*",
"D",
"e",
"l",
"V",
"a",
"l",
"s",
r"\.",
")",
]
delvals_regex = "\x00".join(regex_str)
delvals_regex = salt.utils.stringutils.to_bytes(
delvals_regex
)
if _regexSearchRegPolData(
re.escape(
_processValueItem(
child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=False,
)
)
+ delvals_regex,
policy_file_data,
):
configured_value = _getDataFromRegPolData(
_processValueItem(
child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=False,
),
policy_file_data,
return_value_name=return_value_name,
)
configured_elements[
this_element_name
] = configured_value
log.trace(
"element %s is enabled values: %s",
child_item.attrib["id"],
configured_value,
)
elif _regexSearchRegPolData(
re.escape(
_processValueItem(
child_item,
child_key,
child_valuename,
admx_policy,
elements_item,
check_deleted=True,
)
),
policy_file_data,
):
configured_elements[this_element_name] = "Disabled"
policy_disabled_elements = (
policy_disabled_elements + 1
)
log.trace(
"element %s is disabled",
child_item.attrib["id"],
)
if element_only_enabled_disabled:
if len(required_elements.keys()) > 0 and len(
configured_elements.keys()
) == len(required_elements.keys()):
if policy_disabled_elements == len(
required_elements.keys()
):
log.trace(
"%s is disabled by all enum elements",
this_policyname,
)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][
this_policyname
] = "Disabled"
else:
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][
this_policyname
] = configured_elements
log.trace(
"%s is enabled by enum elements", this_policyname
)
else:
if this_policy_setting == "Enabled":
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][
this_policyname
] = configured_elements
if (
return_full_policy_names
and this_policynamespace in policy_vals
and this_policyname in policy_vals[this_policynamespace]
):
if this_policynamespace not in full_names:
full_names[this_policynamespace] = {}
full_names[this_policynamespace][this_policyname] = _getFullPolicyName(
policy_item=admx_policy,
policy_name=admx_policy.attrib["name"],
return_full_policy_names=return_full_policy_names,
adml_language=adml_language,
)
# Make sure that we're passing the full policy name
# This issue was found when setting the `Allow
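# --- Illustrative sketch (not part of the original Salt module) ---------------
# The loop above accumulates its results into nested dicts keyed first by ADMX
# namespace and then by policy name. A policy maps either to a simple state
# string or, for element-based policies, to a dict of configured elements.
# Namespace, policy and element names below are made-up placeholders.
example_policy_vals = {
    "Microsoft.Policies.Example": {
        "SimplePolicy": "Not Configured",              # plain enable/disable policy
        "ElementPolicy": {                             # policy with <elements>
            "Server URL": "http://policy.example.local",
            "Enable feature": True,
        },
    },
}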
from __future__ import division
import io
import os
import logging
from math import ceil
import numpy as np
from sklearn.utils import Bunch
from keras.models import Sequential, Model
from keras.layers import (Conv2D, Dropout, Input, concatenate, MaxPooling2D,
Conv2DTranspose, UpSampling2D)
from keras.callbacks import (ModelCheckpoint, EarlyStopping, ReduceLROnPlateau,
TensorBoard, LambdaCallback, CSVLogger)
from keras.optimizers import Adam
from keras.layers.noise import GaussianNoise
from coindeblend.models import UNet_modular
from coindeblend.scores import jaccard_coef_int, iou
import matplotlib.pyplot as plt
import tensorflow as tf
def plot_to_image(figure):
"""Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
buf.close()
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
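# Hedged usage sketch (illustration only): converting a throwaway matplotlib
# figure into a batched TF image tensor, e.g. for use with tf.summary.image().
def _example_plot_to_image():
    fig = plt.figure()
    plt.plot([0, 1, 2], [0, 1, 4])
    image = plot_to_image(fig)   # tensor of shape (1, H, W, 4), dtype uint8
    return image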
class ObjectDetector(object):
"""Object detector.
Parameters
----------
batch_size : int, optional
The batch size used during training. Set by default to 32 samples.
epoch : int, optional
The number of epochs for which the model will be trained. Set by default
to 10 epochs.
model_check_point : bool, optional
Whether to create a callback for intermediate models.
Attributes
----------
model_ : object
The DNN model.
params_model_ : Bunch dictionary
All hyper-parameters to build the DNN model.
"""
def __init__(self, batch_size=32, epoch=10, model_check_point=True,
filename=None, maindir=None, seed=42, plot_history=False,
display_img=None):
self.model_, self.params_model_ = self._build_model()
self.batch_size = batch_size
self.epoch = epoch
self.model_check_point = model_check_point
self.filename = filename
self.maindir = maindir
self.seed = seed
self._plot_history = plot_history
self.log = logging.getLogger(__name__)
self.test_display_img = display_img
self._init_repos()
self._write_info()
def _write_info(self):
self.log.info("")
self.log.info("\tModel summary")
self.log.info("\t=============")
self.model_.summary(print_fn=lambda x: self.log.info(f"\t{x}"))
self.log.info("")
self.log.info("\tParameters")
self.log.info("\t==========")
for k, v in self.params_model_.items():
self.log.info(f"\t{k}: {v}")
self.log.info("")
def _init_repos(self):
self.weightdir = os.path.join(self.maindir, 'weights')
self.plotdir = os.path.join(self.maindir, 'plots')
self.logdir = os.path.join(os.getenv('COIN'), 'projects', 'logs')
os.makedirs(self.weightdir, exist_ok=True)
os.makedirs(self.plotdir, exist_ok=True)
os.makedirs(self.logdir, exist_ok=True)
def load_weights(self, weights_file):
self.model_.load_weights(weights_file)
def fit(self, X, y):
# build the box encoder to later encode y to make usable in the model
train_dataset = BatchGeneratorBuilder(X, y)
train_generator, val_generator, n_train_samples, n_val_samples = \
train_dataset.get_train_valid_generators(
batch_size=self.batch_size,
valid_ratio=self.params_model_.valid_ratio)
# create the callbacks to get during fitting
callbacks = self._build_callbacks()
# fit the model
history = self.model_.fit_generator(
generator=train_generator,
steps_per_epoch=ceil(n_train_samples / self.batch_size),
epochs=self.epoch,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=ceil(n_val_samples / self.batch_size))
if self._plot_history:
self.plot_history(history)
def predict(self, X):
if X.ndim == 3:
X = np.expand_dims(X, -1)
return self.model_.predict(X)
def predict_score(self, X, y_true):
if X.ndim == 3:
X = np.expand_dims(X, -1)
if y_true.ndim == 3:
y_true = np.expand_dims(y_true, -1)
y_pred = self.model_.predict(X)
return iou(y_true, y_pred)
def plot_history(self, history):
import matplotlib.pyplot as plt
self.log.info(" => Creating plots..")
plt.figure()
plt.semilogy(history.epoch, history.history['loss'], label='train loss')
plt.semilogy(
history.epoch, history.history['val_loss'], label='valid loss')
plt.title('Loss history')
plt.legend()
plt.tight_layout()
loss_plot_file = os.path.join(
self.plotdir, f"{self.filename}_train_history.png")
plt.savefig(loss_plot_file)
plt.close()
self.log.info(f"\tplot saved in {loss_plot_file}")
plt.figure()
plt.plot(history.epoch, history.history['acc'], label='train accuracy')
plt.plot(history.epoch, history.history['val_acc'], label='valid accuracy')
plt.title('Accuracy history')
plt.legend()
plt.tight_layout()
accuracy_plot_file = os.path.join(
self.plotdir, f"{self.filename}_train_accuracy.png")
plt.savefig(accuracy_plot_file)
plt.close()
self.log.info(f"\tplot saved in {accuracy_plot_file}")
###########################################################################
# Setup model
@staticmethod
def _init_params_model():
params_model = Bunch()
# image and class parameters
params_model.img_rows = 128
params_model.img_cols = 128
params_model.img_channels = 1
# ARCHITECTURE PARAMS
# they depend exclusively on your model
# the names can be changed since they will only be called by your model
params_model.output_channels = 3
params_model.depth = 6
params_model.init_filt_size = 64
params_model.dropout_rate = 0.3
# LOSS
# this is basically the metric for optimizing your model
# this needs to be selected in accordance to the task you want to achieve
params_model.keras_loss = 'binary_crossentropy'
# params_model.keras_loss = 'mse'
# OPTIMIZER PARAMS
# these values are good starting point
# you should not change them during the first runs.
params_model.lr = 1e-4
params_model.beta_1 = 0.9
params_model.beta_2 = 0.999
params_model.epsilon = 1e-08
params_model.decay = 5e-05
params_model.valid_ratio = 0.2
# callbacks parameters
# params_model.early_stopping = True
params_model.early_stopping = False
params_model.es_patience = 12
params_model.es_min_delta = 0.001
params_model.reduce_learning_rate = True
params_model.lr_patience = 5
params_model.lr_factor = 0.5
params_model.lr_min_delta = 0.001
params_model.lr_cooldown = 2
params_model.tensorboard = True
params_model.tb_write_grads = True
return params_model
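# Hedged usage sketch (illustration only): the Bunch returned above supports
# attribute access, so individual hyper-parameters can be overridden before
# building a model, e.g.
#
#   params = ObjectDetector._init_params_model()
#   params.lr = 1e-3      # larger learning rate for a quick experiment
#   params.depth = 4      # shallower U-Net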
def _build_model(self):
# load the parameters for the model
params_model = self._init_params_model()
#######################################################################
#
# --- CHANGE HERE ---
#
# The deep neural network model can be imported from an external file
# like here or be defined right below.
model = UNet_modular(
input_shape=(params_model.img_rows,
params_model.img_cols,
params_model.img_channels),
output_channels=params_model.output_channels,
depth=params_model.depth,
filt_size=params_model.init_filt_size,
dropout_rate=params_model.dropout_rate)
optimizer = Adam(lr=params_model.lr)
#
#
#######################################################################
model.compile(optimizer=optimizer,
loss=params_model.keras_loss,
metrics=['acc'])
return model, params_model
def _build_callbacks(self):
logdir = os.path.join(self.logdir, f"{self.filename}")
callbacks = []
epoch_logger = LambdaCallback(
on_epoch_begin=lambda epoch, logs: self.log.info(
f"\t\tStarting Epoch {epoch}/{self.epoch}"))
callbacks.append(epoch_logger)
csv_logger = CSVLogger(os.path.join(self.plotdir, 'history.csv'))
callbacks.append(csv_logger)
if self.model_check_point:
wdir = os.path.join(self.weightdir, f'{self.filename}_weights_best.h5')
callbacks.append(
ModelCheckpoint(wdir,
monitor='val_loss',
save_best_only=True,
save_weights_only=True,
period=1,
verbose=1))
# add early stopping
if self.params_model_.early_stopping:
callbacks.append(
EarlyStopping(monitor='val_loss',
min_delta=self.params_model_.es_min_delta,
patience=self.params_model_.es_patience,
verbose=1))
# reduce learning-rate when reaching plateau
if self.params_model_.reduce_learning_rate:
callbacks.append(
ReduceLROnPlateau(monitor='val_loss',
factor=self.params_model_.lr_factor,
patience=self.params_model_.lr_patience,
cooldown=self.params_model_.lr_cooldown,
# min_delta=self.params_model_.lr_min_delta,
verbose=1))
if self.params_model_.tensorboard:
callbacks.append(
TensorBoard(log_dir=logdir,
write_grads=self.params_model_.tb_write_grads,
batch_size=self.batch_size)
)
return callbacks
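# Hedged usage sketch (illustration only): a minimal training run. The arrays
# and directory below are placeholders; `maindir` and the COIN environment
# variable must exist for _init_repos() to succeed.
#
#   detector = ObjectDetector(batch_size=8, epoch=2, maindir='/tmp/coin_run',
#                             filename='unet_test', plot_history=True)
#   detector.fit(X_train, y_train)
#   masks = detector.predict(X_test)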
###############################################################################
# Batch generator
class BatchGeneratorBuilder(object):
"""A batch generator builder for generating batches of images on the fly.
This class is a way to build training and
validation generators that yield each time a tuple (X, y) of mini-batches.
The generators are built in a way to fit into keras API of `fit_generator`
(see https://keras.io/models/model/).
The fit function from `Classifier` should then use the instance
to build train and validation generators, using the method
`get_train_valid_generators`
Parameters
==========
X_array : ArrayContainer of int
vector of image data to train on
y_array : vector of int
vector of object labels corresponding to `X_array`
"""
def __init__(self, X_array, y_array):
self.X_array = X_array
self.y_array = y_array
self.nb_examples = len(X_array)
self.X_single_channel = X_array.ndim == 3
self.y_single_channel = y_array.ndim == 3
def get_train_valid_generators(self, batch_size=256, valid_ratio=0.1):
"""Build train and valid generators for keras.
This method is used by the user-defined `Classifier` to build train
and valid generators that will be used in keras `fit_generator`.
Parameters
==========
batch_size : int
size of mini-batches
valid_ratio : float between 0 and 1
ratio of validation data
Returns
=======
a 4-tuple (gen_train, gen_valid, nb_train, nb_valid) where:
- gen_train is a generator function for training data
- gen_valid is a generator function for valid data
- nb_train is the number of training examples
- nb_valid is the number of validation examples
The number of training and validation data are necessary
so that we can use the keras method `fit_generator`.
"""
nb_valid = int(valid_ratio * self.nb_examples)
nb_train = self.nb_examples - nb_valid
indices = np.arange(self.nb_examples)
train_indices = indices[0:nb_train]
valid_indices = indices[nb_train:]
gen_train = self._get_generator(
indices=train_indices, batch_size=batch_size)
gen_valid = self._get_generator(
indices=valid_indices, batch_size=batch_size)
return gen_train, gen_valid, nb_train, nb_valid
def _get_generator(self, indices=None, batch_size=32):
if indices is None:
indices = np.arange(self.nb_examples)
# Infinite loop, as required by keras `fit_generator`.
# However, as we provide the number of examples per epoch
# and the user specifies the total number of epochs, it will
# be able to end.
while True:
X = self.X_array[indices]
y = self.y_array[indices]
# converting to float needed?
X = np.array(X, dtype='float32')
y = np.array(y, dtype='float32')
# Yielding mini-batches
for i in range(0, len(X), batch_size):
if self.X_single_channel:
X_batch = [np.expand_dims(img, -1)
for img in X[i:i + batch_size]]
else:
X_batch = [img for img in X[i:i + batch_size]]
if self.y_single_channel:
y_batch = [np.expand_dims(seg, -1)
for seg in y[i:i + batch_size]]
else:
y_batch = [seg for seg in y[i:i + batch_size]]
for j in range(len(X_batch)):
# flip images
if np.random.randint(2):
X_batch[j] = np.flip(X_batch[j], axis=0)
y_batch[j] = np.flip(y_batch[j], axis=0)
if np.random.randint(2):
X_batch[j] = np.flip(X_batch[j], axis=1)
y_batch[j] = np.flip(y_batch[j], axis=1)
# TODO add different data augmentation steps
yield np.array(X_batch), np.array(y_batch)
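# Hedged usage sketch (illustration only): wiring the builder into a Keras-style
# training setup with random data. Shapes below are arbitrary examples.
def _example_batch_generator():
    X = np.random.rand(16, 128, 128)                             # single-channel images
    y = np.random.randint(0, 2, (16, 128, 128)).astype('float32')
    builder = BatchGeneratorBuilder(X, y)
    gen_train, gen_valid, nb_train, nb_valid = \
        builder.get_train_valid_generators(batch_size=4, valid_ratio=0.25)
    X_batch, y_batch = next(gen_train)                           # each (4, 128, 128, 1)
    return X_batch.shape, y_batch.shape, nb_train, nb_valid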
def main():
import sys
import time
try:
extra_arg = sys.argv[1]
except IndexError:
extra_arg = ""
if extra_arg in ['--help', '-h']:
print("\nUsage:\n"
f"\tpython {sys.argv[0]} <name/id>")
sys.exit()
filename = os.path.splitext(sys.argv[0])[0]
job_id = "_".join([f"{filename}"] + sys.argv[1:])
maindir = os.path.dirname(os.path.abspath(__file__))
datadir = os.getenv("COINBLEND_DATADIR")
workdir = os.path.join(maindir, "jobs", job_id)
logfile = os.path.join(workdir, "run.log")
resfile = os.path.join(maindir, 'results.csv')
modelfile = os.path.join(workdir, "model.json")
fullmodelfile = os.path.join(workdir, "fullmodel.h5")
predictionfile = os.path.join(workdir, "test_predictions.npy")
if os.path.exists(workdir):
print("\n --== WARNING ==--")
print(f"Directory 'jobs/{job_id}' already existing")
print("Job aborting..")
sys.exit()
os.makedirs(workdir)
logging.basicConfig(filename=logfile, level=logging.INFO)
log = logging.getLogger(__name__)
log.info(" | |
from datetime import datetime
from importlib import import_module
import inspect
import json
import logging
from multiprocessing import Process
from psycopg2.extras import execute_batch
import sys
import time
from django.conf import settings
from django.contrib.admin.utils import quote
from django.contrib.auth.models import AbstractUser, Group
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core import mail
from django.core.validators import FileExtensionValidator
from django.db import models, DEFAULT_DB_ALIAS, connections, transaction
from django.db.models import Q
from django.db.models.signals import pre_delete
from django.dispatch.dispatcher import receiver
from django import forms
from django.forms.models import modelform_factory
from django.urls import NoReverseMatch, reverse
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.html import mark_safe, escape
from django.utils.translation import gettext_lazy as _
from django.utils.text import capfirst
from .fields import JSONBField
from .. import runFunction
logger = logging.getLogger(__name__)
class HierarchyModel(models.Model):
lft = models.PositiveIntegerField(
db_index=True, editable=False, null=True, blank=True
)
rght = models.PositiveIntegerField(null=True, editable=False, blank=True)
lvl = models.PositiveIntegerField(null=True, editable=False, blank=True)
name = models.CharField(
_("name"), max_length=300, primary_key=True, help_text=_("Unique identifier")
)
owner = models.ForeignKey(
"self",
verbose_name=_("owner"),
null=True,
blank=True,
related_name="xchildren",
help_text=_("Hierarchical parent"),
on_delete=models.SET_NULL,
)
def save(self, *args, **kwargs):
# Trigger recalculation of the hierarchy.
# TODO this triggers the recalculation in too many cases, including a lot
# of changes which don't require it. Alternative solution is to use the
# pre-save signal which has more information.
self.lft = None
self.rght = None
self.lvl = None
# Call the real save() method
super().save(*args, **kwargs)
def delete(self, *args, **kwargs):
try:
# Update an arbitrary other object to trigger recalculation of the hierarchy
obj = self.__class__.objects.using(self._state.db).exclude(pk=self.pk)[0]
obj.lft = None
obj.rght = None
obj.lvl = None
obj.save(update_fields=["lft", "rght", "lvl"], using=self._state.db)
except Exception:
# Failure can happen when eg we delete the last record
pass
# Call the real delete() method
super().delete(*args, **kwargs)
class Meta:
abstract = True
@classmethod
def rebuildHierarchy(cls, database=DEFAULT_DB_ALIAS):
# Verify whether we need to rebuild or not.
# We search for the first record whose lft field is null.
if len(cls.objects.using(database).filter(lft__isnull=True)[:1]) == 0:
return
nodes = {}
children = {}
updates = []
def tagChildren(me, left, level):
right = left + 1
# Get all children of this node
for i in children.get(me, []):
# Recursive execution of this function for each child of this node
right = tagChildren(i, right, level + 1)
# After processing the children of this node we now know its left and right values
updates.append((left, right, level, me))
# Remove from node list (to mark as processed)
del nodes[me]
# Return the right value of this node + 1
return right + 1
# Load all nodes in memory
for i in cls.objects.using(database).values("name", "owner"):
if i["name"] == i["owner"]:
logging.error("Data error: '%s' points to itself as owner" % i["name"])
nodes[i["name"]] = None
else:
nodes[i["name"]] = i["owner"]
if i["owner"]:
if not i["owner"] in children:
children[i["owner"]] = set()
children[i["owner"]].add(i["name"])
keys = sorted(nodes.items())
# Loop over nodes without parent
cnt = 1
for i, j in keys:
if j is None:
cnt = tagChildren(i, cnt, 0)
if nodes:
# If the nodes dictionary isn't empty, it is an indication of an
# invalid hierarchy.
# There are loops in your hierarchy, ie parent-chains not ending
# at a top-level node without parent.
bad = nodes.copy()
updated = True
while updated:
updated = False
for i in list(bad.keys()):  # iterate over a copy, since entries are deleted below
ok = True
for j, k in bad.items():
if k == i:
ok = False
break
if ok:
# If none of the bad keys points to me as a parent, I am not part of a loop
del bad[i]
updated = True
logging.error("Data error: Hierarchy loops among %s" % sorted(bad.keys()))
for i, j in sorted(bad.items()):
children[j].remove(i)
nodes[i] = None
# Continue loop over nodes without parent
keys = sorted(nodes.items())
for i, j in keys:
if j is None:
cnt = tagChildren(i, cnt, 0)
# Write all results to the database
with transaction.atomic(using=database):
cursor = connections[database].cursor()
execute_batch(
cursor,
"update %s set lft=%%s, rght=%%s, lvl=%%s where name = %%s"
% connections[database].ops.quote_name(cls._meta.db_table),
updates,
)
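# Worked example (illustration only): for a tiny tree  All -> {A, B}, the
# nested-set tagging above yields
#   All: lft=1, rght=6, lvl=0
#   A:   lft=2, rght=3, lvl=1
#   B:   lft=4, rght=5, lvl=1
# so that X is a descendant of Y exactly when Y.lft < X.lft and X.rght < Y.rght,
# which is what makes subtree queries cheap without recursive SQL.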
@classmethod
def createRootObject(cls, database=DEFAULT_DB_ALIAS):
"""
Rebuilds the hierarchy, and also assures we only have a single root object
"""
# Rebuild hierarchy
cls.rebuildHierarchy(database=database)
# Create root
roots = cls.objects.using(database).filter(lvl=0).count()
if roots != 1:
# create an 'All dimensions' item (that might already be there)
rootname = "All %ss" % cls._meta.db_table
obj, created = cls.objects.using(database).get_or_create(name=rootname)
if created:
obj.description = "Automatically created root object"
obj.save()
else:
# Force a hierarchy rebuild that would otherwise not occur because all lft values are already populated.
obj.lft = None
obj.save(update_fields=["lft"])
cls.objects.using(database).filter(owner__isnull=True).exclude(
name=rootname
).update(owner=obj)
# Rebuild the hierarchy again with the new root
cls.rebuildHierarchy(database=database)
class MultiDBManager(models.Manager):
def get_queryset(self):
from .middleware import _thread_locals
req = getattr(_thread_locals, "request", None)
if req:
return (
super().get_queryset().using(getattr(req, "database", DEFAULT_DB_ALIAS))
)
else:
db = getattr(_thread_locals, "database", None)
return super().get_queryset().using(db or DEFAULT_DB_ALIAS)
class MultiDBRouter:
def db_for_read(self, model, **hints):
from .middleware import _thread_locals
req = getattr(_thread_locals, "request", None)
if req:
return getattr(req, "database", None)
else:
return getattr(_thread_locals, "database", None)
def db_for_write(self, model, **hints):
from .middleware import _thread_locals
req = getattr(_thread_locals, "request", None)
if req:
return getattr(req, "database", None)
else:
return getattr(_thread_locals, "database", None)
class AuditModel(models.Model):
"""
This is an abstract base model.
It implements the capability to maintain:
- the date of the last modification of the record.
- a string intended to describe the source system that supplied the record
"""
# Database fields
source = models.CharField(
_("source"), db_index=True, max_length=300, null=True, blank=True
)
lastmodified = models.DateTimeField(
_("last modified"), editable=False, db_index=True, default=timezone.now
)
objects = MultiDBManager() # The default manager.
def save(self, *args, **kwargs):
# Update the field with every change
self.lastmodified = datetime.now()
# Call the real save() method
super().save(*args, **kwargs)
class Meta:
abstract = True
class Parameter(AuditModel):
# Database fields
name = models.CharField(_("name"), max_length=60, primary_key=True)
value = models.CharField(_("value"), max_length=1000, null=True, blank=True)
description = models.CharField(
_("description"), max_length=1000, null=True, blank=True
)
def __str__(self):
return self.name
class Meta(AuditModel.Meta):
db_table = "common_parameter"
verbose_name = _("parameter")
verbose_name_plural = _("parameters")
@staticmethod
def getValue(key, database=DEFAULT_DB_ALIAS, default=None):
try:
return Parameter.objects.using(database).only("value").get(pk=key).value
except Exception:
return default
class Scenario(models.Model):
scenarioStatus = (("free", _("free")), ("in use", _("in use")), ("busy", _("busy")))
# Database fields
name = models.CharField(_("name"), max_length=300, primary_key=True)
description = models.CharField(
_("description"), max_length=500, null=True, blank=True
)
status = models.CharField(
_("status"), max_length=10, null=False, blank=False, choices=scenarioStatus
)
lastrefresh = models.DateTimeField(_("last refreshed"), null=True, editable=False)
help_url = models.URLField("help", null=True, editable=False)
def __str__(self):
return self.name
@staticmethod
def syncWithSettings():
try:
# Bring the scenario table in sync with settings.databases
with transaction.atomic(savepoint=False):
dbs = [i for i, j in settings.DATABASES.items() if j["NAME"]]
scs = []
for sc in Scenario.objects.using(DEFAULT_DB_ALIAS):
if sc.name not in dbs:
sc.delete()
else:
scs.append(sc.name)
for db in dbs:
if db not in scs:
if db == DEFAULT_DB_ALIAS:
Scenario(
name=db, status="In use", description="Production"
).save(using=DEFAULT_DB_ALIAS)
else:
Scenario(name=db, status="Free").save(
using=DEFAULT_DB_ALIAS
)
except Exception:
# Failures are acceptable - eg when the default database has not been initialized yet
pass
def __lt__(self, other):
# Default database is always first in the list
if self.name == DEFAULT_DB_ALIAS:
return True
elif other.name == DEFAULT_DB_ALIAS:
return False
# Other databases are sorted by their description
return (self.description or self.name) < (other.description or other.name)
class Meta:
db_table = "common_scenario"
default_permissions = ("copy", "release", "promote")
verbose_name_plural = _("scenarios")
verbose_name = _("scenario")
ordering = ["name"]
class User(AbstractUser):
languageList = tuple(
[("auto", _("Detect automatically"))] + list(settings.LANGUAGES)
)
language = models.CharField(
_("language"), max_length=10, choices=languageList, default="auto"
)
theme = models.CharField(
_("theme"),
max_length=20,
default=settings.DEFAULT_THEME,
choices=[(i, capfirst(i)) for i in settings.THEMES],
)
pagesize = models.PositiveIntegerField(
_("page size"), default=settings.DEFAULT_PAGESIZE
)
horizonbuckets = models.CharField(max_length=300, blank=True, null=True)
horizonstart = models.DateTimeField(blank=True, null=True)
horizonend = models.DateTimeField(blank=True, null=True)
horizontype = models.BooleanField(blank=True, default=True)
horizonlength = models.IntegerField(blank=True, default=6, null=True)
horizonbefore = models.IntegerField(blank=True, default=0, null=True)
horizonunit = models.CharField(
blank=True,
max_length=5,
default="month",
null=True,
choices=(("day", "day"), ("week", "week"), ("month", "month")),
)
avatar = models.ImageField(null=True, blank=True)
lastmodified = models.DateTimeField(
_("last modified"),
auto_now=True,
null=True,
blank=True,
editable=False,
db_index=True,
)
def save(
self, force_insert=False, force_update=False, using=None, update_fields=None
):
"""
Every change to a user model is saved to all active scenarios.
The is_superuser and is_active fields can be different in each scenario.
All other fields are expected to be identical in each database.
Because of the logic in this method creating users directly in the
database tables is NOT a good idea!
"""
# We want to automatically give
#!/usr/bin/env python
# Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Main front-end to the tests of Nuitka.
Has many options, read --help output.
"""
import os
import subprocess
import sys
import tempfile
from optparse import OptionParser
from nuitka.freezer.Onefile import checkOnefileReadiness
from nuitka.tools.Basics import goHome
from nuitka.tools.testing.Common import my_print, withExtendedExtraOptions
from nuitka.utils.Execution import (
check_call,
check_output,
getExecutablePath,
getPythonExePathWindows,
)
from nuitka.utils.FileOperations import withDirectoryChange
from nuitka.utils.Timing import TimerReport
from nuitka.utils.Utils import getOS, hasOnefileSupportedOS
def parseOptions():
# There are freaking many options to honor,
# pylint: disable=too-many-branches,too-many-statements
parser = OptionParser()
parser.add_option(
"--skip-basic-tests",
action="store_false",
dest="basic_tests",
default=True,
help="""\
The basic tests, execute these to check if Nuitka is healthy.
Default is %default.""",
)
parser.add_option(
"--skip-syntax-tests",
action="store_false",
dest="syntax_tests",
default=True,
help="""\
The syntax tests, execute these to check if Nuitka handles Syntax errors fine.
Default is %default.""",
)
parser.add_option(
"--skip-program-tests",
action="store_false",
dest="program_tests",
default=True,
help="""\
The programs tests, execute these to check if Nuitka handles programs, e.g.
import recursions, etc. fine. Default is %default.""",
)
parser.add_option(
"--skip-package-tests",
action="store_false",
dest="package_tests",
default=True,
help="""\
The packages tests, execute these to check if Nuitka handles packages, e.g.
import recursions, etc. fine. Default is %default.""",
)
parser.add_option(
"--skip-plugins-tests",
action="store_false",
dest="plugin_tests",
default=True,
help="""\
The plugins tests, execute these to check if Nuitka handles its own plugin
interfaces, e.g. user plugins, etc. fine. Default is %default.""",
)
parser.add_option(
"--skip-optimizations-tests",
action="store_false",
dest="optimization_tests",
default=True,
help="""\
The optimization tests, execute these to check if Nuitka does optimize certain
constructs fully away. Default is %default.""",
)
parser.add_option(
"--skip-standalone-tests",
action="store_false",
dest="standalone_tests",
default=getOS() != "NetBSD",
help="""\
The standalone tests, execute these to check if Nuitka standalone mode, e.g.
not referring to outside, important 3rd library packages like PyQt fine.
Default is %default.""",
)
parser.add_option(
"--skip-onefile-tests",
action="store_false",
dest="onefile_tests",
default=hasOnefileSupportedOS(),
help="""\
The onefile tests, execute these to check if Nuitka works in onefile mode, e.g.
not referring to outside, important 3rd library packages like PyQt fine.
Default is %default.""",
)
parser.add_option(
"--skip-reflection-test",
action="store_false",
dest="reflection_test",
default=True,
help="""\
The reflection test compiles Nuitka with Nuitka, then compiles Nuitka again with
the compiled Nuitka, and compares the outputs. Default is %default.""",
)
parser.add_option(
"--skip-cpython26-tests",
action="store_false",
dest="cpython26",
default=True,
help="""\
The standard CPython2.6 test suite. Execute this for all corner cases to be
covered. With Python 2.7 this covers exception behavior quite well. Default
is %default.""",
)
parser.add_option(
"--skip-cpython27-tests",
action="store_false",
dest="cpython27",
default=True,
help="""\
The standard CPython2.7 test suite. Execute this for all corner cases to be
covered. With Python 2.6 these are not run. Default is %default.""",
)
parser.add_option(
"--skip-cpython32-tests",
action="store_false",
dest="cpython32",
default=True,
help="""\
The standard CPython3.2 test suite. Execute this for all corner cases to be
covered. With Python 2.6 these are not run. Default is %default.""",
)
parser.add_option(
"--skip-cpython33-tests",
action="store_false",
dest="cpython33",
default=True,
help="""\
The standard CPython3.3 test suite. Execute this for all corner cases to be
covered. With Python 2.x these are not run. Default is %default.""",
)
parser.add_option(
"--skip-cpython34-tests",
action="store_false",
dest="cpython34",
default=True,
help="""\
The standard CPython3.4 test suite. Execute this for all corner cases to be
covered. With Python 2.x these are not run. Default is %default.""",
)
parser.add_option(
"--skip-cpython35-tests",
action="store_false",
dest="cpython35",
default=True,
help="""\
The standard CPython3.5 test suite. Execute this for all corner cases to be
covered. With Python 2.x these are not run. Default is %default.""",
)
parser.add_option(
"--skip-cpython36-tests",
action="store_false",
dest="cpython36",
default=True,
help="""\
The standard CPython3.6 test suite. Execute this for all corner cases to be
covered. With Python 2.x these are not run. Default is %default.""",
)
parser.add_option(
"--skip-cpython37-tests",
action="store_false",
dest="cpython37",
default=True,
help="""\
The standard CPython3.7 test suite. Execute this for all corner cases to be
covered. With Python 2.x these are not run. Default is %default.""",
)
parser.add_option(
"--skip-cpython38-tests",
action="store_false",
dest="cpython38",
default=True,
help="""\
The standard CPython3.8 test suite. Execute this for all corner cases to be
covered. With Python 2.x these are not run. Default is %default.""",
)
parser.add_option(
"--skip-cpython39-tests",
action="store_false",
dest="cpython39",
default=True,
help="""\
The standard CPython3.9 test suite. Execute this for all corner cases to be
covered. With Python 2.x these are not run. Default is %default.""",
)
parser.add_option(
"--skip-other-cpython-tests",
action="store_true",
dest="cpython_no_other",
default=False,
help="""\
Do not execute any CPython test suite other than the one matching the running
Python. Default is %default.""",
)
parser.add_option(
"--skip-all-cpython-tests",
action="store_true",
dest="cpython_none",
default=False,
help="""\
Do not execute any of the CPython test suites. Default is %default.""",
)
parser.add_option(
"--no-other-python",
action="store_true",
dest="no_other",
default=False,
help="""\
Do not use any other Python than the one running, even if available on
the system. Default is %default.""",
)
parser.add_option(
"--no-python2.6",
action="store_true",
dest="no26",
default=False,
help="""\
Do not use Python2.6 even if available on the system. Default is %default.""",
)
parser.add_option(
"--no-python2.7",
action="store_true",
dest="no27",
default=False,
help="""\
Do not use Python2.7 even if available on the system. Default is %default.""",
)
parser.add_option(
"--no-python3.3",
action="store_true",
dest="no33",
default=False,
help="""\
Do not use Python3.3 even if available on the system. Default is %default.""",
)
parser.add_option(
"--no-python3.4",
action="store_true",
dest="no34",
default=False,
help="""\
Do not use Python3.4 even if available on the system. Default is %default.""",
)
parser.add_option(
"--no-python3.5",
action="store_true",
dest="no35",
default=False,
help="""\
Do not use Python3.5 even if available on the system. Default is %default.""",
)
parser.add_option(
"--no-python3.6",
action="store_true",
dest="no36",
default=False,
help="""\
Do not use Python3.6 even if available on the system. Default is %default.""",
)
parser.add_option(
"--no-python3.7",
action="store_true",
dest="no37",
default=False,
help="""\
Do not use Python3.7 even if available on the system. Default is %default.""",
)
parser.add_option(
"--no-python3.8",
action="store_true",
dest="no38",
default=False,
help="""\
Do not use Python3.8 even if available on the system. Default is %default.""",
)
parser.add_option(
"--no-python3.9",
action="store_true",
dest="no39",
default=False,
help="""\
Do not use Python3.9 even if available on the system. Default is %default.""",
)
parser.add_option(
"--coverage",
action="store_true",
dest="coverage",
default=False,
help="""\
Make a coverage analysis that does not really check the results. Default is %default.""",
)
parser.add_option(
"--no-debug",
action="store_false",
dest="debug",
default=True,
help="""\
Do not run Nuitka in debug mode while executing the tests. Default is %default.""",
)
parser.add_option(
"--assume-yes-for-downloads",
action="store_true",
dest="assume_yes_for_downloads",
default=False,
help="""\
Allow Nuitka to download code if necessary, e.g. dependency walker on Windows. Default is %default.""",
)
parser.add_option(
"--mingw64",
action="store_true",
dest="mingw64",
default=False,
help="""\
Enforce the use of MinGW64 on Windows. Defaults to off.""",
)
options, positional_args = parser.parse_args()
if positional_args:
parser.print_help()
sys.exit("\nError, no positional argument allowed.")
if options.no_other:
if sys.version_info[0:2] != (2, 6):
options.no26 = True
if sys.version_info[0:2] != (2, 7):
options.no27 = True
if sys.version_info[0:2] != (3, 3):
options.no33 = True
if sys.version_info[0:2] != (3, 4):
options.no34 = True
if sys.version_info[0:2] != (3, 5):
options.no35 = True
if sys.version_info[0:2] != (3, 6):
options.no36 = True
if sys.version_info[0:2] != (3, 7):
options.no37 = True
if sys.version_info[0:2] != (3, 8):
options.no38 = True
if sys.version_info[0:2] != (3, 9):
options.no39 = True
if options.cpython_no_other:
if sys.version_info[0:2] != (2, 6):
options.cpython26 = False
if sys.version_info[0:2] != (2, 7):
options.cpython27 = False
if sys.version_info[0:2] != (3, 2):
options.cpython32 = False
if sys.version_info[0:2] != (3, 3):
options.cpython33 = False
if sys.version_info[0:2] != (3, 4):
options.cpython34 = False
if sys.version_info[0:2] != (3, 5):
options.cpython35 = False
if sys.version_info[0:2] != (3, 6):
options.cpython36 = False
if sys.version_info[0:2] != (3, 7):
options.cpython37 = False
if sys.version_info[0:2] != (3, 8):
options.cpython38 = False
if sys.version_info[0:2] != (3, 9):
options.cpython39 = False
if options.cpython_none:
options.cpython26 = False
options.cpython27 = False
options.cpython32 = False
options.cpython33 = False
options.cpython34 = False
options.cpython35 = False
options.cpython36 = False
options.cpython37 = False
options.cpython38 = False
options.cpython39 = False
if options.coverage and os.path.exists(".coverage"):
os.unlink(".coverage")
return options
def publishCoverageData():
def copyToGlobalCoverageData(source, target):
coverage_dir = os.environ.get("COVERAGE_DIR")
if coverage_dir is None:
return
check_call(("scp", source, os.path.join(coverage_dir, target)))
if os.name == "nt":
suffix = "win"
else:
import platform
suffix = platform.uname()[0] + "." + platform.uname()[4]
with open("data.coverage", "w") as data_file:
source_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
with withDirectoryChange(source_dir):
nuitka_id = check_output("git rev-parse HEAD".split())
nuitka_id = nuitka_id.strip()
if sys.version_info > (3,):
nuitka_id = nuitka_id.decode()
data_file.write("NUITKA_SOURCE_DIR=%r\n" % source_dir)
data_file.write("NUITKA_COMMIT=%r\n" % nuitka_id)
copyToGlobalCoverageData("data.coverage", "meta.coverage." + suffix)
def makeCoverageRelative(filename):
"""Normalize coverage data."""
with open(filename)
int
numel_in(self, str iname) -> int
For a particular input or for all of the inputs
"""
return _casadi.Function_numel_in(self, *args)
def numel_out(self, *args):
"""
Get number of output elements.
numel_out(self) -> int
numel_out(self, int ind) -> int
numel_out(self, str oname) -> int
For a particular output or for all of the outputs
"""
return _casadi.Function_numel_out(self, *args)
def name_in(self, *args):
"""
Get input scheme name by index.
name_in(self) -> [str]
Get input scheme.
name_in(self, int ind) -> str
> name_in(self, int ind)
------------------------------------------------------------------------
Get input scheme name by index.
> name_in(self)
------------------------------------------------------------------------
Get input scheme.
"""
return _casadi.Function_name_in(self, *args)
def name_out(self, *args):
"""
Get output scheme name by index.
name_out(self) -> [str]
Get output scheme.
name_out(self, int ind) -> str
> name_out(self, int ind)
------------------------------------------------------------------------
Get output scheme name by index.
> name_out(self)
------------------------------------------------------------------------
Get output scheme.
"""
return _casadi.Function_name_out(self, *args)
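# Hedged usage sketch (not part of the generated wrapper): querying the scheme
# of a small hand-built Function; names below are arbitrary examples.
#
#   >>> x = SX.sym('x'); y = SX.sym('y')
#   >>> f = Function('f', [x, y], [x*y], ['x', 'y'], ['r'])
#   >>> f.name_in()
#   ['x', 'y']
#   >>> f.name_out(0)
#   'r'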
def index_in(self, *args):
"""
index_in(self, str name) -> int
Find the index for a string describing a particular entry of an input scheme.
example: schemeEntry("x_opt") -> returns NLPSOL_X if FunctionInternal
adheres to SCHEME_NLPINput
"""
return _casadi.Function_index_in(self, *args)
def index_out(self, *args):
"""
index_out(self, str name) -> int
Find the index for a string describing a particular entry of an output scheme.
example: schemeEntry("x_opt") -> returns NLPSOL_X if FunctionInternal
adheres to SCHEME_NLPINput
"""
return _casadi.Function_index_out(self, *args)
def default_in(self, *args):
"""
Get default input value.
default_in(self, int ind) -> float
"""
return _casadi.Function_default_in(self, *args)
def max_in(self, *args):
"""
Get largest input value.
max_in(self, int ind) -> float
"""
return _casadi.Function_max_in(self, *args)
def min_in(self, *args):
"""
Get smallest input value.
min_in(self, int ind) -> float
"""
return _casadi.Function_min_in(self, *args)
def sparsity_in(self, *args):
"""
Get sparsity of a given input.
sparsity_in(self, int ind) -> Sparsity
sparsity_in(self, str iname) -> Sparsity
"""
return _casadi.Function_sparsity_in(self, *args)
def sparsity_out(self, *args):
"""
Get sparsity of a given output.
sparsity_out(self, int ind) -> Sparsity
sparsity_out(self, str iname) -> Sparsity
"""
return _casadi.Function_sparsity_out(self, *args)
def factory(self, *args):
"""
factory(self, str name, [str] s_in, [str] s_out, dict:[str] aux, dict opts) -> Function
"""
return _casadi.Function_factory(self, *args)
def oracle(self, *args):
"""
Get oracle.
oracle(self) -> Function
"""
return _casadi.Function_oracle(self, *args)
def wrap(self, *args):
"""
Wrap in an Function instance consisting of only one MX call.
wrap(self) -> Function
"""
return _casadi.Function_wrap(self, *args)
def which_depends(self, *args):
"""
Which variables enter with some order.
which_depends(self, str s_in, [str] s_out, int order, bool tr) -> [bool]
Parameters:
-----------
order: Only 1 (linear) and 2 (nonlinear) allowed
tr: Flip the relationship. Return which expressions contain the variables
"""
return _casadi.Function_which_depends(self, *args)
def print_dimensions(self, *args):
"""
Print dimensions of inputs and outputs.
print_dimensions(self)
"""
return _casadi.Function_print_dimensions(self, *args)
def print_options(self, *args):
"""
Print options to a stream.
print_options(self)
"""
return _casadi.Function_print_options(self, *args)
def print_option(self, *args):
"""
Print all information there is to know about a certain option.
print_option(self, str name)
"""
return _casadi.Function_print_option(self, *args)
def uses_output(self, *args):
"""
Do the derivative functions need nondifferentiated outputs?
uses_output(self) -> bool
"""
return _casadi.Function_uses_output(self, *args)
def jacobian_old(self, *args):
"""
Generate a Jacobian function of output oind with respect to input iind.
jacobian_old(self, int iind, int oind) -> Function
Parameters:
-----------
iind: The index of the input
oind: The index of the output Legacy function: To be deprecated in a future
version of CasADi. Exists only for compatibility with Function::jacobian
pre-CasADi 3.2
"""
return _casadi.Function_jacobian_old(self, *args)
def hessian_old(self, *args):
"""
Generate a Hessian function of output oind with respect to input iind.
hessian_old(self, int iind, int oind) -> Function
Parameters:
-----------
iind: The index of the input
oind: The index of the output Legacy function: To be deprecated in a future
version of CasADi. Exists only for compatibility with Function::hessian pre-
CasADi 3.2
"""
return _casadi.Function_hessian_old(self, *args)
def jacobian(self, *args):
"""
        jacobian(self) -> Function
        Generate a Jacobian function of all the input elements with respect to
        all the output elements.
"""
return _casadi.Function_jacobian(self, *args)
def jac(self, *args):
"""
        jac(self) -> Function
        Calculate all Jacobian blocks. Generates a function that takes all non-
        differentiated inputs and outputs and calculates all Jacobian blocks.
        Inputs that are not needed by the routine are all-zero sparse matrices
        with the correct dimensions. Output blocks that are not calculated, e.g.
        if the corresponding input or output is marked non-differentiated, are
        also all-zero sparse. The Jacobian blocks are sorted starting with all
        the blocks for the first output, then all the blocks for the second
        output and so on. E.g. f:(x,y)->(r,s) results in the function
        jac_f:(x,y,r,s)->(dr_dx, dr_dy, ds_dx, ds_dy). This function is cached.
"""
return _casadi.Function_jac(self, *args)
def call(self, *args):
"""
        Evaluate the function symbolically or numerically.
        call(self, dict:DM arg, bool always_inline, bool never_inline) -> dict:DM
        call(self, [DM] arg, bool always_inline, bool never_inline) -> [DM]
        call(self, [SX] arg, bool always_inline, bool never_inline) -> [SX]
        call(self, dict:SX arg, bool always_inline, bool never_inline) -> dict:SX
        call(self, dict:MX arg, bool always_inline, bool never_inline) -> dict:MX
        call(self, [MX] arg, bool always_inline, bool never_inline) -> [MX]
"""
return _casadi.Function_call(self, *args)
def mapsum(self, *args):
"""
Evaluate symbolically in parallel and sum (matrix graph)
mapsum(self, [MX] arg, str parallelization) -> [MX]
Parameters:
-----------
parallelization: Type of parallelization used: unroll|serial|openmp
"""
return _casadi.Function_mapsum(self, *args)
def mapaccum(self, *args):
"""
Create a mapaccumulated version of this function.
mapaccum(self, int n, dict opts) -> Function
mapaccum(self, str name, int n, dict opts) -> Function
mapaccum(self, str name, int n, int n_accum, dict opts) -> Function
mapaccum(self, str name, int n, [str] accum_in, [str] accum_out, dict opts) -> Function
mapaccum(self, str name, int n, [int] accum_in, [int] accum_out, dict opts) -> Function
Suppose the function has a signature of:
::
f: (x, u) -> (x_next , y )
        Then the mapaccumulated version has the signature:
::
F: (x0, U) -> (X , Y )
with
U: horzcat([u0, u1, ..., u_(N-1)])
X: horzcat([x1, x2, ..., x_N])
Y: horzcat([y0, y1, ..., y_(N-1)])
and
x1, y0 <- f(x0, u0)
x2, y1 <- f(x1, u1)
...
x_N, y_(N-1) <- f(x_(N-1), u_(N-1))
Mapaccum has the following benefits over writing an equivalent for- loop:
much faster at construction time
potentially much faster compilation times (for codegen)
offers a trade-off between memory and evaluation time
The base (settable through the options dictionary, default 10), is used to
create a tower of function calls, containing unrolled for- loops of length
maximum base.
This technique is much more scalable in terms of memory-usage, but slightly
slower at evaluation, than a plain for-loop. The effect is similar to that
of a for-loop with a check-pointing instruction after each chunk of
iterations with size base.
Set base to -1 to unroll all the way; no gains in memory efficiency here.
"""
return _casadi.Function_mapaccum(self, *args)
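    # A minimal usage sketch of mapaccum (not part of this wrapper; it assumes the
    # casadi package is importable and uses illustrative symbol names only):
    #   import casadi as ca
    #   x = ca.MX.sym('x')
    #   u = ca.MX.sym('u')
    #   f = ca.Function('f', [x, u], [x + u, x * u])   # f: (x, u) -> (x_next, y)
    #   F = f.mapaccum('F', 4)                         # F: (x0, U) -> (X, Y)
    #   X, Y = F(0.0, ca.DM([[1, 2, 3, 4]]))           # accumulate over 4 steps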
def fold(self, *args):
"""
Create a mapaccumulated version of this function.
fold(self, int n, dict opts) -> Function
Suppose the function has a signature of:
::
f: (x, u) -> (x_next , y )
        Then the mapaccumulated version has the signature:
::
F: (x0, U) -> (X , Y )
with
        U: horzcat([u0, u1, ..., u_(N-1)])
'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
        var_list[5].data = np.array([])
(m^3; net mouth is 15cm high)']]
concentration_surf = pd.DataFrame(np.divide(counts_surf.values, volume_surf.values))
# Get a nice array with three columns corresponding to the three 'super station' sites
concentrations = pd.DataFrame(columns=[0, 1, 2], index=[0, 1, 2, 3, 4, 5]).fillna(0.0)
depths = pd.DataFrame(columns=[0, 1, 2], index=[0, 1, 2, 3, 4, 5]).fillna(0.0)
for station in [0, 1, 2]:
for rows in [0, 1, 2, 3, 4, 5]:
                if rows == 0:
concentrations[station][rows] = concentration_surf[0][station]
else:
concentrations[station][rows] = concentration_multi[0][(rows - 1) + station * 5]
depths[station][rows] = depth[(rows - 1) + station * 5]
# Determining the MLD from the CTD data
MLD = determine_MLD(prefix=prefix).values
MLD = np.array([MLD, ] * 6).reshape(6, 3)
# Normalizing the concentrations and depths
depth_norm = np.divide(depths.values, MLD)
concentrations = concentrations.apply(lambda x: x / x.sum(), axis=0).fillna(0.0)
# Getting the wind data
wind_data = pd.DataFrame(casino_wind(device='MultiNet', cruise='PE448'))
wind_data = pd.concat([wind_data] * depths.shape[0], axis=1).transpose().values
# Keeping just the measurements taken above max-depth
max_depth = 73
depth_selec = depths.values.flatten() < max_depth
# Saving everything into a dictionary
output_dic = {'concentration': concentrations.values.flatten()[depth_selec],
'depth': depths.values.flatten()[depth_selec],
'depth_norm': depth_norm.flatten()[depth_selec],
'wind_speed': wind_data.flatten()[depth_selec],
'MLD': MLD.flatten()[depth_selec]}
# Pickling the array
utils.save_obj(filename=file_name, item=output_dic)
def standardization_Egger():
"""
Data provided by <NAME>, which was published in Egger et al. (2020) https://doi.org/10.1038/s41598-020-64465-8
The measurements were collected with a multinet
The original published data had a depth correction included, the data here is without that depth correction included
"""
prefix = 'Egger'
file_name = utils.get_data_output_name(prefix)
if not utils.check_file_exist(file_name + '.pkl'):
# Loading the data
data_multi = pd.read_excel(settings.data_dir + 'Egger2020_processed.xlsx')
# Create an empty dataframe to divide up the dataset according to the station
concentrations = pd.DataFrame(columns=range(1, 6), index=range(16)).fillna(0.0)
depths = pd.DataFrame(columns=range(1, 6), index=range(16)).fillna(0.0)
MLD = pd.DataFrame(columns=range(1, 6), index=range(16)).fillna(1.0)
wind = pd.DataFrame(columns=range(1, 6), index=range(16)).fillna(0.0)
# Determining the MLD at the station with the provided CTD data
MLD_station = determine_MLD(prefix=prefix)
# looping through the stations to get the concentrations, depths, wind speeds and MlD
for station in concentrations.columns:
station_data = data_multi.loc[data_multi.Station == station].copy(deep=True).reset_index()
concentrations.loc[:station_data.shape[0], station] = station_data['concentration'].copy(deep=True)
depths.loc[:station_data.shape[0], station] = station_data['depth'].copy(deep=True)
depths.loc[station_data.shape[0]:, station] = np.nan
wind.loc[:station_data.shape[0], station] = station_data['wind'].copy(deep=True)
MLD.loc[:, station] = MLD_station[station].values[0]
# Normalizing the concentrations
concentrations = concentrations.apply(lambda x: x / x.sum(), axis=0)
# Normalizing depths
depth_norm = depths.div(MLD)
# Drop all nan measurements, and then only keep measurements above max-depth
max_depth = 73
depth_selec = (deepcopy(depths.values) > max_depth) + (np.isnan(depths.values))
keys, arrays = ['concentration', 'depth', 'depth_norm', 'wind_speed', 'MLD'], [concentrations, depths,
depth_norm, wind, MLD]
output_dic = {}
for ind, key in enumerate(keys):
data = arrays[ind]
# Only keep above max_depth, and drop all other values
data = data.mask(depth_selec).values.flatten()
output_dic[key] = data[~np.isnan(data)]
# Pickling the array
utils.save_obj(filename=file_name, item=output_dic)
def standardization_average():
"""
We calculate the average concentration in each depth bin and the standard deviation of all observations that fall
into that bin
:return:
"""
prefix = 'average'
file_name = utils.get_data_output_name(prefix)
if not utils.check_file_exist(file_name + '.pkl'):
sources = ['Kooi', 'Pieper', 'Zettler', 'Kukulka', 'Egger']
# Setting the depth ranges
depth_ranges = [(0, 0.5)]
while depth_ranges[-1][-1] < 20:
depth_ranges.append((depth_ranges[-1][-1], depth_ranges[-1][-1] + 0.5))
# Initializing the arrays for the mean concentration, the total standard deviation and all per depth level to
# ease the RMSE per depth level calculation later on
mean_concentration, std_concentration, total_std, all_concentrations = {}, {}, {}, {}
for wind_range in utils.beaufort_limits():
mean_wind = np.nanmean(wind_range)
mean_concentration[mean_wind] = np.zeros(shape=depth_ranges.__len__(), dtype=float)
std_concentration[mean_wind] = np.zeros(shape=depth_ranges.__len__(), dtype=float)
all_concentrations[mean_wind] = {}
# Initializing arrays for the observation concentrations, depths and wind speeds
data_concentration = np.array([], dtype=float)
data_depth = np.array([], dtype=float)
data_wind = np.array([], dtype=float)
# Looping through all observations and putting them all into one big array
for source in sources:
data_dict = utils.load_obj(utils.get_data_output_name(source))
data_concentration = np.append(data_concentration, data_dict['concentration'])
data_depth = np.append(data_depth, data_dict['depth'])
data_wind = np.append(data_wind, data_dict['wind_speed'])
# Looping through all the wind conditions and calculating the mean and std within each depth bin
for wind_range in utils.beaufort_limits():
min_wind, max_wind = wind_range
mean_wind = np.nanmean(wind_range)
selection_wind = (data_wind < max_wind) & (data_wind > min_wind)
for index_range, depth_range in enumerate(depth_ranges):
                selection = selection_wind & (data_depth <= depth_range[1]) & (data_depth > depth_range[0])
                if np.any(selection):
mean_concentration[mean_wind][index_range] = np.nanmean(data_concentration[selection])
std_concentration[mean_wind][index_range] = np.nanstd(data_concentration[selection])
all_concentrations[mean_wind][index_range] = data_concentration[selection]
total_std[mean_wind] = np.nanstd(data_concentration[selection_wind])
# Creating an array containing the midpoint of each depth range
depth_midpoint = np.array([])
for depth_range in depth_ranges:
depth_midpoint = np.append(depth_midpoint, np.nanmean(depth_range))
# Creating the final output dict
output_dict = {"depth": depth_midpoint, "average": mean_concentration, "std": std_concentration,
"total_std": total_std, "all_concentrations": all_concentrations}
# Pickling the output dictionary
utils.save_obj(filename=file_name, item=output_dict)
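    # Sketch of an alternative (not the implementation used above): the per-bin
    # mean/std for a given wind class could also be computed with
    # scipy.stats.binned_statistic (already used as `stats` in determine_MLD below):
    #   bin_mean, edges, _ = stats.binned_statistic(data_depth[selection_wind],
    #                                               data_concentration[selection_wind],
    #                                               statistic='mean',
    #                                               bins=np.arange(0, 20.5, 0.5))
    #   bin_std, _, _ = stats.binned_statistic(data_depth[selection_wind],
    #                                          data_concentration[selection_wind],
    #                                          statistic='std',
    #                                          bins=np.arange(0, 20.5, 0.5))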
def determine_MLD(prefix: str, station_numbers=None):
"""
Determine the mixing layer depth according to de Boyer Montegut et al. (2004), which calculates the MLD from CTD
data using a temperature threshold https://doi.org/10.1029/2004JC002378
MLD = depth at which there is a 0.2 degree temperature difference relative to the temperature at 10m depth
The different field datasets have different ways of loading the data since all data formats were slightly different
"""
z_ref = 10 # reference depth in meters
    dif_ref = 0.2 # temperature difference relative to reference depth (degrees celsius)
    if prefix == 'Kooi':
# Loading the CTD data
data_ctd = pd.read_excel(settings.data_dir + 'Data_KooiEtAl.xlsx', sheet_name='CTD')
# Changing the station numbering so the first station has index 0 instead of 1
data_ctd.station -= 1
station_numbers = np.sort(np.append(data_ctd.station.unique(), 24))
# Array in which to store the determined MLD values
MLD = np.zeros((1, station_numbers.shape[0]))
for station in station_numbers:
if station == 24:
# CTD data for station 24 is missing from the datasheet
MLD[0, station] = np.nan
else:
# Load the depth and temperature data for that particular station, where we also include a check that all
# of the depth files are sorted correctly
depth = data_ctd['Depth'][data_ctd.station == station].values
depth_sort = depth.argsort()
depth = depth[depth_sort]
temp = data_ctd['Temperature'][data_ctd.station == station].values[depth_sort]
# Determine the index that corresponds to a depth of 10m
ind_10 = utils.utils_files.find_nearest_index(depth=depth, z_ref=z_ref)
temp_10 = temp[ind_10]
# Determine the depth at which the temperature difference is equal to dif_ref with respect to z_ref
depth, temp = depth[ind_10:], temp[ind_10:]
MLD[0, station] = depth[np.where(np.abs(temp - temp_10) > dif_ref)[0][0]]
    if prefix == 'Pieper':
MLD = pd.DataFrame(columns=station_numbers, index=[0]).fillna(0.0)
# Check if there is a CTD file for the station in question
for station in station_numbers:
file_name = settings.data_dir + 'CTD_PE442/PE442_' + station + 'avg.cnv'
if utils.check_file_exist(file_name):
# Load the depth and temperature data for the particular station
temperature = fCNV(file_name)['TEMP']
depth = fCNV(file_name)['DEPTH']
# Determine the index that corresponds to a depth of 10m
ind_10 = utils.find_nearest_index(depth=depth, z_ref=z_ref)
temp_10 = temperature[ind_10]
# Determine the depth at which the temperature difference is equal to dif_ref with respect to z_ref
depth, temp = depth[ind_10:], temperature[ind_10:]
MLD[station] = depth[np.where(np.abs(temp - temp_10) > dif_ref)[0][0]]
else:
MLD[station] = np.nan
    if prefix == 'Zettler':
MLD = pd.DataFrame(columns=range(1, 4), index=[0]).fillna(0.0)
for station in MLD.columns:
data_file = settings.data_dir + 'CTD_PE448/PE448_HC_avg_station_{}.cnv'.format(station)
if utils.check_file_exist(data_file):
# Load the depth and temperature data for the particular station
temperature = fCNV(data_file)['TEMP']
depth = fCNV(data_file)['DEPTH']
# Determine the index that corresponds to a depth of 10m
ind_10 = utils.utils_files.find_nearest_index(depth=depth, z_ref=z_ref)
temp_10 = temperature[ind_10]
# Determine the depth at which the temperature difference is equal to dif_ref with respect to z_ref
depth, temp = depth[ind_10:], temperature[ind_10:]
MLD[station] = depth[np.where(np.abs(temp - temp_10) > dif_ref)[0][0]]
    if prefix == 'Egger':
MLD = pd.DataFrame(columns=range(1, 6), index=[0]).fillna(0.0)
for station in MLD.columns:
data_file = settings.data_dir + 'CTD_EGGER/station_{}/CTD Data/NPM2_Stat-{}_Cast1.txt'.format(station,
station)
# Loading the data for the depth and temperature (C)
data = np.genfromtxt(data_file, skip_header=4, usecols=(1, 2))
# Determine index of the max depth, and then only use data from before that point, as we only want to use
# data as the CTD was travelling downwards
data = data[:np.argmax(data[:, 0]), :]
            # Bin the temperature data into 0.3 m intervals
bin_T, depth, _ = stats.binned_statistic(x=data[:, 0], values=data[:, 1], bins=np.arange(min(data[:, 0]),
max(data[:, 0]),
0.3))
# Determine the index that corresponds to a depth of 10m
ind_10 = utils.find_nearest_index(depth=depth, z_ref=z_ref)
temp_10 = bin_T[ind_10]
# Determine the depth at which the temperature difference is equal to dif_ref with respect to z_ref
depth, bin_T = depth[ind_10:], bin_T[ind_10:]
| |
"""
Constructs for grouping tool parameters
"""
import logging
log = logging.getLogger( __name__ )
import os
import StringIO
import unicodedata
from basic import ToolParameter
from galaxy.datatypes import sniff
from galaxy.util import inflector
from galaxy.util import relpath
from galaxy.util import sanitize_for_filename
from galaxy.util.bunch import Bunch
from galaxy.util.expressions import ExpressionContext
from galaxy.model.item_attrs import Dictifiable
class Group( object, Dictifiable ):
dict_collection_visible_keys = ( 'name', 'type' )
def __init__( self ):
self.name = None
@property
def visible( self ):
return True
def value_to_basic( self, value, app ):
"""
Convert value to a (possibly nested) representation using only basic
types (dict, list, tuple, str, unicode, int, long, float, bool, None)
"""
return value
def value_from_basic( self, value, app, ignore_errors=False ):
"""
Convert a basic representation as produced by `value_to_basic` back
into the preferred value form.
"""
return value
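    # Illustration (parameter names are hypothetical): for a Repeat group,
    # value_to_basic() turns a state such as
    #   [{'__index__': 0, 'threshold': 0.5}]
    # into plain dicts/lists of basic types, and value_from_basic() restores it,
    # re-creating '__index__' when an older serialization lacks it.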
def get_initial_value( self, trans, context, history=None ):
"""
Return the initial state/value for this group
"""
raise TypeError( "Not implemented" )
def to_dict( self, trans, view='collection', value_mapper=None ):
# TODO: need to to_dict conditions.
group_dict = super( Group, self ).to_dict( view=view, value_mapper=value_mapper )
return group_dict
class Repeat( Group ):
dict_collection_visible_keys = ( 'name', 'type', 'title', 'help', 'default', 'min', 'max' )
type = "repeat"
def __init__( self ):
Group.__init__( self )
self.title = None
self.inputs = None
self.help = None
self.default = 0
self.min = None
self.max = None
@property
def title_plural( self ):
return inflector.pluralize( self.title )
def label( self ):
return "Repeat (%s)" % self.title
def value_to_basic( self, value, app ):
rval = []
for d in value:
rval_dict = {}
            # Propagate __index__
if '__index__' in d:
rval_dict['__index__'] = d['__index__']
for input in self.inputs.itervalues():
rval_dict[ input.name ] = input.value_to_basic( d[input.name], app )
rval.append( rval_dict )
return rval
def value_from_basic( self, value, app, ignore_errors=False ):
rval = []
try:
for i, d in enumerate( value ):
rval_dict = {}
# If the special __index__ key is not set, create it (for backward
# compatibility)
rval_dict['__index__'] = d.get( '__index__', i )
# Restore child inputs
for input in self.inputs.itervalues():
if ignore_errors and input.name not in d:
# If we do not have a value, and are ignoring errors, we simply
# do nothing. There will be no value for the parameter in the
# conditional's values dictionary.
pass
else:
rval_dict[ input.name ] = input.value_from_basic( d[input.name], app, ignore_errors )
rval.append( rval_dict )
except Exception, e:
if not ignore_errors:
raise e
return rval
def visit_inputs( self, prefix, value, callback ):
for i, d in enumerate( value ):
for input in self.inputs.itervalues():
new_prefix = prefix + "%s_%d|" % ( self.name, i )
if isinstance( input, ToolParameter ):
callback( new_prefix, input, d[input.name], parent = d )
else:
input.visit_inputs( new_prefix, d[input.name], callback )
def get_initial_value( self, trans, context, history=None ):
rval = []
for i in range( self.default ):
rval_dict = { '__index__': i}
for input in self.inputs.itervalues():
rval_dict[ input.name ] = input.get_initial_value( trans, context, history=history )
rval.append( rval_dict )
return rval
def to_dict( self, trans, view='collection', value_mapper=None ):
repeat_dict = super( Repeat, self ).to_dict( trans, view=view, value_mapper=value_mapper )
def input_to_dict( input ):
return input.to_dict( trans, view=view, value_mapper=value_mapper )
repeat_dict[ "inputs" ] = map( input_to_dict, self.inputs.values() )
return repeat_dict
class UploadDataset( Group ):
type = "upload_dataset"
def __init__( self ):
Group.__init__( self )
self.title = None
self.inputs = None
self.file_type_name = 'file_type'
self.default_file_type = 'txt'
self.file_type_to_ext = { 'auto':self.default_file_type }
self.metadata_ref = 'files_metadata'
def get_composite_dataset_name( self, context ):
#FIXME: HACK
#Special case of using 'base_name' metadata for use as Dataset name needs to be done in a General Fashion, as defined within a particular Datatype.
#We get two different types of contexts here, one straight from submitted parameters, the other after being parsed into tool inputs
dataset_name = context.get('files_metadata|base_name', None )
if dataset_name is None:
dataset_name = context.get('files_metadata', {} ).get( 'base_name', None )
if dataset_name is None:
dataset_name = 'Uploaded Composite Dataset (%s)' % self.get_file_type( context )
return dataset_name
def get_file_base_name( self, context ):
fd = context.get('files_metadata|base_name','Galaxy_Composite_file')
return fd
def get_file_type( self, context ):
return context.get( self.file_type_name, self.default_file_type )
def get_datatype_ext( self, trans, context ):
ext = self.get_file_type( context )
if ext in self.file_type_to_ext:
ext = self.file_type_to_ext[ext] #when using autodetect, we will use composite info from 'text', i.e. only the main file
return ext
def get_datatype( self, trans, context ):
ext = self.get_datatype_ext( trans, context )
return trans.app.datatypes_registry.get_datatype_by_extension( ext )
@property
def title_plural( self ):
return inflector.pluralize(self.title)
def group_title( self, context ):
return "%s (%s)" % ( self.title, context.get( self.file_type_name, self.default_file_type ) )
def title_by_index( self, trans, index, context ):
d_type = self.get_datatype( trans, context )
for i, ( composite_name, composite_file ) in enumerate( d_type.writable_files.iteritems() ):
if i == index:
rval = composite_name
if composite_file.description:
rval = "%s (%s)" % ( rval, composite_file.description )
if composite_file.optional:
rval = "%s [optional]" % rval
return rval
return None
def value_to_basic( self, value, app ):
rval = []
for d in value:
rval_dict = {}
            # Propagate __index__
if '__index__' in d:
rval_dict['__index__'] = d['__index__']
for input in self.inputs.itervalues():
rval_dict[ input.name ] = input.value_to_basic( d[input.name], app )
rval.append( rval_dict )
return rval
def value_from_basic( self, value, app, ignore_errors=False ):
rval = []
for i, d in enumerate( value ):
rval_dict = {}
# If the special __index__ key is not set, create it (for backward
# compatibility)
rval_dict['__index__'] = d.get( '__index__', i )
# Restore child inputs
for input in self.inputs.itervalues():
if ignore_errors and input.name not in d: #this wasn't tested
rval_dict[ input.name ] = input.get_initial_value( None, d )
else:
rval_dict[ input.name ] = input.value_from_basic( d[input.name], app, ignore_errors )
rval.append( rval_dict )
return rval
def visit_inputs( self, prefix, value, callback ):
for i, d in enumerate( value ):
for input in self.inputs.itervalues():
new_prefix = prefix + "%s_%d|" % ( self.name, i )
if isinstance( input, ToolParameter ):
callback( new_prefix, input, d[input.name], parent = d )
else:
input.visit_inputs( new_prefix, d[input.name], callback )
def get_initial_value( self, trans, context, history=None ):
d_type = self.get_datatype( trans, context )
rval = []
for i, ( composite_name, composite_file ) in enumerate( d_type.writable_files.iteritems() ):
rval_dict = {}
rval_dict['__index__'] = i # create __index__
for input in self.inputs.itervalues():
rval_dict[ input.name ] = input.get_initial_value( trans, context, history=history ) #input.value_to_basic( d[input.name], app )
rval.append( rval_dict )
return rval
def get_uploaded_datasets( self, trans, context, override_name = None, override_info = None ):
def get_data_file_filename( data_file, override_name = None, override_info = None ):
dataset_name = override_name
dataset_info = override_info
def get_file_name( file_name ):
file_name = file_name.split( '\\' )[-1]
file_name = file_name.split( '/' )[-1]
return file_name
try:
# Use the existing file
if not dataset_name and 'filename' in data_file:
dataset_name = get_file_name( data_file['filename'] )
if not dataset_info:
dataset_info = 'uploaded file'
return Bunch( type='file', path=data_file['local_filename'], name=dataset_name )
#return 'file', data_file['local_filename'], get_file_name( data_file.filename ), dataset_name, dataset_info
except:
# The uploaded file should've been persisted by the upload tool action
return Bunch( type=None, path=None, name=None )
#return None, None, None, None, None
def get_url_paste_urls_or_filename( group_incoming, override_name = None, override_info = None ):
filenames = []
url_paste_file = group_incoming.get( 'url_paste', None )
if url_paste_file is not None:
url_paste = open( url_paste_file, 'r' ).read( 1024 )
if url_paste.lstrip().lower().startswith( 'http://' ) or url_paste.lstrip().lower().startswith( 'ftp://' ) or url_paste.lstrip().lower().startswith( 'https://' ):
url_paste = url_paste.replace( '\r', '' ).split( '\n' )
for line in url_paste:
line = line.strip()
if line:
if not line.lower().startswith( 'http://' ) and not line.lower().startswith( 'ftp://' ) and not line.lower().startswith( 'https://' ):
continue # non-url line, ignore
dataset_name = override_name
if not dataset_name:
dataset_name = line
dataset_info = override_info
if not dataset_info:
dataset_info = 'uploaded url'
yield Bunch( type='url', path=line, name=dataset_name )
#yield ( 'url', line, precreated_name, dataset_name, dataset_info )
else:
dataset_name = dataset_info = precreated_name = 'Pasted Entry' #we need to differentiate between various url pastes here
if override_name:
dataset_name = override_name
if override_info:
dataset_info = override_info
yield Bunch( type='file', path=url_paste_file, name=precreated_name )
#yield ( 'file', url_paste_file, precreated_name, dataset_name, dataset_info )
def get_one_filename( context ):
data_file = context['file_data']
url_paste = context['url_paste']
ftp_files = context['ftp_files']
name = context.get( 'NAME', None )
            info = context.get( 'INFO', None )
if pixel_select == 1:
#RD=dir([r_path 'multiharlocs*.mat']); # DOESN'T seem to be used
#str1=[q_path 'multiharlocs_' num2str(tmplt_index,'%.6d') '.mat'];
#load(str1)
# Retrieve the query harloc (Harris features)
har1pp = q_path[tmplt_index];
#str2=[r_path 'multiharlocs_' num2str(img_index,'%.6d') '.mat'];
#har2=load(str2);
# Retrieve the reference harloc (Harris features)
har2pp = r_path[img_index];
#har2.pp(:,2)=har2.pp(:,2)-2*p_init(7);
har2pp[:, 1] = har2pp[:, 1] - 2 * p_init[6];
if levels == 1:
#out1 = my_morph(pp(:,1:2),15,imres(1),imres(2));
out1 = my_morph(har1pp[:, 0: 2], 15, imres[0], imres[1]);
#out2 = my_morph([har2.pp(:,1) har2.pp(:,2)],15,imres(1),imres(2));
out2 = my_morph(np.c_[har2pp[:, 0], har2pp[:, 1]], \
15, imres[0], imres[1]);
#out=out1.*out2;
out = out1 * out2;
#% out=imresize(out,.5)>0; %handle half-size images
"""
numpy.nonzero(a)
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of 'a', containing
the indices of the non-zero elements in that dimension.
From http://www.mathworks.com/help/matlab/ref/find.html:
[row,col] = find(X, ...) returns the row and column indices
of the nonzero entries in the matrix X.
This syntax is especially useful when working with sparse
matrices.
If X is an N-dimensional array with N > 2, col contains linear
indices for the columns.
For example, for a 5-by-7-by-3 array X with a nonzero element
at X(4,2,3), find returns 4 in row and 16 in col.
That is, (7 columns in page 1) + (7 columns in page 2) +
(2 columns in page 3) = 16.
"""
#[ny,nx]=find(out==1);
if ORDER_MATTERS:
nx, ny = np.nonzero((out == 1).T);
else:
ny, nx = np.nonzero(out == 1);
"""
This is important: if we don't copy the matrix, warp_p would referece the
same numpy.array object as p_init and since warp_p is changed,
p_init is changed as well, and at the next call of ecc_homo_spacetime()
p_init will have this last value of warp_p before exiting ecc_homo_spacetime();
"""
warp_p = p_init.copy();
if common.MY_DEBUG_STDOUT:
common.DebugPrint("ecc_homo_spacetime(): warp_p = %s" % str(warp_p));
# This loop actually doesn't execute anything WHEN levels == 1.
#for ii=1:levels-1:
for ii in range(1, levels-1 + 1):
warp_p = next_level(warp_p, "homography", 0);
if weighted_flag == 1:
#W=ones(size(tmplt1));
assert tmplt[1].ndim == 2;
#W = np.ones(tmplt[1].shape);
W = np.ones(tmplt[1].shape, dtype=MATRIX_FLOAT_TYPE);
if common.MY_DEBUG_STDOUT:
common.DebugPrint("ecc_homo_spacetime(): At init, W.shape = %s" % \
str(W.shape));
if time_flag == 1:
N_p = 9;
t = t0;
else:
N_p = 8;
t = 0;
if USE_DRIVER == True:
volumeA = volume[1];
tmpltA = tmplt[1]; # input (query) frame
"""
cv2.imwrite("template_query_frame" + imformat, tmpltA);
cv2.imwrite("reference_frame" + imformat, volumeA);
"""
cv2.imwrite(o_path + ("reference_frame") + imformat, \
volumeA[:, :, 0].astype(int));
cv2.imwrite(o_path + ("query_frame") + imformat, \
tmpltA.astype(int));
"""
templateImage=tmplt[1],
inputImage=image_temp,
warpMatrix=warp_p,
"""
return fit;
if config.USE_ECC_FROM_OPENCV:
levels = 0; # We assign 0 to avoid executing the standard space-time ECC below
weighted_flag = 0;
#!!!!!!!TODO: volumeA = volume[1][:, :, 2];
"""
volumeA is the sub-sequence of reference frames (a number of nof frames) used
in interp_space_time(), for warping of the reference "frame".
"""
volumeA = volume[1];
tmpltA = tmplt[1]; # input (query) frame
if save_image == 1:
#clear xxx
#TODO: think if we should do a del xxx, like he does in Matlab a clear
# We allocate space for xxx
xxx = np.zeros( (tmpltA.shape[0], tmpltA.shape[1], 3) ); #!!!!TODO: use MATRIX_FLOAT_TYPE
#xxx(:,:,1)=tmplt;
xxx[:, :, 0] = tmpltA;
#xxx(:,:,3)=tmplt;
xxx[:, :, 2] = tmpltA;
if config.VISUAL_DIFF_FRAMES == True:
xxx[:, :, 1] = tmpltA;
#!!!!TODO: experiment if it helps to "reuse" warp_p between different pairs of frames, MAYBE reset it to eye(3) once in a while - for the sake of performance increase
#warp_p = p_init;
warp_p = p_init.copy();
if common.MY_DEBUG_STDOUT:
common.DebugPrint( \
"ecc_homo_spacetime(): warp_p.dtype = %s" % str(warp_p.dtype));
common.DebugPrint( \
"ecc_homo_spacetime(): warp_p (before transformECC) = %s" % \
str(warp_p));
if False:
cv2.imwrite(o_path + ("reference_frame") + imformat, \
volumeA[:, :, 0].astype(int));
cv2.imwrite(o_path + ("query_frame") + imformat, \
tmpltA.astype(int));
"""
From http://opencvpython.blogspot.ro/2013/01/k-means-clustering-3-working-with-opencv.html
Define criteria = ( type, max_iter = ... , epsilon = ...)
Note: OpenCV's ECC uses by default: 50, 0.001 .
#aCriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 50, 0.001);
See examples of criteria definition also at:
http://docs.opencv.org/trunk/doc/py_tutorials/py_video/py_meanshift/py_meanshift.html
http://opencvpython.blogspot.ro/2013/01/k-means-clustering-3-working-with-opencv.html
- book pg 258, http://books.google.ro/books?id=seAgiOfu2EIC&pg=PA258&lpg=PA258&dq=OpenCV+CvTermCriteria&source=bl&ots=hTD0bmeANg&sig=eS7FA1QeEy_K5vAFpG_tCOjak7w&hl=en&sa=X&ei=DJN8U5XnOvTrygP5mYH4Aw&ved=0CEMQ6AEwAg#v=onepage&q=OpenCV%20CvTermCriteria&f=false
- http://stackoverflow.com/questions/18955760/how-does-cvtermcriteria-work-in-opencv
cv::TermCriteria(cv::TermCriteria::MAX_ITER +
cv::TermCriteria::EPS,
50, // max number of iterations
0.0001)); // min accuracy
"""
#aCriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 15, 0.001);
aCriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, config.EPS_ECC);
"""
See doc findTransformECC at
http://docs.opencv.org/trunk/modules/video/doc/motion_analysis_and_object_tracking.html#findtransformecc
http://docs.opencv.org/trunk/modules/video/doc/motion_analysis_and_object_tracking.html#double%20findTransformECC%28InputArray%20templateImage,%20InputArray%20inputImage,%20InputOutputArray%20warpMatrix,%20int%20motionType,%20TermCriteria%20criteria%29
Src code at https://github.com/Itseez/opencv/blob/ef91d7e8830c36785f0b6fdbf2045da48413dd76/modules/video/src/ecc.cpp (see also modules/video/include/opencv2/video/tracking.hpp and https://github.com/Itseez/opencv/blob/master/samples/cpp/image_alignment.cpp)
"""
tECC1 = float(cv2.getTickCount());
USE_MY_ECC_PY = False;
if USE_MY_ECC_PY:
import ECC
import cv
# From http://stackoverflow.com/questions/9913392/convert-numpy-array-to-cvmat-cv2
cvTmplt = cv.fromarray(tmplt[1]);
cvImageTemp = cv.fromarray(image_temp);
cvWarp = cv.fromarray(warp_p);
print "cvTmplt = %s" % str(cvTmplt);
#print "dir(cvTmplt) = %s" % str(dir(cvTmplt));
print "cvImageTemp = %s" % str(cvImageTemp);
warp_p, retval = ECC.cvFindTransform(cvTmplt,
cvImageTemp,
cvWarp,
ECC.WARP_MODE_HOMOGRAPHY,
(cv.CV_TERMCRIT_ITER + cv.CV_TERMCRIT_EPS,
15, 0.001));
warp_p = Matlab.ConvertCvMatToNPArray(warp_p);
else:
retval, warp_p = cv2.findTransformECC( \
templateImage=tmplt[1],
inputImage=image_temp,
warpMatrix=warp_p,
motionType=cv2.MOTION_HOMOGRAPHY,
criteria=aCriteria);
tECC2 = float(cv2.getTickCount());
myTime = (tECC2 - tECC1) / cv2.getTickFrequency();
print("ecc_homo_spacetime(): cv2.findTransformECC() took %.6f [sec]" % myTime);
"""
From http://docs.opencv.org/trunk/modules/video/doc/motion_analysis_and_object_tracking.html#findtransformecc:
"It returns the final enhanced correlation coefficient, that is the
correlation coefficient between the template image and the final
warped input image."
"""
if common.MY_DEBUG_STDOUT:
common.DebugPrint( \
"ecc_homo_spacetime(): retval (final value of ECC) of findTransformECC = %s" % \
str(retval));
common.DebugPrint( \
"ecc_homo_spacetime(): warp_p (after findTransformECC) = %s" % \
str(warp_p));
##IMPORTANT NOTE: we don't execute the nol loop because now levels == 0
# (see above)
if common.MY_DEBUG_STDOUT:
common.DebugPrint("ecc_homo_spacetime(): levels = %s" % str(levels));
"""
resize volume[*];
resize tmplt[nol + 1]; # input (query) frame
for i in range(volume[nol].shape[2]):
if common.MY_DEBUG_STDOUT:
common.DebugPrint(" i = %s" % str(i));
volume[nol + 1][:, :, i] = Matlab.imresize(volume[nol][:, :, i], scale=0.5);
"""
#%%Iterative procedure
#for nol=levels:-1:1
# IMPORTANT NOTE: We substitute nol - 1 --> nol (since array numbering
# starts with 0, not like in Matlab from 1)
for nol in range(levels - 1, 0-1, -1): # If levels == 1, it gets executed only once.
#eval(['volume=volume' num2str(nol) ';'])
volumeA = volume[nol + 1]; # (volumeA = volume[1], for our setting, when levels=1).
# IMPORTANT NOTE: volumeA is a 3D matrix - it has nof matrices from the video sequence (in gray)
if common.MY_DEBUG_STDOUT:
common.DebugPrint( \
"ecc_homo_spacetime(): Am here: volumeA.shape = %s" % \
str(volumeA.shape));
common.DebugPrint( \
"ecc_homo_spacetime(): Am here: volumeA.dtype = %s" % \
str(volumeA.dtype));
if INVERSE == False:
# Note: nof is the number of frames for sub-sequences
if nof > 1:
#!!!!TODO: we should be able to optimize heavily here if we compute only the required elements of vx, vy, vt, since these matrices are huge and very easy to compute
#[vx,vy,vt]=gradient(volume);
vx, vy, vt = Matlab.gradient(volumeA);
else:
#[vx,vy]=gradient(volume);
#!!!!TODO: we should be able to optimize heavily here if we compute only the required elements of vx, vy since these matrices are huge and very easy to compute
vx, vy = Matlab.gradient(volumeA);
vt = 0 * vx;
#eval(['tmplt=tmplt' num2str(nol) ';'])
tmpltA = tmplt[nol + 1];
if INVERSE == True:
print("IMPORTANT: ecc_homo_spacetime(): volumeA.shape = %s" % str(volumeA.shape));
print("IMPORTANT: ecc_homo_spacetime(): tmpltA.shape = %s" % str(tmpltA.shape));
"""
From iat_eccIC.m:
temp = TEMP{nol};
[vx,vy]=gradient(temp);
"""
#vx, vy, vt = Matlab.gradient(tmpltA);
#!!!!TODO TODO TODO: see what I can do better
vx, vy = Matlab.gradient(tmpltA);
vt = 0 * vx;
#[AA,BB,CC]=size(volume);
AA, BB, CC = volumeA.shape;
#% if nol>1
#% margin=floor(mean(AA,BB)*.05/(2^(nol-1)));
#% else
margin = 0;
#% end
if save_image == 1:
#clear xxx
#TODO: think if we should do a del xxx, like he does in Matlab a clear
# We allocate space for xxx
xxx = np.zeros( (tmpltA.shape[0], tmpltA.shape[1], 3) );
#xxx(:,:,1)=tmplt;
xxx[:, :, 0] = tmpltA;
| |
squares, three moves
board = Board('k7/pp6/8/8/8/8/PPPPPPPP/RNBQKBNR w - - 0 1')
target_bb = BitBoard(0)
target_bb[Sq.H3] = 1
target_bb[Sq.E4] = 1
moves = board.get_target_noncapture_moves(target_bb)
assert len(moves) == 3
assert sorted(moves) == sorted([Move(Sq.E2, Sq.E4, MoveType.DOUBLE),
Move(Sq.H2, Sq.H3, MoveType.QUIET),
Move(Sq.G1, Sq.H3, MoveType.QUIET)])
def test_get_attacking_sqs(self):
"""
Tests the get_attacking_sqs() function of the Board class
"""
# no checks
board = Board()
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
check_sqs = board.get_attacking_sqs(king_sq)
self.assertEqual(len(check_sqs), 0)
# single check
board = Board('4K2k/8/5B2/8/8/8/8/1n6 b - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
check_sqs = board.get_attacking_sqs(king_sq)
self.assertEqual(len(check_sqs), 1)
self.assertListEqual(sorted(check_sqs), sorted([Sq.F6]))
# multiple checks
board = Board('7k/8/8/8/7r/8/5n2/7K w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
check_sqs = board.get_attacking_sqs(king_sq)
self.assertEqual(len(check_sqs), 2)
self.assertListEqual(sorted(check_sqs), sorted([Sq.H4, Sq.F2]))
def test_get_attacking_pawn_sqs(self):
"""
Tests the _get_attacking_pawn_sqs() function of the Board class
"""
# normal
board = Board('7k/8/8/8/6p1/7K/8/8 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
pawn_sqs = board._get_attacking_pawn_sqs(king_sq)
self.assertEqual(len(pawn_sqs), 1)
self.assertListEqual(sorted(pawn_sqs), sorted([Sq.G4]))
# no check
board = Board('7k/8/8/8/p6p/7K/8/8 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
pawn_sqs = board._get_attacking_pawn_sqs(king_sq)
self.assertEqual(len(pawn_sqs), 0)
# double check
# not possible in actual game of chess
board = Board('7K/8/8/8/8/4k3/3P1P2/8 b - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
pawn_sqs = board._get_attacking_pawn_sqs(king_sq)
self.assertEqual(len(pawn_sqs), 2)
self.assertListEqual(sorted(pawn_sqs), sorted([Sq.D2, Sq.F2]))
# pawn on 2nd/7th row (promotion)
board = Board('7k/8/8/8/8/8/3p4/4K3 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
pawn_sqs = board._get_attacking_pawn_sqs(king_sq)
self.assertEqual(len(pawn_sqs), 1)
self.assertListEqual(sorted(pawn_sqs), sorted([Sq.D2]))
def test_get_attacking_knight_sqs(self):
"""
Tests the _get_attacking_knight_sqs() function of the Board class
"""
# normal
board = Board('7k/8/8/8/5n2/7K/8/8 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
knight_sqs = board._get_attacking_knight_sqs(king_sq)
self.assertEqual(len(knight_sqs), 1)
self.assertListEqual(sorted(knight_sqs), sorted([Sq.F4]))
# no check
board = Board('nn6/8/8/8/8/7K/8/3k4 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
knight_sqs = board._get_attacking_knight_sqs(king_sq)
self.assertEqual(len(knight_sqs), 0)
# double check
# not possible in actual game of chess
board = Board('7K/8/8/8/8/4k3/8/3N1N2 b - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
knight_sqs = board._get_attacking_knight_sqs(king_sq)
self.assertEqual(len(knight_sqs), 2)
self.assertListEqual(sorted(knight_sqs), sorted([Sq.D1, Sq.F1]))
def test_get_attacking_bishop_sqs(self):
"""
Tests the _get_attacking_bishop_sqs() function of the Board class
"""
# normal
board = Board('7k/8/8/8/6b1/7K/8/8 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
bishop_sqs = board._get_attacking_bishop_sqs(king_sq)
self.assertEqual(len(bishop_sqs), 1)
self.assertListEqual(sorted(bishop_sqs), sorted([Sq.G4]))
# no check
board = Board('6bb/8/8/8/8/7K/8/3k4 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
bishop_sqs = board._get_attacking_bishop_sqs(king_sq)
self.assertEqual(len(bishop_sqs), 0)
# no check (covered)
board = Board('8/8/8/5b2/6N1/7K/8/3k4 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
bishop_sqs = board._get_attacking_bishop_sqs(king_sq)
self.assertEqual(len(bishop_sqs), 0)
# double check
# not possible in actual game of chess
board = Board('7K/8/8/8/8/4k3/3B4/6B1 b - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
bishop_sqs = board._get_attacking_bishop_sqs(king_sq)
self.assertEqual(len(bishop_sqs), 2)
self.assertListEqual(sorted(bishop_sqs), sorted([Sq.D2, Sq.G1]))
def test_get_attacking_rook_sqs(self):
"""
Tests the _get_attacking_rook_sqs() function of the Board class
"""
# normal
board = Board('7k/8/8/7r/8/7K/8/8 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
rook_sqs = board._get_attacking_rook_sqs(king_sq)
self.assertEqual(len(rook_sqs), 1)
self.assertListEqual(sorted(rook_sqs), sorted([Sq.H5]))
# no check
board = Board('6r1/8/8/8/8/7K/8/3k4 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
rook_sqs = board._get_attacking_rook_sqs(king_sq)
self.assertEqual(len(rook_sqs), 0)
# no check (covered)
board = Board('8/8/8/5b2/6N1/5rnK/8/3k4 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
rook_sqs = board._get_attacking_rook_sqs(king_sq)
self.assertEqual(len(rook_sqs), 0)
# double check
# not possible in actual game of chess
board = Board('7K/8/8/4R3/8/2R1k3/3B4/6B1 b - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
rook_sqs = board._get_attacking_rook_sqs(king_sq)
self.assertEqual(len(rook_sqs), 2)
self.assertListEqual(sorted(rook_sqs), sorted([Sq.E5, Sq.C3]))
def test_get_attacking_queen_sqs(self):
"""
Tests the _get_attacking_queen_sqs() function of the Board class
"""
# normal
board = Board('7k/8/7q/8/8/7K/8/8 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
queen_sqs = board._get_attacking_queen_sqs(king_sq)
self.assertEqual(len(queen_sqs), 1)
self.assertListEqual(sorted(queen_sqs), sorted([Sq.H6]))
# no check
board = Board('5q1Q/8/8/8/8/7K/8/3k4 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
queen_sqs = board._get_attacking_queen_sqs(king_sq)
self.assertEqual(len(queen_sqs), 0)
# no check (covered)
board = Board('8/8/8/5b2/q2N3K/8/8/3k4 w - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
queen_sqs = board._get_attacking_queen_sqs(king_sq)
self.assertEqual(len(queen_sqs), 0)
# triple check
# not possible in actual game of chess
board = Board('7K/8/6Q1/5n2/8/3Q2k1/7Q/6b1 b - - 0 1')
king = Piece.WK if board.turn == Color.WHITE else Piece.BK
king_sq = board.piece_sq[king][0]
queen_sqs = board._get_attacking_queen_sqs(king_sq)
self.assertEqual(len(queen_sqs), 3)
self.assertListEqual(sorted(queen_sqs), sorted([Sq.H2, Sq.D3, Sq.G6]))
def test_find_pinned(self):
"""
Tests the _find_pinned() function of the Board class
"""
# pinned by bishop
board = Board('k6K/1p6/8/8/8/8/8/7B b - - 0 1')
pinned = board.find_pinned()
self.assertEqual(len(pinned[0]), 1)
self.assertEqual(sorted(pinned[0]), sorted([Sq.B7]))
self.assertEqual(len(pinned[1]), 0)
# pinned by rook
board = Board('k6K/8/8/n7/8/8/8/R7 b - - 0 1')
pinned = board.find_pinned()
self.assertEqual(len(pinned[0]), 1)
self.assertEqual(sorted(pinned[0]), sorted([Sq.A5]))
self.assertEqual(len(pinned[1]), 0)
# pinned by queen
board = Board('k6K/1p6/8/8/8/5Q2/8/8 b - - 0 1')
pinned = board.find_pinned()
self.assertEqual(len(pinned[0]), 1)
self.assertEqual(sorted(pinned[0]), sorted([Sq.B7]))
self.assertEqual(len(pinned[1]), 0)
# no pinned piece (no slider on trajectory)
board = Board('k6K/1p6/8/2r5/8/3b4/8/8 w - - 0 1')
pinned = board.find_pinned()
self.assertEqual(len(pinned[0]), 0)
self.assertEqual(len(pinned[1]), 0)
# no pinned piece (multiple allied piece on slider trajectory)
board = Board('K6k/1N6/8/3P4/4q3/8/8/8 w - - 0 1')
pinned = board.find_pinned()
self.assertEqual(len(pinned[0]), 0)
self.assertEqual(len(pinned[1]), 0)
# no pinned piece (enemy piece on slider trajectory)
board = Board('q3k3/8/2n5/3K4/8/8/8/7B w - - 0 1')
pinned = board.find_pinned()
self.assertEqual(len(pinned[0]), 0)
self.assertEqual(len(pinned[1]), 0)
# no pinned piece, piece in trajectory but further than king
board = Board('1q5k/8/8/8/8/6K1/7R/8 w - - 0 1')
pinned = board.find_pinned()
self.assertEqual(len(pinned[0]), 0)
self.assertEqual(len(pinned[1]), 0)
# one pinned, piece in trajectory but further than king
board = Board('q6k/8/8/8/4P3/5K2/8/7B w - - 0 1')
pinned = board.find_pinned()
self.assertEqual(len(pinned[0]), 1)
self.assertEqual(sorted(pinned[0]), sorted([Sq.E4]))
self.assertEqual(len(pinned[1]), 0)
# multiple pinned pieces
board = Board('1q1k4/2R5/8/rN2K3/8/8/8/8 w - - 0 1')
pinned = board.find_pinned()
self.assertEqual(len(pinned[0]), 2)
self.assertEqual(sorted(pinned[0]), sorted([Sq.C7, Sq.B5]))
self.assertEqual(len(pinned[1]), 0)
def test_pinned_move_gen(self):
"""
Tests the _pinned_move_gen() function of the Board class
"""
# pawn pinned by rook
board = Board('k6K/8/8/8/8/p7/8/R7 b - - 0 1')
moves = board._pinned_move_gen(Sq.A3, Sq.A1, Direction.U)
assert len(moves) == 1
assert sorted(moves) == [Move(Sq.A3, Sq.A2, MoveType.QUIET)]
# knight pinned by rook
board = Board('k6K/8/8/8/8/n7/8/R7 b - - 0 1')
moves = board._pinned_move_gen(Sq.A3, Sq.A1, Direction.U)
self.assertEqual(len(moves), 0)
# bishop pinned by rook
board = Board('k6K/8/8/8/b7/8/8/R7 b - - 0 1')
moves = board._pinned_move_gen(Sq.A4, Sq.A1, Direction.U)
self.assertEqual(len(moves), 0)
# rook pinned by rook
board = Board('8/8/8/k6K/8/r7/8/R7 b - - 0 1')
moves = board._pinned_move_gen(Sq.A3, Sq.A1, Direction.U)
assert len(moves) == 3
assert sorted(moves) == [Move(Sq.A3, Sq.A1, MoveType.CAPTURE),
Move(Sq.A3, Sq.A2, MoveType.QUIET),
Move(Sq.A3, Sq.A4, MoveType.QUIET)]
# queen pinned by rook
board = Board('8/8/8/8/k6K/q7/8/R7 b - - 0 1')
moves = board._pinned_move_gen(Sq.A3, Sq.A1, Direction.U)
assert len(moves) == 2
assert sorted(moves) == [Move(Sq.A3, Sq.A1, MoveType.CAPTURE),
Move(Sq.A3, Sq.A2, MoveType.QUIET)]
# pawn pinned by bishop (no attack)
board = Board('K6k/8/8/8/3p4/8/1B6/8 b - - 0 1')
moves = board._pinned_move_gen(Sq.D4, Sq.B2, Direction.UR)
assert len(moves) == 0
# pawn pinned by bishop (can attack)
board = Board('K6k/8/8/8/8/2p5/1B6/8 b - - 0 1')
moves = board._pinned_move_gen(Sq.C3, Sq.B2, Direction.UR)
self.assertEqual(len(moves), 1)
self.assertEqual(sorted(moves), [Move(Sq.C3, Sq.B2, MoveType.CAPTURE)])
# pawn pinned by bishop (promo attack)
board = Board('K6k/8/8/8/8/8/1p6/B7 b - - 0 1')
moves = board._pinned_move_gen(Sq.B2, Sq.A1, Direction.UR)
assert len(moves) == 4
assert sorted(moves) == [Move(Sq.B2, Sq.A1, MoveType.N_PROMO_CAPTURE),
Move(Sq.B2, Sq.A1, MoveType.B_PROMO_CAPTURE),
Move(Sq.B2, Sq.A1, MoveType.R_PROMO_CAPTURE),
Move(Sq.B2, Sq.A1, MoveType.Q_PROMO_CAPTURE)]
# knight pinned by bishop
board = Board('k6K/6N1/8/8/8/8/1b6/8 w - - 0 1')
moves = board._pinned_move_gen(Sq.G7, Sq.B2, Direction.UR)
self.assertEqual(len(moves), 0)
# bishop pinned by bishop
board | |
auth_source_ldap['name'] = self.params.name
auth_source_ldap['attr_mail'] = self.params.extra['attr_mail']
auth_source_ldap['account_password'] = self.params.extra['account_password']
auth_source_ldap['attr_firstname'] = self.params.extra['attr_firstname']
auth_source_ldap['host'] = self.params.extra['host']
auth_source_ldap['attr_lastname'] = self.params.extra['attr_lastname']
except KeyError as e:
self.log.debug(e)
quit("Cannot create " + str(self.params.function))
if 'tls' in self.params.extra:
auth_source_ldap['tls'] = self.params.extra['tls']
if 'port' in self.params.extra:
auth_source_ldap['port'] = self.params.extra['port']
if 'account' in self.params.extra:
auth_source_ldap['account'] = self.params.extra['account']
if 'onthefly_register' in self.params.extra:
auth_source_ldap['onthefly_register'] = self.params.extra['onthefly_register']
if 'base_dn' in self.params.extra:
auth_source_ldap['base_dn'] = self.params.extra['base_dn']
try:
auth_source_ldap = conn.create_auth_source_ldaps(auth_source_ldap)
except Exception as e:
self.log.error(e)
quit("There was a problem creating your " + str(self.params.function))
return auth_source_ldap
def create_paramter(self,conn):
common_parameter = {}
try:
common_parameter['value'] = self.params.extra['value']
common_parameter['name'] = self.params.name
except KeyError as e:
self.log.debug(e)
quit("Cannot create " + str(self.params.function))
try:
common_parameter = conn.create_common_paramters(common_parameter)
except Exception as e:
self.log.error(e)
quit("There was a problem creating your " + str(self.params.function))
return common_parameter
def create_key(self,conn):
lookup_key = {}
try:
lookup_key['key'] = self.params.name
except KeyError as e:
self.log.debug(e)
quit("Cannot create " + str(self.params.function))
if 'default_value' in self.params.extra:
lookup_key['default_value'] = self.params.extra['default_value']
if 'description' in self.params.extra:
lookup_key['description'] = self.params.extra['description']
if 'path' in self.params.extra:
lookup_key['path'] = self.params.extra['path']
if 'puppetclass_id' in self.params.extra:
lookup_key['puppetclass_id'] = self.params.extra['puppetclass_id']
if 'lookup_values_count' in self.params.extra:
lookup_key['lookup_values_count'] = self.params.extra['lookup_values_count']
try:
lookup_key = conn.create_lookup_keys(lookup_key)
except Exception as e:
self.log.error(e)
quit("There was a problem creating your " + str(self.params.function))
return lookup_key
def create_partitiontable(self,conn):
ptable = {}
try:
ptable['layout'] = self.params.extra['layout']
ptable['name'] = self.params.name
except KeyError as e:
self.log.debug(e)
quit("Cannot create " + str(self.params.function))
if 'os_family' in self.params.extra:
ptable['os_family'] = self.params.extra['os_family']
try:
ptable = conn.create_ptables(ptable)
except Exception as e:
self.log.error(e)
quit("There was a problem creating your " + str(self.params.function))
return ptable
def create_role(self,conn):
role = {}
try:
role['name'] = self.params.name
except KeyError as e:
self.log.debug(e)
quit("Cannot create environment")
try:
role = conn.create_roles(role)
except Exception as e:
self.log.error("Not enough detail provided with element - Unsure what to do - Skipping!")
return
return role
def create_usergroup(self,conn):
usergroup = {}
try:
usergroup['name'] = self.params.name
except KeyError as e:
self.log.debug(e)
quit("Cannot create environment")
try:
usergroup = conn.create_usergroups(usergroup)
except Exception as e:
self.log.error("Not enough detail provided with element - Unsure what to do - Skipping!")
return
return usergroup
def create_user(self,conn):
user = {}
try:
user['login'] = self.params.name
user['password'] = self.params.extra['password']
user['mail'] = self.params.extra['mail']
user['auth_source_id'] = self.params.extra['auth_source_id']
except KeyError as e:
self.log.debug(e)
quit("Cannot create environment")
if 'firstname' in self.params.extra:
user['firstname'] = self.params.extra['firstname']
if 'admin' in self.params.extra:
user['admin'] = self.params.extra['admin']
if 'lastname' in self.params.extra:
user['lastname'] = self.params.extra['lastname']
try:
user = conn.create_users(user)
except Exception as e:
self.log.error("Not enough detail provided with element - Unsure what to do - Skipping!")
return
return user
def update_host(self,conn,i):
host = {}
self.log.info(conn.show_hosts(i))
try:
host['id'] = i
except KeyError as e:
self.log.debug(e)
quit("Cannot update " + str(self.params.function))
if 'new_name' in self.params.extra:
host['name'] = self.params.extra['new_name']
if 'architecture_id' in self.params.extra:
host['architecture_id'] = self.params.extra['architecture_id']
if 'model_id' in self.params.extra:
host['model_id'] = self.params.extra['model_id']
if 'puppet_proxy_id' in self.params.extra:
host['puppet_proxy_id'] = self.params.extra['puppet_proxy_id']
if 'environment_id' in self.params.extra:
host['environment_id'] = self.params.extra['environment_id']
if 'domain_id' in self.params.extra:
host['domain_id'] = self.params.extra['domain_id']
if 'mac' in self.params.extra:
host['mac'] = self.params.extra['mac']
if 'ip' in self.params.extra:
host['ip'] = self.params.extra['ip']
if 'operatingsystem_id' in self.params.extra:
host['operatingsystem_id'] = self.params.extra['operatingsystem_id']
if 'ptable_id' in self.params.extra:
host['ptable_id'] = self.params.extra['ptable_id']
if 'hostgroup_id' in self.params.extra:
host['hostgroup_id'] = self.params.extra['hostgroup_id']
if 'sp_subnet_id' in self.params.extra:
host['sp_subnet_id']= self.params.extra['sp_subnet_id']
if 'subnet_id' in self.params.extra:
host['subnet_id'] = self.params.extra['subnet_id']
if 'owner_id' in self.params.extra:
host['owner_id']= self.params.extra['owner_id']
if 'host_parameters_attributes' in self.params.extra:
host['host_parameters_attributes'] = self.params.extra['host_parameters_attributes']
if 'puppet_ca_proxy_id' in self.params.extra:
host['puppet_ca_proxy_id'] = self.params.extra['puppet_ca_proxy_id']
if 'image_id' in self.params.extra:
host['image_id'] = self.params.extra['image_id']
if 'medium_id' in self.params.extra:
host['medium_id'] = self.params.extra['medium_id']
try:
host = conn.update_hosts(host)
except Exception as e:
self.log.error(e)
quit("There was a problem updating your " + str(self.params.function))
return host
def update_smart_proxy(self,conn,i):
smartProxy = {}
try:
smartProxy['id'] = i
except KeyError as e:
self.log.debug(e)
quit("Please provide a JSON string")
if 'new_name' in self.params.extra:
smartProxy['name'] = self.params.extra['new_name']
if 'url' in self.params.extra:
smartProxy['url'] = self.params.extra['url']
try:
proxy = conn.update_smart_proxies(smartProxy)
except Exception as e:
self.log.error(e)
quit("There was a problem updating your " + str(self.params.function))
return proxy
def update_compute_resource(self,conn,i):
computeResource = {}
try:
computeResource['id'] = i
except KeyError:
quit("Please deliver a json string with - password, url, description, user and provider values ")
if 'new_name' in self.params.extra:
computeResource['name'] = self.params.extra['new_name']
if 'password' in self.params.extra:
computeResource['password'] = self.params.extra['password']
if 'url' in self.params.extra:
computeResource['url'] = self.params.extra['url']
if 'description' in self.params.extra:
computeResource['description'] = self.params.extra['description']
if 'user' in self.params.extra:
computeResource['user'] = self.params.extra['user']
if 'provider' in self.params.extra:
computeResource['provider'] = self.params.extra['provider']
if 'server' in self.params.extra:
computeResource['server'] = self.params.extra['server']
if 'uuid' in self.params.extra:
computeResource['uuid'] = self.params.extra['uuid']
if 'tenant' in self.params.extra:
computeResource['tenant'] = self.params.extra['tenant']
if 'region' in self.params.extra:
computeResource['region'] = self.params.extra['region']
try:
resource = conn.update_compute_resources(computeResource)
except Exception as e:
self.log.error(e)
quit("There was a problem updating your " + str(self.params.function))
return resource
def update_subnet(self,conn,i):
subnet = {}
try:
subnet['id'] = i
except KeyError:
quit("Please enter a valid JSON string with name, subnetmask, network")
if 'new_name' in self.params.extra:
subnet['name'] = self.params.extra['new_name']
if 'mask' in self.params.extra:
subnet['mask'] = self.params.extra['mask']
if 'network' in self.params.extra:
subnet['network'] = self.params.extra['network']
if 'vlanid' in self.params.extra:
subnet['vlanid'] = self.params.extra['vlanid']
if 'dns_primary' in self.params.extra:
subnet['dns_primary'] = self.params.extra['dns_primary']
if 'gateway' in self.params.extra:
subnet['gateway'] = self.params.extra['gateway']
if 'to' in self.params.extra:
subnet['to'] = self.params.extra['to']
if 'dns_id' in self.params.extra:
subnet['dns_id'] = self.params.extra['dns_id']
if 'dhcp_id' in self.params.extra:
subnet['dhcp_id'] = self.params.extra['dhcp_id']
if 'from' in self.params.extra:
subnet['from'] = self.params.extra['from']
if 'dns_secondary' in self.params.extra:
subnet['dns_secondary'] = self.params.extra['dns_secondary']
if 'domain_ids' in self.params.extra:
subnet['domain_ids'] = self.params.extra['domain_ids']
if 'tftp_id' in self.params.extra:
subnet['tftp_id'] = self.params.extra['tftp_id']
try:
sub = conn.update_subnets(subnet)
except Exception as e:
self.log.error(e)
quit("There was a problem updating your " + str(self.params.function))
return sub
def update_domain(self,conn,i):
domain = {}
try:
domain['id'] = i
except KeyError:
quit("Please enter a valid JSON string with dns_id and description")
if 'new_name' in self.params.extra:
domain['name'] = self.params.extra['new_name']
if 'dns_id' in self.params.extra:
domain['dns_id'] = self.params.extra['dns_id']
if 'fullname' in self.params.extra:
domain['fullname'] = self.params.extra['fullname']
if 'domain_parameters_attributes' in self.params.extra:
domain['domain_parameters_attributes'] = self.params.extra['domain_parameters_attributes']
try:
dom = conn.update_domains(domain)
except Exception as e:
self.log.error(e)
quit("There was a problem updating your " + str(self.params.function))
return dom
def update_hostgroup(self,conn,i):
hostgroup = {}
try:
hostgroup['id'] = i
except KeyError:
quit('Please provide a valid JSON string that has name')
if 'new_name' in self.params.extra:
hostgroup['name'] = self.params.extra['new_name']
if 'operatingsystem_id' in self.params.extra:
hostgroup['operatingsystem_id'] = self.params.extra['operatingsystem_id']
if 'puppet_ca_proxy_id' in self.params.extra:
hostgroup['puppet_ca_proxy_id'] = self.params.extra['puppet_ca_proxy_id']
if 'ptable_id' in self.params.extra:
hostgroup['ptable_id'] = self.params.extra['ptable_id']
if 'environment_id' in self.params.extra:
hostgroup['environment_id'] = self.params.extra['environment_id']
if 'medium_id' in self.params.extra:
hostgroup['medium_id'] = self.params.extra['medium_id']
if 'subnet_id' in self.params.extra:
hostgroup['subnet_id'] = self.params.extra['subnet_id']
if 'architecture_id' in self.params.extra:
hostgroup['architecture_id'] = self.params.extra['architecture_id']
if 'puppet_proxy_id' in self.params.extra:
hostgroup['puppet_proxy_id'] = self.params.extra['puppet_proxy_id']
if 'puppetclass_ids' in self.params.extra:
hostgroup['puppetclass_ids'] = self.params.extra['puppetclass_ids']
if 'root_pass' in self.params.extra:
hostgroup['root_pass'] = self.params.extra['root_pass']
if 'domain_id' in self.params.extra:
hostgroup['domain_id'] = self.params.extra['domain_id']
try:
hostgroup = conn.update_hostgroups(hostgroup)
except Exception as e:
self.log.error(e)
quit("There was a problem updating your " + str(self.params.function))
return hostgroup
def update_puppetclass(self,conn,i):
puppetclass = {}
try:
puppetclass['id'] = i
except KeyError:
quit('Please provide a valid JSON string')
if 'new_name' in self.params.extra:
puppetclass['name'] = self.params.extra['new_name']
try:
pclass = conn.update_puppetclasses(puppetclass)
except Exception as e:
self.log.error(e)
quit("There was a problem updating your " + str(self.params.function))
return pclass
def update_hardwaremodel(self,conn,i):
hardware = {}
try:
hardware['id'] = i
except KeyError:
quit('please provide a valid JSON string')
if 'new_name' in self.params.extra:
hardware['name'] = self.params.extra['new_name']
if 'hardware_model' in self.params.extra:
hardware['hardware_model'] = self.params.extra['hardware_model']
if 'vendor_class' in self.params.extra:
hardware['vendor_class'] = self.params.extra['vendor_class']
if 'info' in self.params.extra:
hardware['info'] = self.params.extra['info']
try:
model = conn.update_models(hardware)
except Exception as e:
self.log.error(e)
quit("There was a problem updating your " + str(self.params.function))
return model
def update_os(self,conn,i):
os = {}
try:
os['id'] = i
except KeyError:
quit('Please provide a valid JSON string')
if 'new_name' in self.params.extra:
os['name'] = self.params.extra['new_name']
if 'minor' in self.params.extra:
os['minor'] = self.params.extra['minor']
if 'major' in self.params.extra:
os['major'] = self.params.extra['major']
try:
operatingsys = conn.update_operatingsystems(os)
except Exception as e:
self.log.error(e)
quit("There was a problem updating your " + str(self.params.function))
return operatingsys
def update_env(self,conn,i):
env = {}
try:
env['id'] = i
except KeyError as e:
self.log.debug(e)
quit("Cannot update environment")
if 'new_name' in self.params.extra:
env['name'] = self.params.extra['new_name']
try:
environment = conn.update_environments(env)
except Exception as e:
self.log.error(e)
quit("There was a problem updating your " + str(self.params.function))
return environment
def update_install_media(self,conn,i):
install = {}
try:
install['id'] = i
| |
# repo: myvyang/rre
#!/usr/bin/env python
# coding: utf-8
import os
import sys
"""
laucha: regular expressions static compiler.
<NAME>, @alejolp
"""
RE_SPECIAL_SYMBOLS = set(".[]^$()*+?|{}")
RE_CLASSES = ['[:upper:]', '[:lower:]', '[:alpha:]', '[:alnum:]', '[:digit:]', '[:xdigit:]', '[:punct:]', '[:blank:]', '[:space:]', '[:cntrl:]', '[:graph:]', '[:print:]']
TOK_SPECIAL = 'special'
TOK_LITERAL = 'literal'
TOK_CLASS = 'class'
TOK_ENDOFSTR = 'eos'
class laucha_parser_error(Exception):
pass
class laucha_parser_missing_token(Exception):
"""
Backtrack the parser
"""
pass
def tokenize_regexp(S):
i = 0
R = []
in_char_class = False
try:
while i < len(S):
if S.startswith('[:', i):
class_str = None
for c in RE_CLASSES:
if S.startswith(c, i):
class_str = c
break
if class_str is None:
raise laucha_parser_error()
R.append((TOK_CLASS, class_str))
i += len(class_str)
elif S.startswith('[^', i):
R.append((TOK_SPECIAL, S[i:i+2]))
in_char_class = True
i = i + 2
elif S[i] in RE_SPECIAL_SYMBOLS:
if in_char_class and S[i] not in "^-]\\":
R.append((TOK_LITERAL, S[i]))
else:
R.append((TOK_SPECIAL, S[i]))
if S[i] == '[':
in_char_class = True
if S[i] == ']':
in_char_class = False
i = i + 1
elif S[i] == '\\':
i = i + 1
if S[i] in "sSwW":
R.append((TOK_LITERAL, "\\" + S[i]))
elif S[i] in "nrtf":
m = {"n": "\n", "r": "\r", "t": "\t", "f": "\f"}
R.append((TOK_LITERAL, m[S[i]]))
else:
R.append((TOK_LITERAL, S[i]))
i = i + 1
else:
R.append((TOK_LITERAL, S[i]))
i = i + 1
except Exception as e:
raise laucha_parser_error("Exception: " + str(e))
R.append((TOK_ENDOFSTR, None))
return R
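# Example (illustrative sketch, using only the TOK_* constants defined above):
# tokenize_regexp() turns a pattern into (type, value) pairs terminated by an
# 'eos' token, e.g. tokenize_regexp("ab|c*") yields:
#   [('literal', 'a'), ('literal', 'b'), ('special', '|'),
#    ('literal', 'c'), ('special', '*'), ('eos', None)]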
class regexp_node:
def __init__(self, name):
self.name = name
self.childs = []
def __repr__(self):
return "('" + self.name + "', " + ', '.join([repr(x) for x in self.childs]) + ")"
class regexp_parser:
"""
Recursive descent parser for regular expressions.
GRAMMAR
-------
http://www.cs.sfu.ca/~cameron/Teaching/384/99-3/regexp-plg.html
<START> ::= <RE> <TOK_ENDOFSTR>
<RE> ::= <union> | <simple_RE>
<union> ::= <RE> "|" <simple_RE>
<simple_RE> ::= <concatenation> | <basic_RE>
<concatenation> ::= <simple_RE> <basic_RE>
<basic_RE> ::= <star> | <plus> | <question> | <num_copy> | <elementary_RE>
<star> ::= <elementary_RE> "*"
<plus> ::= <elementary_RE> "+"
<question> ::= <elementary_RE> "?"
<num_copy> ::= <elementary_RE> <num_copy_struct>
<num_copy_struct> ::= "{" num "}" | "{" num "," num "}"
<num> ::= 0123456789
<elementary_RE> ::= <group> | <any> | <eos> | <sos> | <char> | <char_group> | <set>
<group> ::= "(" <RE> ")"
<set> ::= <positive_set> | <negative_set>
<positive_set> ::= "[" <set_items> "]"
<negative_set> ::= "[^" <set_items> "]"
<set_items> ::= <set_item> | <set_item> <set_items>
<set_item> ::= <range> | <char> | <char_group>
<range> ::= <char> "-" <char>
<any> ::= "."
<sos> ::= "^"
<eos> ::= "$"
<char> ::= any non metacharacter | "\" metacharacter
<char_group> ::= "\s" | "\w" | \S | \W
# Left factored grammar
<RE> ::= <simple_RE> <union> | <simple_RE>
<union> ::= "|" <simple_RE> <union> | "|" <simple_RE>
<simple_RE> ::= <basic_RE> <concatenation> | <basic_RE>
<concatenation> ::= <simple_RE>
"""
def __init__(self, T):
self.toks = T
self.pos = 0
def tok_peek(self):
t = self.toks[self.pos]
#print "peek", t
return t
def tok_next(self):
t = self.toks[self.pos]
#print "next", t
self.pos += 1
return t
def parse_any(self):
"""
<any> ::= "."
"""
if self.tok_peek() != (TOK_SPECIAL, '.'):
raise laucha_parser_missing_token()
node = regexp_node('any')
node.childs.append(self.tok_next())
return node
def parse_sos(self):
"""
<sos> ::= "^"
"""
if self.tok_peek() != (TOK_SPECIAL, '^'):
raise laucha_parser_missing_token()
node = regexp_node('sos')
node.childs.append(self.tok_next())
return node
def parse_eos(self):
"""
<eos> ::= "$"
"""
if self.tok_peek() != (TOK_SPECIAL, '$'):
raise laucha_parser_missing_token()
node = regexp_node('eos')
node.childs.append(self.tok_next())
return node
def parse_char(self):
"""
<char> ::= any non metacharacter | "\" metacharacter
"""
if self.tok_peek()[0] != TOK_LITERAL:
raise laucha_parser_missing_token()
if self.tok_peek()[1] in "\s\w\S\W":
node = regexp_node('char_group')
node.childs.append(self.tok_next())
else:
node = regexp_node('char')
node.childs.append(self.tok_next())
return node
def parse_range(self):
"""
<range> ::= <char> "-" <char>
"""
oldpos = self.pos
try:
a = self.parse_char()
if self.tok_peek()[1] != '-':
raise laucha_parser_missing_token()
b = self.tok_next()
c = self.parse_char()
node = regexp_node('range')
node.childs.append(a)
node.childs.append(b)
node.childs.append(c)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_set_item(self):
"""
<set_item> ::= <range> | <char>
"""
oldpos = self.pos
try:
node = regexp_node('set_item')
try:
a = self.parse_range()
node.childs.append(a)
return node
except laucha_parser_missing_token as e:
pass
a = self.parse_char()
node.childs.append(a)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_set_items(self):
"""
<set_items> ::= <set_item> | <set_item> <set_items>
"""
oldpos = self.pos
try:
node = regexp_node('set_items')
a = self.parse_set_item()
node.childs.append(a)
try:
b = self.parse_set_items()
node.childs.append(b)
except laucha_parser_missing_token as e:
pass
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_set(self):
"""
<set> ::= <positive_set> | <negative_set>
"""
oldpos = self.pos
try:
node = regexp_node('set')
try:
a = self.parse_positive_set()
node.childs.append(a)
return node
except laucha_parser_missing_token as e:
pass
a = self.parse_negative_set()
node.childs.append(a)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_positive_set(self):
"""
<positive_set> ::= "[" <set_items> "]"
"""
oldpos = self.pos
try:
node = regexp_node('positive_set')
if self.tok_peek() != (TOK_SPECIAL, '['):
raise laucha_parser_missing_token()
a = self.tok_next()
b = self.parse_set_items()
if self.tok_peek() != (TOK_SPECIAL, ']'):
raise laucha_parser_missing_token()
c = self.tok_next()
node.childs.append(a)
node.childs.append(b)
node.childs.append(c)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_negative_set(self):
"""
<negative_set> ::= "[^" <set_items> "]"
"""
oldpos = self.pos
try:
node = regexp_node('negative_set')
if self.tok_peek() != (TOK_SPECIAL, '[^'):
raise laucha_parser_missing_token()
a = self.tok_next()
b = self.parse_set_items()
if self.tok_peek() != (TOK_SPECIAL, ']'):
raise laucha_parser_missing_token()
c = self.tok_next()
node.childs.append(a)
node.childs.append(b)
node.childs.append(c)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_group(self):
"""
<group> ::= "(" <RE> ")"
"""
oldpos = self.pos
try:
node = regexp_node('group')
if self.tok_peek() != (TOK_SPECIAL, '('):
raise laucha_parser_missing_token()
a = self.tok_next()
if self.tok_peek() == (TOK_SPECIAL, '?'):
self.tok_next()
b = self.parse_RE()
if self.tok_peek() != (TOK_SPECIAL, ')'):
raise laucha_parser_missing_token()
c = self.tok_next()
node.childs.append(a)
node.childs.append(b)
node.childs.append(c)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_START(self):
"""
<START> ::= <RE> <TOK_ENDOFSTR>
"""
oldpos = self.pos
try:
node = regexp_node('RE')
a = self.parse_RE()
if self.tok_peek() != (TOK_ENDOFSTR, None):
raise laucha_parser_missing_token()
b = self.tok_next()
node.childs.append(a)
node.childs.append(b)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_RE(self):
"""
<RE> ::= <simple_RE> <union> | <simple_RE>
"""
oldpos = self.pos
try:
node = regexp_node('RE')
a = self.parse_simple_RE()
node.childs.append(a)
try:
b = self.parse_union()
node.childs.append(b)
except laucha_parser_missing_token as e:
pass
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_union(self):
"""
<union> ::= "|" <simple_RE> <union> | "|" <simple_RE>
"""
oldpos = self.pos
try:
node = regexp_node('union')
if self.tok_peek() != (TOK_SPECIAL, '|'):
raise laucha_parser_missing_token()
a = self.tok_next()
node.childs.append(a)
b = self.parse_simple_RE()
node.childs.append(b)
try:
c = self.parse_union()
node.childs.append(c)
except laucha_parser_missing_token as e:
pass
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_simple_RE(self):
"""
<simple_RE> ::= <basic_RE> <concatenation> | <basic_RE>
"""
oldpos = self.pos
try:
node = regexp_node('simple_RE')
a = self.parse_basic_RE()
node.childs.append(a)
try:
b = self.parse_concatenation()
node.childs.append(b)
except laucha_parser_missing_token as e:
pass
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_concatenation(self):
"""
<concatenation> ::= <simple_RE>
"""
oldpos = self.pos
try:
node = regexp_node('concatenation')
a = self.parse_simple_RE()
node.childs.append(a)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_basic_RE(self):
"""
<basic_RE> ::= <star> | <plus> | <question> | <num_copy> | <elementary_RE>
"""
oldpos = self.pos
try:
node = regexp_node('basic_RE')
try:
a = self.parse_star()
node.childs.append(a)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
try:
a = self.parse_plus()
node.childs.append(a)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
try:
a = self.parse_question()
node.childs.append(a)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
try:
a = self.parse_num_copy()
node.childs.append(a)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
a = self.parse_elementary_RE()
node.childs.append(a)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_star(self):
"""
<star> ::= <elementary_RE> "*"
"""
oldpos = self.pos
try:
node = regexp_node('star')
a = self.parse_elementary_RE()
if self.tok_peek() != (TOK_SPECIAL, '*'):
raise laucha_parser_missing_token()
b = self.tok_next()
if self.tok_peek() == (TOK_SPECIAL, '?'):
self.tok_next()
node.childs.append(a)
node.childs.append(b)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_plus(self):
"""
<plus> ::= <elementary_RE> "+"
"""
oldpos = self.pos
try:
node = regexp_node('plus')
a = self.parse_elementary_RE()
if self.tok_peek() != (TOK_SPECIAL, '+'):
raise laucha_parser_missing_token()
b = self.tok_next()
if self.tok_peek() == (TOK_SPECIAL, '?'):
self.tok_next()
node.childs.append(a)
node.childs.append(b)
return node
except laucha_parser_missing_token as e:
self.pos = oldpos
raise
def parse_question(self):
"""
<question> ::= <elementary_RE> "?"
"""
oldpos = self.pos
try:
node = regexp_node('question')
a = self.parse_elementary_RE()
if self.tok_peek() != (TOK_SPECIAL, | |
# repo: gynvael/stream
from z3 import *
def movsx(v):
return ZeroExt(32 - 8, v)
def imul(a, b, c = None):
if c is None:
return a * b
return b * c
def xor_(r, v):
return r ^ v
def or_(r, v):
return r | v
def mov(_, r2):
return r2
def shr(r1, c):
return LShR(r1, c)
def shl(r1, c):
return r1 << c
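# Illustrative note on the helpers above (an interpretation, not verified
# against the original disassembly): despite its name, movsx() zero-extends the
# 8-bit input to 32 bits, and imul() mirrors the two x86 forms used in calc():
#   imul(edx, edi, 0x3039)  ->  edi * 0x3039   (3-operand form; the old edx value is ignored)
#   imul(edi, 0x0DEDC7)     ->  edi * 0x0DEDC7 (2-operand form)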
def calc():
esp_0x10 = BitVec("esp_0x10", 8)
esp_0x11 = BitVec("esp_0x11", 8)
esp_0x12 = BitVec("esp_0x12", 8)
esp_0x13 = BitVec("esp_0x13", 8)
esp_0x14 = BitVec("esp_0x14", 8)
esp_0x15 = BitVec("esp_0x15", 8)
esp_0x16 = BitVec("esp_0x16", 8)
esp_0x17 = BitVec("esp_0x17", 8)
esp_0x18 = BitVec("esp_0x18", 8)
esp_0x19 = BitVec("esp_0x19", 8)
esp_0x1A = BitVec("esp_0x1A", 8)
esp_0x1B = BitVec("esp_0x1B", 8)
esp_0x1C = BitVec("esp_0x1C", 8)
esp_0x1D = BitVec("esp_0x1D", 8)
esp_0x1E = BitVec("esp_0x1E", 8)
esp_0x1F = BitVec("esp_0x1F", 8)
eax = BitVec("eax", 32)
ebx = BitVec("ebx", 32)
ecx = BitVec("ecx", 32)
edx = BitVec("edx", 32)
esi = BitVec("esi", 32)
edi = BitVec("edi", 32)
ebp = BitVec("ebp", 32)
edi = movsx(esp_0x10)
edx = imul(edx, edi, 0x3039)
edx = xor_(edx, 0x93E6BBCF)
ebx = imul(ebx, edi, 0x0AEDCE)
ebx = xor_(ebx, 0x2ECBBAE2)
ecx = imul(ecx, edi, 0x2EF8F)
ecx = xor_(ecx, 0x0A0A2A282)
edi = imul(edi, 0x0DEDC7)
edi = xor_(edi, 0x9BDFE6F7)
eax = mov(eax, edx)
eax = shr(eax, 3)
edx = shl(edx, 3)
eax = or_(eax, edx)
edx = movsx(esp_0x11)
esi = imul(esi, edx, 0x3039)
eax = xor_(eax, esi)
esi = mov(esi, ebx)
esi = shr(esi, 5)
ebx = shl(ebx, 5)
esi = or_(esi, ebx)
ebx = imul(ebx, edx, 0x0AEDCE)
esi = xor_(esi, ebx)
ebx = mov(ebx, ecx)
ebx = shr(ebx, 7)
ecx = shl(ecx, 7)
ebx = or_(ebx, ecx)
ecx = imul(ecx, edx, 0x2EF8F)
ebx = xor_(ebx, ecx)
ecx = mov(ecx, edi)
ecx = shr(ecx, 9)
edi = shl(edi, 9)
ecx = or_(ecx, edi)
edx = imul(edx, 0x0DEDC7)
ecx = xor_(ecx, edx)
edx = mov(edx, eax)
edx = shr(edx, 3)
eax = shl(eax, 3)
edx = or_(edx, eax)
edi = movsx(esp_0x12)
eax = imul(eax, edi, 0x3039)
edx = xor_(edx, eax)
eax = mov(eax, esi)
eax = shr(eax, 5)
esi = shl(esi, 5)
eax = or_(eax, esi)
esi = imul(esi, edi, 0x0AEDCE)
eax = xor_(eax, esi)
esi = mov(esi, ebx)
esi = shr(esi, 7)
ebx = shl(ebx, 7)
esi = or_(esi, ebx)
ebx = imul(ebx, edi, 0x2EF8F)
esi = xor_(esi, ebx)
ebx = mov(ebx, ecx)
ebx = shr(ebx, 9)
ecx = shl(ecx, 9)
ebx = or_(ebx, ecx)
edi = imul(edi, 0x0DEDC7)
ebx = xor_(ebx, edi)
ecx = mov(ecx, edx)
ecx = shr(ecx, 3)
edx = shl(edx, 3)
ecx = or_(ecx, edx)
edi = movsx(esp_0x13)
edx = imul(edx, edi, 0x3039)
ecx = xor_(ecx, edx)
edx = mov(edx, eax)
edx = shr(edx, 5)
eax = shl(eax, 5)
edx = or_(edx, eax)
eax = imul(eax, edi, 0x0AEDCE)
edx = xor_(edx, eax)
eax = mov(eax, esi)
eax = shr(eax, 7)
esi = shl(esi, 7)
eax = or_(eax, esi)
esi = imul(esi, edi, 0x2EF8F)
eax = xor_(eax, esi)
esi = mov(esi, ebx)
esi = shr(esi, 9)
ebx = shl(ebx, 9)
esi = or_(esi, ebx)
edi = imul(edi, 0x0DEDC7)
esi = xor_(esi, edi)
ebx = mov(ebx, ecx)
ebx = shr(ebx, 3)
ecx = shl(ecx, 3)
ebx = or_(ebx, ecx)
edi = movsx(esp_0x14)
ecx = imul(ecx, edi, 0x3039)
ebx = xor_(ebx, ecx)
ecx = mov(ecx, edx)
ecx = shr(ecx, 5)
edx = shl(edx, 5)
ecx = or_(ecx, edx)
edx = imul(edx, edi, 0x0AEDCE)
ecx = xor_(ecx, edx)
edx = mov(edx, eax)
edx = shr(edx, 7)
eax = shl(eax, 7)
edx = or_(edx, eax)
eax = imul(eax, edi, 0x2EF8F)
edx = xor_(edx, eax)
eax = mov(eax, esi)
eax = shr(eax, 9)
esi = shl(esi, 9)
eax = or_(eax, esi)
edi = imul(edi, 0x0DEDC7)
eax = xor_(eax, edi)
esi = mov(esi, ebx)
esi = shr(esi, 3)
ebx = shl(ebx, 3)
esi = or_(esi, ebx)
edi = movsx(esp_0x15)
ebx = imul(ebx, edi, 0x3039)
esi = xor_(esi, ebx)
ebx = mov(ebx, ecx)
ebx = shr(ebx, 5)
ecx = shl(ecx, 5)
ebx = or_(ebx, ecx)
ecx = imul(ecx, edi, 0x0AEDCE)
ebx = xor_(ebx, ecx)
ecx = mov(ecx, edx)
ecx = shr(ecx, 7)
edx = shl(edx, 7)
ecx = or_(ecx, edx)
edx = imul(edx, edi, 0x2EF8F)
ecx = xor_(ecx, edx)
edx = mov(edx, eax)
edx = shr(edx, 9)
eax = shl(eax, 9)
edx = or_(edx, eax)
edi = imul(edi, 0x0DEDC7)
edx = xor_(edx, edi)
eax = mov(eax, esi)
eax = shr(eax, 3)
esi = shl(esi, 3)
eax = or_(eax, esi)
edi = movsx(esp_0x16)
esi = imul(esi, edi, 0x3039)
eax = xor_(eax, esi)
esi = mov(esi, ebx)
esi = shr(esi, 5)
ebx = shl(ebx, 5)
esi = or_(esi, ebx)
ebx = imul(ebx, edi, 0x0AEDCE)
esi = xor_(esi, ebx)
ebx = mov(ebx, ecx)
ebx = shr(ebx, 7)
ecx = shl(ecx, 7)
ebx = or_(ebx, ecx)
ecx = imul(ecx, edi, 0x2EF8F)
ebx = xor_(ebx, ecx)
ecx = mov(ecx, edx)
ecx = shr(ecx, 9)
edx = shl(edx, 9)
ecx = or_(ecx, edx)
edi = imul(edi, 0x0DEDC7)
ecx = xor_(ecx, edi)
edx = mov(edx, eax)
edx = shr(edx, 3)
eax = shl(eax, 3)
edx = or_(edx, eax)
edi = movsx(esp_0x17)
eax = imul(eax, edi, 0x3039)
edx = xor_(edx, eax)
eax = mov(eax, esi)
eax = shr(eax, 5)
esi = shl(esi, 5)
eax = or_(eax, esi)
esi = imul(esi, edi, 0x0AEDCE)
eax = xor_(eax, esi)
esi = mov(esi, ebx)
esi = shr(esi, 7)
ebx = shl(ebx, 7)
esi = or_(esi, ebx)
ebx = imul(ebx, edi, 0x2EF8F)
esi = xor_(esi, ebx)
ebx = mov(ebx, ecx)
ebx = shr(ebx, 9)
ecx = shl(ecx, 9)
ebx = or_(ebx, ecx)
edi = imul(edi, 0x0DEDC7)
ebx = xor_(ebx, edi)
ecx = mov(ecx, edx)
ecx = shr(ecx, 3)
edx = shl(edx, 3)
ecx = or_(ecx, edx)
edi = movsx(esp_0x18)
edx = imul(edx, edi, 0x3039)
ecx = xor_(ecx, edx)
edx = mov(edx, eax)
edx = shr(edx, 5)
eax = shl(eax, 5)
edx = or_(edx, eax)
eax = imul(eax, edi, 0x0AEDCE)
edx = xor_(edx, eax)
eax = mov(eax, esi)
eax = shr(eax, 7)
esi = shl(esi, 7)
eax = or_(eax, esi)
esi = imul(esi, edi, 0x2EF8F)
eax = xor_(eax, esi)
esi = mov(esi, ebx)
esi = shr(esi, 9)
ebx = shl(ebx, 9)
esi = or_(esi, ebx)
edi = imul(edi, 0x0DEDC7)
esi = xor_(esi, edi)
ebx = mov(ebx, ecx)
ebx = shr(ebx, 3)
ecx = shl(ecx, 3)
ebx = or_(ebx, ecx)
edi = movsx(esp_0x19)
ecx = imul(ecx, edi, 0x3039)
ebx = xor_(ebx, ecx)
ecx = mov(ecx, edx)
ecx = shr(ecx, 5)
edx = shl(edx, 5)
ecx = or_(ecx, edx)
edx = imul(edx, edi, 0x0AEDCE)
ecx = xor_(ecx, edx)
edx = mov(edx, eax)
edx = shr(edx, 7)
eax = shl(eax, 7)
edx = or_(edx, eax)
eax = imul(eax, edi, 0x2EF8F)
edx = xor_(edx, eax)
eax = mov(eax, esi)
eax = shr(eax, 9)
esi = shl(esi, 9)
eax = or_(eax, esi)
edi = imul(edi, 0x0DEDC7)
eax = xor_(eax, edi)
esi = mov(esi, ebx)
esi = shr(esi, 3)
ebx = shl(ebx, 3)
esi = or_(esi, ebx)
edi = movsx(esp_0x1A)
ebx = imul(ebx, edi, 0x3039)
esi = xor_(esi, ebx)
ebx = mov(ebx, ecx)
ebx = shr(ebx, 5)
ecx = shl(ecx, 5)
ebx = or_(ebx, ecx)
ecx = imul(ecx, edi, 0x0AEDCE)
ebx = xor_(ebx, ecx)
ecx = mov(ecx, edx)
ecx = shr(ecx, 7)
edx = shl(edx, 7)
ecx = or_(ecx, edx)
edx = imul(edx, edi, 0x2EF8F)
ecx = xor_(ecx, edx)
edx = mov(edx, eax)
edx = shr(edx, 9)
eax = shl(eax, 9)
edx = or_(edx, eax)
edi = imul(edi, 0x0DEDC7)
edx = xor_(edx, edi)
eax = mov(eax, esi)
eax = shr(eax, 3)
esi = shl(esi, 3)
eax = or_(eax, esi)
esi = movsx(esp_0x1B)
edi = imul(edi, esi, 0x3039)
eax = xor_(eax, edi)
edi = mov(edi, ebx)
edi = shr(edi, 5)
ebx = shl(ebx, 5)
edi = or_(edi, ebx)
ebx = imul(ebx, esi, 0x0AEDCE)
edi = xor_(edi, ebx)
ebx = mov(ebx, ecx)
ebx = shr(ebx, 7)
ecx = shl(ecx, 7)
| |
class struct_anon_97(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'deviceid',
'mode',
'detail',
'time',
]
struct_anon_97._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('deviceid', XID),
('mode', c_int),
('detail', c_int),
('time', Time),
]
XDeviceFocusChangeEvent = struct_anon_97 # /usr/include/X11/extensions/XInput.h:5133
XDeviceFocusInEvent = XDeviceFocusChangeEvent # /usr/include/X11/extensions/XInput.h:5135
XDeviceFocusOutEvent = XDeviceFocusChangeEvent # /usr/include/X11/extensions/XInput.h:5136
class struct_anon_98(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'deviceid',
'root',
'subwindow',
'time',
'x',
'y',
'x_root',
'y_root',
'state',
'same_screen',
'device_state',
'axes_count',
'first_axis',
'axis_data',
]
struct_anon_98._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('deviceid', XID),
('root', Window),
('subwindow', Window),
('time', Time),
('x', c_int),
('y', c_int),
('x_root', c_int),
('y_root', c_int),
('state', c_uint),
('same_screen', c_int),
('device_state', c_uint),
('axes_count', c_ubyte),
('first_axis', c_ubyte),
('axis_data', c_int * 6),
]
XProximityNotifyEvent = struct_anon_98 # /usr/include/X11/extensions/XInput.h:5164
XProximityInEvent = XProximityNotifyEvent # /usr/include/X11/extensions/XInput.h:5165
XProximityOutEvent = XProximityNotifyEvent # /usr/include/X11/extensions/XInput.h:5166
class struct_anon_99(Structure):
__slots__ = [
'class',
'length',
]
struct_anon_99._fields_ = [
('class', c_ubyte),
('length', c_ubyte),
]
XInputClass = struct_anon_99 # /usr/include/X11/extensions/XInput.h:5183
class struct_anon_100(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'deviceid',
'time',
'num_classes',
'data',
]
struct_anon_100._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('deviceid', XID),
('time', Time),
('num_classes', c_int),
('data', c_char * 64),
]
XDeviceStateNotifyEvent = struct_anon_100 # /usr/include/X11/extensions/XInput.h:5195
class struct_anon_101(Structure):
__slots__ = [
'class',
'length',
'num_valuators',
'mode',
'valuators',
]
struct_anon_101._fields_ = [
('class', c_ubyte),
('length', c_ubyte),
('num_valuators', c_ubyte),
('mode', c_ubyte),
('valuators', c_int * 6),
]
XValuatorStatus = struct_anon_101 # /usr/include/X11/extensions/XInput.h:5207
class struct_anon_102(Structure):
__slots__ = [
'class',
'length',
'num_keys',
'keys',
]
struct_anon_102._fields_ = [
('class', c_ubyte),
('length', c_ubyte),
('num_keys', c_short),
('keys', c_char * 32),
]
XKeyStatus = struct_anon_102 # /usr/include/X11/extensions/XInput.h:5218
class struct_anon_103(Structure):
__slots__ = [
'class',
'length',
'num_buttons',
'buttons',
]
struct_anon_103._fields_ = [
('class', c_ubyte),
('length', c_ubyte),
('num_buttons', c_short),
('buttons', c_char * 32),
]
XButtonStatus = struct_anon_103 # /usr/include/X11/extensions/XInput.h:5229
class struct_anon_104(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'deviceid',
'time',
'request',
'first_keycode',
'count',
]
struct_anon_104._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('deviceid', XID),
('time', Time),
('request', c_int),
('first_keycode', c_int),
('count', c_int),
]
XDeviceMappingEvent = struct_anon_104 # /usr/include/X11/extensions/XInput.h:5250
class struct_anon_105(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'deviceid',
'time',
'request',
]
struct_anon_105._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('deviceid', XID),
('time', Time),
('request', c_int),
]
XChangeDeviceNotifyEvent = struct_anon_105 # /usr/include/X11/extensions/XInput.h:5268
class struct_anon_106(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'time',
'devchange',
'deviceid',
'control',
]
struct_anon_106._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('time', Time),
('devchange', c_int),
('deviceid', XID),
('control', XID),
]
XDevicePresenceNotifyEvent = struct_anon_106 # /usr/include/X11/extensions/XInput.h:5293
class struct_anon_107(Structure):
__slots__ = [
'class',
'length',
'id',
]
struct_anon_107._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
]
XFeedbackState = struct_anon_107 # /usr/include/X11/extensions/XInput.h:5311
class struct_anon_108(Structure):
__slots__ = [
'class',
'length',
'id',
'click',
'percent',
'pitch',
'duration',
'led_mask',
'global_auto_repeat',
'auto_repeats',
]
struct_anon_108._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('click', c_int),
('percent', c_int),
('pitch', c_int),
('duration', c_int),
('led_mask', c_int),
('global_auto_repeat', c_int),
('auto_repeats', c_char * 32),
]
XKbdFeedbackState = struct_anon_108 # /usr/include/X11/extensions/XInput.h:5328
class struct_anon_109(Structure):
__slots__ = [
'class',
'length',
'id',
'accelNum',
'accelDenom',
'threshold',
]
struct_anon_109._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('accelNum', c_int),
('accelDenom', c_int),
('threshold', c_int),
]
XPtrFeedbackState = struct_anon_109 # /usr/include/X11/extensions/XInput.h:5341
class struct_anon_110(Structure):
__slots__ = [
'class',
'length',
'id',
'resolution',
'minVal',
'maxVal',
]
struct_anon_110._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('resolution', c_int),
('minVal', c_int),
('maxVal', c_int),
]
XIntegerFeedbackState = struct_anon_110 # /usr/include/X11/extensions/XInput.h:5354
class struct_anon_111(Structure):
__slots__ = [
'class',
'length',
'id',
'max_symbols',
'num_syms_supported',
'syms_supported',
]
KeySym = pyglet.libs.x11.xlib.KeySym
struct_anon_111._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('max_symbols', c_int),
('num_syms_supported', c_int),
('syms_supported', POINTER(KeySym)),
]
XStringFeedbackState = struct_anon_111 # /usr/include/X11/extensions/XInput.h:5367
class struct_anon_112(Structure):
__slots__ = [
'class',
'length',
'id',
'percent',
'pitch',
'duration',
]
struct_anon_112._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('percent', c_int),
('pitch', c_int),
('duration', c_int),
]
XBellFeedbackState = struct_anon_112 # /usr/include/X11/extensions/XInput.h:5380
class struct_anon_113(Structure):
__slots__ = [
'class',
'length',
'id',
'led_values',
'led_mask',
]
struct_anon_113._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('led_values', c_int),
('led_mask', c_int),
]
XLedFeedbackState = struct_anon_113 # /usr/include/X11/extensions/XInput.h:5392
class struct_anon_114(Structure):
__slots__ = [
'class',
'length',
'id',
]
struct_anon_114._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
]
XFeedbackControl = struct_anon_114 # /usr/include/X11/extensions/XInput.h:5402
class struct_anon_115(Structure):
__slots__ = [
'class',
'length',
'id',
'accelNum',
'accelDenom',
'threshold',
]
struct_anon_115._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('accelNum', c_int),
('accelDenom', c_int),
('threshold', c_int),
]
XPtrFeedbackControl = struct_anon_115 # /usr/include/X11/extensions/XInput.h:5415
class struct_anon_116(Structure):
__slots__ = [
'class',
'length',
'id',
'click',
'percent',
'pitch',
'duration',
'led_mask',
'led_value',
'key',
'auto_repeat_mode',
]
struct_anon_116._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('click', c_int),
('percent', c_int),
('pitch', c_int),
('duration', c_int),
('led_mask', c_int),
('led_value', c_int),
('key', c_int),
('auto_repeat_mode', c_int),
]
XKbdFeedbackControl = struct_anon_116 # /usr/include/X11/extensions/XInput.h:5433
class struct_anon_117(Structure):
__slots__ = [
'class',
'length',
'id',
'num_keysyms',
'syms_to_display',
]
struct_anon_117._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('num_keysyms', c_int),
('syms_to_display', POINTER(KeySym)),
]
XStringFeedbackControl = struct_anon_117 # /usr/include/X11/extensions/XInput.h:5445
class struct_anon_118(Structure):
__slots__ = [
'class',
'length',
'id',
'int_to_display',
]
struct_anon_118._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('int_to_display', c_int),
]
XIntegerFeedbackControl = struct_anon_118 # /usr/include/X11/extensions/XInput.h:5456
class struct_anon_119(Structure):
__slots__ = [
'class',
'length',
'id',
'percent',
'pitch',
'duration',
]
struct_anon_119._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('percent', c_int),
('pitch', c_int),
('duration', c_int),
]
XBellFeedbackControl = struct_anon_119 # /usr/include/X11/extensions/XInput.h:5469
class struct_anon_120(Structure):
__slots__ = [
'class',
'length',
'id',
'led_mask',
'led_values',
]
struct_anon_120._fields_ = [
('class', XID),
('length', c_int),
('id', XID),
('led_mask', c_int),
('led_values', c_int),
]
XLedFeedbackControl = struct_anon_120 # /usr/include/X11/extensions/XInput.h:5481
class struct_anon_121(Structure):
__slots__ = [
'control',
'length',
]
struct_anon_121._fields_ = [
('control', XID),
('length', c_int),
]
XDeviceControl = struct_anon_121 # /usr/include/X11/extensions/XInput.h:5492
class struct_anon_122(Structure):
__slots__ = [
'control',
'length',
'first_valuator',
'num_valuators',
'resolutions',
]
struct_anon_122._fields_ = [
('control', XID),
('length', c_int),
('first_valuator', c_int),
('num_valuators', c_int),
('resolutions', POINTER(c_int)),
]
XDeviceResolutionControl = struct_anon_122 # /usr/include/X11/extensions/XInput.h:5500
class struct_anon_123(Structure):
__slots__ = [
'control',
'length',
'num_valuators',
'resolutions',
'min_resolutions',
'max_resolutions',
]
struct_anon_123._fields_ = [
('control', XID),
('length', c_int),
('num_valuators', c_int),
('resolutions', POINTER(c_int)),
('min_resolutions', POINTER(c_int)),
('max_resolutions', POINTER(c_int)),
]
XDeviceResolutionState = struct_anon_123 # /usr/include/X11/extensions/XInput.h:5509
class struct_anon_124(Structure):
__slots__ = [
'control',
'length',
'min_x',
'max_x',
'min_y',
'max_y',
'flip_x',
'flip_y',
'rotation',
'button_threshold',
]
struct_anon_124._fields_ = [
('control', XID),
('length', c_int),
('min_x', c_int),
('max_x', c_int),
('min_y', c_int),
('max_y', c_int),
('flip_x', c_int),
('flip_y', c_int),
('rotation', c_int),
('button_threshold', c_int),
]
XDeviceAbsCalibControl = struct_anon_124 # /usr/include/X11/extensions/XInput.h:5522
class struct_anon_125(Structure):
__slots__ = [
'control',
'length',
'min_x',
'max_x',
'min_y',
'max_y',
'flip_x',
'flip_y',
'rotation',
'button_threshold',
]
struct_anon_125._fields_ = [
('control', XID),
('length', c_int),
('min_x', c_int),
('max_x', c_int),
('min_y', c_int),
('max_y', c_int),
('flip_x', c_int),
('flip_y', c_int),
('rotation', c_int),
('button_threshold', c_int),
]
XDeviceAbsCalibState = struct_anon_125 # /usr/include/X11/extensions/XInput.h:5522
class struct_anon_126(Structure):
__slots__ = [
'control',
'length',
'offset_x',
'offset_y',
'width',
'height',
'screen',
'following',
]
struct_anon_126._fields_ = [
('control', XID),
('length', c_int),
('offset_x', c_int),
('offset_y', c_int),
('width', c_int),
('height', c_int),
('screen', c_int),
('following', XID),
]
XDeviceAbsAreaControl = struct_anon_126 # /usr/include/X11/extensions/XInput.h:5533
class struct_anon_127(Structure):
__slots__ = [
'control',
'length',
'offset_x',
'offset_y',
'width',
'height',
'screen',
'following',
]
struct_anon_127._fields_ = [
('control', XID),
('length', c_int),
('offset_x', c_int),
('offset_y', c_int),
('width', c_int),
('height', c_int),
('screen', c_int),
('following', XID),
]
XDeviceAbsAreaState = struct_anon_127 # /usr/include/X11/extensions/XInput.h:5533
class struct_anon_128(Structure):
__slots__ = [
'control',
'length',
'status',
]
struct_anon_128._fields_ = [
('control', XID),
('length', c_int),
('status', c_int),
]
XDeviceCoreControl = struct_anon_128 # /usr/include/X11/extensions/XInput.h:5539
class struct_anon_129(Structure):
__slots__ = [
'control',
'length',
'status',
'iscore',
]
struct_anon_129._fields_ = [
('control', XID),
('length', c_int),
('status', c_int),
('iscore', c_int),
]
XDeviceCoreState = struct_anon_129 # /usr/include/X11/extensions/XInput.h:5546
class struct_anon_130(Structure):
__slots__ = [
'control',
'length',
'enable',
]
struct_anon_130._fields_ = [
('control', XID),
('length', c_int),
('enable', c_int),
]
XDeviceEnableControl = struct_anon_130 # /usr/include/X11/extensions/XInput.h:5552
class struct_anon_131(Structure):
__slots__ = [
'control',
'length',
'enable',
]
struct_anon_131._fields_ = [
('control', XID),
('length', c_int),
('enable', c_int),
]
XDeviceEnableState = struct_anon_131 # /usr/include/X11/extensions/XInput.h:5552
class struct__XAnyClassinfo(Structure):
__slots__ = [
]
struct__XAnyClassinfo._fields_ = [
('_opaque_struct', c_int)
]
class struct__XAnyClassinfo(Structure):
__slots__ = [
]
struct__XAnyClassinfo._fields_ = [
('_opaque_struct', c_int)
]
XAnyClassPtr = POINTER(struct__XAnyClassinfo) # /usr/include/X11/extensions/XInput.h:5564
class struct__XAnyClassinfo(Structure):
__slots__ = [
'class',
'length',
]
struct__XAnyClassinfo._fields_ = [
('class', XID),
('length', c_int),
]
XAnyClassInfo = struct__XAnyClassinfo # /usr/include/X11/extensions/XInput.h:5573
class struct__XDeviceInfo(Structure):
__slots__ = [
]
struct__XDeviceInfo._fields_ = [
('_opaque_struct', c_int)
]
class struct__XDeviceInfo(Structure):
__slots__ = [
]
struct__XDeviceInfo._fields_ = [
('_opaque_struct', c_int)
]
XDeviceInfoPtr = POINTER(struct__XDeviceInfo) # /usr/include/X11/extensions/XInput.h:5575
class struct__XDeviceInfo(Structure):
__slots__ = [
'id',
'type',
'name',
'num_classes',
'use',
'inputclassinfo',
]
Atom = pyglet.libs.x11.xlib.Atom
struct__XDeviceInfo._fields_ = [
('id', XID),
('type', Atom),
('name', c_char_p),
('num_classes', c_int),
('use', c_int),
('inputclassinfo', XAnyClassPtr),
]
XDeviceInfo = struct__XDeviceInfo # /usr/include/X11/extensions/XInput.h:5585
class struct__XKeyInfo(Structure):
__slots__ = [
]
struct__XKeyInfo._fields_ = [
('_opaque_struct', c_int)
]
class struct__XKeyInfo(Structure):
__slots__ = [
]
struct__XKeyInfo._fields_ = [
('_opaque_struct', c_int)
]
XKeyInfoPtr = POINTER(struct__XKeyInfo) # /usr/include/X11/extensions/XInput.h:5587
class struct__XKeyInfo(Structure):
__slots__ = [
'class',
'length',
'min_keycode',
'max_keycode',
'num_keys',
]
struct__XKeyInfo._fields_ = [
('class', XID),
('length', c_int),
('min_keycode', c_ushort),
('max_keycode', c_ushort),
('num_keys', c_ushort),
]
XKeyInfo = struct__XKeyInfo # /usr/include/X11/extensions/XInput.h:5600
class struct__XButtonInfo(Structure):
__slots__ = [
]
struct__XButtonInfo._fields_ = [
('_opaque_struct', c_int)
]
class struct__XButtonInfo(Structure):
__slots__ = [
]
struct__XButtonInfo._fields_ = [
('_opaque_struct', c_int)
]
XButtonInfoPtr = POINTER(struct__XButtonInfo) # /usr/include/X11/extensions/XInput.h:5602
class struct__XButtonInfo(Structure):
__slots__ = [
'class',
'length',
'num_buttons',
]
struct__XButtonInfo._fields_ = [
('class', XID),
('length', c_int),
('num_buttons', c_short),
]
XButtonInfo = struct__XButtonInfo # /usr/include/X11/extensions/XInput.h:5612
class struct__XAxisInfo(Structure):
__slots__ = [
]
struct__XAxisInfo._fields_ = [
('_opaque_struct', c_int)
]
class struct__XAxisInfo(Structure):
__slots__ = [
]
struct__XAxisInfo._fields_ | |
# No children of the same type, so it's the "deepest" in its branch.
else:
# Return this match
return node
# Failing a direct descendant match, we'll check each descendant's children (grandchildren check)
for node in self.nodes:
# Check for grandchild node
node2 = node.find_deepest_node_by_tag(tag_type)
# Did we find a match?
if (node2):
# Return match
return node2
# Could not find a node of that type anywhere in the tree
return None
# Find all nodes that match a given tag, even searching recursively if desired
def find_nodes_by_tag(self, tag_type, recursive = False):
# Matches
results = []
# Loop children
for node in self.nodes:
# Child match?
if ( node.tag_type == tag_type ):
# Track result
results.append(node)
# Always recur, if/a
if (recursive):
# Extend results
results.extend(
node.find_nodes_by_tag(tag_type, recursive)
)
# Return all matches
return results
def compile_xml_string(self, prefix = "", include_namespaces = False, encode_innerText = True, pretty = True):
# Assume no namespace
tag_data = "%s" % self.tag_type
# Check namespace inclusion flag
if (include_namespaces):
# Confirm that a namespace exists
if (self.tag_namespace != None):
# Add namespace
tag_data = "%s:%s" % (self.tag_namespace, self.tag_type)
# Begin serialization
xml = prefix + "<%s" % tag_data
for key in self.attributes:
xml += " %s = '%s'" % (key, self.attributes[key])
# Any children?
if (len(self.nodes) > 0):
# Pretty formatting?
if (pretty):
# Add newline
xml += ">\n"
# Loop through nodes and indent each one
for each in self.nodes:
xml += each.compile_xml_string("\t" + prefix, include_namespaces, encode_innerText, pretty)
# Close tag
xml += prefix + "</%s>\n" % tag_data
# Everything in a single line
else:
# Close tag without newline
xml += ">"
# Loop through nodes, no indention
for each in self.nodes:
xml += each.compile_xml_string(prefix, include_namespaces, encode_innerText, pretty)
# Close tag, don't add newline
xml += prefix + "</%s>" % tag_data
# Inner text?
elif (self.innerText != ""):
# If the inner text has one or more line breaks,
# then I'm going to indent it on a new line.
if ( self.innerText.find("\n") >= 0 ):
# Pretty with trailing newline?
if (pretty):
xml += ">\n%s\t%s\n%s</%s>\n" % (prefix, self.innerText.strip(), prefix, tag_data)
# No newline
else:
">
                    xml += ">%s</%s>" % (self.innerText.strip(), tag_data)
# Otherwise, I'm going to print it out without any indenting...
else:
# I don't always want to encode the inner text data
if (encode_innerText):
# Pretty with trailing newline?
if (pretty):
xml += ">%s</%s>\n" % ( xml_encode( self.innerText.strip() ), tag_data )
# No newline
else:
xml += ">%s</%s>" % ( xml_encode( self.innerText.strip() ), tag_data )
# Flag set to false?
else:
# Pretty with trailing newline?
if (pretty):
xml += ">%s</%s>\n" % ( self.innerText, tag_data )
# No newline
else:
xml += ">%s</%s>" % ( self.innerText, tag_data )
# Nope; self-closing...
else:
# Pretty with newline?
if (pretty):
xml += " />\n"
# No newline
else:
xml += " />"
return xml
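    # A brief usage sketch (hedged; assumes the XMLParser defined later in this
    # module): feeding "<scene id = '1'><node/></scene>" to
    # XMLParser().create_node_from_xml(...) and calling compile_xml_string() on
    # the returned wrapper node would emit, with pretty == True, roughly:
    #
    #   <xml-root>
    #       <scene id = '1'>
    #           <node />
    #       </scene>
    #   </xml-root>
    #
    # one tag per line, children indented with tabs, and attribute values
    # quoted with single quotes as serialized above.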
def compile_inner_xml_string(self, prefix = "", include_namespaces = False):
xml = ""
# Any children?
if (len(self.nodes) > 0):
for each in self.nodes:
xml += each.compile_xml_string("\t" + prefix, include_namespaces)
# Inner text?
elif (self.innerText != ""):
xml += xml_encode(self.innerText)
return xml
def compile_xml_abstract(self, prefix = "", include_namespaces = False):
# Assume no namespace
tag_data = "%s" % self.tag_type
# Check namespace inclusion flag
if (include_namespaces):
# Confirm that a namespace exists
if (self.tag_namespace != None):
# Add namespace
tag_data = "%s:%s" % (self.tag_namespace, self.tag_type)
xml = prefix + "<%s" % tag_data
for key in self.attributes:
xml += " %s = '%s' " % (key, self.attributes[key])
if (len(self.nodes) > 0):
xml += ">"
else:
xml += " />"
return xml
class XMLDocument:
def __init__(self, xml, parent = None):
self.nodes = {}
#self.parse_xml(xml)
#def clear(self):
# self.tag_collections = {}
class XMLParser:
def __init__(self):
return
# Compile an xml string into a single xml node, wrapping the entire contents in a parent node
def create_node_from_xml(self, xml, maxdepth = -1):
# Create a new root node
node = XMLNode("xml-root")
# Parse into that node. Return no node if the xml does not validate
if ( not self.parse_xml(xml, node, maxdepth = maxdepth) ):
# Abandon
return None
else:
# Return the new node
return node
# Import a filepath into a single xml node, wrapping the file's xml contents in a parent node. Returns a single (empty) wrapper node if file does not exist.
def create_node_from_file(self, filepath, maxdepth = -1):
# Create wrapper
node = XMLNode("xml-root")
# Validate filepath
if ( os.path.exists(filepath) ):
# Read file contents
f = open(filepath, "r")
xml = f.read()
f.close()
# Parse file contents. Return no node if the parsing fails validation
if ( not self.parse_xml(xml, node, maxdepth = maxdepth) ):
# Abandon
return None
# Return the node, presumably with the file contents imported
return node
# If the XML contains more than one "root" node, this function will ignore all except the first...
def convert_xml_to_one_node(self, xml):
# Create a temporary root node
node = XMLNode("temp")
# Parse into that node
self.parse_xml(xml, node)
# Return the first node in the temp node
return node.get_nodes_by_tag("*")[0]
def parse_xml(self, xml, parent, depth = 1, maxdepth = -1):
# Track whether or not we have valid xml data. Assume we do, at the beginning.
valid = True
# Strip comments from the markup
        xml = re.sub(r"<!--[^$]*?-->", "", xml)
# Strip whitespace surrounding new lines, remove tabs, excess whitespace, etc.
xml = re.sub("[ \t]+?\n[ \t]+?", "\n", xml).strip(" \n").replace("\t", "")
"""
def gLog(args):
for arg in args:
print arg
log = lambda *s: gLog(s)
log2 = log
logn = log
#log(xml)
"""
# Begin by finding the first element
a = xml.find("<")
b = -1
if (a >= 0):
b = self.find_tag_end(xml, a)
# If we couldn't find the end of the tag, then we invalidate the entire document
if (b < 0):
# Just for posterity
valid = False
# Immediately abandon parsing for this node
return False
# We found the end of the intro tag.
while (a >= 0):
# Check to see if this is a self-closing tag ( e.g. <node /> or <node attribute = '1' /> )
self_closing = (xml[b - 1] == "/")
# Get the contents of the XML
s = xml[a + 1 : b].strip()
# For self-closing tags, let's ditch the closing /
if (self_closing):
s = xml[a + 1 : b - 1].strip()
# Split to calculate (1) tag type, and (2) tag attributes
pieces = s.split(" ", 1)
# We definitely will have a tag type. We might have namespace data.
tag_data = pieces[0].strip()
# Assume
(tag_namespace, tag_type) = (
None,
tag_data
)
# Check for namespace
if ( tag_data.find(":") >= 0 ):
# Reinterpret data
(tag_namespace, tag_type) = tag_data.split(":", 1)
# Create a new node using the given tag type
node = XMLNode(tag_type, tag_namespace)
# Strip any whitespace surrounding = in the attributes, if we actually have any attribute to read
if (len(pieces) > 1):
# Remove whitespace surrounding the assignment operator ( e.g. attribute = '1' -> attribute='1' ). This simplifies parsing.
pieces[1] = escape_special_characters( re.sub("[ ]+=[ ]+", "=", pieces[1]).strip() )
# Check any attribute assignments...
assignments = pieces[1].split(" ")
# Loop all
for each in assignments:
# Split by the assignment operator to get the key and the value
kv = each.split("=")
# If we didn't assign a value to this attribute, we'll treat it as a boolean attribute, set as True...
if ( len(kv) == 1 ):
(key, value) = (kv[0], True)
node.set_attribute(key, value)
else:
(key, value) = (kv[0].strip(), kv[1].strip())
# String assignment?
if (value[0] == "'"):
# Unescape value (?)
value = unescape_special_characters(value).strip("'")
# Save attribute
node.set_attribute(key, value)
else:
# (?) Save as integer
try:
node.set_attribute(key, int(value))
# Can't set this attribute. Assumed integer, but
# cannot convert.
                            except ValueError:
pass#node.set_attribute(
z = 0
        # ga_i is a binary variable for the binary expansion of the fraction on N
        # of the max growth rate
ga_vars = self.get_ordered_ga_vars()
out_expr = self.mu.variable.lb
# Build z = ga_0*2^0*mu_max/N * [E]
# + ga_1*2^1*mu_max/N * [E]
# + ...
# + ga_n*2^n*mu_max/N * [E]
for i, ga_i in enumerate(ga_vars):
# Linearization step for ga_i * [E]
z_name = '__MUL__'.join([ga_i.name, E.name])
# Add the variables
model_z_i = self.add_variable(kind=LinearizationVariable,
hook=self,
id_=z_name,
lb=0,
ub=ub,
queue=False)
# z_i, cons = glovers_linearization(b = ga_i, fy=E, L=E.lb, U=E.ub, z=model_z_i)
z_i, new_constraints = petersen_linearization(b=ga_i, x=E, M=E.ub,
z=model_z_i)
# Add the constraints:
for cons in new_constraints:
# Do not forget to substitute the sympy symbol in the constraint
# with a variable !
# new_expression = cons.expression.subs(z_i, model_z_i.variable)
# EDIT: Not anymore needed if we supply the variable
self.add_constraint(kind=LinearizationConstraint,
hook=self,
id_=cons.name,
expr=cons.expression,
# expr=new_expression,
ub=cons.ub,
lb=cons.lb,
queue=queue)
out_expr += (2 ** i) * self.mu_approx_resolution * model_z_i
self.dilution_terms[macromolecule.id] = out_expr
return out_expr
def get_ordered_ga_vars(self):
"""
Returns in order the variables that discretize growth
:return:
"""
        # ga_i is a binary variable for the binary expansion of the fraction on N
# of the max growth rate
ga_vars = self.get_variables_of_type(GrowthActivation)
ga_vars = sorted(ga_vars, key=lambda x: x.ix)
return ga_vars
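    # Worked example of the binary expansion used in the dilution term above
    # (illustrative numbers, assuming the usual choice N = 2**n - 1): with
    # n = 3 growth-activation variables, ga = (1, 0, 1) encodes
    # 1*2**0 + 0*2**1 + 1*2**2 = 5, so mu is approximated as 5 * mu_max / N,
    # and each ga_i * [E] product is replaced by a LinearizationVariable via
    # petersen_linearization.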
def _prep_enzyme_variables(self, enzyme):
"""
Reads Enzyme.composition to find complexation reaction from enzyme information
:param reaction:
:type reaction: cobra.Reaction
:return:
"""
#1. Complexation
# Does this enzyme already have a complexation reaction ?
# This happens if an enzyme is used in several reactions
if enzyme.complexation is not None:
complexation = enzyme.complexation
else:
complexation = self.make_enzyme_complexation(enzyme)
enzyme.init_variable()
#2. Also add degradation
self._add_enzyme_degradation(enzyme, scaled=True, queue=True)
#3. Finally make the mass balance
# Cannot queue, if the same enzyme is to be added twice
self.add_mass_balance_constraint(complexation, enzyme, queue=False)
def make_enzyme_complexation(self, enzyme):
"""
Makes the complexation reaction and attached it to its enzyme
:param enzyme:
:return:
"""
if not enzyme.composition:
self.logger.warning('Enzyme {} has no composition'
.format(enzyme.id))
return None
this_id = '{}_complex'.format(enzyme.id)
this_name = '{} Complexation'.format(enzyme.id)
complexation = ProteinComplexation(id=this_id,
name=this_name,
target=enzyme,
# upper_bound=1,
scaled=True)
try:
peptides = {self.peptides.get_by_id(k): -v \
for k, v in enzyme.composition.items()}
except KeyError:
missing_genes = '.'.join(enzyme.composition.keys())
self.logger.warning('No nucleotide sequence found for '
'some of these genes {}'.format(missing_genes))
return None
self.add_reactions([complexation])
complexation.add_peptides(peptides)
# Post processing
self.complexation_reactions+= [complexation]
enzyme.complexation = complexation
return complexation
def add_enzymes(self, enzyme_list, prep = True):
"""
Adds an Enzyme object, or iterable of Enzyme objects, to the model
:param enzyme_list:
:type enzyme_list:Iterable(Enzyme) or Enzyme
:param prep: whether or not to add complexation, degradation, and mass
balance constraints (needs to be overridden for dummies for example)
:type prep: Boolean
:return:
"""
if not hasattr(enzyme_list, '__iter__'):
enzyme_list = [enzyme_list]
else:
enzyme_list = list(enzyme_list)
if len(enzyme_list) == 0:
return None
# unpacking
if not isinstance(enzyme_list[0],Enzyme):
enzyme_list = [x for item in enzyme_list for x in item]
# First check whether the enzymes exist in the model
# Also enzymes could be declared twice for different reactions,
# hence turn the list into a set
enzyme_list = [x for x in set(enzyme_list) if x.id not in self.enzymes]
enz_ids = [x.id for x in enzyme_list]
tot_ids = len(enz_ids)
unique_ids = len(set(enz_ids))
if tot_ids != unique_ids:
msg = '{} duplicate enzyme IDs detected'.format(tot_ids-unique_ids)
self.logger.error(msg)
raise KeyError(msg)
for enz in enzyme_list:
enz._model = self
self.enzymes += enzyme_list
if prep:
for enz in tqdm(enzyme_list, desc='enz. vars'):
self._prep_enzyme_variables(enz)
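    # A minimal usage sketch for add_enzymes above (hypothetical enzyme; the
    # keyword arguments besides id and composition are illustrative and not
    # checked against the Enzyme constructor):
    #
    #   enz = Enzyme(id='PGI', composition={'b4025': 2}, kdeg=0.05)
    #   model.add_enzymes(enz)  # builds complexation, degradation and
    #                           # mass-balance constraints via
    #                           # _prep_enzyme_variables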
def add_mrnas(self, mrna_list, add_degradation=True):
"""
Adds a mRNA object, or iterable of mRNA objects, to the model
:param mrna_list:
:type mrna_list:Iterable(mRNA) or mRNA
:return:
"""
if not hasattr(mrna_list, '__iter__'):
mrna_list = [mrna_list]
if len(mrna_list) == 0:
return None
# First check whether the mRNAs exist in the model
mrna_list = [x for x in mrna_list if x.id not in self.mrnas]
for mrna in mrna_list:
mrna._model = self
mrna.init_variable()
if add_degradation:
self._add_mrna_degradation(mrna, scaled=True, queue=True)
self.mrnas += mrna_list
        self._push_queue()
def add_trnas(self, trna_list):
"""
Adds a tRNA object, or iterable of tRNA objects, to the model
:param trna_list:
:type trna_list:Iterable(tRNA) or tRNA
:return:
"""
if not hasattr(trna_list, '__iter__'):
trna_list = [trna_list]
if len(trna_list) == 0:
return None
# First check whether the tRNAs exist in the model
trna_list = [x for x in trna_list if x.id not in self.trnas]
for trna in trna_list:
trna._model = self
trna.init_variable()
self.trnas += trna_list
def add_dna(self, dna):
"""
Adds a DNA object to the model
:param dna:
:type dna: DNA
:return:
"""
dna._model = self
dna.init_variable()
self.dna = dna
def add_lipid(self, lipid):
"""
Adds a lipid object to the model
:param lipid:
:type lipid: Lipid
:return:
"""
lipid._model = self
lipid.init_variable()
self.lipid = lipid
def add_ion(self, ion):
"""
Adds a ion object to the model
:param ion:
:type ion: ion
:return:
"""
ion._model = self
ion.init_variable()
self.ion = ion
def add_carbohydrate(self, carbohydrate):
"""
Adds a carbohydrate object to the model
:param carbohydrate:
:type carbohydrate: carbohydrate
:return:
"""
carbohydrate._model = self
carbohydrate.init_variable()
self.carbohydrate = carbohydrate
def remove_enzymes(self, enzyme_list):
"""
Removes an Enzyme object, or iterable of Enzyme objects, from the model
:param enzyme_list:
:type enzyme_list:Iterable(Enzyme) or Enzyme
:return:
"""
if not hasattr(enzyme_list, '__iter__'):
enzyme_list = [enzyme_list]
if len(enzyme_list) == 0:
return None
        # First check whether the enzymes actually exist in the model
        enzyme_list = [x for x in enzyme_list if x.id in self.enzymes]
for enz in enzyme_list:
self.remove_reactions(enz.degradation)
self.remove_reactions(enz.complexation)
self.enzymes.pop(enz.id)
def _add_enzyme_degradation(self, enzyme, scaled=True, queue=False):
"""
Given an enzyme, adds the corresponding degradation reaction
:param enzyme:
:type enzyme: Enzyme
:param scaled: Indicates whether scaling should be performed (see manuscript)
:type scaled: bool
:param queue: Indicates whether to add the variable directly or
in the next batch
:type queue: bool
:return:
"""
h2o = self.essentials['h2o']
if enzyme.kdeg is None or np.isnan(enzyme.kdeg):
return None
complex_dict = enzyme.complexation.metabolites
deg_stoich = defaultdict(int)
for peptide, stoich in complex_dict.items():
the_pep = self.peptides.get_by_id(peptide.id)
degradation_mets = degrade_peptide(the_pep,
self.aa_dict,
h2o)
for k,v in degradation_mets.items():
deg_stoich[k]+=-1*v*stoich # stoich is negative
self._make_degradation_reaction(deg_stoich,
enzyme,
EnzymeDegradation,
scaled=scaled,
queue=queue)
def _add_mrna_degradation(self, mrna, scaled = True, queue=False):
"""
Given an mRNA, adds the corresponding degradation reaction
:param mrna:
:type mrna: mRNA
:param scaled: Indicates whether scaling should be performed (see manuscript)
:type scaled: bool
:param queue: Indicates whether to add the variable directly or
in the next batch
:type queue: bool
:return:
"""
h2o = self.essentials['h2o']
h = self.essentials['h']
if mrna.kdeg is None or np.isnan(mrna.kdeg):
return None
degradation_mets = degrade_mrna(mrna, self.rna_nucleotides_mp, h2o, h)
self._make_degradation_reaction(degradation_mets,mrna,mRNADegradation,
scaled=scaled, queue=queue)
def _make_degradation_reaction(self, deg_stoich, macromolecule,
kind, scaled, queue=False):
"""
given a degradation stoichiometry, makes the corresponding degradation
reaction
:param deg_stoich: stoichiometry of the degradation
:type deg_stoich: dict({:class:`cobra.core.Species:Number`})
        :param macromolecule: the macromolecule being degraded. Used for binding
the degradation constraint
:type macromolecule: Macromolecule
:param kind: kind of constraint
:type kind: mRNADegradation or EnzymeDegradation
:param scaled: Indicates whether scaling should be performed (see manuscript)
:type scaled: bool
:param queue: Indicates whether to add the variable directly or
in the next batch
:type queue: bool
:return:
"""
reaction = DegradationReaction(id='{}_degradation'.format(macromolecule.id),
macromolecule=macromolecule,
scaled=scaled)
if scaled:
reaction.upper_bound = 1
        # Assignment to model must be done before, since the met dict has string keys
self.add_reactions([reaction])
self.degradation_reactions += [reaction]
reaction.add_metabolites(deg_stoich, rescale = True)
# Couple with the expression constraint v_deg = k_deg [E]
# Scaled into v_deg_hat = E_hat
# expr = reaction.scaled_net \
# - (macromolecule.kdeg / self.mu_max) * macromolecule.scaled_concentration
# expr = reaction.scaled_net - macromolecule.scaled_concentration
expr = reaction.net - macromolecule.kdeg * macromolecule.concentration
self.add_constraint(kind=kind,
hook=macromolecule,
expr=expr,
lb=0,
ub=0,
queue=queue)
def populate_expression(self):
"""
        Defines upper and lower bounds for the RNAP and Ribosome binding capacities
        and defines catalytic constraints for the RNAP and Ribosomes
:return:
"""
# This part defines catalytic constraints by iterating over transcription
# and translation rxns. This already excluded ExpressedGene from translation
self._populate_rnap()
self._populate_ribosomes()
self._push_queue()
# This part defines the min and max binding capacities by iterating over
        # the gene names, so we need to check if the gene is expressed or not.
for the_mrna in tqdm(self.mrnas, desc='populating expression'):
self.add_mrna_mass_balance(the_mrna)
self._constrain_polysome(the_mrna)
if self.dna is not None:
print("Constraining | |
# facebookresearch/Project_FARSI
#Copyright (c) Facebook, Inc. and its affiliates.
#This source code is licensed under the MIT license found in the
#LICENSE file in the root directory of this source tree.
import json
import os
from settings import config
from typing import Dict, List
import collections
import random
from datetime import datetime
import numpy as np
import time
import math
# This class is to model a task, that is the smallest software execution unit.
class Task:
task_id_for_debugging_static = 0
def __init__(self, name, work, iteration_ctr =1):
self.iteration_ctr = iteration_ctr
self.name = name
self.progress = 0 # progress percentage (how much of the task has been finished)
self.task_fraction = 1 # TODO: right now always set to 1
        self.task_fraction_index = 0 # TODO: always set to zero for now
self.__children = [] # task children, i.e., the tasks that read from it
self.__parents = [] # task parents, i.e., the tasks that write to the task
self.__siblings = [] # tasks siblings, i.e., tasks with the same parents.
self.PA_prop_dict = collections.OrderedDict() # all the props used PA
self.PA_prop_auto_tuning_list = [] # list of auto tuned variables
self.__task_to_family_task_work = {} # task to parent work-ratio
self.__task_to_family_task_work[self] = work # task self work
self.task_id_for_debugging = self.task_id_for_debugging_static
self.updated_task_work_for_debug = False
self.__task_work_distribution = [] # Todo: double check and deprecate later
self.__task_to_child_work_distribution = {} # double check and deprecate later
        self.__fake_children = []   # from the parent's perspective, no new data is transferred to these
                                    # children; they only reuse already generated data, so they are not
                                    # used in the work-ratio calculation
        self.__fake_parents = []    # from the child's perspective, no new data is received from these
                                    # parents; they only reuse already generated data, so they are not
                                    # used in the work-ratio calculation
self.__task_to_family_task_work_unit = {} # task to family task unit of work. For example
# work unit from bus and memory perspective is the burst size
# (in bytes)
self.burst_size = config.default_burst_size
def get_iteration_cnt(self):
return self.iteration_ctr
def set_burst_size(self, burst_size):
self.burst_size = burst_size
def get_burst_size(self):
return self.burst_size
def get_name(self):
return self.name
# ---------------
# Functionality:
# get a task's family tasks (tasks that it reads from).
# Variables:
# task_name: name of the task.
# ---------------
def get_family_task_by_name(self, task_name):
for task in self.__parents + self.__siblings + self.__children + [self]:
if task.name == task_name:
return task
# ---------------
# Functionality:
# resetting Platform Architect (PA) props. Used in PA design generation.
# task_name: name of the task.
# ---------------
def reset_PA_props(self):
self.PA_prop_dict = collections.OrderedDict()
# ---------------
# Functionality:
# update Platform Architect (PA) props.
# Variables:
# PA_prop_dict: dictionary containing all the PA props
# ---------------
def update_PA_props(self, PA_prop_dict):
self.PA_prop_dict.update(PA_prop_dict)
def update_PA_auto_tunning_knob_list(self, prop_auto_tuning_list):
self.PA_prop_auto_tuning_list = prop_auto_tuning_list
# ---------------
# Functionality:
# pick one off the children at random.
# ---------------
def sample_child(self):
random.seed(datetime.now().microsecond)
return random.choice(self.get_children())
# ---------------
# Functionality:
# sample the task distribution work. Used for jitter modeling/incorporation.
# ---------------
def sample_self_task_work(self):
time.sleep(.00005)
np.random.seed(datetime.now().microsecond)
task_work = [task_work for task_work, work_prob in self.get_task_work_distribution()]
work_prob = [work_prob for task_work, work_prob in self.get_task_work_distribution()]
return np.random.choice(task_work, p=work_prob)
# ---------------
# Functionality:
# sample the task to child work (how much data gets writen into the child task) distribution.
# Used for jitter modeling/incorporation.
# Variables:
# child: task's child
# ---------------
def sample_self_to_child_task_work(self, child):
np.random.seed(datetime.now().microsecond)
task_to_child_work = [task_work for task_work, work_prob in self.get_task_to_child_work_distribution(child)]
work_prob = [work_prob for task_work, work_prob in self.get_task_to_child_work_distribution(child)]
return np.random.choice(task_to_child_work, p=work_prob)
# ---------------
# Functionality:
# update the task work (used in jitter modeling after a new work is assigned to the task).
# Variables:
# self_work: work to assign to the task.
# ---------------
def update_task_work(self, self_work):
delete_later_ = self.get_self_task_work()
self.updated_task_work_for_debug = True
self.__task_to_family_task_work[self] = self_work
delete_later = self.get_self_task_work()
a = delete_later
# ---------------
# Functionality:
# update the task to child work (used in jitter modeling after a new work is assigned to the task).
# Variables:
# child: tasks's child.
# child_work: tasks's child work.
# ---------------
def update_task_to_child_work(self, child, child_work):
self.__task_to_family_task_work[child] = child_work
child.__task_to_family_task_work[self] = child_work
if self.updated_task_work_for_debug:
            self.updated_task_work_for_debug = False
self.task_id_for_debugging +=1
# ---------------
# Functionality:
# add task to child work to the distribution of tasks. Used for jitter modeling.
# Variables:
# child: tasks's child.
# work: work to add to the distribution.
# ---------------
def add_task_to_child_work_distribution(self, child, work):
self.__task_to_child_work_distribution[child] = work
# ---------------
# Functionality:
# add a parent(a task that it reads data from) for the task.
# Variables:
# child: tasks's child.
# work: works of the child.
# ---------------
def add_parent(self, parent,child_nature = "real"):
self.__parents.append(parent)
if child_nature == "fake":
self.__fake_parents.append(parent)
# add a child (a task that it writes to).
# nature determines whether the work is real or fake. real means real generation of the data (which needs to be pass
# along to the child) whereas fake means that the data has already been generated and just needs to be passed along.
def add_child(self, child, work, child_nature="real"):
self.__task_to_family_task_work[child] = work
for other_child in self.__children:
other_child.add_sibling(child)
child.add_sibling(other_child)
self.__children.append(child)
child.add_parent(self, child_nature)
child.__task_to_family_task_work[self] = work
assert(child_nature in ["fake", "real"]), "child nature can only be fake or real but " + child_nature + " was given"
if child_nature == "fake":
self.__fake_children.append(child)
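    # A brief usage sketch for add_child above (hypothetical task names and
    # work values):
    #
    #   producer = Task("producer", work=100)
    #   consumer = Task("consumer", work=50)
    #   producer.add_child(consumer, work=20)  # 20 units written to consumer
    #   producer.get_self_to_family_task_work(consumer)  # -> 20
    #
    # add_child also registers producer as a parent of consumer and makes any
    # pre-existing children of producer siblings of consumer (and vice versa).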
# ---------------
# Functionality:
# fake children are children that we need to pass data to, but we don't need to generate the data
    # since it is already generated. This situation happens when two children use the same exact data
# ---------------
def get_fake_children(self):
return self.__fake_children
def get_fake_children_name(self):
return [task.name for task in self.__fake_children]
# ---------------
# Functionality:
    #      fake parents are parents that pass data to this task, but don't need to generate the data
    #      since it is already generated.
# ---------------
def get_fake_parent_name(self):
return [task.name for task in self.__fake_parents]
def get_fake_family_name(self):
return self.get_fake_children_name() + self.get_fake_parent_name()
# ---------------
# Functionality:
# remove a child (a task that it writes data to) for the task.
# Variables:
# child: tasks's child.
# ---------------
def remove_child(self, child):
for other_child in self.__children:
other_child.remove_sibling(child)
child.remove_sibling(other_child)
self.__children.remove(child)
del self.__task_to_family_task_work[child]
child.__parents.remove(self)
del child.__task_to_family_task_work[self]
def remove_parent(self, parent):
self.__parents.remove(parent)
del self.__task_to_family_task_work[parent]
parent.__children.remove(self)
del parent.__task_to_family_task_work[self]
# ---------------
# Functionality:
# add sibling (a task with the same parent) for the task.
# Variables:
# task: sibling task.
# ---------------
def add_sibling(self, task):
if task not in self.__siblings:
self.__siblings.append(task)
# ---------------
# Functionality:
# removing sibling (a task with the same parent) for the task.
# Variables:
# task: sibling task.
# ---------------
def remove_sibling(self, task):
if task in self.__siblings:
self.__siblings.remove(task)
# ---------------
# Functionality:
# get the relationship of the task with the input task.
# Variables:
# task_: the task to find the relationship for.
# ---------------
def get_relationship(self, task_):
if any([task__.name == task_.name for task__ in self.__children]):
return "child"
elif any([task__.name == task_.name for task__ in self.__parents]):
return "parent"
elif any([task__.name == task_.name for task__ in self.__siblings]):
return "sibling"
elif task_.name == self.name:
return "self"
else:
return "no relationship"
# ---------------
# Functionality:
    #       get the task's work
# ---------------
def get_self_task_work(self):
return self.__task_to_family_task_work[self]
# ---------------
# Functionality:
# get self to task family work (how much data is passed from/to the family task).
# Variables:
# family_task: family task.
# ---------------
def get_self_to_family_task_work(self, family_task):
if family_task in self.get_children():
return self.__task_to_family_task_work[family_task]
elif family_task in self.get_parents():
return family_task.get_self_to_family_task_work(self)
elif family_task == self:
return self.get_self_task_work()
else:
print(family_task.name + " is not a family task of " + self.name)
exit(0)
def get_self_total_work(self, mode):
total_work = 0
if mode == "execute":
total_work = self.__task_to_family_task_work[self]
if mode == "read":
for family_task in self.get_parents():
total_work += family_task.get_self_to_family_task_work(self)
if mode == "write":
for family_task in self.get_children():
total_work += self.__task_to_family_task_work[family_task]
return total_work
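    # Continuing the hypothetical producer/consumer sketch further up:
    #   producer.get_self_total_work("execute")  -> 100 (its own work)
    #   producer.get_self_total_work("write")    -> 20  (data written to children)
    #   consumer.get_self_total_work("read")     -> 20  (data read from parents)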
# return self to family task unit of work. For example
# work unit from bus and memory perspective is the burst size
# (in bytes)
def get_self_to_family_task_work_unit(self, family_task):
return self.__task_to_family_task_work_unit[family_task]
    # determines what the dicing "grain" should be such that the
    # work unit (e.g., burst
"""Preprocessing functions.
Authors: <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
def preprocess_fif(fif_file, l_freq=None, h_freq=None, down_sfreq=None):
"""Filter and downsample data."""
import os
from mne.io import read_raw_fif
from mne import pick_types
from nipype.utils.filemanip import split_filename as split_f
_, basename, ext = split_f(fif_file)
raw = read_raw_fif(fif_file, preload=True)
filt_str, down_str = '', ''
select_sensors = pick_types(raw.info, meg=True, ref_meg=False, eeg=False)
if l_freq or h_freq:
raw.filter(l_freq=l_freq, h_freq=h_freq,
picks=select_sensors, fir_design='firwin')
filt_str = '_filt'
if down_sfreq:
raw.resample(sfreq=down_sfreq, npad=0, stim_picks=select_sensors)
down_str = '_dsamp'
savename = os.path.abspath(basename + filt_str + down_str + ext)
raw.save(savename)
return savename
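# A brief usage sketch for preprocess_fif above (hypothetical file name):
# band-pass filter the MEG channels between 0.1 and 150 Hz and resample to
# 300 Hz, writing e.g. "sub01_rest_filt_dsamp.fif" in the working directory:
#
#   out_fname = preprocess_fif('sub01_rest.fif', l_freq=0.1, h_freq=150.,
#                              down_sfreq=300)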
def compute_ica(fif_file, ecg_ch_name, eog_ch_name, n_components, reject):
"""Compute ica solution."""
import os
import mne
from mne.io import read_raw_fif
from mne.preprocessing import ICA
from mne.preprocessing import create_ecg_epochs, create_eog_epochs
from nipype.utils.filemanip import split_filename as split_f
subj_path, basename, ext = split_f(fif_file)
raw = read_raw_fif(fif_file, preload=True)
# select sensors
select_sensors = mne.pick_types(raw.info, meg=True,
ref_meg=False, exclude='bads')
# 1) Fit ICA model using the FastICA algorithm
# Other available choices are `infomax` or `extended-infomax`
# We pass a float value between 0 and 1 to select n_components based on the
# percentage of variance explained by the PCA components.
# reject = dict(mag=1e-1, grad=1e-9)
flat = dict(mag=1e-13, grad=1e-13)
ica = ICA(n_components=n_components, method='fastica', max_iter=500)
ica.fit(raw, picks=select_sensors, reject=reject, flat=flat)
# -------------------- Save ica timeseries ---------------------------- #
ica_ts_file = os.path.abspath(basename + "_ica-tseries.fif")
ica_src = ica.get_sources(raw)
ica_src.save(ica_ts_file)
ica_src = None
# --------------------------------------------------------------------- #
# 2) identify bad components by analyzing latent sources.
# generate ECG epochs use detection via phase statistics
# if we just have exclude channels we jump these steps
n_max_ecg = 3
n_max_eog = 2
# check if ecg_ch_name is in the raw channels
if ecg_ch_name in raw.info['ch_names']:
raw.set_channel_types({ecg_ch_name: 'ecg'})
else:
ecg_ch_name = None
ecg_epochs = create_ecg_epochs(raw, tmin=-0.5, tmax=0.5,
picks=select_sensors,
ch_name=ecg_ch_name)
ecg_inds, ecg_scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
ecg_evoked = ecg_epochs.average()
ecg_epochs = None
ecg_inds = ecg_inds[:n_max_ecg]
ica.exclude += ecg_inds
eog_ch_name = eog_ch_name.replace(' ', '')
if set(eog_ch_name.split(',')).issubset(set(raw.info['ch_names'])):
print('*** EOG CHANNELS FOUND ***')
eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name=eog_ch_name)
eog_inds = eog_inds[:n_max_eog]
ica.exclude += eog_inds
eog_evoked = create_eog_epochs(raw, tmin=-0.5, tmax=0.5,
picks=select_sensors,
ch_name=eog_ch_name).average()
else:
print('*** NO EOG CHANNELS FOUND!!! ***')
eog_inds = eog_scores = eog_evoked = None
report_file = generate_report(raw=raw, ica=ica, subj_name=fif_file,
basename=basename,
ecg_evoked=ecg_evoked, ecg_scores=ecg_scores,
ecg_inds=ecg_inds, ecg_ch_name=ecg_ch_name,
eog_evoked=eog_evoked, eog_scores=eog_scores,
eog_inds=eog_inds, eog_ch_name=eog_ch_name)
report_file = os.path.abspath(report_file)
ica_sol_file = os.path.abspath(basename + '_ica_solution.fif')
ica.save(ica_sol_file)
raw_ica = ica.apply(raw)
raw_ica_file = os.path.abspath(basename + '_ica' + ext)
raw_ica.save(raw_ica_file)
return raw_ica_file, ica_sol_file, ica_ts_file, report_file
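# A brief usage sketch for compute_ica above (hypothetical channel and file
# names; reject thresholds as built by create_reject_dict below):
#
#   raw_ica, ica_sol, ica_ts, report = compute_ica(
#       'sub01_rest.fif', ecg_ch_name='ECG063', eog_ch_name='EOG061,EOG062',
#       n_components=0.95, reject=dict(mag=4e-12, grad=4000e-13))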
def preprocess_set_ica_comp_fif_to_ts(fif_file, subject_id, n_comp_exclude,
is_sensor_space):
"""Preprocess ICA fif to ts."""
import os
import sys
import mne
from mne.preprocessing import read_ica
from nipype.utils.filemanip import split_filename as split_f
from ephypype.preproc import create_ts
subj_path, basename, ext = split_f(fif_file)
(data_path, sbj_name) = os.path.split(subj_path)
print(('*** SBJ %s' % subject_id + '***'))
# Read raw
current_dir = os.getcwd()
if os.path.exists(os.path.join(current_dir, '../ica',
basename + '_ica' + ext)):
raw_ica_file = os.path.join(
current_dir, '../ica', basename + '_ica' + ext)
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + '_filt_ica' + ext)):
raw_ica_file = os.path.join(
current_dir, '../ica', basename + '_filt_ica' + ext)
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + '_filt_dsamp_ica' + ext)):
raw_ica_file = os.path.join(
current_dir, '../ica', basename + '_filt_dsamp_ica' + ext)
print(('*** raw_ica_file %s' % raw_ica_file + '***'))
raw = mne.io.read_raw_fif(raw_ica_file, preload=True)
# load ICA
if os.path.exists(os.path.join(current_dir, '../ica',
basename + '_ica_solution.fif')):
ica_sol_file = os.path.join(
current_dir, '../ica', basename + '_ica_solution.fif')
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + '_filt_ica_solution.fif')):
ica_sol_file = os.path.join(
current_dir, '../ica', basename + '_filt_ica_solution.fif')
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + "_filt_dsamp_ica_solution."
"fif")):
ica_sol_file = os.path.join(
current_dir, '../ica', basename + '_filt_dsamp_ica_solution.fif')
if os.path.exists(ica_sol_file) is False:
print(('$$$ Warning, no %s found' % ica_sol_file))
sys.exit()
else:
ica = read_ica(ica_sol_file)
print(('\n *** ica.exclude before set components= ', ica.exclude))
if subject_id in n_comp_exclude:
print(('*** ICA to be excluded for sbj %s ' % subject_id))
print((' ' + str(n_comp_exclude[subject_id]) + '***'))
session_dict = n_comp_exclude[subject_id]
session_names = list(session_dict.keys())
componentes = []
for s in session_names:
componentes = session_dict[s]
if len(componentes) == 0:
print('\n no ICA to be excluded \n')
else:
print(('\n *** ICA to be excluded for session %s ' % s +
' ' + str(componentes) + ' *** \n'))
ica.exclude = componentes
print(('\n *** ica.exclude after set components = ', ica.exclude))
# apply ICA to raw data
new_raw_ica_file = os.path.join(subj_path, basename + '_ica-raw.fif')
raw_ica = ica.apply(raw)
raw_ica.save(new_raw_ica_file, overwrite=True)
# save ICA solution
print(ica_sol_file)
ica.save(ica_sol_file)
(ts_file, channel_coords_file, channel_names_file,
raw.info['sfreq']) = create_ts(new_raw_ica_file)
if is_sensor_space:
return (ts_file, channel_coords_file, channel_names_file,
raw.info['sfreq'])
else:
return (raw_ica, channel_coords_file, channel_names_file,
raw.info['sfreq'])
def get_raw_info(raw_fname):
"""Get info from raw."""
from mne.io import Raw
raw = Raw(raw_fname, preload=True)
return raw.info
def get_epochs_info(raw_fname):
"""Get epoch info."""
from mne import read_epochs
epochs = read_epochs(raw_fname)
return epochs.info
def get_raw_sfreq(raw_fname):
"""Get raw sfreq."""
import mne
try:
data = mne.io.read_raw_fif(raw_fname)
except: # noqa
data = mne.read_epochs(raw_fname)
return data.info['sfreq']
def create_reject_dict(raw_info):
"""Create reject dir."""
from mne import pick_types
picks_eog = pick_types(raw_info, meg=False, ref_meg=False, eog=True)
picks_mag = pick_types(raw_info, meg='mag', ref_meg=False)
picks_grad = pick_types(raw_info, meg='grad', ref_meg=False)
reject = dict()
if picks_mag.size != 0:
reject['mag'] = 4e-12
if picks_grad.size != 0:
reject['grad'] = 4000e-13
if picks_eog.size != 0:
reject['eog'] = 150e-6
return reject
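# For a raw.info that contains magnetometers, gradiometers and EOG channels,
# create_reject_dict returns {'mag': 4e-12, 'grad': 4000e-13, 'eog': 150e-6};
# keys for channel types absent from the recording are simply omitted.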
def create_ts(raw_fname):
"""Read a raw data in **.fif** format.
Parameters
----------
raw_fname : str
pathname of the raw data to read
Returns
-------
ts_file : str
pathname of the numpy file (.npy) containing the data read from
raw_fname
channel_coords_file : str
pathname of .txt file containing the channels coordinates
channel_names_file : str
pathname of .txt file containing the channels labels
sfreq : float
sampling frequency
"""
import os
import numpy as np
import mne
from mne.io import read_raw_fif
from nipype.utils.filemanip import split_filename as split_f
raw = read_raw_fif(raw_fname, preload=True)
subj_path, basename, ext = split_f(raw_fname)
select_sensors = mne.pick_types(raw.info, meg=True, ref_meg=False,
exclude='bads')
# save electrode locations
sens_loc = [raw.info['chs'][i]['loc'][:3] for i in select_sensors]
sens_loc = np.array(sens_loc)
channel_coords_file = os.path.abspath('correct_channel_coords.txt')
np.savetxt(channel_coords_file, sens_loc, fmt=str("%s"))
# np.savetxt(ROI_coords_file,np.array(ROI_coords,dtype = int),fmt = "%d")
# save electrode names
sens_names = np.array([raw.ch_names[pos] for pos in select_sensors],
dtype='str')
channel_names_file = os.path.abspath('correct_channel_names.txt')
np.savetxt(channel_names_file, sens_names, fmt=str('%s'))
data, times = raw[select_sensors, :]
print((data.shape))
ts_file = os.path.abspath(basename + '.npy')
np.save(ts_file, data)
print(('\n *** TS FILE ' + ts_file + '*** \n'))
print(('*** raw.info[sfreq] = ' + str(raw.info['sfreq'])))
return ts_file, channel_coords_file, channel_names_file, raw.info['sfreq']
def generate_report(raw, ica, subj_name, basename,
ecg_evoked, ecg_scores, ecg_inds, ecg_ch_name,
eog_evoked, eog_scores, eog_inds, eog_ch_name):
"""Generate report for ica solution."""
from mne.report import Report
import numpy as np
import os
report = Report()
ica_title = 'Sources related to %s artifacts (red)'
is_show = False
# ------------------- Generate report for ECG ------------------------ #
fig_ecg_scores = ica.plot_scores(ecg_scores,
exclude=ecg_inds,
title=ica_title % 'ecg',
show=is_show)
# Pick the five largest ecg_scores and plot them
show_picks = np.abs(ecg_scores).argsort()[::-1][:5]
# Plot estimated latent sources given the unmixing matrix.
fig_ecg_ts = ica.plot_sources(raw, show_picks, exclude=ecg_inds,
title=ica_title % 'ecg' + ' in 30s',
start=0, stop=30, show=is_show)
# topoplot of unmixing matrix columns
fig_ecg_comp = ica.plot_components(show_picks,
title=ica_title % 'ecg',
colorbar=True, show=is_show)
# plot ECG sources + selection
fig_ecg_src = ica.plot_sources(ecg_evoked, exclude=ecg_inds, show=is_show)
fig = [fig_ecg_scores, fig_ecg_ts, fig_ecg_comp, fig_ecg_src]
report.add_figs_to_section(fig,
captions=['Scores of ICs related to ECG',
'Time Series plots of ICs (ECG)',
'TopoMap of ICs (ECG)',
'Time-locked ECG sources'],
section='ICA - ECG')
# -------------------- end generate report for ECG ---------------------- #
# -------------------------- Generate report for EoG -------------------- #
# check how many EoG ch we have
if set(eog_ch_name.split(',')).issubset(set(raw.info['ch_names'])):
fig_eog_scores = ica.plot_scores(eog_scores, exclude=eog_inds,
title=ica_title % 'eog', show=is_show)
report.add_figs_to_section(fig_eog_scores,
captions=['Scores of ICs related to EOG'],
section='ICA - EOG')
n_eogs = np.shape(eog_scores)
if len(n_eogs) > 1:
n_eog0 = n_eogs[0]
show_picks = [np.abs(eog_scores[i][:]).argsort()[::-1][:5]
for i in range(n_eog0)]
for i in range(n_eog0):
fig_eog_comp = ica.plot_components(show_picks[i][:],
title=ica_title % 'eog',
colorbar=True, show=is_show)
fig = [fig_eog_comp]
report.add_figs_to_section(fig,
captions=['Scores of EoG ICs'],
section='ICA - EOG')
else:
show_picks = np.abs(eog_scores).argsort()[::-1][:5]
fig_eog_comp = ica.plot_components(show_picks,
title=ica_title % 'eog',
colorbar=True, show=is_show)
fig = [fig_eog_comp]
report.add_figs_to_section(fig, captions=['TopoMap of ICs (EOG)'],
section='ICA - EOG')
fig_eog_src = ica.plot_sources(eog_evoked,
exclude=eog_inds,
show=is_show)
fig = [fig_eog_src]
report.add_figs_to_section(fig, captions=['Time-locked EOG sources'],
section='ICA - EOG')
# ----------------- end generate report for EoG ---------- #
ic_nums = list(range(ica.n_components_))
fig = ica.plot_components(picks=ic_nums, show=False)
report.add_figs_to_section(fig, captions=['All IC topographies'],
section='ICA - muscles')
fig = ica.plot_sources(raw, start=0, stop=None, title='All IC time series')
report.add_figs_to_section(fig, captions=['All IC time series'],
section='ICA - muscles')
psds = []
captions_psd = []
ica_src = ica.get_sources(raw)
for i_ic in ic_nums:
        fig =
# sigpy/plot/image.py (davidyzeng/sigpy)
# -*- coding: utf-8 -*-
"""Image plot.
"""
import os
import uuid
import subprocess
import datetime
import numpy as np
import matplotlib.pyplot as plt
from sigpy.util import prod, move
class Image(object):
"""Plot array as image.
Keyword Args:
x/y: select current dimension as x and y dimension.
t: swap between x and y axis.
z: toggle current dimension as z dimension.
c: toggle current dimension as color channel. Only works if current dimension is of length 3.
left/right: increment/decrement current dimension
up/down: flip axis when current dimension is x or y.
Otherwise increment/decrement slice at current dimension.
h: toggle hide all labels, titles and axes.
m: magnitude mode. Renormalizes when pressed each time.
p: phase mode. Renormalizes when pressed each time.
r: real mode. Renormalizes when pressed each time.
i: imaginary mode. Renormalizes when pressed each time.
l: log mode. Renormalizes when pressed each time.
[: decreases brightness. Shifts window level up by 10% of window width.
]: increases brightness. Shifts window level down by 10% of window width.
{: decreases contrast. Scale window width by 0.9.
}: increases contrast. Scale window width by 1.1.
s: save as png.
g: save as gif by traversing current dimension.
v: save as mp4 by traversing current dimension.
0-9: enter slice number.
enter: Set current dimension as slice number.
"""
def __init__(self, im, x=-1, y=-2, z=None, c=None, hide=False, mode='m', title='',
interpolation='lanczos', save_basename='Figure', fps=10):
if im.ndim < 2:
raise TypeError('Image dimension must at least be two, got {im_ndim}'.format(
im_ndim=im.ndim))
self.axim = None
self.im = im
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.shape = self.im.shape
self.ndim = self.im.ndim
self.slices = [s // 2 for s in self.shape]
self.flips = [1] * self.ndim
self.x = x % self.ndim
self.y = y % self.ndim
self.z = z % self.ndim if z is not None else None
self.c = c % self.ndim if c is not None else None
self.d = max(self.ndim - 3, 0)
self.hide = hide
self.title = title
self.interpolation = interpolation
self.mode = mode
self.entering_slice = False
self.vmin = None
self.vmax = None
self.save_basename = save_basename
self.fps = fps
self.fig.canvas.mpl_disconnect(
self.fig.canvas.manager.key_press_handler_id)
self.fig.canvas.mpl_connect('key_press_event', self.key_press)
self.update_axes()
self.update_image()
self.fig.canvas.draw()
plt.show()
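    # A brief usage sketch (hypothetical array shape): any array with at least
    # two dimensions opens the interactive viewer, e.g.
    #
    #   Image(np.random.randn(8, 32, 64, 64), z=1, title='coil images')
    #
    # after which the key bindings documented in the class docstring drive
    # slicing, windowing and export.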
def key_press(self, event):
if event.key == 'up':
if self.d not in [self.x, self.y, self.z, self.c]:
self.slices[self.d] = (
self.slices[self.d] + 1) % self.shape[self.d]
else:
self.flips[self.d] *= -1
self.update_axes()
self.update_image()
self.fig.canvas.draw()
elif event.key == 'down':
if self.d not in [self.x, self.y, self.z, self.c]:
self.slices[self.d] = (
self.slices[self.d] - 1) % self.shape[self.d]
else:
self.flips[self.d] *= -1
self.update_axes()
self.update_image()
self.fig.canvas.draw()
elif event.key == 'left':
self.d = (self.d - 1) % self.ndim
self.update_axes()
self.fig.canvas.draw()
elif event.key == 'right':
self.d = (self.d + 1) % self.ndim
self.update_axes()
self.fig.canvas.draw()
elif event.key == 'x' and self.d not in [self.x, self.z, self.c]:
if self.d == self.y:
self.x, self.y = self.y, self.x
else:
self.x = self.d
self.update_axes()
self.update_image()
self.fig.canvas.draw()
elif event.key == 'y' and self.d not in [self.y, self.z, self.c]:
if self.d == self.x:
self.x, self.y = self.y, self.x
else:
self.y = self.d
self.update_axes()
self.update_image()
self.fig.canvas.draw()
elif event.key == 'z' and self.d not in [self.x, self.y, self.c]:
if self.d == self.z:
self.z = None
else:
self.z = self.d
self.update_axes()
self.update_image()
self.fig.canvas.draw()
elif (event.key == 'c' and
self.d not in [self.x, self.y, self.z] and
self.shape[self.d] == 3):
if self.d == self.c:
self.c = None
else:
self.c = self.d
self.update_axes()
self.update_image()
self.fig.canvas.draw()
elif event.key == 't':
self.x, self.y = self.y, self.x
self.update_axes()
self.update_image()
self.fig.canvas.draw()
elif event.key == 'h':
self.hide = not self.hide
self.update_axes()
self.fig.canvas.draw()
elif event.key == 'f':
self.fig.canvas.manager.full_screen_toggle()
elif event.key == ']':
width = self.vmax - self.vmin
self.vmin -= width * 0.1
self.vmax -= width * 0.1
self.update_image()
self.fig.canvas.draw()
elif event.key == '[':
width = self.vmax - self.vmin
self.vmin += width * 0.1
self.vmax += width * 0.1
self.update_image()
self.fig.canvas.draw()
elif event.key == '}':
width = self.vmax - self.vmin
center = (self.vmax + self.vmin) / 2
self.vmin = center - width * 1.1 / 2
self.vmax = center + width * 1.1 / 2
self.update_image()
self.fig.canvas.draw()
elif event.key == '{':
width = self.vmax - self.vmin
center = (self.vmax + self.vmin) / 2
self.vmin = center - width * 0.9 / 2
self.vmax = center + width * 0.9 / 2
self.update_image()
self.fig.canvas.draw()
elif event.key in ['m', 'p', 'r', 'i', 'l']:
self.vmin = None
self.vmax = None
self.mode = event.key
self.update_axes()
self.update_image()
self.fig.canvas.draw()
elif event.key == 's':
filename = self.save_basename + \
datetime.datetime.now().strftime(' %Y-%m-%d at %I.%M.%S %p.png')
self.fig.savefig(filename, transparent=True, format='png',
bbox_inches='tight', pad_inches=0)
elif event.key == 'g':
filename = self.save_basename + \
datetime.datetime.now().strftime(' %Y-%m-%d at %I.%M.%S %p.gif')
temp_basename = uuid.uuid4()
bbox = self.fig.get_tightbbox(self.fig.canvas.get_renderer())
for i in range(self.shape[self.d]):
self.slices[self.d] = i
self.update_axes()
self.update_image()
self.fig.canvas.draw()
self.fig.savefig('{} {:05d}.png'.format(temp_basename, i),
format='png', bbox_inches=bbox, pad_inches=0)
subprocess.run(['ffmpeg', '-f', 'image2',
'-s', '{}x{}'.format(int(bbox.width * self.fig.dpi),
int(bbox.height * self.fig.dpi)),
'-r', str(self.fps),
'-i', '{} %05d.png'.format(temp_basename),
'-vf', 'palettegen', '{} palette.png'.format(temp_basename)])
subprocess.run(['ffmpeg', '-f', 'image2',
'-s', '{}x{}'.format(int(bbox.width * self.fig.dpi),
int(bbox.height * self.fig.dpi)),
'-r', str(self.fps),
'-i', '{} %05d.png'.format(temp_basename),
'-i', '{} palette.png'.format(temp_basename),
'-lavfi', 'paletteuse', filename])
os.remove('{} palette.png'.format(temp_basename))
for i in range(self.shape[self.d]):
os.remove('{} {:05d}.png'.format(temp_basename, i))
elif event.key == 'v':
filename = self.save_basename + \
datetime.datetime.now().strftime(' %Y-%m-%d at %I.%M.%S %p.mp4')
temp_basename = uuid.uuid4()
bbox = self.fig.get_tightbbox(self.fig.canvas.get_renderer())
for i in range(self.shape[self.d]):
self.slices[self.d] = i
self.update_axes()
self.update_image()
self.fig.canvas.draw()
self.fig.savefig('{} {:05d}.png'.format(temp_basename, i),
format='png', bbox_inches=bbox, pad_inches=0)
subprocess.run(['ffmpeg', '-f', 'image2',
'-s', '{}x{}'.format(int(bbox.width * self.fig.dpi),
int(bbox.height * self.fig.dpi)),
'-r', str(self.fps),
'-i', '{} %05d.png'.format(temp_basename),
'-vf', "scale=trunc(iw/2)*2:trunc(ih/2)*2",
'-vcodec', 'libx264', '-pix_fmt', 'yuv420p', filename])
for i in range(self.shape[self.d]):
os.remove('{} {:05d}.png'.format(temp_basename, i))
elif (event.key in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'backspace'] and
self.d not in [self.x, self.y, self.z, self.c]):
if self.entering_slice:
if event.key == 'backspace':
if self.entered_slice < 10:
self.entering_slice = False
else:
self.entered_slice //= 10
else:
self.entered_slice = self.entered_slice * \
10 + int(event.key)
else:
self.entering_slice = True
self.entered_slice = int(event.key)
self.update_axes()
self.fig.canvas.draw()
elif event.key == 'enter' and self.entering_slice:
self.entering_slice = False
if self.entered_slice < self.shape[self.d]:
self.slices[self.d] = self.entered_slice
self.update_image()
self.update_axes()
self.fig.canvas.draw()
else:
return
def update_image(self):
# Extract slice.
idx = []
for i in range(self.ndim):
if i in [self.x, self.y, self.z, self.c]:
idx.append(slice(None, None, self.flips[i]))
else:
idx.append(self.slices[i])
        imv = move(self.im[tuple(idx)])
# Transpose to have [z, y, x, c].
imv_dims = [self.y, self.x]
if self.z is not None:
imv_dims = [self.z] + imv_dims
if self.c is not None:
imv_dims = imv_dims + [self.c]
imv = np.transpose(imv, np.argsort(np.argsort(imv_dims)))
imv = array_to_image(imv, color=self.c is not None)
if self.mode == 'm':
imv = np.abs(imv)
elif self.mode == 'p':
imv = np.angle(imv)
elif self.mode == 'r':
imv = np.real(imv)
elif self.mode == 'i':
imv = np.imag(imv)
elif self.mode == 'l':
imv = np.abs(imv)
imv = np.log(imv, out=np.ones_like(imv) * -31, where=imv != 0)
if self.vmin is None:
self.vmin = imv.min()
if self.vmax is None:
self.vmax = imv.max()
if self.axim is None:
self.axim = self.ax.imshow(imv,
vmin=self.vmin, vmax=self.vmax,
cmap='gray', origin='lower',
interpolation=self.interpolation, aspect=1.0,
extent=[0, imv.shape[1], 0, imv.shape[0]])
else:
self.axim.set_data(imv)
self.axim.set_extent([0, imv.shape[1], 0, imv.shape[0]])
self.axim.set_clim(self.vmin, self.vmax)
def update_axes(self):
if not self.hide:
caption = '['
for i in range(self.ndim):
if i == self.d:
caption += '['
else:
caption += ' '
if (self.flips[i] == -1 and (i == self.x or
i == self.y or
i == self.z)):
caption += '-'
if i == self.x:
caption += 'x'
elif i == self.y:
caption += 'y'
elif i == self.z:
caption += 'z'
elif i == self.c:
caption += 'c'
elif i == self.d and self.entering_slice:
caption += str(self.entered_slice) + '_'
else:
caption += str(self.slices[i])
if i == self.d:
caption += ']'
else:
caption += ' '
caption += ']'
self.ax.set_title(caption)
self.fig.suptitle(self.title)
self.ax.xaxis.set_visible(True)
self.ax.yaxis.set_visible(True)
self.ax.title.set_visible(True)
else:
self.ax.set_title('')
self.fig.suptitle('')
self.ax.xaxis.set_visible(False)
self.ax.yaxis.set_visible(False)
self.ax.title.set_visible(False)
def mosaic_shape(batch):
mshape = [int(batch**0.5), batch // int(batch**0.5)]
while (prod(mshape) < batch):
mshape[1] += 1
if (mshape[0] - 1) * (mshape[1] + 1) == batch:
mshape[0] -= 1
mshape[1] += 1
return tuple(mshape)
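# For example, mosaic_shape(12) returns (3, 4) and mosaic_shape(5) returns
# (2, 3): the grid is widened until it can hold `batch` tiles, then one row is
# dropped whenever a (rows - 1) x (cols + 1) grid holds the batch exactly.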
def array_to_image(arr, color=False):
"""
Flattens all dimensions except the last two
"""
if color:
arr = np.divide(arr, np.abs(arr).max(),
out=np.zeros_like(arr), where=arr != 0)
if arr.ndim == 2:
return arr
elif color and arr.ndim == 3:
return arr
    if
# duncanesmith/lsclib
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 18 10:33:32 2019
@author: smithd24
"""
# import common functions from default python library
import math # import math functions
import random # import random number functions
import numpy as np # import numpy matrix operations
# import transformation functions
from lsclib import rotations as rm
from lsclib.coordinate_transformations import sph2cart, cart2sph
def xenon_spectrum(spectrum, spectrum_max):
"""From the normalized incident light spectrum on an LSC, probabilistically
determine an individual wavelength.
Parameters
----------
spectrum : DataFrame, float
Normalized incident light spectrum or individual wavelength
spectrum_max : float
Maximum normalized probability
Returns
-------
wave_len : float
Initial wavelength of a bundle
Notes
-----
This function should be renamed - it can handle other incident spectra, not
just xenon.
spectrum_max should be automated here, should not be an input
"""
wave_len = 0
if type(spectrum) is float:
wave_len = spectrum
else:
wave_len = spectrum.__call__(random.uniform(0,spectrum_max))
return wave_len
def pathlength_matrix(wave_len, wave_len_min, wave_len_max, absorption):
"""Determines pathlength through a volume as a function of wavelength using
    Bouger's law.
Parameters
----------
wave_len : float
Bundle wavelength
wave_len_min : float
Minimum wavelength absorbed by matrix
wave_len_max : float
Maximum wavelength absorbed by matrix
absorption : float, DataFrame
Float value for probability of absorption by the matrix, or the
normalized absorption spectrum for the matrix.
Returns
-------
matrix_path : float
Distance a bundle can travel before it is absorbed.
Notes
-----
"""
if type(absorption) is float:
matrix_abco = absorption # matrix_abco = matrix absorption coefficient
else:
matrix_abco = 0
if wave_len < wave_len_min or wave_len > wave_len_max:
matrix_abco = 10000
if wave_len >= wave_len_min and wave_len <= wave_len_max:
matrix_abco = absorption.__call__(wave_len)
# calculate pathlength using Bouger's law
matrix_path = ((-1 / matrix_abco) * math.log(random.uniform(0, 1)))
return matrix_path
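# Worked example of Bouger's law as used above: with a matrix absorption
# coefficient of 10 (1/length) and a uniform random draw of 0.5, the bundle
# travels (-1 / 10) * ln(0.5) ~= 0.069 length units before being absorbed.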
def surface_absorption(wave_len, wave_len_min, wave_len_max, abs_surface):
"""Determines probability of absorption as a function of wavelength at a
particular boundary.
Parameters
----------
wave_len : float
Bundle wavelength
wave_len_min : float
Minimum wavelength absorbed by matrix
wave_len_max : float
Maximum wavelength absorbed by matrix
abs_surface : DataFrame
Probability of absorption of a surface as a function of wavelength
Returns
-------
probability : float
Probability that a bundle is absorbed
Notes
-----
surface_absorption should be renamed to boundary_absorption
"""
probability = 0
if wave_len >= wave_len_min and wave_len <= wave_len_max:
probability = abs_surface.__call__(wave_len)
return probability
def refracted_vect(n_i, n_f, vect):
"""Determines refracted angle based on incoming vector and index of
refraction of 1st/2nd mediums
Parameters
----------
n_i : float
Index of refraction of the current medium
n_f : float
Index of refraction of the medium a bundle is attempting to enter
vect : NumPy array
Current bundle trajectory in local coordinates
Returns
-------
refr_vect : NumPy array, string
Refracted vector - bundle trajectory after refraction. If no refraction
occurred, the output is a string.
Notes
-----
"""
[theta, phi] = cart2sph(vect)
# solve Snell's law to evaluate critical angle
reflect_test = math.sin(theta) * n_i / n_f
# test for critical angle
if (n_f < n_i) and (reflect_test > 1):
refr_vect = "no refraction"
# use Snell's law to solve for the refracted vector
else:
refr_angle = math.asin(reflect_test)
# realign refr_angle within one quadrant
if theta > (math.pi/2):
refr_angle = math.pi - refr_angle
refr_vect = sph2cart(refr_angle, phi)
return refr_vect
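# Worked Snell's law example for refracted_vect above: going from glass
# (n_i = 1.5) into air (n_f = 1.0) at theta = 30 deg gives
# sin(theta_r) = 1.5 * sin(30 deg) / 1.0 = 0.75, i.e. theta_r ~= 48.6 deg;
# beyond the critical angle asin(1.0 / 1.5) ~= 41.8 deg the function returns
# the string "no refraction" and the caller treats the bundle as reflected.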
def reflectivity(vect, refracted_vect):
"""Calculate reflectivity at an interface using Fresnel's equations. Light
is assumed to be unpolarized.
Parameters
----------
vect : NumPy array
Current bundle trajectory in local coordinates
refr_vect : NumPy array, string
Refracted vector - bundle trajectory after refraction. If refraction
was impossible, this is a string.
Returns
-------
rho : float
Reflectivity at an interface calculated using Fresnel's equations
Notes
-----
"""
if isinstance(refracted_vect, str) is False:
[xi, phi] = cart2sph(refracted_vect)
[theta, phi] = cart2sph(vect)
# apply Fresnel's equations for reflectivity to determine reflectivity
rho = 0.5 * ((math.tan(theta - xi))**2 / (math.tan(theta + xi))**2 +
(math.sin(theta - xi))**2/(math.sin(theta + xi))**2)
else:
rho = 1 # critical angle was achieved ensuring reflection
return rho
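# A minimal, self-contained sketch (not part of the original lsclib API) that
# chains refracted_vect and reflectivity above for unpolarized light. For air
# (n_i = 1.0) into glass (n_f = 1.5) at 45 deg it returns roughly 0.05, and it
# returns 1.0 whenever total internal reflection occurs.
def _example_fresnel_reflectivity(n_i=1.0, n_f=1.5, theta_deg=45.0):
    """Return the Fresnel reflectivity for a bundle hitting an interface."""
    # incident direction in local coordinates from spherical angles
    vect = sph2cart(math.radians(theta_deg), 0.0)
    # refracted direction, or the string "no refraction" on total internal
    # reflection
    refr = refracted_vect(n_i, n_f, vect)
    # Fresnel reflectivity for unpolarized light (1.0 when refraction failed)
    return reflectivity(vect, refr)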
def refraction_or_reflection(rho, vect, tilt, refracted_vect, indexmatch,
n, surface_type = 'specular'):
"""Determine if a bundle is refracted or reflected at an interface.
Parameters
----------
rho : float
Reflectivity at an interface calculated using Fresnel's equations
vect : NumPy array
Current bundle trajectory in local coordinates
tilt : float
Boundary angle relative to the xy plane
refracted_vect : NumPy array, string
Refracted vector - bundle trajectory after refraction. If refraction
was impossible, this is a string.
indexmatch : int
If indexmatch is 1, bundle remains in the same volume or is lost
If indexmatch is 0, bundle has the opportunity to move to an
adjacent volume.
n : NumPy array
Normal vector of a boundary, faces inward towards center of the volume
surface_type : string, optional
Indicates the behavior of reflected bundles, options are:
(1) Specular
(2) Diffuse
Returns
-------
theta : float
Polar incidence angle (classically defined as just the incidence
angle) in global coordinates. This is a spherical coordinate.
phi : float
Azimuthal incidence angle in global coordinates. This is a
spherical coordinate.
bundle_reflect_stay : int
Bundle is reflected but remains within the LSC (1) otherwise (0)
bundle_reflect_lost : int
Bundle is reflected and is lost (1) otherwise (0)
bundle_refract_lost : int
Bundle is refracted out of the LSC (1) otherwise (0)
Notes
-----
There is an excess rotation to produce "ray_vector", this could be included
within bundle_reflection and/or bundle_reflection and processed there
instead.
"""
bundle_reflect_stay = 0
bundle_reflect_lost = 0
bundle_refract_lost = 0
# ray_vector : NumPy array
# reflected/refracted vector in local coordinates
# ray_normal_angle : NumPy array
# Angle between surface normal and reflected/refracted vector
if random.uniform(0, 1) < rho:
[theta, phi] = bundle_reflection(surface_type, vect, tilt, n)
ray_vector = sph2cart(theta, phi)
ray_vector = rm.rot(tilt, ray_vector, n) # rotate into local coords
ray_normal_angle = tilt_angle(ray_vector)
if ray_normal_angle < 0:
ray_normal_angle = 2*math.pi + ray_normal_angle
# if outgoing angle will cause bundle to move in opposite direction of
# normal, otherwise bundle stays in LSC
if ((3*math.pi/2) > ray_normal_angle > (math.pi/2)):
bundle_reflect_lost = 1
else:
bundle_reflect_stay = 1
else:
[theta, phi] = bundle_refraction(surface_type, refracted_vect, tilt, n)
ray_vector = sph2cart(theta, phi)
ray_vector = rm.rot(tilt, ray_vector, n) # rotate into local coords
ray_normal_angle = tilt_angle(ray_vector)
if ray_normal_angle < 0:
ray_normal_angle = 2 * math.pi + ray_normal_angle
# if the outgoing angle moves the bundle in the opposite direction of the
# normal and the bundle will not enter a new volume, then the bundle is
# lost. Otherwise, the bundle stays in the LSC
if (((3 * math.pi / 2) > ray_normal_angle > (math.pi / 2))
and (indexmatch == 1)):
bundle_refract_lost = 1
return [theta, phi, bundle_reflect_stay,
bundle_reflect_lost, bundle_refract_lost]
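# --- Hedged illustration (not part of the original module) ---
# The core decision in refraction_or_reflection() is a Monte Carlo draw: a
# bundle is reflected with probability rho and refracted otherwise. The sketch
# below shows only that accept/reject step with a made-up reflectivity value.
if __name__ == "__main__":
    import random
    rho_example = 0.08          # example reflectivity
    draws = 100000
    reflected = sum(1 for _ in range(draws) if random.uniform(0, 1) < rho_example)
    print("fraction reflected:", reflected / draws)  # should be close to rho_example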
def bundle_reflection(surface_type, vect, tilt, n):
"""Determine bundle trajectory upon reflection. Currently, bundles can be
reflected specularly or diffusely.
Parameters
----------
vect : NumPy array
Current bundle trajectory in local coordinates
tilt : float
Boundary angle relative to the xy plane
n : NumPy array
Normal vector of a boundary, faces inward towards center of the volume
surface_type : string, optional
Indicates the behavior of reflected bundles, options are:
(1) Specular
(2) Diffuse
Returns
-------
theta : float
Polar incidence angle (classically defined as just the incidence
angle) in global coordinates. This is a spherical coordinate.
phi : float
Azimuthal incidence angle in global coordinates. This is a
spherical coordinate.
Notes
-----
"""
if surface_type == 'specular':
vect = np.array(vect) * -1 # flip direction of vector
vect = rm.z(math.pi, vect) # rotate 180 around normal
vect = rm.rot(-tilt, vect, n) # rotate back to global coords
[theta, phi] = cart2sph(vect)
return [theta, phi]
elif surface_type == 'diffuse':
theta_rand = math.asin(math.sqrt(random.uniform(0, 1)))
phi_rand = 2 * math.pi * random.uniform(0, 1)
vect = sph2cart(theta_rand, phi_rand)
vect = rm.rot(-tilt, vect, n) # rotate back to global coords
[theta, phi] = cart2sph(vect)
return [theta, phi]
# -*- coding: utf-8 -*-
import time
import sys
import traceback
import xbmc
import xbmcgui
# Add JSON support for queries
if sys.version_info < (2, 7):
import simplejson
else:
import json as simplejson
# Import the common settings
from settings import Settings
from settings import log
from settings import os_path_join
from settings import os_path_split
from settings import normalize_string
from settings import WindowShowing
from themeFinder import ThemeFiles
from themeFinder import MusicThemeFiles
from themePlayer import ThemePlayer
#########################################################
# Class to handle delaying the start of playing a theme
#########################################################
class DelayedStartTheme():
def __init__(self):
self.themesToStart = None
self.anchorTime = 0
def shouldStartPlaying(self, themes):
# For Music Themes there will not be any locations
firstTheme = None
if len(themes.getThemeLocations()) > 0:
firstTheme = themes.getThemeLocations()[0]
delaySeconds = Settings.getStartDelaySeconds(firstTheme)
# Check if the start of playback should be delayed
if delaySeconds < 1:
# Start playing straight away, but check for List playing built in delay first
return self._checkListPlayingDelay(themes)
currentTime = int(time.time())
if themes != self.themesToStart:
log("DelayedStartTheme: Themes do not match, new anchor = %s" % str(currentTime))
self.themesToStart = themes
# Reset the current time as we need the delay from here
self.anchorTime = currentTime
else:
log("DelayedStartTheme: Target time = %s current time = %s" % (str(self.anchorTime + delaySeconds), str(currentTime)))
# Themes are the same, see if it is time to play the theme yet
if currentTime > (self.anchorTime + delaySeconds):
log("DelayedStartTheme: Start playing")
# Now we are going to start the theme, clear the values
self.clear()
return True
return False
def clear(self):
self.themesToStart = None
self.anchorTime = 0
# Method to support a small delay if running on the list screen
def _checkListPlayingDelay(self, themes):
# Check if we are playing themes on the list view, in which case we will want to delay them
if (Settings.isPlayMovieList() and WindowShowing.isMovies()) or (Settings.isPlayTvShowList() and WindowShowing.isTvShowTitles()) or (Settings.isPlayMusicVideoList() and WindowShowing.isMusicVideoTitles()):
log("DelayedStartTheme: Movie List playing delay detected, anchorTime = %s" % str(self.anchorTime))
if themes != self.themesToStart:
# Theme selection has changed
self.themesToStart = themes
# Reset the current time as we need the delay from here
self.anchorTime = 2 # for movie list delay, it is just a counter
else:
# reduce the anchor by one
self.anchorTime = self.anchorTime - 1
if self.anchorTime < 1:
self.clear()
return True
return False
# Default is to allow playing
return True
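# --- Hedged illustration (not part of this add-on) ---
# DelayedStartTheme only starts playback once the same theme selection has
# been requested continuously for the configured number of seconds. The
# standalone sketch below shows the same "anchor time" pattern without any
# Kodi dependencies; the class and names used here are hypothetical.
if __name__ == "__main__":
    import time

    class AnchorDelay:
        def __init__(self, delay_seconds):
            self.delay_seconds = delay_seconds
            self.pending = None
            self.anchor = 0

        def should_start(self, selection):
            now = time.time()
            if selection != self.pending:
                # new selection: restart the clock from now
                self.pending = selection
                self.anchor = now
                return False
            return now > (self.anchor + self.delay_seconds)

    delay = AnchorDelay(delay_seconds=1)
    print(delay.should_start("theme-a"))   # False: the clock just started
    time.sleep(1.1)
    print(delay.should_start("theme-a"))   # True: same selection held past the delay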
###########################################
# Class to run the program back-end in
###########################################
class TunesBackend():
def __init__(self):
self.themePlayer = ThemePlayer()
log("### starting TvTunes Backend ###")
self.newThemeFiles = ThemeFiles("")
self.oldThemeFiles = ThemeFiles("")
self.prevThemeFiles = ThemeFiles("")
self.delayedStart = DelayedStartTheme()
self.isAlive = False
# Only used for logging filtering
self.lastLoggedThemePath = ""
def runAsAService(self):
logVideoLibraryNotShowing = True
while not xbmc.abortRequested:
# Wait a little before starting the check each time
xbmc.sleep(200)
# Check the forced TV Tunes status at the start of the loop; if this is True
# then we don't want to stop themes until the next iteration. This avoids the
# case where some checks are done and the value changes part way through a
# single loop iteration
isForcedTvTunesContinue = WindowShowing.isTvTunesOverrideContinuePlaying()
# Stop the theme if the shutdown menu appears - it normally means
# we are about to shut the system down, so get ahead of the game
if WindowShowing.isShutdownMenu():
self.stop(fastFade=True)
continue
# NOTE: The screensaver kicking in will only be picked up if the option
# "Use Visualization if Playing Audio" is disabled
if WindowShowing.isScreensaver():
if self.isAlive:
log("TunesBackend: Screensaver active")
self.stop(fastFade=True)
# It is possible that we stopped the theme because the screensaver was about
# to kick in. On Gotham or higher we can re-activate the screensaver
# ourselves, otherwise the act of stopping the theme will reset the
# timeout and the user will have to wait longer
log("TunesBackend: Restarting screensaver that TvTunes stopped")
xbmc.executebuiltin("ActivateScreensaver", True)
continue
# Check if TvTunes is blocked from playing any themes
if xbmcgui.Window(10025).getProperty('TvTunesBlocked') not in [None, ""]:
self.stop(fastFade=True)
continue
if (not WindowShowing.isVideoLibrary()) and (not WindowShowing.isMusicSection()):
log("TunesBackend: Video Library no longer visible", logVideoLibraryNotShowing)
logVideoLibraryNotShowing = False
# End playing cleanly (including any fade out) and then stop everything
self.stop()
continue
else:
logVideoLibraryNotShowing = True
# There is a valid page selected and there is currently nothing playing
if self.isPlayingZone() and not WindowShowing.isTvTunesOverrideContinuePrevious():
newThemes = self.getThemes()
if self.newThemeFiles != newThemes:
self.newThemeFiles = newThemes
# Check if the file path has changed, if so there is a new file to play
if self.newThemeFiles != self.oldThemeFiles and self.newThemeFiles.hasThemes():
log("TunesBackend: old path: %s" % self.oldThemeFiles.getPath())
log("TunesBackend: new path: %s" % self.newThemeFiles.getPath())
if self.start_playing():
# Now that playing has started, update the current themes that are being used
self.oldThemeFiles = self.newThemeFiles
# Check the operations where we are currently running and we need to stop
# playing the current theme
if self.isAlive:
if self.themePlayer.isPlayingTheme():
# There is no theme at this location, so make sure we are stopped
if not self.newThemeFiles.hasThemes():
log("TunesBackend: No themes to play for current item")
self.themePlayer.endPlaying()
self.oldThemeFiles.clear()
self.prevThemeFiles.clear()
self.delayedStart.clear()
self.isAlive = False
else:
# This will occur when a theme has stopped playing, maybe it is not set to loop
# There can be a delay when playing between playlist items, so give it a little
# time to start playing the next one
themeIsStillPlaying = False
maxLoop = 500
while (maxLoop > 0) and (not themeIsStillPlaying):
maxLoop = maxLoop - 1
xbmc.sleep(1)
if self.themePlayer.isPlayingTheme():
themeIsStillPlaying = True
break
if not themeIsStillPlaying:
log("TunesBackend: playing ended, restoring settings")
self.themePlayer.restoreSettings()
self.isAlive = False
# This is the case where the user has moved from an area where the themes
# are played to an area where the theme is no longer played, so it will
# trigger a stop and reset everything to highlight that nothing is playing
if (not self.isPlayingZone()) and (not isForcedTvTunesContinue):
self.stop()
else:
# Check for the case where we are playing the trailer as a theme
# video, if so we want to stop the trailer playing when the video
# information screen is displayed. If we don't, when the trailer is
# started then TvTunes will automatically stop it
if Settings.useTrailers() and WindowShowing.isMovieInformation() and self.themePlayer.isPlayingTrailerTheme():
self.stop()
# Check to see if the setting to restrict the theme duration is enabled
# and if it is we need to stop the current theme playing
self.themePlayer.checkEnding()
# We have finished running, just make one last check to ensure
# we do not need to stop any audio
self.stop(True)
del self.themePlayer
# Works out if the currently displayed area on the screen is something
# that is deemed a zone where themes should be played
def isPlayingZone(self):
if WindowShowing.isTvTunesOverrideContinuePlaying():
return True
if WindowShowing.isRecentEpisodesAdded():
return False
if WindowShowing.isPluginPath():
return False
if WindowShowing.isMovieInformation() and Settings.isPlayVideoInformation():
return True
if WindowShowing.isSeasons() and Settings.isPlayTvShowSeasons():
return True
if WindowShowing.isEpisodes() and Settings.isPlayTvShowEpisodes():
return True
# Only valid if wanting theme on movie list
if WindowShowing.isMovies() and Settings.isPlayMovieList():
return True
# Only valid if wanting theme on TV list
if WindowShowing.isTvShowTitles() and Settings.isPlayTvShowList():
return True
# Only valid if wanting theme on Music Video list
if WindowShowing.isMusicVideoTitles() and Settings.isPlayMusicVideoList():
return True
if WindowShowing.isMusicSection():
return True
# Any other area is deemed to be a non play area
return False
# Locates the path to look for a theme to play based on what is
# currently being displayed on the screen
def getThemes(self):
themePath = ""
# Only need the theme path for videos
if not WindowShowing.isMusicSection():
# Check if the files are stored in a custom path
if Settings.isCustomPathEnabled():
if not WindowShowing.isMovies():
videotitle = xbmc.getInfoLabel("ListItem.TVShowTitle")
else:
videotitle = xbmc.getInfoLabel("ListItem.Title")
videotitle = normalize_string(videotitle)
themePath = os_path_join(Settings.getCustomPath(), videotitle)
# Looking at the TV Show information page
elif WindowShowing.isMovieInformation() and (WindowShowing.isTvShowTitles() or WindowShowing.isTvShows()):
themePath = xbmc.getInfoLabel("ListItem.FilenameAndPath")
else:
themePath = xbmc.getInfoLabel("ListItem.Path")
# To try and reduce the amount of "noise" in the logging, where the
# same check is logged again and again, we record if it has been
# logged for this video, and then do not do it again until the
# video changes
# mom6_tools/m6plot.py
"""
A method for producing a standardized pseudo-color plot of 2D data
"""
import os
try:
if os.environ['DISPLAY'] != None: pass
except:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm, ListedColormap, LogNorm
from matplotlib.ticker import MaxNLocator
import matplotlib
import matplotlib.path as mpath
import math
import numpy, numpy.matlib
import cartopy.crs as ccrs
import cartopy.feature
from mom6_tools import VerticalSplitScale
from mom6_tools import m6toolbox
try: from mpl_toolkits.basemap import Basemap
except: print('Basemap module not found. Some regional plots may not function properly')
try:
import cmocean
except:
if "HTTP_USER_AGENT" in os.environ.keys():
pass
else:
print('cmocean module not found. Some color maps may not render properly')
from sys import modules
def plot_stats_da(stats, var, units, save=None):
"""
Plot statistics computed by myStats_da for multiple basins.
Parameters
----------
stats : xarray.DataArray
DataArray with statistics computed using myStats_da.
var : str
Variable used to compute the statistics.
units : str
Units of variable var.
save : str, optional
Name of file to save figure in. Default None.
"""
import seaborn
import matplotlib
matplotlib.rcParams.update({'font.size': 15})
seaborn.set()
labels = ['Min', 'Max', 'Mean', 'Std', 'RMS']
plt.figure(figsize=(12,14))
for i in range(len(labels)):
ax1 = plt.subplot(5,1,i+1)
for r in range(len(stats.basin)):
stats[r,i,:].plot(ax=ax1, marker='.', linewidth = 3, label=str(stats.basin[r].values));
ax1.set_ylabel(str(labels[i]))
if i == 0:
plt.legend()
plt.title(str(var) + ' ['+str(units)+']')
else:
plt.title('')
if i < (len(labels) -1):
ax1.set_xlabel('')
if save is not None:
plt.savefig(save)
plt.close()
return
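# --- Hedged usage sketch (not part of the original module) ---
# plot_stats_da expects a 3-D DataArray laid out as (basin, statistic, time)
# with the statistics ordered Min, Max, Mean, Std, RMS and a 'basin'
# coordinate. The dimension names 'stat' and 'time' and the variable name
# below are assumptions for illustration only; requires numpy, xarray and
# seaborn to be installed.
if __name__ == "__main__":
    import numpy as np
    import xarray as xr
    basins = ['Global', 'Atlantic']
    stat_labels = ['Min', 'Max', 'Mean', 'Std', 'RMS']
    times = np.arange(10)
    fake_stats = xr.DataArray(
        np.random.rand(len(basins), len(stat_labels), len(times)),
        dims=('basin', 'stat', 'time'),
        coords={'basin': basins, 'stat': stat_labels, 'time': times})
    plot_stats_da(fake_stats, var='thetao', units='degC', save='stats_example.png')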
def plot_stats_da_reg(stats, title, var, units, save=None):
"""
Plot statistics computed by myStats_da for a specific region.
Parameters
----------
stats : xarray.DataArray
DataArray with statistics computed using myStats_da.
title : str
Title to be used in the plot.
var : str
Variable used to compute the statistics.
units : str
Units of variable var.
"""
import seaborn
matplotlib.rcParams.update({'font.size': 15})
seaborn.set()
plt.figure(figsize=(12,14))
ax1 = plt.subplot(511)
stats[0,:].plot(ax=ax1, marker='.'); ax1.set_ylabel('Min')
plt.title(str(var) + ' ['+str(units)+'], Region = '+ str(title))
ax1.set_xlabel('')
ax2 = plt.subplot(512, sharex=ax1)
stats[1,:].plot(ax=ax2, marker='.'); ax2.set_ylabel('Max')
plt.title('')
ax2.set_xlabel('')
ax3 = plt.subplot(513, sharex=ax1)
stats[2,:].plot(ax=ax3, marker='.'); ax3.set_ylabel('Mean')
plt.title('')
ax3.set_xlabel('')
ax4 = plt.subplot(514, sharex=ax1)
stats[3,:].plot(ax=ax4, marker='.'); ax4.set_ylabel('Std')
plt.title('')
ax4.set_xlabel('')
ax5 = plt.subplot(515, sharex=ax1)
stats[4,:].plot(ax=ax5, marker='.'); ax5.set_ylabel('RMS')
plt.title('')
if save is not None:
plt.savefig(save)
plt.close()
return
def polarcomparison(field1, field2, grd, proj='SP', circle=True,
clim = None, colormap = None, nbins = None, save=None,
title1='', title2='', title3='A - B', suptitle='',
ignore = None, debug=False, show=False, extend = None,
sigma = 2.0, logscale = False, landcolor=[.5,.5,.5], interactive=False,
dlim=None, dcolormap=None, dextend=None, addplabel=True):
"""Renders n-panel plot of two scalar field1(x,y) and field2(x,y) using polar projections.
Parameters
----------
field1 : 2D numpy array
Scalar 2D array to be plotted and compared to field2.
field2 : 2D numpy array
Scalar 2D array to be plotted and compared to field1.
grd : object with MOM6 grid data
This is the output of module MOM6grid.
title1 : str, optional
The title to place at the top of panel 1. Default ''.
title2 : str, optional
The title to place at the top of panel 2. Default ''.
title3 : str, optional
The title to place at the top of panel 3. Default 'A-B'.
addplabel : boolean, optional
Adds a 'A:' or 'B:' to the title1 and title2. Default True.
suptitle : str, optional
The super-title to place at the top of the figure. Default ''.
proj : str, optional
Type of projection: 'SP' = SouthPolarStereo (default) or 'NP' = NorthPolarStereo
circle : boolean, optional
If true, compute a circle in axes coordinates, which we can use as a boundary
for the map
clim : tuple of (min,max), optional
color range OR a list of contour levels. Default None
colormap : str, optional
The name of the colormap to use. Default None (choose using chooseColorMap)
nbins : integer, optional
The number of colors levels (used if clim is missing or only specifies the color range).
ignore : float, optional
A value to use as no-data (NaN). Default None
save : str, optional
Name of file to save figure in. Default None (do not save figure)
debug : boolean, optional
If true, report stuff for debugging. Default False
show : boolean, optional
If true, causes the figure to appear on screen. Used for testing. Default False.
extend : str, optional
Can be one of 'both', 'neither', 'max', 'min'. Default None
logscale : boolean, optional
If true, use a logarithmic coloring scheme. Default False
sigma : float, optional
Range for difference plot autocolor levels. Default is to span a 2 sigma range
landcolor : RGB tuple, optional
An rgb tuple to use for the color of land (no data). Default [.5,.5,.5].
dlim : tuple (min,max)
A tuple of (min,max) color range OR a list of contour levels for the difference plot. Default None.
dcolormap : str, optional
The name of the colormap to use for the difference plot. Default None.
dextend : str, optional
For the difference colorbar. Can be one of 'both', 'neither', 'max', 'min'. Default None.
interactive : boolean, optional
If true, adds interactive features such as zoom, close and cursor. Default False.
Returns
-------
"""
if (field1.shape)!=(field2.shape): raise Exception('field1 and field2 must be the same shape')
xCoord, yCoord = createXYcoords(field1, grd.geolon, grd.geolat)
if proj == 'SP':
proj = ccrs.SouthPolarStereo()
extent = [-180, 180, -90, -50]
else: # NP
proj = ccrs.NorthPolarStereo()
extent = [-180, 180, 50, 90]
# Establish ranges for sectors
#lonRange=(grd.geolon.min(), grd.geolon.max()); latRange=(extent[2], extent[3])
lonRange=(-360, 360.); latRange=(extent[2], extent[3])
# Diagnose statistics
if ignore is not None:
maskedField1 = numpy.ma.masked_array(field1, mask=[field1==ignore])
maskedField2 = numpy.ma.masked_array(field2, mask=[field2==ignore])
else:
maskedField1 = regionalMasking(field1, yCoord, xCoord, latRange, lonRange)
maskedField2 = regionalMasking(field2, yCoord, xCoord, latRange, lonRange)
s1Min, s1Max, s1Mean, s1Std, s1RMS = myStats(maskedField1, grd.area_t, debug=debug)
s2Min, s2Max, s2Mean, s2Std, s2RMS = myStats(maskedField2, grd.area_t, debug=debug)
dMin, dMax, dMean, dStd, dRMS = myStats(maskedField1 - maskedField2, grd.area_t, debug=debug)
if s1Mean is not None: dRxy = corr(maskedField1 - s1Mean, maskedField2 - s2Mean, grd.area_t)
else: dRxy = None
s12Min = min(s1Min, s2Min); s12Max = max(s1Max, s2Max)
xLims = boundaryStats(xCoord); yLims = boundaryStats(yCoord)
if debug:
print('s1: min, max, mean =', s1Min, s1Max, s1Mean)
print('s2: min, max, mean =', s2Min, s2Max, s2Mean)
print('s12: min, max =', s12Min, s12Max)
# Choose colormap
if nbins is None and (clim is None or len(clim)==2): cBins=35
else: cBins=nbins
if nbins is None and (dlim is None or len(dlim)==2): nbins=35
if colormap is None: colormap = chooseColorMap(s12Min, s12Max)
cmap, norm, extend = chooseColorLevels(s12Min, s12Max, colormap, clim=clim, nbins=cBins, extend=extend)
if addplabel: preTitleA = 'A: '; preTitleB = 'B: '
else: preTitleA = ''; preTitleB = ''
# hard-coded number of panels, aspect and resolution
npanels=3
#aspect=None
#resolution=None
#axis = None
# fig=setFigureSize(aspect, resolution, npanels=npanels, debug=debug)
fig = plt.figure(figsize=[8, 24])
# panel 1
ax = plt.subplot(npanels,1,1, projection=proj)
ax.set_extent(extent, ccrs.PlateCarree())
ax.add_feature(cartopy.feature.LAND)
ax.gridlines()
if circle:
circle_value = get_circle()
ax.set_boundary(circle_value, transform=ax.transAxes)
cs = ax.pcolormesh(xCoord,yCoord,maskedField1,transform=ccrs.PlateCarree(),cmap=cmap,
shading='flat', norm=norm)
def add_features(fig, ax, cs, extend, landcolor):
""" Adds some features to the map """
fig.colorbar(cs,fraction=.08, pad=0.02, extend=extend)
# Add Land
ax.add_feature( cartopy.feature.LAND, zorder=1, edgecolor='none', facecolor=landcolor)
# add Ocean
ax.add_feature(cartopy.feature.OCEAN)
# Add coastline
ax.coastlines(color='black')
# Add lat lon rings
ax.gridlines(alpha='0.1',color='black')
return
add_features(fig, ax, cs, extend, landcolor)
if interactive: addStatusBar(xCoord, yCoord, maskedField1)
ax.set_xticklabels([''])
annotateStats(ax, s1Min, s1Max, s1Mean, s1Std, s1RMS, webversion=False)
if len(title1)>0: ax.set_title(preTitleA+title1)
# panel 2
ax = plt.subplot(npanels,1,2, projection=proj)
ax.set_extent(extent, ccrs.PlateCarree())
ax.add_feature(cartopy.feature.LAND)
ax.gridlines()
if circle:
circle_value = get_circle()
ax.set_boundary(circle_value, transform=ax.transAxes)
cs = ax.pcolormesh(xCoord,yCoord,maskedField2,transform=ccrs.PlateCarree(),cmap=cmap,
shading='flat', norm=norm)
add_features(fig, ax, cs, extend, landcolor)
if interactive: addStatusBar(xCoord, yCoord, maskedField2)
if npanels>2: ax.set_xticklabels([''])
annotateStats(ax, s2Min, s2Max, s2Mean, s2Std, s2RMS, webversion=False)
if len(title2)>0: ax.set_title(preTitleB+title2)
# panel 3
ax = plt.subplot(npanels,1,npanels, projection=proj)
ax.set_extent(extent, ccrs.PlateCarree())
ax.add_feature(cartopy.feature.LAND)
ax.gridlines()
if circle:
circle_value = get_circle()
ax.set_boundary(circle_value, transform=ax.transAxes)
if dcolormap is None: dcolormap = chooseColorMap(dMin, dMax)
if dlim is None and dStd>0:
cmap, norm, dextend = chooseColorLevels(dMean-sigma*dStd, dMean+sigma*dStd, dcolormap, clim=dlim, nbins=nbins, \
extend=dextend, autocenter=True)
else:
cmap, norm, dextend = chooseColorLevels(dMin, dMax, dcolormap, clim=dlim, nbins=nbins, extend=dextend, autocenter=True)
cs = ax.pcolormesh(xCoord,yCoord,maskedField1 - maskedField2,transform=ccrs.PlateCarree(),cmap=cmap, shading='flat', norm=norm)
if interactive: addStatusBar(xCoord, yCoord, maskedField1 - maskedField2)
if dextend is None: dextend = extend
add_features(fig, ax, cs, dextend, landcolor)
annotateStats(ax, dMin, dMax, dMean, dStd, dRMS, webversion=False)
ID can be in ASCII form (e.g. "abcd") or in
colon-separated HEX form (e.g. 1:2:ab:cd). HEX representation is
used only when the sub-option value contains unprintable
characters. If a remote ID sub-option value is in ASCII form, it
is always enclosed in quotes to prevent ambiguous values (e.g.
"10:20" - ASCII 5-byte string; 10:20 - HEX 2-byte value).NIOS
does not support the convertion between HEX and ASCII formats.
Searches are performed using the exact same format and value as
the sub-option is represented.Query examples assume the
following leases are stored in the database:Expected results:
served_by: The IP address of the server that sends an active lease
to a client.
server_host_name: The host name of the Grid member or Microsoft DHCP
server that issues the lease.
starts: The start time of a DHCP Lease object. This field specifies
the time when the lease starts.
tsfp: The TSFP (Time Sent From Partner) value of a DHCP Lease
object. This field specifies the time that the current lease
state ends, from the point of view of a remote DHCP failover
peer. This field is for IPv4 leases only.
tstp: The TSTP (Time Sent To Partner) value of a DHCP Lease object.
This field specifies the time that the current lease state ends,
from the point of view of a local DHCP failover peer. This field
is for IPv4 leases only.
uid: The UID (User ID) value of a DHCP Lease object. This field
specifies the client identifier that the DHCP client sends the
Infoblox appliance (in DHCP option 61) when it acquires the
lease. Not all DHCP clients send a UID. This field is for IPv4
leases only.
username: The user name that the server has associated with a DHCP
Lease object.
variable: The variable value of a DHCP Lease object. This field
keeps all variables related to the DDNS update of the DHCP
lease. The variables can be one of the following:
ddns-text: The ddns-text variable is used to record the value of
the client's TXT identification record when the interim DDNS
update style has been used to update the DNS service for a
particular lease.
ddns-fwd-name: When a DDNS update was successfully completed, the
ddns-fwd-name variable records the value of the name used when
the client's A record was updated. The server may have used this
name when it updated the client's PTR record.
ddns-client-fqdn: If the server is configured to use the interim
DDNS update style and is also configured to allow clients to
update their own FQDNs, the ddns-client-fqdn variable records the
name that the client used when it updated its own FQDN. This is
also the name that the server used to update the client's PTR
record.
ddns-rev-name: If the server successfully updates the client's
PTR record, this variable will record the name that the DHCP
server used for the PTR record. The name to which the PTR record
points will be either the ddns-fwd-name or the ddns-client-fqdn.
"""
_infoblox_type = 'lease'
_fields = ['address', 'billing_class', 'binding_state', 'client_hostname',
'cltt', 'discovered_data', 'ends', 'fingerprint', 'hardware',
'ipv6_duid', 'ipv6_iaid', 'ipv6_preferred_lifetime',
'ipv6_prefix_bits', 'is_invalid_mac', 'ms_ad_user_data',
'network', 'network_view', 'never_ends', 'never_starts',
'next_binding_state', 'on_commit', 'on_expiry', 'on_release',
'option', 'protocol', 'remote_id', 'served_by',
'server_host_name', 'starts', 'tsfp', 'tstp', 'uid', 'username',
'variable']
_search_for_update_fields = ['address', 'network_view']
_updateable_search_fields = []
_all_searchable_fields = ['address', 'client_hostname', 'fingerprint',
'hardware', 'ipv6_duid', 'ipv6_prefix_bits',
'network', 'network_view', 'protocol',
'remote_id', 'username']
_return_fields = ['address', 'network_view']
_remap = {}
_shadow_fields = ['_ref']
class LicenseGridwide(InfobloxObject):
""" LicenseGridwide: Gridwide license object.
Corresponds to WAPI object 'license:gridwide'
This object represents the Grid-wide license.
Fields:
expiration_status: The license expiration status.
expiry_date: The expiration timestamp of the license.
key: The license string.
limit: The license limit value.
limit_context: The license limit context.
type: The license type.
"""
_infoblox_type = 'license:gridwide'
_fields = ['expiration_status', 'expiry_date', 'key', 'limit',
'limit_context', 'type']
_search_for_update_fields = ['type']
_updateable_search_fields = []
_all_searchable_fields = ['key', 'limit', 'type']
_return_fields = ['type']
_remap = {}
_shadow_fields = ['_ref']
class LocaluserAuthservice(InfobloxObject):
""" LocaluserAuthservice: Local user authentication service object.
Corresponds to WAPI object 'localuser:authservice'
The object represents a local authentication service for
authenticating users against the local database.
Note that read by reference is not supported.
Fields:
comment: The local user authentication service comment.
disabled: Flag that indicates whether the local user authentication
service is enabled or not.
name: The name of the local user authentication service.
"""
_infoblox_type = 'localuser:authservice'
_fields = ['comment', 'disabled', 'name']
_search_for_update_fields = []
_updateable_search_fields = []
_all_searchable_fields = []
_return_fields = ['comment', 'disabled', 'name']
_remap = {}
_shadow_fields = ['_ref']
class Macfilteraddress(InfobloxObject):
""" Macfilteraddress: MAC Filter Address object.
Corresponds to WAPI object 'macfilteraddress'
MAC filter address is part of the MAC filter.
Fields:
authentication_time: The absolute UNIX time (in seconds) since the
address was last authenticated.
comment: Comment for the MAC filter address; maximum 256 characters.
expiration_time: The absolute UNIX time (in seconds) until the
address expires.
extattrs: Extensible attributes associated with the object.For valid
values for extensible attributes, see the following information.
filter: Name of the MAC filter to which this address belongs.
fingerprint: DHCP fingerprint for the address.
guest_custom_field1: Guest custom field 1.
guest_custom_field2: Guest custom field 2.
guest_custom_field3: Guest custom field 3.
guest_custom_field4: Guest custom field 4.
guest_email: Guest e-mail.
guest_first_name: Guest first name.
guest_last_name: Guest last name.
guest_middle_name: Guest middle name.
guest_phone: Guest phone number.
is_registered_user: Determines if the user has been authenticated or
not.
mac: MAC Address.
never_expires: Determines if MAC address expiration is enabled or
disabled.
reserved_for_infoblox: Reserved for future use.
username: Username for authenticated DHCP purposes.
"""
_infoblox_type = 'macfilteraddress'
_fields = ['authentication_time', 'comment', 'expiration_time', 'extattrs',
'filter', 'fingerprint', 'guest_custom_field1',
'guest_custom_field2', 'guest_custom_field3',
'guest_custom_field4', 'guest_email', 'guest_first_name',
'guest_last_name', 'guest_middle_name', 'guest_phone',
'is_registered_user', 'mac', 'never_expires',
'reserved_for_infoblox', 'username']
_search_for_update_fields = ['authentication_time',
'expiration_time', 'filter',
'guest_custom_field1', 'guest_custom_field2',
'guest_custom_field3', 'guest_custom_field4',
'guest_email', 'guest_first_name',
'guest_last_name', 'guest_middle_name',
'guest_phone', 'mac', 'never_expires',
'reserved_for_infoblox', 'username']
_updateable_search_fields = ['authentication_time', 'comment',
'expiration_time', 'filter',
'guest_custom_field1', 'guest_custom_field2',
'guest_custom_field3', 'guest_custom_field4',
'guest_email', 'guest_first_name',
'guest_last_name', 'guest_middle_name',
'guest_phone', 'mac', 'never_expires',
'reserved_for_infoblox', 'username']
_all_searchable_fields = ['authentication_time', 'comment',
'expiration_time', 'filter', 'fingerprint',
'guest_custom_field1', 'guest_custom_field2',
'guest_custom_field3', 'guest_custom_field4',
'guest_email', 'guest_first_name',
'guest_last_name', 'guest_middle_name',
'guest_phone', 'mac', 'never_expires',
'reserved_for_infoblox', 'username']
_return_fields = ['authentication_time', 'comment', 'expiration_time',
'extattrs', 'filter', 'guest_custom_field1',
'guest_custom_field2', 'guest_custom_field3',
'guest_custom_field4', 'guest_email', 'guest_first_name',
'guest_last_name', 'guest_middle_name', 'guest_phone',
'is_registered_user', 'mac', 'never_expires',
'reserved_for_infoblox', 'username']
_remap = {}
_shadow_fields = ['_ref']
class Mastergrid(InfobloxObject):
""" Mastergrid: Master Grid object.
Corresponds to WAPI object 'mastergrid'
This object represents the Master Grid. The Master Grid object is
automatically generated when a Grid successfully joins the Master
Grid.
Fields:
address: The domain name or IP address for the Master Grid.
connection_disabled: Determines if the sub-grid is currently
disabled.
connection_timestamp: The timestamp that indicates when the
connection to the Master Grid was established.
detached: The detached flag for the Master Grid.
enable: Determines if the Master Grid is enabled.
joined: The flag shows if the Grid has joined the Master Grid.
last_event: The Master Grid's last event.
last_event_details: The details of the Master Grid's last event.
last_sync_timestamp: The timestamp or the last synchronization
operation with the Master Grid.
port: The Master Grid port to which the Grid connects.
status: The Master Grid's status.
use_mgmt_port: The flag shows if the MGMT port was used to join the
Grid.
"""
_infoblox_type = 'mastergrid'
_fields = ['address', 'connection_disabled', 'connection_timestamp',
'detached', 'enable', 'joined', 'last_event',
'last_event_details', 'last_sync_timestamp', 'port', 'status',
'use_mgmt_port']
_search_for_update_fields = ['address', 'port']
_updateable_search_fields = ['address', 'port']
_all_searchable_fields = ['address', 'port']
_return_fields = ['address', 'enable', 'port']
_remap = {}
_shadow_fields = ['_ref']
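# --- Hedged illustration (not part of the original module) ---
# Each WAPI wrapper class above is driven by its class-level metadata:
# _fields lists the serialized attributes, _all_searchable_fields the fields a
# search may filter on, and _return_fields what is requested back by default.
# The snippet below only inspects that metadata; it does not contact an
# Infoblox appliance.
if __name__ == "__main__":
    for cls in (Macfilteraddress, Mastergrid):
        print(cls._infoblox_type,
              "| searchable:", cls._all_searchable_fields,
              "| returned by default:", cls._return_fields)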
class Member(InfobloxObject):
""" Member: Member object.
Corresponds to WAPI object 'member'
This object represents the Infoblox Grid Member.
Fields:
active_position: The active server of a Grid member.
additional_ip_list: The additional IP list of a Grid member. This
list contains additional interface information that can be used
at the member level.Note that interface structure(s) with
interface type set to 'MGMT' are not supported.
automated_traffic_capture_setting: Member level settings for
automated traffic capture.
bgp_as: The BGP configuration for this Grid member.
# evap/staff/views.py
import csv
import itertools
from collections import OrderedDict, defaultdict, namedtuple
from dataclasses import dataclass
from datetime import date, datetime
from typing import Any, Container, Dict
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.db import IntegrityError, transaction
from django.db.models import BooleanField, Case, Count, ExpressionWrapper, IntegerField, Prefetch, Q, Sum, When
from django.dispatch import receiver
from django.forms import formset_factory
from django.forms.models import inlineformset_factory, modelformset_factory
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import get_language
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy, ngettext
from django.views.decorators.http import require_POST
from xlrd import open_workbook
from xlutils.copy import copy as copy_workbook
from evap.contributor.views import export_contributor_results
from evap.evaluation.auth import manager_required, reviewer_required, staff_permission_required
from evap.evaluation.models import (
Contribution,
Course,
CourseType,
Degree,
EmailTemplate,
Evaluation,
FaqQuestion,
FaqSection,
Question,
Questionnaire,
RatingAnswerCounter,
Semester,
TextAnswer,
UserProfile,
)
from evap.evaluation.tools import FileResponse, get_parameter_from_url_or_session, sort_formset
from evap.grades.models import GradeDocument
from evap.results.exporters import ResultsExporter
from evap.results.tools import TextResult, calculate_average_distribution, distribution_to_grade
from evap.results.views import update_template_cache_of_published_evaluations_in_course
from evap.rewards.models import RewardPointGranting
from evap.rewards.tools import can_reward_points_be_used_by, is_semester_activated
from evap.staff import staff_mode
from evap.staff.forms import (
AtLeastOneFormSet,
ContributionCopyForm,
ContributionCopyFormSet,
ContributionForm,
ContributionFormSet,
CourseCopyForm,
CourseForm,
CourseTypeForm,
CourseTypeMergeSelectionForm,
DegreeForm,
EmailTemplateForm,
EvaluationCopyForm,
EvaluationEmailForm,
EvaluationForm,
EvaluationParticipantCopyForm,
ExportSheetForm,
FaqQuestionForm,
FaqSectionForm,
ImportForm,
ModelWithImportNamesFormSet,
QuestionForm,
QuestionnaireForm,
QuestionnairesAssignForm,
RemindResponsibleForm,
SemesterForm,
SingleResultForm,
TextAnswerForm,
TextAnswerWarningForm,
UserBulkUpdateForm,
UserForm,
UserImportForm,
UserMergeSelectionForm,
)
from evap.staff.importers import EnrollmentImporter, PersonImporter, UserImporter, sorted_messages
from evap.staff.tools import (
ImportType,
bulk_update_users,
delete_import_file,
delete_navbar_cache_for_users,
find_unreviewed_evaluations,
forward_messages,
get_import_file_content_or_raise,
import_file_exists,
merge_users,
save_import_file,
)
from evap.student.forms import QuestionnaireVotingForm
from evap.student.models import TextAnswerWarning
from evap.student.views import get_valid_form_groups_or_render_vote_page
@manager_required
def index(request):
template_data = dict(
semesters=Semester.objects.all(),
templates=EmailTemplate.objects.all().order_by("id"),
sections=FaqSection.objects.all(),
disable_breadcrumb_manager=True,
)
return render(request, "staff_index.html", template_data)
def annotate_evaluations_with_grade_document_counts(evaluations):
return evaluations.annotate(
midterm_grade_documents_count=Count(
"course__grade_documents",
filter=Q(course__grade_documents__type=GradeDocument.Type.MIDTERM_GRADES),
distinct=True,
),
final_grade_documents_count=Count(
"course__grade_documents",
filter=Q(course__grade_documents__type=GradeDocument.Type.FINAL_GRADES),
distinct=True,
),
)
def get_evaluations_with_prefetched_data(semester):
evaluations = (
semester.evaluations.select_related("course__type")
.prefetch_related(
Prefetch(
"contributions", queryset=Contribution.objects.filter(contributor=None), to_attr="general_contribution"
),
"course__degrees",
"course__responsibles",
)
.annotate(
num_contributors=Count("contributions", filter=~Q(contributions__contributor=None), distinct=True),
num_textanswers=Count(
"contributions__textanswer_set",
filter=Q(contributions__evaluation__can_publish_text_results=True),
distinct=True,
),
num_reviewed_textanswers=Count(
"contributions__textanswer_set",
filter=~Q(contributions__textanswer_set__state=TextAnswer.State.NOT_REVIEWED),
distinct=True,
),
num_course_evaluations=Count("course__evaluations", distinct=True),
)
).order_by("pk")
evaluations = annotate_evaluations_with_grade_document_counts(evaluations)
evaluations = Evaluation.annotate_with_participant_and_voter_counts(evaluations)
return evaluations
@reviewer_required
def semester_view(request, semester_id):
semester = get_object_or_404(Semester, id=semester_id)
if semester.results_are_archived and not request.user.is_manager:
raise PermissionDenied
rewards_active = is_semester_activated(semester)
evaluations = get_evaluations_with_prefetched_data(semester)
evaluations = sorted(evaluations, key=lambda cr: cr.full_name)
courses = Course.objects.filter(semester=semester)
# semester statistics (per degree)
@dataclass
class Stats:
# pylint: disable=too-many-instance-attributes
num_enrollments_in_evaluation: int = 0
num_votes: int = 0
num_evaluations_evaluated: int = 0
num_evaluations: int = 0
num_textanswers: int = 0
num_textanswers_reviewed: int = 0
first_start: datetime = datetime(9999, 1, 1)
last_end: date = date(2000, 1, 1)
degree_stats = defaultdict(Stats)
total_stats = Stats()
for evaluation in evaluations:
if evaluation.is_single_result:
continue
degrees = evaluation.course.degrees.all()
stats_objects = [degree_stats[degree] for degree in degrees]
stats_objects += [total_stats]
for stats in stats_objects:
if evaluation.state >= Evaluation.State.IN_EVALUATION:
stats.num_enrollments_in_evaluation += evaluation.num_participants
stats.num_votes += evaluation.num_voters
stats.num_textanswers += evaluation.num_textanswers
stats.num_textanswers_reviewed += evaluation.num_reviewed_textanswers
if evaluation.state >= Evaluation.State.EVALUATED:
stats.num_evaluations_evaluated += 1
if evaluation.state != Evaluation.State.NEW:
stats.num_evaluations += 1
stats.first_start = min(stats.first_start, evaluation.vote_start_datetime)
stats.last_end = max(stats.last_end, evaluation.vote_end_date)
degree_stats = OrderedDict(sorted(degree_stats.items(), key=lambda x: x[0].order))
degree_stats["total"] = total_stats
template_data = dict(
semester=semester,
evaluations=evaluations,
Evaluation=Evaluation,
disable_breadcrumb_semester=True,
rewards_active=rewards_active,
num_evaluations=len(evaluations),
degree_stats=degree_stats,
courses=courses,
approval_states=[
Evaluation.State.NEW,
Evaluation.State.PREPARED,
Evaluation.State.EDITOR_APPROVED,
Evaluation.State.APPROVED,
],
)
return render(request, "staff_semester_view.html", template_data)
class EvaluationOperation:
email_template_name = None
email_template_contributor_name = None
email_template_participant_name = None
confirmation_message = None
@staticmethod
def applicable_to(evaluation):
raise NotImplementedError
@staticmethod
def warning_for_inapplicables(amount):
raise NotImplementedError
@staticmethod
def apply(
request, evaluations, email_template=None, email_template_contributor=None, email_template_participant=None
):
raise NotImplementedError
class RevertToNewOperation(EvaluationOperation):
confirmation_message = gettext_lazy("Do you want to revert the following evaluations to preparation?")
@staticmethod
def applicable_to(evaluation):
return Evaluation.State.PREPARED <= evaluation.state <= Evaluation.State.APPROVED
@staticmethod
def warning_for_inapplicables(amount):
return ngettext(
"{} evaluation can not be reverted, because it already started. It was removed from the selection.",
"{} evaluations can not be reverted, because they already started. They were removed from the selection.",
amount,
).format(amount)
@staticmethod
def apply(
request, evaluations, email_template=None, email_template_contributor=None, email_template_participant=None
):
assert email_template_contributor is None
assert email_template_participant is None
for evaluation in evaluations:
evaluation.revert_to_new()
evaluation.save()
messages.success(
request,
ngettext(
"Successfully reverted {} evaluation to in preparation.",
"Successfully reverted {} evaluations to in preparation.",
len(evaluations),
).format(len(evaluations)),
)
class ReadyForEditorsOperation(EvaluationOperation):
email_template_name = EmailTemplate.EDITOR_REVIEW_NOTICE
confirmation_message = gettext_lazy("Do you want to send the following evaluations to editor review?")
@staticmethod
def applicable_to(evaluation):
return evaluation.state in [Evaluation.State.NEW, Evaluation.State.EDITOR_APPROVED]
@staticmethod
def warning_for_inapplicables(amount):
return ngettext(
"{} evaluation can not be reverted, because it already started. It was removed from the selection.",
"{} evaluations can not be reverted, because they already started. They were removed from the selection.",
amount,
).format(amount)
@staticmethod
def apply(
request, evaluations, email_template=None, email_template_contributor=None, email_template_participant=None
):
assert email_template_contributor is None
assert email_template_participant is None
for evaluation in evaluations:
evaluation.ready_for_editors()
evaluation.save()
messages.success(
request,
ngettext(
"Successfully enabled {} evaluation for editor review.",
"Successfully enabled {} evaluations for editor review.",
len(evaluations),
).format(len(evaluations)),
)
if email_template:
evaluations_by_responsible = {}
for evaluation in evaluations:
for responsible in evaluation.course.responsibles.all():
evaluations_by_responsible.setdefault(responsible, []).append(evaluation)
for responsible, responsible_evaluations in evaluations_by_responsible.items():
body_params = {"user": responsible, "evaluations": responsible_evaluations}
editors = UserProfile.objects.filter(
contributions__evaluation__in=responsible_evaluations,
contributions__role=Contribution.Role.EDITOR,
).exclude(pk=responsible.pk)
email_template.send_to_user(
responsible,
subject_params={},
body_params=body_params,
use_cc=True,
additional_cc_users=editors,
request=request,
)
class BeginEvaluationOperation(EvaluationOperation):
email_template_name = EmailTemplate.EVALUATION_STARTED
confirmation_message = gettext_lazy("Do you want to immediately start the following evaluations?")
@staticmethod
def applicable_to(evaluation):
return evaluation.state == Evaluation.State.APPROVED and evaluation.vote_end_date >= date.today()
@staticmethod
def warning_for_inapplicables(amount):
return ngettext(
"{} evaluation can not be started, because it was not approved, was already evaluated or its evaluation end date lies in the past. It was removed from the selection.",
"{} evaluations can not be started, because they were not approved, were already evaluated or their evaluation end dates lie in the past. They were removed from the selection.",
amount,
).format(amount)
@staticmethod
def apply(
request, evaluations, email_template=None, email_template_contributor=None, email_template_participant=None
):
assert email_template_contributor is None
assert email_template_participant is None
for evaluation in evaluations:
evaluation.vote_start_datetime = datetime.now()
evaluation.begin_evaluation()
evaluation.save()
messages.success(
request,
ngettext(
"Successfully started {} evaluation.", "Successfully started {} evaluations.", len(evaluations)
).format(len(evaluations)),
)
if email_template:
email_template.send_to_users_in_evaluations(
evaluations, [EmailTemplate.Recipients.ALL_PARTICIPANTS], use_cc=False, request=request
)
class UnpublishOperation(EvaluationOperation):
confirmation_message = gettext_lazy("Do you want to unpublish the following evaluations?")
@staticmethod
def applicable_to(evaluation):
return evaluation.state == Evaluation.State.PUBLISHED
@staticmethod
def warning_for_inapplicables(amount):
return ngettext(
"{} evaluation can not be unpublished, because it's results have not been published. It was removed from the selection.",
"{} evaluations can not be unpublished because their results have not been published. They were removed from the selection.",
amount,
).format(amount)
@staticmethod
def apply(
request, evaluations, email_template=None, email_template_contributor=None, email_template_participant=None
):
assert email_template_contributor is None
assert email_template_participant is None
for evaluation in evaluations:
evaluation.unpublish()
evaluation.save()
messages.success(
request,
ngettext(
"Successfully unpublished {} evaluation.", "Successfully unpublished {} evaluations.", len(evaluations)
).format(len(evaluations)),
)
class PublishOperation(EvaluationOperation):
email_template_contributor_name = EmailTemplate.PUBLISHING_NOTICE_CONTRIBUTOR
email_template_participant_name = EmailTemplate.PUBLISHING_NOTICE_PARTICIPANT
confirmation_message = gettext_lazy("Do you want to publish the following evaluations?")
@staticmethod
def applicable_to(evaluation):
return evaluation.state == Evaluation.State.REVIEWED
@staticmethod
def warning_for_inapplicables(amount):
return ngettext(
"{} evaluation can not be published, because it's not finished or not all of its text answers have been reviewed. It was removed from the selection.",
"{} evaluations can not be published, because they are not finished or not all of their text answers have been reviewed. They were removed from the selection.",
amount,
).format(amount)
@staticmethod
def apply(
request, evaluations, email_template=None, email_template_contributor=None, email_template_participant=None
):
assert email_template is None
for evaluation in evaluations:
evaluation.publish()
evaluation.save()
messages.success(
request,
ngettext(
"Successfully published {} evaluation.", "Successfully published {} evaluations.", len(evaluations)
).format(len(evaluations)),
)
if email_template_contributor:
EmailTemplate.send_contributor_publish_notifications(evaluations, template=email_template_contributor)
if email_template_participant:
EmailTemplate.send_participant_publish_notifications(evaluations, template=email_template_participant)
EVALUATION_OPERATIONS = {
Evaluation.State.NEW: RevertToNewOperation,
Evaluation.State.PREPARED: ReadyForEditorsOperation,
Evaluation.State.IN_EVALUATION: BeginEvaluationOperation,
Evaluation.State.REVIEWED: UnpublishOperation,
Evaluation.State.PUBLISHED: PublishOperation,
}
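# --- Hedged illustration (not part of evap) ---
# EVALUATION_OPERATIONS is a strategy lookup: the requested target state picks
# an operation class, and applicable_to() then filters out evaluations the
# operation cannot handle (mirroring semester_evaluation_operation below).
# The stand-in names here are hypothetical and exist only for this sketch.
if __name__ == "__main__":
    from types import SimpleNamespace

    class DummyPublishOperation:
        @staticmethod
        def applicable_to(evaluation):
            return evaluation.state == "reviewed"

    dummy_operations = {"published": DummyPublishOperation}
    dummy_evaluations = [SimpleNamespace(name="A", state="reviewed"),
                         SimpleNamespace(name="B", state="new")]
    operation = dummy_operations["published"]
    applicable = [e.name for e in dummy_evaluations if operation.applicable_to(e)]
    print(applicable)  # ['A'] -- only reviewed evaluations can be published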
@manager_required
def semester_evaluation_operation(request, semester_id):
semester = get_object_or_404(Semester, id=semester_id)
if semester.participations_are_archived:
raise PermissionDenied
raw_target_state = request.GET.get("target_state")
try:
target_state = int(raw_target_state)
except ValueError as err:
raise SuspiciousOperation("Unparseable target state: " + str(raw_target_state)) from err
if target_state not in EVALUATION_OPERATIONS.keys():
raise SuspiciousOperation("Unknown target state: " + str(target_state))
evaluation_ids = (request.GET if request.method == "GET" else request.POST).getlist("evaluation")
evaluations = list(
annotate_evaluations_with_grade_document_counts(Evaluation.objects.filter(id__in=evaluation_ids))
)
evaluations.sort(key=lambda evaluation: evaluation.full_name)
operation = EVALUATION_OPERATIONS[target_state]
if request.method == "POST":
email_template = None
email_template_contributor = None
email_template_participant = None
if request.POST.get("send_email") == "on":
email_template = EmailTemplate(
subject=request.POST["email_subject"],
plain_content=request.POST["email_plain"],
html_content=request.POST["email_html"],
)
if request.POST.get("send_email_contributor") == "on":
email_template_contributor = EmailTemplate(
subject=request.POST["email_subject_contributor"],
plain_content=request.POST["email_plain_contributor"],
html_content=request.POST["email_html_contributor"],
)
if request.POST.get("send_email_participant") == "on":
email_template_participant = EmailTemplate(
subject=request.POST["email_subject_participant"],
plain_content=request.POST["email_plain_participant"],
html_content=request.POST["email_html_participant"],
)
operation.apply(request, evaluations, email_template, email_template_contributor, email_template_participant)
return redirect("staff:semester_view", semester_id)
applicable_evaluations = list(filter(operation.applicable_to, evaluations))
difference = len(evaluations) - len(applicable_evaluations)
if difference:
messages.warning(request, operation.warning_for_inapplicables(difference))
if not applicable_evaluations: # no evaluations where applicable or none were selected
messages.warning(request, _("Please select at least one evaluation."))
return redirect("staff:semester_view", semester_id)
email_template = None
email_template_contributor = None
email_template_participant = None
if operation.email_template_name:
email_template = EmailTemplate.objects.get(name=operation.email_template_name)
if operation.email_template_contributor_name:
email_template_contributor = EmailTemplate.objects.get(name=operation.email_template_contributor_name)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Core objects and functionality for py2neo.
authenticate - register authentication details for a host:port
rewrite - register a rewrite hook for a scheme://host:port
Resource - local representation of a remote web resource
ResourceTemplate - template for Resource generation based on a pattern
Service - base class for objects that can be bound to remote resources
ServiceRoot - root resource for a Neo4j server instance
Graph - main graph resource class to bind to a remote graph database service
Schema - schema index and constraint management resource
PropertySet - dict subclass that equates None and missing values for storing properties
LabelSet - set subclass for storing labels
PropertyContainer - base class for Node and Relationship classes
Node - local graph node object that can be bound to a remote Neo4j node
NodePointer - reference to a node object defined elsewhere
Rel - forward relationship without start and end node information
Rev - reverse relationship without start and end node information
Path - local graph path object that represents a remote Neo4j path
Relationship - local graph relationship object that can be bound to a remote Neo4j relationship
"""
from __future__ import division, unicode_literals
import base64
import re
from warnings import warn
from weakref import WeakValueDictionary
from py2neo import __version__
from py2neo.error.client import BindError, JoinError
from py2neo.error.server import GraphError
from py2neo.packages.httpstream import http, ClientError, ServerError, \
Resource as _Resource, ResourceTemplate as _ResourceTemplate
from py2neo.packages.httpstream.http import JSONResponse
from py2neo.packages.httpstream.numbers import NOT_FOUND
from py2neo.packages.httpstream.packages.urimagic import URI
from py2neo.types import cast_property
from py2neo.util import is_collection, is_integer, round_robin, ustr, version_tuple, raise_from
__all__ = ["authenticate", "rewrite",
"Resource", "ResourceTemplate", "Service",
"ServiceRoot", "Graph", "Schema", "PropertySet", "LabelSet", "PropertyContainer",
"Node", "NodePointer", "Rel", "Rev", "Path", "Relationship",
"ServerPlugin", "UnmanagedExtension"]
DEFAULT_SCHEME = "http"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 7474
DEFAULT_HOST_PORT = "{0}:{1}".format(DEFAULT_HOST, DEFAULT_PORT)
PRODUCT = ("py2neo", __version__)
NON_ALPHA_NUM = re.compile("[^0-9A-Za-z_]")
SIMPLE_NAME = re.compile(r"[A-Za-z_][0-9A-Za-z_]*")
http.default_encoding = "UTF-8"
_headers = {
None: [("X-Stream", "true")],
}
_http_rewrites = {}
def _add_header(key, value, host_port=None):
""" Add an HTTP header to be sent with all requests if no `host_port`
is provided or only to those matching the value supplied otherwise.
"""
if host_port in _headers:
_headers[host_port].append((key, value))
else:
_headers[host_port] = [(key, value)]
def _get_headers(host_port):
"""Fetch all HTTP headers relevant to the `host_port` provided.
"""
uri_headers = {}
for n, headers in _headers.items():
if n is None or n == host_port:
uri_headers.update(headers)
return uri_headers
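# --- Hedged usage sketch (not part of the original module) ---
# _add_header and _get_headers keep one global header list (keyed by None)
# plus optional per-host_port lists; _get_headers merges both for a given
# host. The header values below are examples only.
if __name__ == "__main__":
    _add_header("X-Example", "1")                                    # sent to every host
    _add_header("Authorization", "Basic YWJjOmRlZg==", host_port="camelot:7474")
    print(_get_headers("camelot:7474"))    # global headers plus the camelot-specific one
    print(_get_headers("otherhost:7474"))  # global headers only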
def authenticate(host_port, user_name, password):
""" Set HTTP basic authentication values for specified `host_port`. The
code below shows a simple example::
# set up authentication parameters
neo4j.authenticate("camelot:7474", "arthur", "excalibur")
# connect to authenticated graph database
graph = neo4j.Graph("http://camelot:7474/db/data/")
Note: a `host_port` can be either a server name or a server name and port
number but must match exactly that used within the Graph
URI.
:param host_port: the host and optional port requiring authentication
(e.g. "bigserver", "camelot:7474")
:param user_name: the user name to authenticate as
:param password: the password
"""
credentials = (user_name + ":" + password).encode("UTF-8")
value = "Basic " + base64.b64encode(credentials).decode("ASCII")
_add_header("Authorization", value, host_port=host_port)
def rewrite(from_scheme_host_port, to_scheme_host_port):
""" Automatically rewrite all URIs directed to the scheme, host and port
specified in `from_scheme_host_port` to that specified in
`to_scheme_host_port`.
As an example::
# implicitly convert all URIs beginning with <http://localhost:7474>
# to instead use <https://dbserver:9999>
neo4j.rewrite(("http", "localhost", 7474), ("https", "dbserver", 9999))
If `to_scheme_host_port` is :py:const:`None` then any rewrite rule for
`from_scheme_host_port` is removed.
This facility is primarily intended for use by database servers behind
proxies which are unaware of their externally visible network address.
"""
global _http_rewrites
if to_scheme_host_port is None:
try:
del _http_rewrites[from_scheme_host_port]
except KeyError:
pass
else:
_http_rewrites[from_scheme_host_port] = to_scheme_host_port
class Resource(_Resource):
""" Variant of HTTPStream Resource that passes extra headers and product
detail.
"""
error_class = GraphError
def __init__(self, uri, metadata=None):
uri = URI(uri)
scheme_host_port = (uri.scheme, uri.host, uri.port)
if scheme_host_port in _http_rewrites:
scheme_host_port = _http_rewrites[scheme_host_port]
# This is fine - it's all my code anyway...
uri._URI__set_scheme(scheme_host_port[0])
uri._URI__set_authority("{0}:{1}".format(scheme_host_port[1],
scheme_host_port[2]))
if uri.user_info:
authenticate(uri.host_port, *uri.user_info.partition(":")[0::2])
self._resource = _Resource.__init__(self, uri)
#self._subresources = {}
self.__headers = _get_headers(self.__uri__.host_port)
self.__base = super(Resource, self)
if metadata is None:
self.__initial_metadata = None
else:
self.__initial_metadata = dict(metadata)
self.__last_get_response = None
uri = uri.string
service_root_uri = uri[:uri.find("/", uri.find("//") + 2)] + "/"
if service_root_uri == uri:
self.__service_root = self
else:
self.__service_root = ServiceRoot(service_root_uri)
self.__relative_uri = NotImplemented
@property
def graph(self):
return self.__service_root.graph
@property
def headers(self):
return self.__headers
@property
def metadata(self):
if self.__last_get_response is None:
if self.__initial_metadata is not None:
return self.__initial_metadata
self.get()
return self.__last_get_response.content
@property
def relative_uri(self):
if self.__relative_uri is NotImplemented:
self_uri = self.uri.string
graph_uri = self.graph.uri.string
self.__relative_uri = URI(self_uri[len(graph_uri):])
return self.__relative_uri
@property
def service_root(self):
return self.__service_root
def get(self, headers=None, redirect_limit=5, **kwargs):
headers = dict(headers or {})
headers.update(self.__headers)
kwargs.update(product=PRODUCT, cache=True)
try:
response = self.__base.get(headers, redirect_limit, **kwargs)
except (ClientError, ServerError) as error:
if isinstance(error, JSONResponse):
content = dict(error.content, request=error.request, response=error)
else:
content = {}
message = content.pop("message", "HTTP GET returned response %s" % error.status_code)
raise_from(self.error_class(message, **content), error)
else:
self.__last_get_response = response
return response
def put(self, body=None, headers=None, **kwargs):
headers = dict(headers or {})
headers.update(self.__headers)
kwargs.update(product=PRODUCT)
try:
response = self.__base.put(body, headers, **kwargs)
except (ClientError, ServerError) as error:
if isinstance(error, JSONResponse):
content = dict(error.content, request=error.request, response=error)
else:
content = {}
message = content.pop("message", "HTTP PUT returned response %s" % error.status_code)
raise_from(self.error_class(message, **content), error)
else:
return response
def post(self, body=None, headers=None, **kwargs):
headers = dict(headers or {})
headers.update(self.__headers)
kwargs.update(product=PRODUCT)
try:
response = self.__base.post(body, headers, **kwargs)
except (ClientError, ServerError) as error:
if isinstance(error, JSONResponse):
content = dict(error.content, request=error.request, response=error)
else:
content = {}
message = content.pop("message", "HTTP POST returned response %s" % error.status_code)
raise_from(self.error_class(message, **content), error)
else:
return response
def delete(self, headers=None, **kwargs):
headers = dict(headers or {})
headers.update(self.__headers)
kwargs.update(product=PRODUCT)
try:
response = self.__base.delete(headers, **kwargs)
except (ClientError, ServerError) as error:
if isinstance(error, JSONResponse):
content = dict(error.content, request=error.request, response=error)
else:
content = {}
message = content.pop("message", "HTTP DELETE returned response %s" % error.status_code)
raise_from(self.error_class(message, **content), error)
else:
return response
class ResourceTemplate(_ResourceTemplate):
error_class = GraphError
def expand(self, **values):
resource = Resource(self.uri_template.expand(**values))
resource.error_class = self.error_class
return resource
class Service(object):
""" Base class for objects that can be bound to a remote resource.
"""
error_class = GraphError
__resource__ = None
def bind(self, uri, metadata=None):
""" Bind object to Resource or ResourceTemplate.
"""
if "{" in uri and "}" in uri:
if metadata:
raise ValueError("Initial metadata cannot be passed to a resource template")
self.__resource__ = ResourceTemplate(uri)
else:
self.__resource__ = Resource(uri, metadata)
self.__resource__.error_class = self.error_class
@property
def bound(self):
""" Returns :const:`True` if bound to a remote resource.
"""
return self.__resource__ is not None
@property
def graph(self):
return self.service_root.graph
@property
def relative_uri(self):
return self.resource.relative_uri
@property
def resource(self):
""" Returns the :class:`Resource` to which this is bound.
"""
if self.bound:
return self.__resource__
else:
raise BindError("Local entity is not bound to a remote entity")
@property
def service_root(self):
return self.resource.service_root
def unbind(self):
self.__resource__ = None
@property
def uri(self):
if isinstance(self.resource, ResourceTemplate):
return self.resource.uri_template
else:
return self.resource.uri
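# Usage sketch (illustrative only; the node URI below is hypothetical): any
# Service subclass gets the same bind/unbind lifecycle, choosing a Resource or,
# when the URI contains "{...}" placeholders, a ResourceTemplate.
#
#     class Thing(Service):
#         pass
#
#     t = Thing()
#     t.bound                                            # False
#     t.bind("http://localhost:7474/db/data/node/123")
#     t.bound                                            # True
#     t.resource.uri                                     # the bound URI
#     t.unbind()
#     t.resource                                         # raises BindError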
class ServiceRoot(object):
""" Neo4j REST API service root resource.
"""
DEFAULT_URI = "{0}://{1}/".format(DEFAULT_SCHEME, DEFAULT_HOST_PORT)
__instances = {}
__graph = None
def __new__(cls, uri=None):
if uri is None:
uri = cls.DEFAULT_URI
if not uri.endswith("/"):
uri += "/"
try:
inst = cls.__instances[uri]
except KeyError:
inst = super(ServiceRoot, cls).__new__(cls)
inst.__resource = Resource(uri)
inst.__graph = None
cls.__instances[uri] = inst
return inst
@property
def graph(self):
if self.__graph is None:
self.__graph = Graph(self.resource.metadata["data"])
return self.__graph
@property
def resource(self):
return self.__resource
@property
def uri(self):
return self.resource.uri
class Graph(Service):
""" Top-level wrapper around a Neo4j database service identified by
URI. To connect to a local server on the default URI, simply use::
>>> from py2neo import Graph
>>> graph = Graph()
The server address can also be provided explicitly::
>>> other_graph = Graph("http://camelot:1138/db/data/")
If the database server is behind a proxy that requires HTTP
authorisation,
this can also be supplied within the URI.
# Waveform
0xe0c8: {
'align': 'l', 'valign': 'c', 'stretch': 'xy', 'params': {'overlap': 0.01}},
# Hexagons
0xe0cc: {'align': 'l', 'valign': 'c', 'stretch': 'xy', 'params': ''},
0xe0cd: {'align': 'l', 'valign': 'c', 'stretch': 'xy', 'params': ''},
# Legos
0xe0ce: {'align': 'l', 'valign': 'c', 'stretch': 'xy', 'params': ''},
0xe0cf: {'align': 'c', 'valign': 'c', 'stretch': 'xy', 'params': ''},
0xe0d1: {'align': 'l', 'valign': 'c', 'stretch': 'xy', 'params': {'overlap': 0.02}},
# Top and bottom trapezoid
0xe0d2: {'align': 'l', 'valign': 'c', 'stretch': 'xy', 'params': {'overlap': 0.02}},
0xe0d4: {'align': 'r', 'valign': 'c', 'stretch': 'xy', 'params': {'overlap': 0.02}}}
symAttrDefault = {
# 'pa' == preserve aspect ratio
'default': {'align': 'c', 'valign': 'c', 'stretch': 'pa', 'params': ''}}
symAttrFontA = {
# 'pa' == preserve aspect ratio
'default': {'align': 'c', 'valign': 'c', 'stretch': 'pa', 'params': ''},
# Don't center these arrows vertically
0xf0dc: {'align': 'c', 'valign': '', 'stretch': 'pa', 'params': ''},
0xf0dd: {'align': 'c', 'valign': '', 'stretch': 'pa', 'params': ''},
0xf0de: {'align': 'c', 'valign': '', 'stretch': 'pa', 'params': ''}}
customAttr = {
# 'pa' == preserve aspect ratio
'default': {'align': 'c', 'valign': '', 'stretch': '', 'params': ''}}
# Most glyphs we want to maximize during the scale. However, there are some
# that need to be small or stay relative in size to each other.
# The following list are those glyphs. A tuple represents a range.
deviScaleList = {'ScaleGlyph': 0xE60E, 'GlyphsToScale': [(0xe6bd, 0xe6c3)]}
fontAScaleList = {
'ScaleGlyph': 0xF17A, 'GlyphsToScale': [
0xf005, 0xf006, (0xf026, 0xf028), 0xf02b, 0xf02c, (0xf031, 0xf035),
(0xf044, 0xf054), (0xf060, 0xf063), 0xf077, 0xf078, 0xf07d, 0xf07e, 0xf089,
(0xf0d7, 0xf0da), (0xf0dc, 0xf0de), (0xf100, 0xf107), 0xf141, 0xf142,
(0xf153, 0xf15a), (0xf175, 0xf178), 0xf182, 0xf183, (0xf221, 0xf22d),
(0xf255, 0xf25b)]}
octiScaleList = {
'ScaleGlyph': 0xF02E, 'GlyphsToScale': [(0xf03d, 0xf040), 0xf044,
(0xf051, 0xf053), 0xf05a, 0xf05b, 0xf071, 0xf078, (0xf09f, 0xf0aa), 0xf0ca]}
# Define the character ranges
# Symbol font ranges
# yapf: disable
self.patchSet = [
{'Enabled': True, 'Name': "Seti-UI + Custom",
'Filename': "original-source.otf", 'Exact': False, 'SymStart': 0xE4FA,
'SymEnd': 0xE52E, 'SrcStart': 0xE5FA, 'SrcEnd': 0xE62E, 'ScaleGlyph': None,
'Attributes': symAttrDefault},
{'Enabled': True, 'Name': "Devicons",
'Filename': "devicons.ttf", 'Exact': False, 'SymStart': 0xE600,
'SymEnd': 0xE6C5, 'SrcStart': 0xE700, 'SrcEnd': 0xE7C5, 'ScaleGlyph': deviScaleList,
'Attributes': symAttrDefault},
{'Enabled': self.args.powerline, 'Name': "Powerline Symbols",
'Filename': "PowerlineSymbols.otf", 'Exact': True, 'SymStart': 0xE0A0,
'SymEnd': 0xE0A2, 'SrcStart': None, 'SrcEnd': None, 'ScaleGlyph': None,
'Attributes': symAttrPowerline},
{'Enabled': self.args.powerline, 'Name': "Powerline Symbols",
'Filename': "PowerlineSymbols.otf", 'Exact': True, 'SymStart': 0xE0B0,
'SymEnd': 0xE0B3, 'SrcStart': None, 'SrcEnd': None, 'ScaleGlyph': None,
'Attributes': symAttrPowerline},
{'Enabled': self.args.powerlineextra, 'Name': "Powerline Extra Symbols",
'Filename': "PowerlineExtraSymbols.otf", 'Exact': True, 'SymStart': 0xE0A3,
'SymEnd': 0xE0A3, 'SrcStart': None, 'SrcEnd': None, 'ScaleGlyph': None,
'Attributes': symAttrPowerline},
{'Enabled': self.args.powerlineextra, 'Name': "Powerline Extra Symbols",
'Filename': "PowerlineExtraSymbols.otf", 'Exact': True, 'SymStart': 0xE0B4,
'SymEnd': 0xE0C8, 'SrcStart': None, 'SrcEnd': None, 'ScaleGlyph': None,
'Attributes': symAttrPowerline},
{'Enabled': self.args.powerlineextra, 'Name': "Powerline Extra Symbols",
'Filename': "PowerlineExtraSymbols.otf", 'Exact': True, 'SymStart': 0xE0CA,
'SymEnd': 0xE0CA, 'SrcStart': None, 'SrcEnd': None, 'ScaleGlyph': None,
'Attributes': symAttrPowerline},
{'Enabled': self.args.powerlineextra, 'Name': "Powerline Extra Symbols",
'Filename': "PowerlineExtraSymbols.otf", 'Exact': True, 'SymStart': 0xE0CC,
'SymEnd': 0xE0D4, 'SrcStart': None, 'SrcEnd': None, 'ScaleGlyph': None,
'Attributes': symAttrPowerline},
{'Enabled': self.args.pomicons, 'Name': "Pomicons",
'Filename': "Pomicons.otf", 'Exact': True, 'SymStart': 0xE000,
'SymEnd': 0xE00A, 'SrcStart': None, 'SrcEnd': None, 'ScaleGlyph': None,
'Attributes': symAttrDefault},
{'Enabled': self.args.fontawesome, 'Name': "Font Awesome",
'Filename': "FontAwesome.otf", 'Exact': True, 'SymStart': 0xF000,
'SymEnd': 0xF2E0, 'SrcStart': None, 'SrcEnd': None, 'ScaleGlyph': fontAScaleList,
'Attributes': symAttrFontA},
{'Enabled': self.args.fontawesomeextension, 'Name': "Font Awesome Extension",
'Filename': "font-awesome-extension.ttf", 'Exact': False, 'SymStart': 0xE000,
'SymEnd': 0xE0A9, 'SrcStart': 0xE200, 'SrcEnd': 0xE2A9, 'ScaleGlyph': None,
'Attributes': symAttrDefault}, # Maximize
{'Enabled': self.args.powersymbols, 'Name': "Power Symbols",
'Filename': "Unicode_IEC_symbol_font.otf", 'Exact': True, 'SymStart': 0x23FB,
'SymEnd': 0x23FE, 'SrcStart': None, 'SrcEnd': None, 'ScaleGlyph': None,
'Attributes': symAttrDefault}, # Power, Power On/Off, Power On, Sleep
{'Enabled': self.args.powersymbols, 'Name': "Power Symbols",
'Filename': "Unicode_IEC_symbol_font.otf", 'Exact': True, 'SymStart': 0x2B58,
'SymEnd': 0x2B58, 'SrcStart': None, 'SrcEnd': None, 'ScaleGlyph': None,
'Attributes': symAttrDefault}, # Heavy Circle (aka Power Off)
{'Enabled': self.args.material, 'Name': "Material",
'Filename': "materialdesignicons-webfont.ttf", 'Exact': False, 'SymStart': 0xF001,
'SymEnd': 0xF847, 'SrcStart': 0xF500, 'SrcEnd': 0xFD46, 'ScaleGlyph': None,
'Attributes': symAttrDefault},
{'Enabled': self.args.weather, 'Name': "Weather Icons",
'Filename': "weathericons-regular-webfont.ttf", 'Exact': False,
'SymStart': 0xF000, 'SymEnd': 0xF0EB, 'SrcStart': 0xE300, 'SrcEnd': 0xE3EB, 'ScaleGlyph': None,
'Attributes': symAttrDefault},
{'Enabled': self.args.fontlinux, 'Name': "Font Logos (Font Linux)",
'Filename': "font-logos.ttf", 'Exact': self.fontlinuxExactEncodingPosition,
'SymStart': 0xF100, 'SymEnd': 0xF11C, 'SrcStart': 0xF300, 'SrcEnd': 0xF31C, 'ScaleGlyph': None,
'Attributes': symAttrDefault},
{'Enabled': self.args.octicons, 'Name': "Octicons",
'Filename': "octicons.ttf", 'Exact': self.octiconsExactEncodingPosition,
'SymStart': 0xF000, 'SymEnd': 0xF105, 'SrcStart': 0xF400, 'SrcEnd': 0xF505,
'ScaleGlyph': octiScaleList, 'Attributes': symAttrDefault}, # Magnifying glass
{'Enabled': self.args.octicons, 'Name': "Octicons",
'Filename': "octicons.ttf", 'Exact': self.octiconsExactEncodingPosition,
'SymStart': 0x2665, 'SymEnd': 0x2665, 'SrcStart': None, 'SrcEnd': None,
'ScaleGlyph': octiScaleList, 'Attributes': symAttrDefault}, # Heart
{'Enabled': self.args.octicons, 'Name': "Octicons",
'Filename': "octicons.ttf", 'Exact': self.octiconsExactEncodingPosition,
'SymStart': 0X26A1, 'SymEnd': 0X26A1, 'SrcStart': None, 'SrcEnd': None,
'ScaleGlyph': octiScaleList, 'Attributes': symAttrDefault}, # Zap
{'Enabled': self.args.octicons, 'Name': "Octicons",
'Filename': "octicons.ttf", 'Exact': self.octiconsExactEncodingPosition,
#'SymStart': 0xF27C, 'SymEnd': 0xF27C, 'SrcStart': 0xF4A9, 'SrcEnd': 0xF4A9,
'SymStart': 0xF27C, 'SymEnd': 0xF2BD, 'SrcStart': 0xF4A9, 'SrcEnd': 0xF4EA,
'ScaleGlyph': octiScaleList, 'Attributes': symAttrDefault}, # Desktop
{'Enabled': self.args.custom, 'Name': "Custom",
'Filename': self.args.custom, 'Exact': True,
'SymStart': 0x0000, 'SymEnd': 0x0000, 'SrcStart': 0x0000, 'SrcEnd': 0x0000,
'ScaleGlyph': None, 'Attributes': customAttr}]
# yapf: enable
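# Illustrative sketch (assumed driver loop, not part of this script; the
# fontforge.open() call is an assumption about how a symbol font would be
# loaded): each enabled patchSet entry is expected to be handed to
# copyGlyphs() below roughly like this:
#
#     for patch in self.patchSet:
#         if patch['Enabled']:
#             symfont = fontforge.open(patch['Filename'])
#             self.copyGlyphs(patch['SrcStart'], patch['SrcEnd'], symfont,
#                             patch['SymStart'], patch['SymEnd'], patch['Exact'],
#                             patch['ScaleGlyph'], patch['Name'], patch['Attributes'])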
def setupLineDimensions(self):
"""
win_ascent and win_descent are used to set the line height for windows fonts.
hhea_ascent and hhea_descent are used to set the line height for mac fonts.
Make the total line size even. This seems to make the powerline separators
center more evenly.
"""
if self.args.adjustLineHeight:
if (self.sourceFont.os2_winascent +
self.sourceFont.os2_windescent) % 2 != 0:
self.sourceFont.os2_winascent += 1
# Make the line size identical for windows and mac
self.sourceFont.hhea_ascent = self.sourceFont.os2_winascent
self.sourceFont.hhea_descent = -self.sourceFont.os2_windescent
# Line gap add extra space on the bottom of the line which
# doesn't allow the powerline glyphs to fill the entire line.
self.sourceFont.hhea_linegap = 0
self.sourceFont.os2_typolinegap = 0
def getSourceFontDimensions(self):
# Initial font dimensions
self.fontDim = {
'xmin': 0, 'ymin': -self.sourceFont.os2_windescent, 'xmax': 0,
'ymax': self.sourceFont.os2_winascent, 'width': 0, 'height': 0, }
# Find the biggest char width
# Ignore the y-values, os2_winXXXXX values set above are used for line height
#
# 0x00-0x17f covers Basic Latin through Latin Extended-A
for glyph in range(0x00, 0x17f):
try:
(_, _, xmax, _) = self.sourceFont[glyph].boundingBox()
except TypeError:
continue
if self.fontDim['width'] < self.sourceFont[glyph].width:
self.fontDim['width'] = self.sourceFont[glyph].width
if xmax > self.fontDim['xmax']:
self.fontDim['xmax'] = xmax
# Calculate font height
self.fontDim['height'] = abs(self.fontDim['ymin']) + self.fontDim['ymax']
def getScaleFactor(self, symDim):
scaleRatio = 1
# We want to preserve x/y aspect ratio, so find biggest scale factor that allows symbol to fit
scaleRatioX = self.fontDim['width'] / symDim['width']
# Size the y ratio against the source font's em square (not fontDim['height'],
# which is the full win ascent + descent line height and is not used here)
scaleRatioY = self.sourceFont.em / symDim['height']
if scaleRatioX > scaleRatioY:
scaleRatio = scaleRatioY
else:
scaleRatio = scaleRatioX
return scaleRatio
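# Worked example (numbers are illustrative only): with fontDim['width'] == 1200,
# sourceFont.em == 2048 and a symbol of width 2000 and height 2560, the two
# candidate ratios are 1200/2000 == 0.6 and 2048/2560 == 0.8; the smaller one
# (0.6) is returned so the scaled symbol fits in both directions.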
def copyGlyphs(self, sourceFontStart, sourceFontEnd, symbolFont,
symbolFontStart, symbolFontEnd, exactEncoding, scaleGlyph, setName,
attributes):
""" Copies symbol glyphs into self.sourceFont """
progressText = ''
careful = False
glyphSetLength = 0
if self.args.careful:
careful = True
if exactEncoding is False:
sourceFontList = []
sourceFontCounter = 0
for i in range(sourceFontStart, sourceFontEnd + 1):
sourceFontList.append(format(i, 'X'))
scaleFactor = 0
if scaleGlyph:
symDim = getGlyphDimensions(symbolFont[scaleGlyph['ScaleGlyph']])
scaleFactor = self.getScaleFactor(symDim)
# Create glyphs from symbol font
#
# If we are going to copy all Glyphs, then assume we want to be careful
# and only copy those that are not already contained in the source font
if symbolFontStart == 0:
symbolFont.selection.all()
self.sourceFont.selection.all()
careful = True
else:
symbolFont.selection.select((str("ranges"), str("unicode")),
symbolFontStart, symbolFontEnd)
self.sourceFont.selection.select((str("ranges"), str("unicode")),
sourceFontStart, sourceFontEnd)
# Get number of selected non-empty glyphs @TODO FIXME
for index, symGlyph in enumerate(symbolFont.selection.byGlyphs):
glyphSetLength += 1
# end for
if self.args.quiet is False:
sys.stdout.write("Adding " + str(max(1, glyphSetLength)) + " Glyphs from " +
setName + " Set \n")
for index, symGlyph in enumerate(symbolFont.selection.byGlyphs):
index = max(1, index)
try:
symAttr = attributes[symGlyph.unicode]
except KeyError:
symAttr = attributes['default']
if exactEncoding:
# use the exact same hex values for the source font as for the symbol font
currentSourceFontGlyph = symGlyph.encoding
# Save as a hex string without the '0x' prefix
copiedToSlot = format(symGlyph.unicode, 'X')
else:
# use source font defined hex values based on passed in start and end
# make sure the string is interpreted as a hex value:
currentSourceFontGlyph = int("0x" + sourceFontList[sourceFontCounter], 16)
copiedToSlot = sourceFontList[sourceFontCounter]
sourceFontCounter += 1
if int(copiedToSlot, 16) < 0:
print("Found invalid glyph slot number. Skipping.")
continue
if self.args.quiet is False:
updateProgress(round(float(index + 1) / glyphSetLength, 2))
# Prepare symbol glyph dimensions
symDim = getGlyphDimensions(symGlyph)
# check if a glyph already exists in this location
if careful or 'careful' in symAttr['params']:
if copiedToSlot.startswith("uni"):
copiedToSlot = copiedToSlot[3:]
codepoint = int("0x" + copiedToSlot, 16)
if codepoint in self.sourceFont:
if self.args.quiet is False:
print(" Found existing Glyph at {}. Skipping...".format(copiedToSlot))
# We don't want to touch anything so move to next Glyph
continue
# Select and copy symbol from its encoding point
# We need to do this select after the careful check, this way we don't
# reset our selection before starting the next loop
symbolFont.selection.select(symGlyph.encoding)
symbolFont.copy()
# Paste it
self.sourceFont.selection.select(currentSourceFontGlyph)
self.sourceFont.paste()
self.sourceFont[currentSourceFontGlyph].glyphname = symGlyph.glyphname
scaleRatioX = 1
scaleRatioY = 1
# Now that we have copy/pasted the glyph, if we are creating a monospace
# font we need to scale and move the glyphs. It is possible to have
# empty glyphs, so we need to skip those.
if self.args.single and symDim['width'] and symDim['height']:
# If we want to preserve that aspect ratio of the glyphs we need to
# find the largest possible scaling factor that will allow the glyph
# to fit in both the x and y directions
if symAttr['stretch'] == 'pa':
if scaleFactor and useScaleGlyph(symGlyph.unicode,
scaleGlyph['GlyphsToScale']):
# We want to preserve the relative size of each glyph to other glyphs
# in the same symbol font.
scaleRatioX = scaleFactor
scaleRatioY = scaleFactor
else:
# In this case, each glyph is sized independently to each other
scaleRatioX = self.getScaleFactor(symDim)
scaleRatioY = scaleRatioX
else:
if 'x' in symAttr['stretch']:
# Stretch the glyph horizontally to fit the entire available width
# melodic/lib/python2.7/dist-packages/mavros_msgs/msg/_CommandCode.py
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mavros_msgs/CommandCode.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class CommandCode(genpy.Message):
_md5sum = "9c980aa1230f756ac9d693ff35accb29"
_type = "mavros_msgs/CommandCode"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# MAV_CMD command codes.
# Actual meaning and params you may find in MAVLink documentation
# https://mavlink.io/en/messages/common.html#MAV_CMD
# [[[cog:
# from pymavlink.dialects.v20 import common
# from collections import OrderedDict
# import re
#
# def wr_enum(enum, ename, pfx='', bsz=16):
# cog.outl("# " + ename + "_" + pfx)
# for k, e in enum:
# # exclude also deprecated commands
# if 'MAV_CMD' + "_" + pfx in e.name and not re.search('deprecated', e.description, re.IGNORECASE):
# sn = e.name[len('MAV_CMD') + 1:]
# l = "uint{bsz} {sn} = {k}".format(**locals())
# if e.description:
# l += ' ' * (50 - len(l)) + ' # ' + e.description
# cog.outl(l)
# cog.out('\n')
#
# def decl_enum(ename):
# enum = sorted(common.enums[ename].items())
# enum.pop() # remove ENUM_END
#
# enumt = []
# # exception list of commands to not include
# exlist = ['SPATIAL', 'USER', 'WAYPOINT']
# for k, e in enum:
# enumt.extend(e.name[len(ename) + 1:].split('_')[0:1])
#
# enumt = sorted(set(enumt))
# enumt = [word for word in enumt if word not in exlist]
#
# for key in enumt:
# wr_enum(enum, ename, key)
#
# decl_enum('MAV_CMD')
# ]]]
# MAV_CMD_AIRFRAME
uint16 AIRFRAME_CONFIGURATION = 2520
# MAV_CMD_ARM
uint16 ARM_AUTHORIZATION_REQUEST = 3001 # Request authorization to arm the vehicle to a external entity, the arm authorizer is responsible to request all data that is needs from the vehicle before authorize or deny the request. If approved the progress of command_ack message should be set with period of time that this authorization is valid in seconds or in case it was denied it should be set with one of the reasons in ARM_AUTH_DENIED_REASON.
# MAV_CMD_COMPONENT
uint16 COMPONENT_ARM_DISARM = 400 # Arms / Disarms a component
# MAV_CMD_CONDITION
uint16 CONDITION_DELAY = 112 # Delay mission state machine.
uint16 CONDITION_CHANGE_ALT = 113 # Ascend/descend at rate. Delay mission state machine until desired altitude reached.
uint16 CONDITION_DISTANCE = 114 # Delay mission state machine until within desired distance of next NAV point.
uint16 CONDITION_YAW = 115 # Reach a certain target angle.
uint16 CONDITION_LAST = 159 # NOP - This command is only used to mark the upper limit of the CONDITION commands in the enumeration
# MAV_CMD_CONTROL
uint16 CONTROL_HIGH_LATENCY = 2600 # Request to start/stop transmitting over the high latency telemetry
# MAV_CMD_DO
uint16 DO_FOLLOW = 32 # Being following a target
uint16 DO_FOLLOW_REPOSITION = 33 # Reposition the MAV after a follow target command has been sent
uint16 DO_SET_MODE = 176 # Set system mode.
uint16 DO_JUMP = 177 # Jump to the desired command in the mission list. Repeat this action only the specified number of times
uint16 DO_CHANGE_SPEED = 178 # Change speed and/or throttle set points.
uint16 DO_SET_HOME = 179 # Changes the home location either to the current location or a specified location.
uint16 DO_SET_PARAMETER = 180 # Set a system parameter. Caution! Use of this command requires knowledge of the numeric enumeration value of the parameter.
uint16 DO_SET_RELAY = 181 # Set a relay to a condition.
uint16 DO_REPEAT_RELAY = 182 # Cycle a relay on and off for a desired number of cycles with a desired period.
uint16 DO_SET_SERVO = 183 # Set a servo to a desired PWM value.
uint16 DO_REPEAT_SERVO = 184 # Cycle a between its nominal setting and a desired PWM for a desired number of cycles with a desired period.
uint16 DO_FLIGHTTERMINATION = 185 # Terminate flight immediately
uint16 DO_CHANGE_ALTITUDE = 186 # Change altitude set point.
uint16 DO_LAND_START = 189 # Mission command to perform a landing. This is used as a marker in a mission to tell the autopilot where a sequence of mission items that represents a landing starts. It may also be sent via a COMMAND_LONG to trigger a landing, in which case the nearest (geographically) landing sequence in the mission will be used. The Latitude/Longitude is optional, and may be set to 0 if not needed. If specified then it will be used to help find the closest landing sequence.
uint16 DO_RALLY_LAND = 190 # Mission command to perform a landing from a rally point.
uint16 DO_GO_AROUND = 191 # Mission command to safely abort an autonomous landing.
uint16 DO_REPOSITION = 192 # Reposition the vehicle to a specific WGS84 global position.
uint16 DO_PAUSE_CONTINUE = 193 # If in a GPS controlled position mode, hold the current position or continue.
uint16 DO_SET_REVERSE = 194 # Set moving direction to forward or reverse.
uint16 DO_SET_ROI_LOCATION = 195 # Sets the region of interest (ROI) to a location. This can then be used by the vehicles control system to control the vehicle attitude and the attitude of various sensors such as cameras.
uint16 DO_SET_ROI_WPNEXT_OFFSET = 196 # Sets the region of interest (ROI) to be toward next waypoint, with optional pitch/roll/yaw offset. This can then be used by the vehicles control system to control the vehicle attitude and the attitude of various sensors such as cameras.
uint16 DO_SET_ROI_NONE = 197 # Cancels any previous ROI command returning the vehicle/sensors to default flight characteristics. This can then be used by the vehicles control system to control the vehicle attitude and the attitude of various sensors such as cameras.
uint16 DO_CONTROL_VIDEO = 200 # Control onboard camera system.
uint16 DO_SET_ROI = 201 # Sets the region of interest (ROI) for a sensor set or the vehicle itself. This can then be used by the vehicles control system to control the vehicle attitude and the attitude of various sensors such as cameras.
uint16 DO_DIGICAM_CONFIGURE = 202 # Configure digital camera. This is a fallback message for systems that have not yet implemented PARAM_EXT_XXX messages and camera definition files (see https://mavlink.io/en/services/camera_def.html ).
uint16 DO_DIGICAM_CONTROL = 203 # Control digital camera. This is a fallback message for systems that have not yet implemented PARAM_EXT_XXX messages and camera definition files (see https://mavlink.io/en/services/camera_def.html ).
uint16 DO_MOUNT_CONFIGURE = 204 # Mission command to configure a camera or antenna mount
uint16 DO_MOUNT_CONTROL = 205 # Mission command to control a camera or antenna mount
uint16 DO_SET_CAM_TRIGG_DIST = 206 # Mission command to set camera trigger distance for this flight. The camera is triggered each time this distance is exceeded. This command can also be used to set the shutter integration time for the camera.
uint16 DO_FENCE_ENABLE = 207 # Mission command to enable the geofence
uint16 DO_PARACHUTE = 208 # Mission command to trigger a parachute
uint16 DO_MOTOR_TEST = 209 # Mission command to perform motor test.
uint16 DO_INVERTED_FLIGHT = 210 # Change to/from inverted flight.
uint16 DO_SET_CAM_TRIGG_INTERVAL = 214 # Mission command to set camera trigger interval for this flight. If triggering is enabled, the camera is triggered each time this interval expires. This command can also be used to set the shutter integration time for the camera.
uint16 DO_MOUNT_CONTROL_QUAT = 220 # Mission command to control a camera or antenna mount, using a quaternion as reference.
uint16 DO_GUIDED_MASTER = 221 # set id of master controller
uint16 DO_GUIDED_LIMITS = 222 # Set limits for external control
uint16 DO_ENGINE_CONTROL = 223 # Control vehicle engine. This is interpreted by the vehicles engine controller to change the target engine state. It is intended for vehicles with internal combustion engines
uint16 DO_SET_MISSION_CURRENT = 224 # Set the mission item with sequence number seq as current item. This means that the MAV will continue to this mission item on the shortest path (not following the mission items in-between).
uint16 DO_LAST = 240 # NOP - This command is only used to mark the upper limit of the DO commands in the enumeration
uint16 DO_JUMP_TAG = 601 # Jump to the matching tag in the mission list. Repeat this action for the specified number of times. A mission should contain a single matching tag for each jump. If this is not the case then a jump to a missing tag should complete the mission, and a jump where there are multiple matching tags should always select the one with the lowest mission sequence number.
uint16 DO_TRIGGER_CONTROL = 2003
assets_data = [
]
mock_do_request.side_effect = [
MockResponse(200, ""),
MockResponse(200)
]
expected = [
{
"lang": "en",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "https://www.scielo.br/j/xjk/a/ldld?format=html&lang=en",
"available": True,
"status code": 200,
"start time": START_TIME,
"end time": END_TIME,
"duration": DURATION,
"components": [
{
"type": "pdf",
"id": "en",
"present_in_html": [],
"absent_in_html": [
"/j/xjk/a/ldld?format=pdf&lang=en",
"/j/xjk/a/ldld?lang=en&format=pdf",
"/j/xjk/a/ldld?format=pdf",
"/j/xjk/a/ldld?lang=en",
"/j/xjk/a/ldld/?format=pdf&lang=en",
"/j/xjk/a/ldld/?lang=en&format=pdf",
"/j/xjk/a/ldld/?format=pdf",
"/j/xjk/a/ldld/?lang=en",
],
},
],
"total expected components": 1,
"total missing components": 1,
"pdf": {"total": 1, "missing": 1},
"assets": {
"total expected": 0,
"total missing": 0,
"total alternatives": 0,
"total alternatives present in html": 0,
},
"existing_uri_items_in_html": [],
},
{
"lang": "en",
"format": "pdf",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "https://www.scielo.br/j/xjk/a/ldld?format=pdf&lang=en",
"available": True,
"status code": 200,
"start time": START_TIME,
"end time": END_TIME,
"duration": DURATION,
},
]
object_store_url = None
result, summary = check_document_webpages_availability(
website_url, doc_data_list, assets_data, object_store_url)
self.assertDictEqual({
"web html": {"total": 1, "total unavailable": 0, "total incomplete": 1},
"web pdf": {"total": 1, "total unavailable": 0},
},
summary
)
self.assertListEqual(expected, result)
@patch("operations.check_website_operations.do_request")
def test_check_document_webpages_availability_returns_html_es_is_not_available_although_it_is_present_in_html_en(self, mock_do_request):
website_url = "https://www.scielo.br"
doc_data_list = [
{
"lang": "en",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "/j/xjk/a/ldld?format=html&lang=en",
"uri_alternatives": [
"/j/xjk/a/ldld?format=html&lang=en",
"/j/xjk/a/ldld?lang=en&format=html",
"/j/xjk/a/ldld?format=html",
"/j/xjk/a/ldld?lang=en",
"/j/xjk/a/ldld/?format=html&lang=en",
"/j/xjk/a/ldld/?lang=en&format=html",
"/j/xjk/a/ldld/?format=html",
"/j/xjk/a/ldld/?lang=en",
],
},
{
"lang": "es",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "/j/xjk/a/ldld?format=html&lang=es",
"uri_alternatives": [
"/j/xjk/a/ldld?format=html&lang=es",
"/j/xjk/a/ldld?lang=es&format=html",
"/j/xjk/a/ldld?format=html",
"/j/xjk/a/ldld?lang=es",
"/j/xjk/a/ldld/?format=html&lang=es",
"/j/xjk/a/ldld/?lang=es&format=html",
"/j/xjk/a/ldld/?format=html",
"/j/xjk/a/ldld/?lang=es",
],
},
]
assets_data = [
]
mock_do_request.side_effect = [
MockResponse(200,
"""
English version in html format
<a href="/j/xjk/a/ldld?format=html&lang=es"/>
"""
),
MockResponse(None)
]
expected = [
{
"lang": "en",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "https://www.scielo.br/j/xjk/a/ldld?format=html&lang=en",
"available": True,
"status code": 200,
"start time": START_TIME,
"end time": END_TIME,
"duration": DURATION,
"components": [
{
"type": "html",
"id": "es",
"present_in_html": [
"/j/xjk/a/ldld?format=html&lang=es",
],
},
],
"total missing components": 0,
"total expected components": 1,
"html": {"total": 1, "missing": 0},
"assets": {
"total expected": 0,
"total missing": 0,
"total alternatives": 0,
"total alternatives present in html": 0,
},
},
{
"lang": "es",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "https://www.scielo.br/j/xjk/a/ldld?format=html&lang=es",
"available": False,
"status code": None,
"start time": START_TIME,
"end time": END_TIME,
"duration": DURATION,
"total missing components": 1,
"total expected components": 1,
},
]
object_store_url = None
result, summary = check_document_webpages_availability(
website_url, doc_data_list, assets_data, object_store_url)
self.assertDictEqual(
{
"web html": {
"total": 2, "total unavailable": 1, "total incomplete": 0},
},
summary
)
self.assertListEqual(expected, result)
@patch("operations.check_website_operations.do_request")
def test_check_document_webpages_availability_returns_html_es_is_available_although_it_is_not_present_in_html_en(self, mock_do_request):
website_url = "https://www.scielo.br"
doc_data_list = [
{
"lang": "en",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "/j/xjk/a/ldld?format=html&lang=en",
"uri_alternatives": [
"/j/xjk/a/ldld?format=html&lang=en",
"/j/xjk/a/ldld?lang=en&format=html",
"/j/xjk/a/ldld?format=html",
"/j/xjk/a/ldld?lang=en",
"/j/xjk/a/ldld/?format=html&lang=en",
"/j/xjk/a/ldld/?lang=en&format=html",
"/j/xjk/a/ldld/?format=html",
"/j/xjk/a/ldld/?lang=en",
],
},
{
"lang": "es",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "/j/xjk/a/ldld?format=html&lang=es",
"uri_alternatives": [
"/j/xjk/a/ldld?format=html&lang=es",
"/j/xjk/a/ldld?lang=es&format=html",
"/j/xjk/a/ldld?format=html",
"/j/xjk/a/ldld?lang=es",
"/j/xjk/a/ldld/?format=html&lang=es",
"/j/xjk/a/ldld/?lang=es&format=html",
"/j/xjk/a/ldld/?format=html",
"/j/xjk/a/ldld/?lang=es",
],
},
]
assets_data = [
]
mock_do_request.side_effect = [
MockResponse(
200, "documento sem links, conteúdo do html em Ingles"),
MockResponse(
200,
"""
Spanish document content with a link to the English version
<a href="/j/xjk/a/ldld?format=html"/>
"""
),
]
expected = [
{
"lang": "en",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "https://www.scielo.br/j/xjk/a/ldld?format=html&lang=en",
"available": True,
"status code": 200,
"start time": START_TIME,
"end time": END_TIME,
"duration": DURATION,
"components": [
{
"type": "html",
"id": "es",
"present_in_html": [],
"absent_in_html": [
"/j/xjk/a/ldld?format=html&lang=es",
"/j/xjk/a/ldld?lang=es&format=html",
"/j/xjk/a/ldld?format=html",
"/j/xjk/a/ldld?lang=es",
"/j/xjk/a/ldld/?format=html&lang=es",
"/j/xjk/a/ldld/?lang=es&format=html",
"/j/xjk/a/ldld/?format=html",
"/j/xjk/a/ldld/?lang=es",
],
},
],
"total expected components": 1,
"total missing components": 1,
"html": {"total": 1, "missing": 1},
"assets": {
"total expected": 0,
"total missing": 0,
"total alternatives": 0,
"total alternatives present in html": 0,
},
"existing_uri_items_in_html": []
},
{
"lang": "es",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "https://www.scielo.br/j/xjk/a/ldld?format=html&lang=es",
"available": True,
"status code": 200,
"start time": START_TIME,
"end time": END_TIME,
"duration": DURATION,
"components": [
{
"type": "html",
"id": "en",
"present_in_html": [
"/j/xjk/a/ldld?format=html",
],
},
],
"total expected components": 1,
"total missing components": 0,
"html": {"total": 1, "missing": 0},
"assets": {
"total expected": 0,
"total missing": 0,
"total alternatives": 0,
"total alternatives present in html": 0,
},
},
]
object_store_url = None
result, summary = check_document_webpages_availability(
website_url, doc_data_list, assets_data, object_store_url)
self.assertDictEqual(
{
"web html":
{"total": 2, "total unavailable": 0, "total incomplete": 1, },
},
summary
)
self.assertListEqual(expected, result)
class TestCheckDocumentHtml(TestCase):
@patch("operations.check_website_operations.datetime")
@patch("operations.check_website_operations.requests.get")
def test_check_document_html_returns_not_available(self, mock_get, mock_dt):
mock_get.return_value = None
mock_dt.utcnow.side_effect = [START_TIME, END_TIME]
uri = "https://..."
assets_data = []
other_webpages_data = []
expected = {
"available": False, "status code": None,
"start time": START_TIME,
"end time": END_TIME,
"duration": DURATION,
"total expected components": 0,
"total missing components": 0,
}
object_store_url = None
result = check_document_html(
uri, assets_data, other_webpages_data, object_store_url)
self.assertEqual(expected, result)
@patch("operations.check_website_operations.datetime")
@patch("operations.check_website_operations.requests.get")
def test_check_document_html_returns_available_and_empty_components(self, mock_get, mock_dt):
mock_response = MockResponse(200, "")
mock_get.return_value = mock_response
mock_dt.utcnow.side_effect = [START_TIME, END_TIME]
uri = "https://..."
assets_data = []
other_webpages_data = []
expected = {
"available": True,
"status code": 200,
"start time": START_TIME,
"end time": END_TIME,
"duration": DURATION,
"components": [],
"total missing components": 0,
"total expected components": 0,
"assets": {
"total expected": 0,
"total missing": 0,
"total alternatives": 0,
"total alternatives present in html": 0,
},
}
object_store_url = None
result = check_document_html(
uri, assets_data, other_webpages_data, object_store_url)
self.assertEqual(expected, result)
@patch("operations.check_website_operations.datetime")
@patch("operations.check_website_operations.requests.get")
def test_check_document_html_returns_available_and_components_are_absent(self, mock_get, mock_dt):
mock_response = MockResponse(200, "")
mock_get.return_value = mock_response
mock_dt.utcnow.side_effect = [START_TIME, END_TIME]
uri = "https://..."
assets_data = [
{
"prefix": "asset_uri_1",
"uri_alternatives": [
"asset_uri_1.tiff", "asset_uri_1.jpg", "asset_uri_1.png"]
},
]
other_webpages_data = [
{
"lang": "en",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "/j/xjk/a/ldld?format=html&lang=en",
"uri_alternatives": [
"/j/xjk/a/ldld?format=html&lang=en",
"/j/xjk/a/ldld?lang=en&format=html",
"/j/xjk/a/ldld?format=html",
"/j/xjk/a/ldld?lang=en",
"/j/xjk/a/ldld/?format=html&lang=en",
"/j/xjk/a/ldld/?lang=en&format=html",
"/j/xjk/a/ldld/?format=html",
"/j/xjk/a/ldld/?lang=en",
],
},
]
expected = {
"available": True,
"status code": 200,
"start time": START_TIME,
"end time": END_TIME,
"duration": DURATION,
"components": [
{
"type": "asset",
"id": "asset_uri_1",
"present_in_html": [],
"absent_in_html": [
"asset_uri_1.tiff", "asset_uri_1.jpg",
"asset_uri_1.png"],
},
{
"type": "html",
"id": "en",
"present_in_html": [],
"absent_in_html": [
"/j/xjk/a/ldld?format=html&lang=en",
"/j/xjk/a/ldld?lang=en&format=html",
"/j/xjk/a/ldld?format=html",
"/j/xjk/a/ldld?lang=en",
"/j/xjk/a/ldld/?format=html&lang=en",
"/j/xjk/a/ldld/?lang=en&format=html",
"/j/xjk/a/ldld/?format=html",
"/j/xjk/a/ldld/?lang=en",
],
},
],
"total missing components": 2,
"total expected components": 2,
"html": {"total": 1, "missing": 1},
"assets": {
"total expected": 1,
"total missing": 1,
"total alternatives": 3,
"total alternatives present in html": 0,
},
"existing_uri_items_in_html": []
}
object_store_url = None
result = check_document_html(
uri, assets_data, other_webpages_data, object_store_url)
self.assertEqual(expected, result)
@patch("operations.check_website_operations.datetime")
@patch("operations.check_website_operations.requests.get")
def test_check_document_html_returns_available_and_components_are_present(self, mock_get, mock_dt):
mock_response = MockResponse(200, "")
mock_response.text = """
<img src="asset_uri_1.jpg"/>
<a href="/j/xjk/a/ldld?lang=en"/>
"""
mock_get.return_value = mock_response
mock_dt.utcnow.side_effect = [START_TIME, END_TIME]
uri = "https://..."
assets_data = [
{
"prefix": "asset_uri_1",
"uri_alternatives": [
"asset_uri_1.tiff", "asset_uri_1.jpg", "asset_uri_1.png"]
},
]
other_webpages_data = [
{
"lang": "en",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "/j/xjk/a/ldld?format=html&lang=en",
"uri_alternatives": [
"/j/xjk/a/ldld?format=html&lang=en",
"/j/xjk/a/ldld?lang=en&format=html",
"/j/xjk/a/ldld?format=html",
"/j/xjk/a/ldld?lang=en",
"/j/xjk/a/ldld/?format=html&lang=en",
"/j/xjk/a/ldld/?lang=en&format=html",
"/j/xjk/a/ldld/?format=html",
"/j/xjk/a/ldld/?lang=en",
],
},
]
expected = {
"available": True,
"status code": 200,
"start time": START_TIME,
"end time": END_TIME,
"duration": DURATION,
"components": [
{
"type": "asset",
"id": "asset_uri_1",
"present_in_html": ["asset_uri_1.jpg"],
"absent_in_html": [
"asset_uri_1.tiff", "asset_uri_1.png"
],
},
{
"type": "html",
"id": "en",
"present_in_html": [
"/j/xjk/a/ldld?lang=en",
],
},
],
"total missing components": 0,
"total expected components": 2,
"html": {"total": 1, "missing": 0},
"assets": {
"total expected": 1,
"total missing": 0,
"total alternatives": 3,
"total alternatives present in html": 1,
},
}
object_store_url = None
result = check_document_html(
uri, assets_data, other_webpages_data, object_store_url)
self.assertEqual(expected, result)
class TestCheckDocumentHtmlContent(TestCase):
def test_check_document_html_content_returns_not_available(self):
assets_data = []
other_webpages_data = []
expected = {
"total expected components": 0,
"total missing components": 0,
}
object_store_url = None
result = check_document_html_content(
None, assets_data, other_webpages_data, object_store_url)
self.assertEqual(expected, result)
def test_check_document_html_content_returns_available_and_empty_components(self):
assets_data = []
other_webpages_data = []
expected = {
"components": [],
"total missing components": 0,
"total expected components": 0,
"assets": {
"total expected": 0,
"total missing": 0,
"total alternatives": 0,
"total alternatives present in html": 0,
},
}
object_store_url = None
result = check_document_html_content(
"", assets_data, other_webpages_data, object_store_url)
self.assertEqual(expected, result)
def test_check_document_html_content_returns_available_and_components_are_absent(self):
assets_data = [
{
"prefix": "asset_uri_1",
"uri_alternatives": [
"asset_uri_1.tiff", "asset_uri_1.jpg", "asset_uri_1.png"]
},
]
other_webpages_data = [
{
"lang": "en",
"format": "html",
"pid_v2": "pid-v2", "acron": "xjk",
"doc_id_for_human": "artigo-1234",
"doc_id": "ldld",
"uri": "/j/xjk/a/ldld?format=html&lang=en",
"uri_alternatives": [
"/j/xjk/a/ldld?format=html&lang=en",
"/j/xjk/a/ldld?lang=en&format=html",
"/j/xjk/a/ldld?format=html",
"/j/xjk/a/ldld?lang=en",
"/j/xjk/a/ldld/?format=html&lang=en",
"/j/xjk/a/ldld/?lang=en&format=html",
"/j/xjk/a/ldld/?format=html",
"/j/xjk/a/ldld/?lang=en",
],
},
]
expected = {
"components": [
{
"type": | |
"""
This module handles conversion of POP files to JSON files in ESPEI format
"""
from sympy.parsing.sympy_parser import parse_expr
from pyparsing import *
from pop_keywords import expand_keyword, POP_COMMANDS
print("""WARNING! This module is VERY experimental. You will most likely get failures or incorrect answers.
You should check all of your data instead of assuming it is correct.
Please report any errors so that this module can be improved.""")
__author__ = "<NAME> <<EMAIL>>, <NAME> <<EMAIL>>"
class ExperimentSet(dict):
"""
Experiment set, which is a stored as a dictionary.
The subclass exists only to hold metadata about the experiment set while it is being
constructed by parsing. Once construction is finished the class adds nothing, because the
data is fully populated as a normal dictionary.
"""
def update(self, E=None, **F):
"""
Overrides dict update to return self (for lambda functions)
"""
super(ExperimentSet, self).update(E, **F)
return self
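# Usage sketch (illustrative only): update() returning self lets the parser
# mutate and hand back the experiment set inside a lambda in one expression.
#
#     exp = ExperimentSet()
#     handler = lambda e, phases: e.update({"phases": phases})
#     exp = handler(exp, {"LIQUID": 1.0})   # same instance, now populated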
class POPCommand(CaselessKeyword):
"""
Parser element for dealing with POP command abbreviations.
"""
def parseImpl(self, instring, loc, doActions=True):
# Find the end of the keyword by searching for an end character
# TODO: how much of this do I need?
start = loc
endchars = ' ():,'
loc = -1
for charx in endchars:
locx = instring.find(charx, start)
if locx != -1:
# match the end-character closest to the start character
if loc != -1:
loc = min(loc, locx)
else:
loc = locx
# if no end character found, just match the whole thing
if loc == -1:
loc = len(instring)
try:
res = expand_keyword([self.match], instring[start:loc])
if len(res) > 1:
self.errmsg = '{0!r} is ambiguous: matches {1}' \
.format(instring[start:loc], res)
raise ParseException(instring, loc, self.errmsg, self)
# res[0] is the unambiguous expanded keyword
# in principle, res[0] == self.match
return loc, res[0]
except ValueError:
pass
raise ParseException(instring, loc, self.errmsg, self)
def _pop_grammar():
"""
Returns the pyparsing grammar for a POP file.
"""
# pyparsing "constants"
sCOMMA = Suppress(',')  # suppresses the comma in the parse results
int_number = Word(nums).setParseAction(lambda t: [int(t[0])])
# matching float w/ regex is ugly but is recommended by pyparsing
float_number = (Regex(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?') | '0') \
.setParseAction(lambda t: [float(t[0])])
# symbol name, e.g., phase name or equilibrium name
symbol_name = Word(alphanums + '_:', min=1)
equalities = Word('=') ^ Word('<') ^ Word('>')
pm = oneOf('+ -')
label = Word('@' + nums)
value = (float_number | int_number | label | symbol_name)
const = Group(symbol_name + equalities + value)
phases = Group(OneOrMore(symbol_name + Optional(sCOMMA))) | '*'
prop_prefix = symbol_name + Suppress('(') + phases + Suppress(')')
property = Group(prop_prefix + equalities + value)
expr = Group(OneOrMore(prop_prefix | float_number | oneOf('. + - / * ( )') | symbol_name))
arith_cond = Group(OneOrMore(Optional(Word(nums) + '*') + prop_prefix + Optional(
pm)) + equalities + value) # an arithmetic matcher for complex conditions
error = Group(Suppress(':') + float_number + Optional('%'))
sERROR = Suppress(error)
cmd_equilibrium = POPCommand('CREATE_NEW_EQUILIBRIUM') + (Word('@@,') ^ int_number) + Optional(
sCOMMA) + int_number
# TODO: implement changing status of other things
phase_statuses = ((POPCommand('FIXED') ^ POPCommand('ENTERED')) + float_number) | POPCommand(
'DORMANT') | POPCommand('SUSPENDED')
cmd_change_status = POPCommand('CHANGE_STATUS') + POPCommand('PHASE') + phases + Suppress(
'=') + Group(phase_statuses)
enter_const = POPCommand('CONSTANT') + Group(OneOrMore(const))
enter_func_var = (POPCommand('FUNCTION') | POPCommand('VARIABLE')) + Group(symbol_name + '=' + expr)
enter_table = POPCommand('TABLE') # TODO: implement
cmd_en_symbol = POPCommand('ENTER_SYMBOL') + (enter_const | enter_func_var | enter_table)
cmd_table_head = POPCommand('TABLE_HEAD') + int_number
cmd_table_values = POPCommand('TABLE_VALUES') + sCOMMA + Group(
delimitedList(Group(OneOrMore(float_number)))) + Suppress(POPCommand('TABLE_END'))
cmd_set_ref_state = POPCommand('SET_REFERENCE_STATE') + symbol_name + Optional(sCOMMA) + symbol_name + Optional(
OneOrMore(sCOMMA)) # TODO: should these default values be handled?
cmd_set_condition = POPCommand('SET_CONDITION') + OneOrMore(
(arith_cond | property | const) + Optional(sERROR) + Optional(sCOMMA))
cmd_label = POPCommand('LABEL_DATA') + OneOrMore(Word(alphanums))
cmd_alternate = POPCommand('SET_ALTERNATE_CONDITION') + OneOrMore(
Group((property | const) + sERROR) + Optional(sCOMMA))
cmd_experiment_phase = POPCommand('EXPERIMENT') + OneOrMore(
Group((property | const) + sERROR) + Optional(sCOMMA))
cmd_start_value = POPCommand('SET_START_VALUE') + OneOrMore(
(arith_cond | property | const) + Optional(sERROR) + Optional(sCOMMA))
cmd_save = POPCommand('SAVE_WORKSPACES')
return (
cmd_equilibrium | cmd_change_status | cmd_en_symbol | cmd_table_head | cmd_table_values |
cmd_set_ref_state | cmd_set_condition | cmd_label | cmd_alternate |
cmd_experiment_phase | cmd_start_value | cmd_save) + Optional(
Suppress(';')) + stringEnd
def unpack_parse_results(parse_results):
"""
Recursively unpack parse results and return the unpacked result.
Args:
parse_results (ParseResults): a tuple of a list of values and dict of names. The list may contain nested ParseResults
Returns:
list: List of the intended structure
"""
results_list = []
for result in parse_results:
if isinstance(result, ParseResults):
results_list.append(unpack_parse_results(result))
else:
results_list.append(result)
return results_list
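# Illustrative example (assumed input, not from the original module): a nested
# ParseResults such as the one produced by
#
#     Group(Word(alphas) + Group(Word(nums))).parseString("T 300")
#
# comes back as plain nested lists, here [['T', ['300']]], so the rest of the
# converter can work with ordinary Python containers.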
def _unimplemented(*args, **kwargs):
"""
Wrapper to raise NotImplementedError
This should be used when a command that affects the data is unimplmented. If the command does not
affect the data, then `_pass` should be used.
"""
raise NotImplementedError
def _pass(*args, **kwargs):
"""
Pass is intended for POP commands that do not impact the data
Commands that do impact the data should use `_unimplemented`
"""
return args[0] # return the experiment unchanged
def _new_equilibrium(_, name, code):
"""
Args:
_ (ExperimentSet): The old experiment set. Will not be used.
name (str): The name code given to the new equilibrium. Unused.
code (int): The initialization code for the equilibrium
Returns:
ExperimentSet: a fresh experiment set (only initialization code 1 is currently supported)
Possible initialization codes:
0: suspend all phases and components
1: enter all components only
2: enter all
"""
if code == 1:
return ExperimentSet()
else:
_unimplemented()
def _process_phases(exp, status_type, phases, status):
"""
Get the phases from change status
Args:
exp (ExperimentSet): The current experiment set
status_type (str): What to change the status of e.g. a PHASE
phases ([str]): A list of phase names
status ([str, int?]): A string denoting the status with an optional int for the numerical representation
Returns:
ExperimentSet: the experiment set with updated phase statuses
"""
if status_type != 'PHASE': _unimplemented()
# TODO: fixed vs entered in implementation?
if status[0] == 'FIXED' or status[0] == 'ENTERED' or status[0][:3] == 'DOR':
existing_phases = exp.get("phases", {})
if status[0][:3] == 'DOR':
status = status.asList()
status.append("DORMANT")
exp["phases"] = {phase: status[1] for phase in phases}
for existing_phase in existing_phases:
exp["phases"][existing_phase] = existing_phases[existing_phase]
elif status[0] == 'SUSPENDED':
pass
return exp
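# Worked example (illustrative phase names): starting from an empty set,
#
#     exp = _process_phases(ExperimentSet(), 'PHASE', ['FCC_A1', 'LIQUID'], ['ENTERED', 1.0])
#
# leaves exp["phases"] == {'FCC_A1': 1.0, 'LIQUID': 1.0}; a SUSPENDED status is
# a no-op and any status_type other than PHASE raises NotImplementedError.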
def construct_symbol(symbol_list):
"""
Get a SymPy representation from brute force concatenating and parsing
Args:
symbol_list ([str*, float*, int*]): Strings, floats and/or ints describing the symbol symbolically
Returns:
Expr: A SymPy expression constructed from the SymPy parser
"""
symbol_string = ''
for s in symbol_list:
if s == '.':
s = '*d' # handle derivatives. TODO: improve derivative handling
elif '@' in str(s):
# print(type(s))
s = str(s).replace('@', 'col') # replace column reference, '@' with 'col'
# print(str(s))
if isinstance(s, ParseResults):
s = unpack_parse_results(s)
new_s = '_'
for sub_s in s:
new_s = ''.join([new_s, sub_s])
s = new_s
symbol_string = ''.join([symbol_string,str(s)])
expr = parse_expr(symbol_string)
return expr
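# Illustrative examples (assumed token lists, not from the original module):
#
#     construct_symbol(['ACR_S', '*', 2])   # -> 2*ACR_S      (tokens joined, then parse_expr)
#     construct_symbol(['@2', '-', '@3'])   # -> col2 - col3  ('@' column refs become 'col')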
def _process_symbols(exp, symbol_type, symbols):
"""
Args:
exp (ExperimentSet): The current experiment set
symbol_type (str): CONSTANT, FUNCTION, or TABLE
symbols ([[str]]): List of symbol names and values usually ["SYM","=","VALUE"]
Returns:
ExperimentSet: the passed experiment set object
"""
if symbol_type == 'CONSTANT':
exp_symbols = exp.get('symbols', {})
for symbol in symbols:
# we are going to assume that the first value in an array is the symbol name,
# the second is '=' and the remaining are symbolic and can be constructed with SymPy
exp_symbols[symbol[0]] = construct_symbol(symbol[2:])
exp["symbols"] = exp_symbols
elif symbol_type == 'FUNCTION':
exp_symbols = exp.get('symbols', {})
# we are going to assume that the first value in an array is the symbol name,
# the second is '=' and the remaining are symbolic and can be constructed with SymPy
exp_symbols[symbols[0]] = construct_symbol(symbols[2])
exp["symbols"] = exp_symbols
elif symbol_type == 'TABLE':
raise NotImplementedError
return exp
def _process_experiment(exp, experiments):
exp_experiments = exp.get('experiments', [])
for experiment in experiments:
d = {}
d["property"] = experiment[0]
# print(experiment)
# directly create a symbolic equation with all of the remaining tokens
if isinstance(experiment[1], ParseResults):
# assume the format prop(phase) (=/>/<) symbol is followed
d["phases"] = experiment[1]
d["equality"] = experiment[2]
d["symbol_repr"] = construct_symbol(experiment[3:])
else:
# assume prop (=/>/<) symbol
d["equality"] = experiment[1]
d["symbol_repr"] = construct_symbol(experiment[2:])
exp_experiments.append(d)
exp["experiments"] = exp_experiments
return exp
def _process_condition(exp, *conditions):
exp["conditions"] = exp.get('conditions', [])
for cond in conditions:
d = {}
d["property"] = cond[0]
if isinstance(cond[1], ParseResults):
# assume the format prop(phase) (=/>/<) symbol is followed
sage: Sym = SymmetricFunctions(QQ)
sage: e = NCSym.e()
sage: elem = Sym.e()
sage: elt = e.from_symmetric_function(elem[2,1,1]); elt
1/12*e{{1}, {2}, {3, 4}} + 1/12*e{{1}, {2, 3}, {4}} + 1/12*e{{1}, {2, 4}, {3}}
+ 1/12*e{{1, 2}, {3}, {4}} + 1/12*e{{1, 3}, {2}, {4}} + 1/12*e{{1, 4}, {2}, {3}}
sage: elem(elt.to_symmetric_function())
e[2, 1, 1]
sage: e.from_symmetric_function(elem[4])
1/24*e{{1, 2, 3, 4}}
sage: p = NCSym.p()
sage: pow = Sym.p()
sage: elt = p.from_symmetric_function(pow[2,1,1]); elt
1/6*p{{1}, {2}, {3, 4}} + 1/6*p{{1}, {2, 3}, {4}} + 1/6*p{{1}, {2, 4}, {3}}
+ 1/6*p{{1, 2}, {3}, {4}} + 1/6*p{{1, 3}, {2}, {4}} + 1/6*p{{1, 4}, {2}, {3}}
sage: pow(elt.to_symmetric_function())
p[2, 1, 1]
sage: p.from_symmetric_function(pow[4])
p{{1, 2, 3, 4}}
sage: h = NCSym.h()
sage: comp = Sym.complete()
sage: elt = h.from_symmetric_function(comp[2,1,1]); elt
1/12*h{{1}, {2}, {3, 4}} + 1/12*h{{1}, {2, 3}, {4}} + 1/12*h{{1}, {2, 4}, {3}}
+ 1/12*h{{1, 2}, {3}, {4}} + 1/12*h{{1, 3}, {2}, {4}} + 1/12*h{{1, 4}, {2}, {3}}
sage: comp(elt.to_symmetric_function())
h[2, 1, 1]
sage: h.from_symmetric_function(comp[4])
1/24*h{{1, 2, 3, 4}}
"""
m = self.realization_of().m()
return self(m.from_symmetric_function(f))
def primitive(self, A, i=1):
r"""
Return the primitive associated to ``A`` in ``self``.
.. SEEALSO::
:meth:`~sage.combinat.ncsym.ncsym.SymmetricFunctionsNonCommutingVariables.powersum.primitive`
INPUT:
- ``A`` -- a set partition
- ``i`` -- a positive integer
OUTPUT:
- an element of ``self``
EXAMPLES::
sage: e = SymmetricFunctionsNonCommutingVariables(QQ).e()
sage: elt = e.primitive(SetPartition([[1,3],[2]])); elt
e{{1, 2}, {3}} - e{{1, 3}, {2}}
sage: elt.coproduct()
e{} # e{{1, 2}, {3}} - e{} # e{{1, 3}, {2}} + e{{1, 2}, {3}} # e{} - e{{1, 3}, {2}} # e{}
"""
p = self.realization_of().p()
return self(p.primitive(A, i))
@abstract_method(optional = True)
def internal_coproduct_on_basis(self, i):
"""
The internal coproduct of the algebra on the basis (optional).
INPUT:
- ``i`` -- the indices of an element of the basis of ``self``
OUTPUT:
- an element of the tensor squared of ``self``
EXAMPLES::
sage: m = SymmetricFunctionsNonCommutingVariables(QQ).m()
sage: m.internal_coproduct_on_basis(SetPartition([[1,2]]))
m{{1, 2}} # m{{1, 2}}
"""
@lazy_attribute
def internal_coproduct(self):
"""
Compute the internal coproduct of ``self``.
If :meth:`internal_coproduct_on_basis()` is available, construct
the internal coproduct morphism from ``self`` to ``self``
`\otimes` ``self`` by extending it by linearity. Otherwise, this uses
:meth:`internal_coproduct_by_coercion()`, if available.
OUTPUT:
- an element of the tensor squared of ``self``
EXAMPLES::
sage: cp = SymmetricFunctionsNonCommutingVariables(QQ).cp()
sage: cp.internal_coproduct(cp[[1,3],[2]] - 2*cp[[1]])
-2*cp{{1}} # cp{{1}} + cp{{1, 2, 3}} # cp{{1, 3}, {2}} + cp{{1, 3}, {2}} # cp{{1, 2, 3}}
+ cp{{1, 3}, {2}} # cp{{1, 3}, {2}}
"""
if self.internal_coproduct_on_basis is not NotImplemented:
return Hom(self, tensor([self, self]),
ModulesWithBasis(self.base_ring()))(on_basis=self.internal_coproduct_on_basis)
elif hasattr(self, "internal_coproduct_by_coercion"):
return self.internal_coproduct_by_coercion
def internal_coproduct_by_coercion(self, x):
r"""
Return the internal coproduct by coercing the element to the powersum basis.
INPUT:
- ``x`` -- an element of ``self``
OUTPUT:
- an element of the tensor squared of ``self``
EXAMPLES::
sage: h = SymmetricFunctionsNonCommutingVariables(QQ).h()
sage: h[[1,3],[2]].internal_coproduct() # indirect doctest
2*h{{1}, {2}, {3}} # h{{1}, {2}, {3}} - h{{1}, {2}, {3}} # h{{1, 3}, {2}}
- h{{1, 3}, {2}} # h{{1}, {2}, {3}} + h{{1, 3}, {2}} # h{{1, 3}, {2}}
"""
R = self.realization_of().a_realization()
return self.tensor_square().sum(coeff * tensor([self(R[A]), self(R[B])])
for ((A, B), coeff) in R(x).internal_coproduct())
class ElementMethods:
def expand(self, n, alphabet='x'):
r"""
Expand the symmetric function into ``n`` non-commuting
variables in an alphabet, which by default is ``'x'``.
This computation is completed by coercing the element ``self``
into the monomial basis and computing the expansion in
the ``alphabet`` there.
INPUT:
- ``n`` -- the number of variables in the expansion
- ``alphabet`` -- (default: ``'x'``) the alphabet in which
``self`` is to be expanded
OUTPUT:
- an expansion of ``self`` into the ``n`` non-commuting
variables specified by ``alphabet``
EXAMPLES::
sage: h = SymmetricFunctionsNonCommutingVariables(QQ).h()
sage: h[[1,3],[2]].expand(3)
2*x0^3 + x0^2*x1 + x0^2*x2 + 2*x0*x1*x0 + x0*x1^2 + x0*x1*x2 + 2*x0*x2*x0
+ x0*x2*x1 + x0*x2^2 + x1*x0^2 + 2*x1*x0*x1 + x1*x0*x2 + x1^2*x0 + 2*x1^3
+ x1^2*x2 + x1*x2*x0 + 2*x1*x2*x1 + x1*x2^2 + x2*x0^2 + x2*x0*x1 + 2*x2*x0*x2
+ x2*x1*x0 + x2*x1^2 + 2*x2*x1*x2 + x2^2*x0 + x2^2*x1 + 2*x2^3
sage: x = SymmetricFunctionsNonCommutingVariables(QQ).x()
sage: x[[1,3],[2]].expand(3)
-x0^2*x1 - x0^2*x2 - x0*x1^2 - x0*x1*x2 - x0*x2*x1 - x0*x2^2 - x1*x0^2
- x1*x0*x2 - x1^2*x0 - x1^2*x2 - x1*x2*x0 - x1*x2^2 - x2*x0^2 - x2*x0*x1
- x2*x1*x0 - x2*x1^2 - x2^2*x0 - x2^2*x1
"""
m = self.parent().realization_of().monomial()
return m(self).expand(n, alphabet)
def to_symmetric_function(self):
r"""
Compute the projection of an element of symmetric function in
non-commuting variables to the symmetric functions.
The projection of a monomial symmetric function in non-commuting
variables indexed by the set partition ``A`` is defined as
.. MATH::
\mathbf{m}_A \mapsto m_{\lambda(A)} \prod_i n_i(\lambda(A))!
where `\lambda(A)` is the partition associated with `A` by
taking the sizes of the parts and `n_i(\mu)` is the
multiplicity of `i` in `\mu`. For other bases this map is extended
linearly.
OUTPUT:
- an element of the symmetric functions in the monomial basis
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: e = NCSym.e()
sage: h = NCSym.h()
sage: p = NCSym.p()
sage: cp = NCSym.cp()
sage: x = NCSym.x()
sage: cp[[1,3],[2]].to_symmetric_function()
m[2, 1]
sage: x[[1,3],[2]].to_symmetric_function()
-6*m[1, 1, 1] - 2*m[2, 1]
sage: e[[1,3],[2]].to_symmetric_function()
2*e[2, 1]
sage: h[[1,3],[2]].to_symmetric_function()
2*h[2, 1]
sage: p[[1,3],[2]].to_symmetric_function()
p[2, 1]
"""
m = self.parent().realization_of().monomial()
return m(self).to_symmetric_function()
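# A plain-Python sketch (not part of the Sage source) of the coefficient appearing in the
# projection formula documented above: for a set partition A, the monomial m_A is sent to
# m_{lambda(A)} multiplied by prod_i n_i(lambda(A))!, where lambda(A) lists the block sizes
# and n_i is the multiplicity of i. The helper name and the list-of-lists encoding of a set
# partition are assumptions made for illustration only.
from collections import Counter
from math import factorial

def projection_coefficient(set_partition):
    """Return prod_i n_i(lambda(A))! for a set partition given as a list of blocks."""
    block_sizes = [len(block) for block in set_partition]      # lambda(A)
    coefficient = 1
    for multiplicity in Counter(block_sizes).values():         # n_i(lambda(A))
        coefficient *= factorial(multiplicity)
    return coefficient

# Example: for A = {{1, 3}, {2}}, lambda(A) = (2, 1) and the coefficient is 1! * 1! = 1;
# for A = {{1, 2}, {3, 4}}, lambda(A) = (2, 2) and the coefficient is 2! = 2.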
def internal_coproduct(self):
"""
Return the internal coproduct of ``self``.
The internal coproduct is defined on the power sum basis as
.. MATH::
\mathbf{p}_A \mapsto \mathbf{p}_A \otimes \mathbf{p}_A
and the map is extended linearly.
OUTPUT:
- an element of the tensor square of the basis of ``self``
EXAMPLES::
sage: x = SymmetricFunctionsNonCommutingVariables(QQ).x()
sage: x[[1,3],[2]].internal_coproduct()
x{{1}, {2}, {3}} # x{{1, 3}, {2}} + x{{1, 3}, {2}} # x{{1}, {2}, {3}}
+ x{{1, 3}, {2}} # x{{1, 3}, {2}}
"""
return self.parent().internal_coproduct(self)
def omega(self):
"""
Return the involution `\omega` applied to ``self``.
The involution `\omega` is defined by
.. MATH::
\mathbf{e}_A \mapsto \mathbf{h}_A
and the result is extended linearly.
OUTPUT:
- an element in the same basis as ``self``
EXAMPLES::
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: m = NCSym.m()
sage: m[[1,3],[2]].omega()
-2*m{{1, 2, 3}} - m{{1, 3}, {2}}
sage: p = NCSym.p()
sage: p[[1,3],[2]].omega()
-p{{1, 3}, {2}}
sage: cp = NCSym.cp()
sage: cp[[1,3],[2]].omega()
-2*cp{{1, 2, 3}} - cp{{1, 3}, {2}}
sage: x = NCSym.x()
sage: x[[1,3],[2]].omega()
-2*x{{1}, {2}, {3}} - x{{1, 3}, {2}}
"""
P = self.parent()
e = P.realization_of().e()
h = P.realization_of().h()
return P(h.sum_of_terms(e(self)))
class MultiplicativeNCSymBases(Category_realization_of_parent):
r"""
Category of multiplicative bases of symmetric functions in non-commuting variables.
A multiplicative basis is one for which `\mathbf{b}_A \mathbf{b}_B = \mathbf{b}_{A|B}`
where `A|B` is the :meth:`~sage.combinat.set_partition.SetPartition.pipe` operation
on set partitions.
EXAMPLES::
sage: from sage.combinat.ncsym.bases import MultiplicativeNCSymBases
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: MultiplicativeNCSymBases(NCSym)
Category of multiplicative bases of symmetric functions in non-commuting variables over the Rational Field
"""
def super_categories(self):
r"""
Return the super categories of bases of the Hopf dual of the
symmetric functions in non-commuting variables.
OUTPUT:
- a list of categories
TESTS::
sage: from sage.combinat.ncsym.bases import MultiplicativeNCSymBases
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: MultiplicativeNCSymBases(NCSym).super_categories()
[Category of bases of symmetric functions in non-commuting variables over the Rational Field]
"""
return [NCSymBases(self.base())]
def _repr_(self):
r"""
Return a string representation of ``self``.
TESTS::
sage: from sage.combinat.ncsym.bases import MultiplicativeNCSymBases
sage: NCSym = SymmetricFunctionsNonCommutingVariables(QQ)
sage: MultiplicativeNCSymBases(NCSym)
Category of multiplicative bases of symmetric functions in non-commuting variables over the Rational Field
"""
return "Category of multiplicative bases of symmetric functions in non-commuting"\
" variables over the {}".format(self.base().base_ring())
class ParentMethods:
def product_on_basis(self, A, B):
r"""
The product on basis elements.
The product on a multiplicative basis is given by
`\mathbf{b}_A \cdot \mathbf{b}_B = \mathbf{b}_{A | B}`.
The bases `\{ \mathbf{e}, \mathbf{h}, \mathbf{x}, \mathbf{cp}, \mathbf{p},
\mathbf{chi}, \mathbf{rho} \}` are all multiplicative.
INPUT:
- ``A``, ``B`` -- set partitions
OUTPUT:
- an element in the basis ``self``
EXAMPLES::
sage: e = SymmetricFunctionsNonCommutingVariables(QQ).e()
sage: h = SymmetricFunctionsNonCommutingVariables(QQ).h()
sage: x = SymmetricFunctionsNonCommutingVariables(QQ).x()
sage: cp = SymmetricFunctionsNonCommutingVariables(QQ).cp()
sage: p = SymmetricFunctionsNonCommutingVariables(QQ).p()
sage: chi = SymmetricFunctionsNonCommutingVariables(QQ).chi()
sage: rho = SymmetricFunctionsNonCommutingVariables(QQ).rho()
sage: A = SetPartition([[1], [2, 3]])
sage: B = SetPartition([[1], [3], [2,4]])
sage: e.product_on_basis(A, B)
e{{1}, {2, 3}, {4}, {5, 7},
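# A plain-Python illustration (not part of the Sage source) of the set-partition "pipe"
# operation A|B underlying the multiplicative product above: the blocks of B are shifted up
# by the size of the ground set of A and appended to the blocks of A. The function name and
# the list-of-lists encoding are assumptions for illustration.
def pipe(A, B):
    """Return A|B for set partitions given as lists of blocks of positive integers."""
    shift = sum(len(block) for block in A)                 # A partitions {1, ..., shift}
    return A + [[i + shift for i in block] for block in B]

# Example: pipe([[1], [2, 3]], [[1], [3], [2, 4]]) gives the blocks
# {1}, {2, 3}, {4}, {6}, {5, 7} (up to ordering), matching the truncated doctest output
# above, which begins e{{1}, {2, 3}, {4}, {5, 7}, ...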
if len(list_sentences) > 1:
split_match = TMSplitMatch([TMUtilsMatching.pre_process(q.split(' '), self.src_lang, 'untokenizer', {}) for q in list_sentences], [], self.src_lang, self.tgt_lang, 'sentence', self.machine_translation, self.domain)
src_text, tgt_text, editSplit = split_match._match()
#print('*****Only sentences *****')
#print(src_text)
#print(tgt_text)
#print(editSplit)
if editSplit >= self.min_match: # Check if split method return segments from ActivaTM
segment.source_text, segment.target_text, editD = src_text, tgt_text, editSplit
else: # Split in small phrase
# Check if split rules exist for this specific pair of languages
lang_class = G_CONFIG.get_split_rules(self.src_lang, self.tgt_lang)
if lang_class:
logging.info("Split Query by Phrase")
all_split, all_marks = self._splitByPhrase(lang_class, list_sentences)
# Check if any split rule was applied
if len(all_split) > 1:
# print(list_query_split)
split_match = TMSplitMatch(all_split, all_marks, self.src_lang, self.tgt_lang, 'phrase', self.machine_translation, self.domain)
src_text, tgt_text, editSplit = split_match._match()
if editSplit >= self.min_match: #Check if split method return segments from ActivaTM
segment.source_text, segment.target_text, editD = src_text, tgt_text, editSplit
if editD >= self.min_match:
status = 'find'
status_tokenizer = True
else:
if not self.trans_segments: # If no match was found, prepare the segment for automatic translation. If there is no automatic translation, then return []
#logging.info("Prepare Automatic Translation : ")
self.trans_segments.append((segment, editD))
status = 'break' # If there is a segment on the list, break the for loop; there is no TM translation
return segment, editD, status, equal, status_tokenizer
# Split an input sentence into subsegments using rules based on posTag annotation
def _splitByPhrase(self, lang_class, list_sentences):
splitTask = TMMatching.split_source(lang_class, self.src_lang) # class with the split rules for the specific language pair
list_word_pos = []
if 'pos' in self.query_dic:
i = 0
for each_sent in list_sentences:
# Create word_pos
len_e = len(each_sent.split())
list_word_pos.append([(w, p) for w, p in zip(each_sent.split(), self.query_dic['pos'].split()[i:i + len_e])])
i = i + len_e
# TODO: Call another method to applied other rules that don't need posTag anotation --> ELSE STATEMENT
'''
if list_sentences:
i = 0
for each_sent in list_sentences:
# Create word_pos
len_e = len(each_sent.split())
list_word_pos.append([(w, p) for w, p in zip(each_sent.split(), self.query_dic['pos'].split()[i:i + len_e])])
i = i + len_e
else:
if 'pos' in self.query_dic: list_word_pos.append([(w, p) for w, p in zip(self.query_dic['tokenizer'].split(), self.query_dic['pos'].split())])
'''
all_split = []
all_marks = []
for sentence in list_word_pos:
segmentsStructure = splitTask.clause_chunk(sentence) # preProcess.split_process(p_segments)
logging.info("split INFO : {} ".format(segmentsStructure))
# print(segmentsStructure)
list_query_split, list_marks_split = splitTask.split_output(segmentsStructure)
if len(list_query_split) > 1:
for e_part in list_query_split:
all_split.append(e_part)
if list_marks_split:
all_marks.append(list_marks_split.pop(0))
else:
all_split.append(sentence)
all_marks.append([])
logging.info("split Output : {} ".format(all_split))
logging.info("split Sequences : {} ".format(all_marks))
return all_split, all_marks
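# A self-contained sketch (helper name and inputs are hypothetical) of the word/POS
# alignment performed in the loop above: a flat POS-tag string covering the whole query is
# sliced per sentence with a running offset so every word gets its own tag.
def align_words_with_pos(sentences, pos_string):
    """Pair each word of each sentence with its POS tag from a flat tag sequence."""
    tags = pos_string.split()
    aligned, offset = [], 0
    for sentence in sentences:
        words = sentence.split()
        aligned.append(list(zip(words, tags[offset:offset + len(words)])))
        offset += len(words)
    return aligned

# align_words_with_pos(["the cat sleeps", "dogs bark"], "DT NN VBZ NNS VBP")
# -> [[('the', 'DT'), ('cat', 'NN'), ('sleeps', 'VBZ')], [('dogs', 'NNS'), ('bark', 'VBP')]]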
#*********General Functions***********
def _deals_output(self, segment, editD, trans_segments, status_tokenizer, status):
if self.out == 'moses': # Moses output is tokenized
if status_tokenizer == False: # tokenize output
segment.source_text = TMUtilsMatching.pre_process(segment.source_text, self.src_lang, 'tokenizer', {})
segment.target_text = TMUtilsMatching.pre_process(segment.target_text, self.tgt_lang, 'tokenizer', {})
trans_segments.append((segment, editD))
return trans_segments, 'break'
else:
if status_tokenizer == True: # TM output is untokenized
segment.target_text = TMUtilsMatching.pre_process(segment.target_text.split(' '), self.tgt_lang, 'untokenizer', {})
segment.source_text = TMUtilsMatching.pre_process(segment.source_text.split(' '), self.src_lang, 'untokenizer', {})
trans_segments.append((segment, editD))
if status == 'translate': status = 'break'
else: status = 'continue'
#if editD == 100: # Add this if to obtain better matching time
# status = 'break'
logging.info("Final Output (Query -- Source -- Target): {} {} {}".format(safe_str(self.query_dic['query'] + ' -- '), safe_str(segment.source_text + ' -- '), safe_str(segment.target_text)))
return trans_segments, status
def style_string(self, src_text, tgt_text, status_tokenizer):
#Check upper and lower case
if src_text and tgt_text:
src_text, tgt_text = self._transform_case(src_text, tgt_text)
# Transfer XML tags (if needed)
self.timer.start("transfer_tags")
if re.search("</?[^<>]+/?>", self.query) is not None: # transfer tags only if query has and tgt and src don't
status_tokenizer = True
if (re.search("</?[^<>]+/?>", src_text) is None):
src_text = TMUtilsMatching.transfer_tags(self.query, src_text, (self.src_lang, self.tgt_lang))
if (re.search("</?[^<>]+/?>", tgt_text) is None):
tgt_text = TMUtilsMatching.transfer_tags(self.query, tgt_text, (self.src_lang, self.tgt_lang))
self.timer.stop("transfer_tags")
return src_text, tgt_text, status_tokenizer
def _transform_case(self, src_text, tgt_text):
# All cases: only the first word of the phrase capitalized; every word capitalized; all upper case; all lower case
if self.query.istitle(): # All the first words are upper
src_text = src_text.title()
tgt_text = tgt_text.title()
else:
if self.query[0].istitle(): #Only the first word is upper
src_text = src_text[0].upper() + src_text[1:]
tgt_text = tgt_text[0].upper() + tgt_text[1:]
if self.query.isupper(): # All in upper case
src_text = src_text.upper()
tgt_text = tgt_text.upper()
if self.query.islower(): # All in lower case
src_text = src_text.lower()
tgt_text = tgt_text.lower()
return src_text, tgt_text
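# A hedged standalone sketch of the case-transfer rules implemented above (the ordering is
# simplified and the function name is an assumption): the casing pattern of the query is
# copied onto a retrieved source or target string.
def transfer_case(query, text):
    """Apply the query's casing (title / leading capital / all upper / all lower) to text."""
    if query.istitle():                      # every word capitalized
        return text.title()
    if query.isupper():                      # all upper case
        return text.upper()
    if query.islower():                      # all lower case
        return text.lower()
    if query[0].istitle():                   # only the first word capitalized
        return text[0].upper() + text[1:]
    return text

# transfer_case("HELLO WORLD", "hola mundo") -> "HOLA MUNDO"
# transfer_case("Hello world", "hola mundo") -> "Hola mundo"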
def _preprocess(self):
self.query_dic['query'] = self.query
if re.search("<.*>", self.query): # Uniform tags --> # Yo tengo un <b>gato</b>. --> Yo tengo un <T1>gato</T1>
self.query_dic['query_tags'] = TMUtilsMatching.pre_process(self.query, (self.src_lang, self.tgt_lang), 'tags', {})
self.query_dic['query'] = self.query_dic['query_tags'] # query now have the tags <T1>gato</T1>
if 'regex' in self.pipe: self.query_dic['query_re'] = TMUtilsMatching.pre_process(self.query_dic['query'], self.src_lang, 'reg_exp', self.match['regex'].re_pp)
else: self.query_dic['query_re'] = self.query_dic['query']
self.query_dic['query_re_reduce'] = TMRegexMatch.simplified_name(self.query_dic['query_re'])
return self.query_dic
# Good explanation about editdistance --> http://stackoverflow.com/questions/10405440/percentage-rank-of-matches-using-levenshtein-distance-matching
# http://math.stackexchange.com/questions/1776860/convert-levenshtein-distance-to-percents
# Ways to estimate the match percent --> https://www.tm-town.com/blog/the-fuzziness-of-fuzzy-matches
def _tm_edit_distance(self, q_text, s_text, q_simplified, s_simplified):
# Corner case - matching artificial empty segment -> giving minimal score
if q_text and not s_text.strip():
return 1
#Always reduce the tags to count only one element
'''
print('**original**')
print(q_text)
print('**src**')
print(s_text)
print('**originalS**')
print(q_simplified)
print('**srcS**')
print(s_simplified)
'''
# 1) ********** Obtain words and stop words sequences
q_onlyW, q_st_word = TMMatching._only_word_sequence(q_text, self.src_lang)
s_onlyW, s_st_word = TMMatching._only_word_sequence(s_text, self.src_lang)
'''
print(q_onlyW)
print(s_onlyW)
print(q_st_word)
print(s_st_word)
'''
if not q_onlyW and not q_st_word:
#print(self.src_lang)
#if self.src_lang=='zh':
editD = 100 - (TMUtilsMatching._edit_distance(q_text, s_text)) #* 100
else:
# Normal edit distance, words only: without punctuation marks and without stop words
nchar_diff = TMUtilsMatching._edit_distance(' '.join(q_onlyW), ' '.join(s_onlyW)) # Consider all the words, without any substitution
#print(q_onlyW)
#print(s_onlyW)
nchar_len = len(' '.join(q_onlyW)) + len(' '.join(s_onlyW))
if nchar_len == 0: nchar_len = 1
#print(nchar_len)
char_diff = (2*nchar_diff)/(nchar_len) # total of charaters
# 2) ********* Simplified --> Convert to letter and keep only puntuation marks
q_replaceW, q_onlyS = TMMatching._symbol_sequence(q_simplified) # Original query
# Ex. '- 3.67 housing units constructed under the $ # home % ownership saving scheme in the Hanano/ and (Hamdaniya districts;' --> - N N N N N N $ # N % N N N N N N/ N (N N;
s_replaceW, s_onlyS = TMMatching._symbol_sequence(s_simplified) # Original tm_src
if (len(s_onlyS) == 0 and len(q_onlyS) == 0): # There are no symbols
n_symbol_diff = 0
else:
n_symbol_diff = TMUtilsMatching._edit_distance(q_replaceW, s_replaceW) #(' '.join(q_onlyS), ' '.join(s_onlyS))/2#
len_symbols = len(q_replaceW.split(' ')) + len(s_replaceW.split(' ')) # len(q_onlyS) + len(s_onlyS)
if len_symbols == 0: len_symbols = 1
symbol_diff = (2*n_symbol_diff)/len_symbols
# 3) ********* Whether or not the query words exist in the source
nword_diff = set(q_onlyW).difference(s_onlyW) # Replace regular expression by only one word
onlyW_len = len(q_onlyW)
if onlyW_len == 0: onlyW_len = 1
word_diff = (len(nword_diff))/onlyW_len # only query words
# 4) ********* Stop words
stop_words = True
if (len(q_st_word) == 0 and len(s_st_word) == 0): # There are no stop words, or this language doesn't have a stop words list
stop_words = False
if stop_words:
n_st_diff = TMUtilsMatching._edit_distance(' '.join(q_st_word), ' '.join(s_st_word))
len_stop_word = len(' '.join(q_st_word)) + len(' '.join(s_st_word))
stop_word_diff = (2 * n_st_diff)/len_stop_word
editD = (1 - ((0.70 * (char_diff)) + (0.10 * (word_diff)) + (0.10 * (symbol_diff)) + (0.10 * (stop_word_diff)))) * 100
else:
editD = (1 - ((0.70 * (char_diff)) + (0.15 * (word_diff)) + (0.15 * (symbol_diff)))) * 100
if editD < 0:
editD = 10
return int(math.floor(editD))
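# A simplified standalone sketch (not the method above) of how the final fuzzy-match score
# combines the normalized differences: the character-level difference carries most of the
# weight, while word, symbol and stop-word differences share the remainder.
import math

def fuzzy_score(char_diff, word_diff, symbol_diff, stop_word_diff=None):
    """Return a 0-100 match score from normalized difference components."""
    if stop_word_diff is not None:
        score = (1 - (0.70 * char_diff + 0.10 * word_diff
                      + 0.10 * symbol_diff + 0.10 * stop_word_diff)) * 100
    else:
        score = (1 - (0.70 * char_diff + 0.15 * word_diff + 0.15 * symbol_diff)) * 100
    if score < 0:
        score = 10                            # same floor as the matcher above
    return int(math.floor(score))

# Identical segments give fuzzy_score(0, 0, 0, 0) == 100; a 20% character difference with
# no other differences gives fuzzy_score(0.2, 0, 0, 0) == 86.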
# Match Kanji --> u'[\u4E00-\u9FFF]+'
# Match Hiragana --> u'[\u3040-\u309Fー]+'
# Match Katakana --> u'[\u30A0-\u30FF]+
@staticmethod
def _only_word_sequence(text, lang): # Receive original sequence
only_word = []
only_st = []
l_src_st = TMUtilsMatching.check_stopwords(lang)
for match in re.finditer(r'[a-zA-Z0-9\u4e00-\u9fff\u3040-\u309Fー\u30A0-\u30FF]+', text): # Get all the words and numbers
if l_src_st: # For some languages we don't have a stopwords list
if match.group() in l_src_st:
only_st.append(match.group())
else:
only_st.append('P')
only_word.append(match.group())
return only_word, only_st
@staticmethod
def _symbol_sequence(text): # Receive simplified sequence, without elements match with regular expression
only_symbol = []
for match in re.finditer(r'[a-zA-Z0-9\u4e00-\u9fff\u3040-\u309Fー\u30A0-\u30FF]+', text): text = text.replace(match.group(), 'P', 1) # Replace all words by 'P'
for match in re.finditer(r'[^\w\s]', text): only_symbol.append(match.group()) # Obtain list os symbols # r'[^a-zA-Z0-9\s]+'
return text, only_symbol
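# An illustrative sketch (hypothetical input, same regular expressions as above) of what the
# two helpers extract: words and numbers, including the CJK Unicode ranges, on one hand, and
# the punctuation symbols that remain after words are masked with 'P' on the other.
import re

def words_and_symbols(text):
    word_re = r'[a-zA-Z0-9\u4e00-\u9fff\u3040-\u309Fー\u30A0-\u30FF]+'
    words = re.findall(word_re, text)
    masked = re.sub(word_re, 'P', text)       # e.g. "- 3.67 units;" -> "- P.P P;"
    symbols = re.findall(r'[^\w\s]', masked)
    return words, symbols

# words_and_symbols("- 3.67 housing units;") -> (['3', '67', 'housing', 'units'], ['-', '.', ';'])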
# Input : list of segments; query
# Output: Sort the list of all the segmets [(segment, editD); ...(segment, editD)] considering edit distance
def _match_rank(self, best_segments):
self.timer.start("rank segments")
editD_score = []
if 'query_tags' in self.query_dic: # Simplified tags
query = TMUtilsMatching.reduce_tags(self.query_dic['query_tags']) # Yo tengo un <T1>gato</T1>. Yo tengo un T
"""
Define abstract conv2d interface
"""
import logging
import theano
from theano.tensor import (as_tensor_variable, patternbroadcast)
from theano.tensor import TensorType
from theano.gof import Apply, Op
from theano.gof import local_optimizer
from theano.tensor.opt import register_specialize_device
# Cpu implementation
from theano.tensor.nnet import conv2d as cpu_conv2d, ConvOp
from theano.tensor.nnet.ConvGrad3D import convGrad3D
from theano.tensor.nnet.ConvTransp3D import convTransp3D
__docformat__ = "restructuredtext en"
_logger = logging.getLogger("theano.tensor.nnet.conv2d")
def conv2d(input,
filters,
input_shape=None,
filter_shape=None,
border_mode='valid',
subsample=(1, 1),
filter_flip=True):
"""
This function will build the symbolic graph for convolving a mini-batch of a
stack of 2D inputs with a set of 2D filters. The implementation is modelled
after Convolutional Neural Networks (CNN).
:type input: symbolic 4D tensor
:param input: mini-batch of feature map stacks, of shape
(batch size, input channels, input rows, input columns).
See the optional parameter ``input_shape``.
:type filters: symbolic 4D tensor
:param filters: set of filters used in CNN layer of shape
(output channels, input channels, filter rows, filter columns).
See the optional parameter ``filter_shape``.
:type input_shape: None, tuple/list of len 4 of int or Constant variable
:param input_shape: The shape of the input parameter.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
:type filter_shape: None, tuple/list of len 4 of int or Constant variable
:param filter_shape: The shape of the filters parameter.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
:type border_mode: str, int or tuple of two int
:param border_mode: Either of the following:
* ``'valid'``: apply filter wherever it completely overlaps with the
input. Generates output of shape: input shape - filter shape + 1
* ``'full'``: apply filter wherever it partly overlaps with the input.
Generates output of shape: input shape + filter shape - 1
* ``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid
convolution. For filters with an odd number of rows and columns, this
leads to the output shape being equal to the input shape.
* ``int``: pad input with a symmetric border of zeros of the given
width, then perform a valid convolution.
* ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows
and ``int2`` columns, then perform a valid convolution.
:type subsample: tuple of len 2
:param subsample: factor by which to subsample the output.
Also called strides elsewhere.
:type filter_flip: bool
:param filter_flip: If ``True``, will flip the filter rows and columns
before sliding them over the input. This operation is normally referred
to as a convolution, and this is the default. If ``False``, the filters
are not flipped and the operation is referred to as a cross-correlation.
:rtype: symbolic 4D tensor
:return: set of feature maps generated by convolutional layer. Tensor is
of shape (batch size, output channels, output rows, output columns)
"""
conv_op = AbstractConv2d(imshp=input_shape,
kshp=filter_shape,
border_mode=border_mode,
subsample=subsample,
filter_flip=filter_flip)
return conv_op(input, filters)
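# A hedged helper (not part of Theano) that mirrors the output-size arithmetic described in
# the docstring above for one spatial dimension, given the input size, filter size, border
# mode and stride.
def conv_output_length(input_length, filter_length, border_mode, stride=1):
    if border_mode == 'valid':
        pad = 0
    elif border_mode == 'full':
        pad = filter_length - 1
    elif border_mode == 'half':
        pad = filter_length // 2
    elif isinstance(border_mode, int):
        pad = border_mode
    else:
        raise ValueError("unknown border_mode: {}".format(border_mode))
    return (input_length + 2 * pad - filter_length) // stride + 1

# conv_output_length(28, 5, 'valid') == 24; conv_output_length(28, 5, 'full') == 32;
# conv_output_length(28, 5, 'half') == 28 (input shape preserved for odd filter sizes).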
class BaseAbstractConv2d(Op):
"""
Base class for AbstractConv
Define an abstract convolution op that will be replaced with the appropriate implementation
:type imshp: None, tuple/list of len 4 of int or Constant variable
:param imshp: The shape of the input parameter.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
imshp is defined w.r.t the forward conv.
:type kshp: None, tuple/list of len 4 of int or Constant variable
:param kshp: The shape of the filters parameter.
Optional, possibly used to choose an optimal implementation.
You can give ``None`` for any element of the list to specify that this
element is not known at compile time.
kshp is defined w.r.t the forward conv.
:type border_mode: str, int or tuple of two int
:param border_mode: Either of the following:
* ``'valid'``: apply filter wherever it completely overlaps with the
input. Generates output of shape: input shape - filter shape + 1
* ``'full'``: apply filter wherever it partly overlaps with the input.
Generates output of shape: input shape + filter shape - 1
* ``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid
convolution. For filters with an odd number of rows and columns, this
leads to the output shape being equal to the input shape.
* ``int``: pad input with a symmetric border of zeros of the given
width, then perform a valid convolution.
* ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows
and ``int2`` columns, then perform a valid convolution.
:type subsample: tuple of len 2
:param subsample: factor by which to subsample the output.
Also called strides elsewhere.
:type filter_flip: bool
:param filter_flip: If ``True``, will flip the filter rows and columns
before sliding them over the input. This operation is normally referred
to as a convolution, and this is the default. If ``False``, the filters
are not flipped and the operation is referred to as a cross-correlation.
"""
check_broadcast = False
__props__ = ('border_mode', 'subsample', 'filter_flip', 'imshp', 'kshp')
def __init__(self,
imshp=None, kshp=None,
border_mode="valid", subsample=(1, 1),
filter_flip=True):
if isinstance(border_mode, int):
border_mode = (border_mode, border_mode)
if isinstance(border_mode, tuple):
pad_h, pad_w = map(int, border_mode)
border_mode = (pad_h, pad_w)
if not ((isinstance(border_mode, tuple) and min(border_mode) >= 0) or
border_mode in ('valid', 'full', 'half')):
raise ValueError(
'invalid border_mode {}, which must be either '
'"valid", "full", "half", an integer or a pair of'
' integers'.format(border_mode))
self.imshp = imshp
self.kshp = kshp
self.border_mode = border_mode
self.filter_flip = filter_flip
if len(subsample) != 2:
raise ValueError("subsample must have two elements")
self.subsample = subsample
def flops(self, inp, outp):
""" Useful with the hack in profilemode to print the MFlops"""
# if the output shape is correct, then this gives the correct
# flops for any direction, sampling, padding, and border mode
inputs, filters = inp
outputs, = outp
assert inputs[1] == filters[1]
# nb mul and add by output pixel
flops = filters[2] * filters[3] * 2
# nb flops by output image
flops *= outputs[2] * outputs[3]
# nb patch multiplied
flops *= inputs[1] * filters[0] * inputs[0]
return flops
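# An illustrative worked example (hypothetical shapes) of the flop count above, for inputs
# (32, 3, 224, 224), filters (64, 3, 3, 3) and outputs (32, 64, 222, 222):
def _example_conv_flops():
    inputs, filters, outputs = (32, 3, 224, 224), (64, 3, 3, 3), (32, 64, 222, 222)
    flops = filters[2] * filters[3] * 2            # mul and add per output pixel
    flops *= outputs[2] * outputs[3]               # output pixels per feature map
    flops *= inputs[1] * filters[0] * inputs[0]    # in channels * out channels * batch
    return flops                                   # 5450416128, roughly 5.4 GFLOPs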
class AbstractConv2d(BaseAbstractConv2d):
"""
Abstract Op for the forward convolution.
"""
def __init__(self,
imshp=None,
kshp=None,
border_mode="valid",
subsample=(1, 1),
filter_flip=True):
super(AbstractConv2d, self).__init__(imshp, kshp,
border_mode, subsample, filter_flip)
def make_node(self, img, kern):
if img.type.ndim != 4:
raise TypeError('img must be 4D tensor')
if kern.type.ndim != 4:
raise TypeError('kern must be 4D tensor')
broadcastable = [img.broadcastable[0],
kern.broadcastable[0],
False, False]
output = img.type.clone(broadcastable=broadcastable)()
return Apply(self, [img, kern], [output])
def perform(self, node, inp, out_):
raise NotImplementedError('AbstractConv2d theano optimization failed')
def grad(self, inp, grads):
bottom, weights = inp
top, = grads
d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
self.border_mode,
self.subsample,
self.filter_flip)(
weights, top, bottom.shape[-2:])
d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,
self.border_mode,
self.subsample,
self.filter_flip)(
bottom, top, weights.shape[-2:])
return d_bottom, d_weights
class AbstractConv2d_gradWeights(BaseAbstractConv2d):
"""Gradient wrt. filters for `AbstractConv2d`.
:note: You will not want to use this directly, but rely on
Theano's automatic differentiation or graph optimization to
use it as needed.
"""
def __init__(self,
imshp=None,
kshp=None,
border_mode="valid",
subsample=(1, 1),
filter_flip=True):
super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp,
border_mode, subsample, filter_flip)
# Update shape/height_width
def make_node(self, img, topgrad, shape):
if img.type.ndim != 4:
raise TypeError('img must be 4D tensor')
if topgrad.type.ndim != 4:
raise TypeError('topgrad must be 4D tensor')
shape = as_tensor_variable(shape)
broadcastable = [topgrad.broadcastable[1],
img.broadcastable[1],
False, False]
output = img.type.clone(broadcastable=broadcastable)()
return Apply(self, [img, topgrad, shape], [output])
def perform(self, node, inp, out_):
raise NotImplementedError('AbstractConv2d_gradWeight theano optimization failed')
def grad(self, inp, grads):
bottom, top = inp[:2]
weights, = grads
d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
self.border_mode,
self.subsample,
self.filter_flip)(weights, top, bottom.shape[-2:])
d_top = AbstractConv2d(self.imshp,
self.kshp,
self.border_mode,
self.subsample,
self.filter_flip)(bottom, weights)
d_height_width = (theano.gradient.DisconnectedType()(),)
return (d_bottom, d_top) + d_height_width
def connection_pattern(self, node):
return [[1], [1], [0]] # no connection to height, width
class AbstractConv2d_gradInputs(BaseAbstractConv2d):
"""Gradient wrt. inputs for `AbstractConv2d`.
:note: You will
import datetime
import json
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey, Float, \
Enum, DateTime, Numeric, Text, Unicode, UnicodeText
from sqlalchemy import event
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.sql import func
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy_i18n import make_translatable, translation_base, Translatable
make_translatable(options={'locales': ['pt', 'en'],
'auto_create_locales': True,
'fallback_locale': 'en'})
db = SQLAlchemy()
# noinspection PyClassHasNoInit
class DataSourceFormat:
CSV = 'CSV'
CUSTOM = 'CUSTOM'
GEO_JSON = 'GEO_JSON'
JDBC = 'JDBC'
IMAGE_FOLDER = 'IMAGE_FOLDER'
DATA_FOLDER = 'DATA_FOLDER'
HAR_IMAGE_FOLDER = 'HAR_IMAGE_FOLDER'
HDF5 = 'HDF5'
HIVE = 'HIVE'
JSON = 'JSON'
NPY = 'NPY'
PICKLE = 'PICKLE'
PARQUET = 'PARQUET'
SAV = 'SAV'
SHAPEFILE = 'SHAPEFILE'
TAR_IMAGE_FOLDER = 'TAR_IMAGE_FOLDER'
TEXT = 'TEXT'
VIDEO_FOLDER = 'VIDEO_FOLDER'
XML_FILE = 'XML_FILE'
UNKNOWN = 'UNKNOWN'
@staticmethod
def values():
return [n for n in list(DataSourceFormat.__dict__.keys())
if n[0] != '_' and n != 'values']
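# Usage note (illustrative): the ``values`` helper enumerates the public class attributes,
# so the Enum columns defined below can validate against it, e.g.
#   'PARQUET' in DataSourceFormat.values()  -> True
#   sorted(DataSourceFormat.values())       -> ['CSV', 'CUSTOM', 'DATA_FOLDER', ...]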
# noinspection PyClassHasNoInit
class DataSourceInitialization:
NO_INITIALIZED = 'NO_INITIALIZED'
INITIALIZING = 'INITIALIZING'
INITIALIZED = 'INITIALIZED'
@staticmethod
def values():
return [n for n in list(DataSourceInitialization.__dict__.keys())
if n[0] != '_' and n != 'values']
# noinspection PyClassHasNoInit
class ModelType:
KERAS = 'KERAS'
MLEAP = 'MLEAP'
PERFORMANCE_SPARK = 'PERFORMANCE_SPARK'
PERFORMANCE_KERAS = 'PERFORMANCE_KERAS'
SPARK_ML_CLASSIFICATION = 'SPARK_ML_CLASSIFICATION'
SPARK_ML_REGRESSION = 'SPARK_ML_REGRESSION'
SPARK_MLLIB_CLASSIFICATION = 'SPARK_MLLIB_CLASSIFICATION'
UNSPECIFIED = 'UNSPECIFIED'
@staticmethod
def values():
return [n for n in list(ModelType.__dict__.keys())
if n[0] != '_' and n != 'values']
# noinspection PyClassHasNoInit
class DeploymentStatus:
NOT_DEPLOYED = 'NOT_DEPLOYED'
ERROR = 'ERROR'
EDITING = 'EDITING'
SAVED = 'SAVED'
RUNNING = 'RUNNING'
STOPPED = 'STOPPED'
SUSPENDED = 'SUSPENDED'
PENDING = 'PENDING'
DEPLOYED = 'DEPLOYED'
@staticmethod
def values():
return [n for n in list(DeploymentStatus.__dict__.keys())
if n[0] != '_' and n != 'values']
# noinspection PyClassHasNoInit
class StorageType:
MONGODB = 'MONGODB'
ELASTIC_SEARCH = 'ELASTIC_SEARCH'
HDFS = 'HDFS'
HIVE = 'HIVE'
HIVE_WAREHOUSE = 'HIVE_WAREHOUSE'
KAFKA = 'KAFKA'
LOCAL = 'LOCAL'
JDBC = 'JDBC'
CASSANDRA = 'CASSANDRA'
@staticmethod
def values():
return [n for n in list(StorageType.__dict__.keys())
if n[0] != '_' and n != 'values']
# noinspection PyClassHasNoInit
class DataType:
BINARY = 'BINARY'
CHARACTER = 'CHARACTER'
DATE = 'DATE'
DATETIME = 'DATETIME'
DECIMAL = 'DECIMAL'
DOUBLE = 'DOUBLE'
ENUM = 'ENUM'
FILE = 'FILE'
FLOAT = 'FLOAT'
INTEGER = 'INTEGER'
LAT_LONG = 'LAT_LONG'
LONG = 'LONG'
TEXT = 'TEXT'
TIME = 'TIME'
TIMESTAMP = 'TIMESTAMP'
VECTOR = 'VECTOR'
@staticmethod
def values():
return [n for n in list(DataType.__dict__.keys())
if n[0] != '_' and n != 'values']
# noinspection PyClassHasNoInit
class AttributeForeignKeyDirection:
FROM = 'FROM'
TO = 'TO'
@staticmethod
def values():
return [n for n in list(AttributeForeignKeyDirection.__dict__.keys())
if n[0] != '_' and n != 'values']
# noinspection PyClassHasNoInit
class PrivacyRiskType:
IDENTIFICATION = 'IDENTIFICATION'
@staticmethod
def values():
return [n for n in list(PrivacyRiskType.__dict__.keys())
if n[0] != '_' and n != 'values']
# noinspection PyClassHasNoInit
class PermissionType:
READ = 'READ'
WRITE = 'WRITE'
MANAGE = 'MANAGE'
@staticmethod
def values():
return [n for n in list(PermissionType.__dict__.keys())
if n[0] != '_' and n != 'values']
# noinspection PyClassHasNoInit
class AnonymizationTechnique:
ENCRYPTION = 'ENCRYPTION'
GENERALIZATION = 'GENERALIZATION'
SUPPRESSION = 'SUPPRESSION'
MASK = 'MASK'
NO_TECHNIQUE = 'NO_TECHNIQUE'
@staticmethod
def values():
return [n for n in list(AnonymizationTechnique.__dict__.keys())
if n[0] != '_' and n != 'values']
# noinspection PyClassHasNoInit
class PrivacyType:
IDENTIFIER = 'IDENTIFIER'
QUASI_IDENTIFIER = 'QUASI_IDENTIFIER'
SENSITIVE = 'SENSITIVE'
NON_SENSITIVE = 'NON_SENSITIVE'
@staticmethod
def values():
return [n for n in list(PrivacyType.__dict__.keys())
if n[0] != '_' and n != 'values']
# Association tables definition
class Attribute(db.Model):
""" Data source attribute. """
__tablename__ = 'attribute'
# Fields
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
description = Column(String(500))
type = Column(Enum(*list(DataType.values()),
name='DataTypeEnumType'), nullable=False)
size = Column(Integer)
precision = Column(Integer)
scale = Column(Integer)
nullable = Column(Boolean,
default=False, nullable=False)
enumeration = Column(Boolean,
default=False, nullable=False)
missing_representation = Column(String(200))
feature = Column(Boolean,
default=True, nullable=False)
label = Column(Boolean,
default=True, nullable=False)
distinct_values = Column(Integer)
mean_value = Column(Float)
median_value = Column(String(200))
max_value = Column(String(200))
min_value = Column(String(200))
std_deviation = Column(Float)
missing_total = Column(String(200))
deciles = Column(LONGTEXT)
format = Column(String(100))
key = Column(Boolean,
default=False, nullable=False)
# Associations
data_source_id = Column(Integer,
ForeignKey("data_source.id",
name="fk_attribute_data_source_id"),
nullable=False,
index=True)
data_source = relationship(
"DataSource",
overlaps='attributes',
foreign_keys=[data_source_id],
backref=backref("attributes",
cascade="all, delete-orphan"))
attribute_privacy = relationship(
"AttributePrivacy", uselist=False,
back_populates="attribute", lazy='joined')
def __str__(self):
return self.name
def __repr__(self):
return '<Instance {}: {}>'.format(self.__class__, self.id)
class AttributeForeignKey(db.Model):
""" Attribute that form a foreign key in data sources """
__tablename__ = 'attribute_foreign_key'
# Fields
id = Column(Integer, primary_key=True)
order = Column(Integer, nullable=False)
direction = Column(Enum(*list(AttributeForeignKeyDirection.values()),
name='AttributeForeignKeyDirectionEnumType'), nullable=False)
# Associations
foreign_key_id = Column(Integer,
ForeignKey("data_source_foreign_key.id",
name="fk_attribute_foreign_key_foreign_key_id"),
nullable=False,
index=True)
foreign_key = relationship(
"DataSourceForeignKey",
overlaps='attributes',
foreign_keys=[foreign_key_id],
backref=backref("attributes",
cascade="all, delete-orphan"))
from_attribute_id = Column(Integer,
ForeignKey("attribute.id",
name="fk_attribute_foreign_key_from_attribute_id"),
nullable=False,
index=True)
from_attribute = relationship(
"Attribute",
overlaps='foreign_keys',
foreign_keys=[from_attribute_id],
backref=backref("foreign_keys",
cascade="all, delete-orphan"))
to_attribute_id = Column(Integer,
ForeignKey("attribute.id",
name="fk_attribute_foreign_key_to_attribute_id"),
nullable=False,
index=True)
to_attribute = relationship(
"Attribute",
overlaps='references',
foreign_keys=[to_attribute_id],
backref=backref("references",
cascade="all, delete-orphan"))
def __str__(self):
return self.order
def __repr__(self):
return '<Instance {}: {}>'.format(self.__class__, self.id)
class AttributePrivacy(db.Model):
""" Privacy configuration for an attribute. """
__tablename__ = 'attribute_privacy'
# Fields
id = Column(Integer, primary_key=True)
attribute_name = Column(String(200), nullable=False)
data_type = Column(Enum(*list(DataType.values()),
name='DataTypeEnumType'))
privacy_type = Column(Enum(*list(PrivacyType.values()),
name='PrivacyTypeEnumType'), nullable=False)
category_technique = Column(String(100))
anonymization_technique = Column(Enum(*list(AnonymizationTechnique.values()),
name='AnonymizationTechniqueEnumType'), nullable=False)
hierarchical_structure_type = Column(String(100))
privacy_model_technique = Column(String(100))
hierarchy = Column(LONGTEXT)
category_model = Column(LONGTEXT)
privacy_model = Column(LONGTEXT)
privacy_model_parameters = Column(LONGTEXT)
unlock_privacy_key = Column(String(400))
is_global_law = Column(Boolean,
default=False)
# Associations
attribute_id = Column(Integer,
ForeignKey("attribute.id",
name="fk_attribute_privacy_attribute_id"),
index=True)
attribute = relationship(
"Attribute",
overlaps='attribute_privacy',
foreign_keys=[attribute_id],
back_populates="attribute_privacy")
attribute_privacy_group_id = Column(Integer,
ForeignKey("attribute_privacy_group.id",
name="fk_attribute_privacy_attribute_privacy_group_id"),
index=True)
attribute_privacy_group = relationship(
"AttributePrivacyGroup",
overlaps='attribute_privacy',
foreign_keys=[attribute_privacy_group_id],
backref=backref("attribute_privacy",
cascade="all, delete-orphan"))
def __str__(self):
return self.attribute_name
def __repr__(self):
return '<Instance {}: {}>'.format(self.__class__, self.id)
class AttributePrivacyGroup(db.Model):
""" Groups attributes with same semantic """
__tablename__ = 'attribute_privacy_group'
# Fields
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
user_id = Column(Integer, nullable=False)
def __str__(self):
return self.name
def __repr__(self):
return '<Instance {}: {}>'.format(self.__class__, self.id)
class DataSource(db.Model):
""" Data source in Lemonade system (anything that stores data. """
__tablename__ = 'data_source'
# Fields
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
description = Column(String(500))
enabled = Column(Boolean,
default=True, nullable=False)
statistics_process_counter = Column(Integer,
default=0, nullable=False)
read_only = Column(Boolean,
default=True, nullable=False)
privacy_aware = Column(Boolean,
default=False, nullable=False)
url = Column(String(200), nullable=False)
created = Column(DateTime,
default=func.now(), nullable=False)
updated = Column(DateTime,
default=datetime.datetime.utcnow, nullable=False,
onupdate=datetime.datetime.utcnow)
format = Column(Enum(*list(DataSourceFormat.values()),
name='DataSourceFormatEnumType'), nullable=False)
initialization = Column(Enum(*list(DataSourceInitialization.values()),
name='DataSourceInitializationEnumType'),
default=DataSourceInitialization.INITIALIZED, nullable=False)
initialization_job_id = Column(String(200))
provenience = Column(LONGTEXT)
estimated_rows = Column(Integer,
default=0)
estimated_size_in_mega_bytes = Column(Numeric(10, 2))
expiration = Column(String(200))
user_id = Column(Integer)
user_login = Column(String(50))
user_name = Column(String(200))
tags = Column(String(100))
temporary = Column(Boolean,
default=False, nullable=False)
workflow_id = Column(Integer)
task_id = Column(String(200))
attribute_delimiter = Column(String(20))
record_delimiter = Column(String(20))
text_delimiter = Column(String(20))
is_public = Column(Boolean,
default=False, nullable=False)
treat_as_missing = Column(LONGTEXT)
encoding = Column(String(200))
is_first_line_header = Column(Boolean,
default=0, nullable=False)
is_multiline = Column(Boolean,
default=0, nullable=False)
command = Column(LONGTEXT)
is_lookup = Column(Boolean,
default=0, nullable=False)
use_in_workflow = Column(Boolean,
default=0, nullable=False, index=True)
# Associations
storage_id = Column(Integer,
ForeignKey("storage.id",
name="fk_data_source_storage_id"),
nullable=False,
index=True)
storage = relationship(
"Storage",
overlaps='storage',
foreign_keys=[storage_id])
def __str__(self):
return self.name
def __repr__(self):
return '<Instance {}: {}>'.format(self.__class__, self.id)
class DataSourceForeignKey(db.Model):
""" Foreign key in data sources """
__tablename__ = 'data_source_foreign_key'
# Fields
id = Column(Integer, primary_key=True)
# Associations
from_source_id = Column(Integer,
ForeignKey("data_source.id",
name="fk_data_source_foreign_key_from_source_id"),
nullable=False,
index=True)
from_source = relationship(
"DataSource",
overlaps='foreign_keys',
foreign_keys=[from_source_id],
backref=backref("foreign_keys",
cascade="all, delete-orphan"))
to_source_id = Column(Integer,
ForeignKey("data_source.id",
name="fk_data_source_foreign_key_to_source_id"),
nullable=False,
index=True)
to_source = relationship(
"DataSource",
overlaps='references',
foreign_keys=[to_source_id],
backref=backref("references",
cascade="all, delete-orphan"))
def __str__(self):
return 'DataSourceForeignKey'
def __repr__(self):
return '<Instance {}: {}>'.format(self.__class__, self.id)
class DataSourcePermission(db.Model):
""" Associate users and permissions """
__tablename__ = 'data_source_permission'
# Fields
id = Column(Integer, primary_key=True)
permission = Column(Enum(*list(PermissionType.values()),
name='PermissionTypeEnumType'), nullable=False)
user_id = Column(Integer, nullable=False)
user_login = Column(String(50), nullable=False)
user_name = Column(String(200), nullable=False)
# Associations
data_source_id = Column(Integer,
ForeignKey("data_source.id",
name="fk_data_source_permission_data_source_id"),
nullable=False,
index=True)
data_source = relationship(
"DataSource",
overlaps='permissions',
foreign_keys=[data_source_id],
backref=backref("permissions",
cascade="all, delete-orphan"))
def __str__(self):
return self.permission
def __repr__(self):
return '<Instance {}: {}>'.format(self.__class__, self.id)
class Model(db.Model):
""" Machine learning model """
__tablename__ = 'model'
# Fields
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
enabled = Column(Boolean,
default=True, nullable=False)
created = Column(DateTime,
default=func.now(), nullable=False)
path = Column(String(500), nullable=False)
class_name = Column(String(500), nullable=False)
type = Column(Enum(*list(ModelType.values()),
name='ModelTypeEnumType'),
default=ModelType.UNSPECIFIED, nullable=False)
deployment_status = Column(Enum(*list(DeploymentStatus.values()),
name='DeploymentStatusEnumType'),
default=DeploymentStatus.NOT_DEPLOYED, nullable=False)
user_id = Column(Integer, nullable=False)
user_login = Column(String(50), nullable=False)
user_name = Column(String(200), nullable=False)
workflow_id = Column(Integer)
workflow_name = Column(String(200))
task_id = Column(String(200))
job_id = Column(Integer)
# Associations
storage_id = Column(Integer,
ForeignKey("storage.id",
name="fk_model_storage_id"),
nullable=False,
index=True)
storage = relationship(
"Storage",
overlaps='storage',
foreign_keys=[storage_id])
def __str__(self):
return self.name
def __repr__(self):
return '<Instance {}: {}>'.format(self.__class__, self.id)
class ModelPermission(db.Model):
""" | |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Convenient library for data statistics generation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import logging
import multiprocessing
import os
import tempfile
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from joblib import delayed
from joblib import Parallel
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow_data_validation import types
from tensorflow_data_validation.api import stats_api
from tensorflow_data_validation.coders import csv_decoder
from tensorflow_data_validation.coders import tf_example_decoder
from tensorflow_data_validation.statistics import stats_impl
from tensorflow_data_validation.statistics import stats_options as options
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.types_compat import Any, List, Optional, Text
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
def generate_statistics_from_tfrecord(
data_location,
output_path = None,
stats_options = options.StatsOptions(),
pipeline_options = None,
):
"""Compute data statistics from TFRecord files containing TFExamples.
Runs a Beam pipeline to compute the data statistics and return the result
data statistics proto.
This is a convenience method for users with data in TFRecord format.
Users with data in unsupported file/data formats, or users who wish
to create their own Beam pipelines need to use the 'GenerateStatistics'
PTransform API directly instead.
Args:
data_location: The location of the input data files.
output_path: The file path to output data statistics result to. If None, we
use a temporary directory. It will be a TFRecord file containing a single
data statistics proto, and can be read with the 'load_statistics' API.
If you run this function on Google Cloud, you must specify an
output_path. Specifying None may cause an error.
stats_options: `tfdv.StatsOptions` for generating data statistics.
pipeline_options: Optional beam pipeline options. This allows users to
specify various beam pipeline execution parameters like pipeline runner
(DirectRunner or DataflowRunner), cloud dataflow service project id, etc.
See https://cloud.google.com/dataflow/pipelines/specifying-exec-params for
more details.
Returns:
A DatasetFeatureStatisticsList proto.
"""
if output_path is None:
output_path = os.path.join(tempfile.mkdtemp(), 'data_stats.tfrecord')
output_dir_path = os.path.dirname(output_path)
if not tf.gfile.Exists(output_dir_path):
tf.gfile.MakeDirs(output_dir_path)
# PyLint doesn't understand Beam PTransforms.
# pylint: disable=no-value-for-parameter
with beam.Pipeline(options=pipeline_options) as p:
# Auto detect tfrecord file compression format based on input data
# path suffix.
_ = (
p
| 'ReadData' >> beam.io.ReadFromTFRecord(file_pattern=data_location)
| 'DecodeData' >> tf_example_decoder.DecodeTFExample()
| 'GenerateStatistics' >> stats_api.GenerateStatistics(stats_options)
# TODO(b/112014711) Implement a custom sink to write the stats proto.
| 'WriteStatsOutput' >> beam.io.WriteToTFRecord(
output_path,
shard_name_template='',
coder=beam.coders.ProtoCoder(
statistics_pb2.DatasetFeatureStatisticsList)))
return load_statistics(output_path)
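# Hedged usage sketch (paths are hypothetical): compute statistics for TFRecord data with
# the convenience function above and inspect the resulting proto.
#
#   stats = generate_statistics_from_tfrecord(
#       data_location='/tmp/train*.tfrecord',
#       output_path='/tmp/train_stats.tfrecord')
#   print(stats.datasets[0].num_examples)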
def generate_statistics_from_csv(
data_location,
column_names = None,
delimiter = ',',
output_path = None,
stats_options = options.StatsOptions(),
pipeline_options = None,
):
"""Compute data statistics from CSV files.
Runs a Beam pipeline to compute the data statistics and return the result
data statistics proto.
This is a convenience method for users with data in CSV format.
Users with data in unsupported file/data formats, or users who wish
to create their own Beam pipelines need to use the 'GenerateStatistics'
PTransform API directly instead.
Args:
data_location: The location of the input data files.
column_names: A list of column names to be treated as the CSV header. Order
must match the order in the input CSV files. If this argument is not
specified, we assume the first line in the input CSV files as the
header. Note that this option is valid only for 'csv' input file format.
delimiter: A one-character string used to separate fields in a CSV file.
output_path: The file path to output data statistics result to. If None, we
use a temporary directory. It will be a TFRecord file containing a single
data statistics proto, and can be read with the 'load_statistics' API.
If you run this function on Google Cloud, you must specify an
output_path. Specifying None may cause an error.
stats_options: `tfdv.StatsOptions` for generating data statistics.
pipeline_options: Optional beam pipeline options. This allows users to
specify various beam pipeline execution parameters like pipeline runner
(DirectRunner or DataflowRunner), cloud dataflow service project id, etc.
See https://cloud.google.com/dataflow/pipelines/specifying-exec-params for
more details.
Returns:
A DatasetFeatureStatisticsList proto.
"""
if output_path is None:
output_path = os.path.join(tempfile.mkdtemp(), 'data_stats.tfrecord')
output_dir_path = os.path.dirname(output_path)
if not tf.gfile.Exists(output_dir_path):
tf.gfile.MakeDirs(output_dir_path)
# PyLint doesn't understand Beam PTransforms.
# pylint: disable=no-value-for-parameter
with beam.Pipeline(options=pipeline_options) as p:
# If a header is not provided, assume the first line in a file
# to be the header.
skip_header_lines = 1 if column_names is None else 0
if column_names is None:
column_names = get_csv_header(data_location, delimiter)
_ = (
p
| 'ReadData' >> beam.io.textio.ReadFromText(
file_pattern=data_location, skip_header_lines=skip_header_lines)
| 'DecodeData' >> csv_decoder.DecodeCSV(
column_names=column_names, delimiter=delimiter,
schema=stats_options.schema,
infer_type_from_schema=stats_options.infer_type_from_schema)
| 'GenerateStatistics' >> stats_api.GenerateStatistics(stats_options)
# TODO(b/112014711) Implement a custom sink to write the stats proto.
| 'WriteStatsOutput' >> beam.io.WriteToTFRecord(
output_path,
shard_name_template='',
coder=beam.coders.ProtoCoder(
statistics_pb2.DatasetFeatureStatisticsList)))
return load_statistics(output_path)
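# Hedged usage sketch (paths and column names are hypothetical): CSV files without a header
# line need the column names passed explicitly, in the same order as the columns.
#
#   stats = generate_statistics_from_csv(
#       data_location='/tmp/data-*.csv',
#       column_names=['age', 'income', 'label'],
#       delimiter=',',
#       output_path='/tmp/csv_stats.tfrecord')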
def generate_statistics_from_dataframe(
dataframe,
stats_options = options.StatsOptions(),
n_jobs = 1
):
"""Compute data statistics for the input pandas DataFrame.
This is a utility method for users with in-memory data represented
as a pandas DataFrame.
Args:
dataframe: Input pandas DataFrame.
stats_options: `tfdv.StatsOptions` for generating data statistics.
n_jobs: Number of processes to run (defaults to 1). If -1 is provided,
uses the same number of processes as the number of CPU cores.
Returns:
A DatasetFeatureStatisticsList proto.
"""
if not isinstance(dataframe, pd.DataFrame):
raise TypeError('dataframe argument is of type {}. Must be a '
'pandas DataFrame.'.format(type(dataframe).__name__))
stats_generators = stats_impl.get_generators(stats_options, in_memory=True) # type: List[stats_generator.CombinerStatsGenerator]
if n_jobs < -1 or n_jobs == 0:
raise ValueError('Invalid n_jobs parameter {}. Should be either '
' -1 or >= 1.'.format(n_jobs))
if n_jobs == -1:
n_jobs = multiprocessing.cpu_count()
n_jobs = max(min(n_jobs, multiprocessing.cpu_count()), 1)
if n_jobs == 1:
merged_partial_stats = _generate_partial_statistics_from_df(
dataframe, stats_options, stats_generators)
else:
# TODO(pachristopher): Investigate why we don't observe linear speedup after
# a certain number of processes.
splits = np.array_split(dataframe, n_jobs)
partial_stats = Parallel(n_jobs=n_jobs)(
delayed(_generate_partial_statistics_from_df)(
splits[i], stats_options, stats_generators) for i in range(n_jobs))
merged_partial_stats = [
gen.merge_accumulators(stats)
for gen, stats in zip(stats_generators, zip(*partial_stats))
]
return stats_impl.extract_statistics_output(
merged_partial_stats, stats_generators)
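# Hedged usage sketch (the DataFrame content is hypothetical): in-memory statistics,
# optionally parallelized. With n_jobs=-1 the DataFrame is split across all CPU cores with
# np.array_split and the per-split accumulators are merged as shown above.
#
#   df = pd.DataFrame({'age': [25, 32, 47], 'member': [True, False, True]})
#   stats = generate_statistics_from_dataframe(df, n_jobs=-1)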
def _generate_partial_statistics_from_df(
dataframe,
stats_options,
stats_generators
):
"""Generate accumulators containing partial stats."""
inmemory_dicts = [{} for _ in range(len(dataframe))]
isnull = pd.isnull
# Initialize decoding fn based on column type.
int_fn = lambda x: np.array([x], dtype=np.integer)
float_fn = lambda x: None if isnull(x) else np.array([x], dtype=np.floating)
str_fn = lambda x: None if isnull(x) else np.array([x], dtype=np.object)
decode_fn = {
# int type.
'i': int_fn,
'u': int_fn,
# float type.
'f': float_fn,
# bool type.
'b': int_fn,
# string type.
'S': str_fn,
'O': str_fn,
'U': str_fn,
}
schema = schema_pb2.Schema()
for col_name, col_type in zip(dataframe.columns, dataframe.dtypes):
kind = col_type.kind
if kind not in decode_fn:
logging.warning('Ignoring feature %s of type %s', col_name, col_type)
continue
if kind == 'b':
# Track bool type feature as categorical.
schema.feature.add(
name=col_name, type=schema_pb2.INT,
bool_domain=schema_pb2.BoolDomain())
# Get decoding fn based on column type.
fn = decode_fn[kind]
# Iterate over the column and apply the decoding fn.
j = 0
for val in dataframe[col_name]:
inmemory_dicts[j][col_name] = fn(val)
j += 1
if schema.feature:
stats_options.schema = schema
return stats_impl.generate_partial_statistics_in_memory(
inmemory_dicts, stats_options, stats_generators)
def get_csv_header(data_location,
delimiter):
"""Gets the CSV header from the input files.
This function assumes that the header is present as the first line in all
the files in the input path.
Args:
data_location: Glob pattern(s) specifying the location of the input data
files.
delimiter: A one-character string used to separate fields in a CSV file.
Returns:
The list of column names.
Raises:
ValueError: If any of the input files is not found or empty, or if the files
have different headers.
"""
matched_files = tf.gfile.Glob(data_location)
if not matched_files:
raise ValueError(
'No file found in the input data location: %s' % data_location)
# Read the header line in the first file.
with tf.gfile.GFile(matched_files[0], 'r') as reader:
try:
result = next(csv.reader(reader, delimiter=delimiter))
except StopIteration:
raise ValueError('Found empty file when reading the header line: %s' %
matched_files[0])
# Make sure that all files have the same header.
for filename in matched_files[1:]:
with tf.gfile.GFile(filename,
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from typing import Any, AsyncIterable, Callable, Dict, Generic, IO, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.polling.async_base_polling import AsyncLROBasePolling
class FormRecognizerClientOperationsMixin(object):
async def begin_analyze_business_card_async(
self,
include_text_details: Optional[bool] = False,
locale: Optional[str] = None,
file_stream: Optional[Union[IO, "models.SourcePath"]] = None,
**kwargs
) -> AsyncLROPoller[None]:
"""Analyze Business Card.
Extract field text and semantic values from a given business card document. The input document
must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or
'image/tiff'. Alternatively, use 'application/json' type to specify the location (Uri) of the
document to be analyzed.
:param include_text_details: Include text lines and element references in the result.
:type include_text_details: bool
:param locale: Locale of the business card. Supported locales include: en-AU, en-CA, en-GB, en-
IN, en-US(default).
:type locale: str
:param file_stream: .json, .pdf, .jpg, .png or .tiff type file stream.
:type file_stream: IO or ~azure.ai.formrecognizer.models.SourcePath
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_analyze_business_card_async')
if api_version == '2.1-preview.1':
from ..v2_1_preview_1.aio.operations import FormRecognizerClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_analyze_business_card_async'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_analyze_business_card_async(include_text_details, locale, file_stream, **kwargs)
async def begin_analyze_layout_async(
self,
file_stream: Optional[Union[IO, "models.SourcePath"]] = None,
**kwargs
) -> AsyncLROPoller[None]:
"""Analyze Layout.
Extract text and layout information from a given document. The input document must be of one of
the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or 'image/tiff'.
Alternatively, use 'application/json' type to specify the location (Uri or local path) of the
document to be analyzed.
:param file_stream: .json, .pdf, .jpg, .png or .tiff type file stream.
:type file_stream: IO or ~azure.ai.formrecognizer.models.SourcePath
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_analyze_layout_async')
if api_version == '2.0':
from ..v2_0.aio.operations import FormRecognizerClientOperationsMixin as OperationClass
elif api_version == '2.1-preview.1':
from ..v2_1_preview_1.aio.operations import FormRecognizerClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_analyze_layout_async'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_analyze_layout_async(file_stream, **kwargs)
async def begin_analyze_receipt_async(
self,
include_text_details: Optional[bool] = False,
locale: Optional[str] = None,
file_stream: Optional[Union[IO, "models.SourcePath"]] = None,
**kwargs
) -> AsyncLROPoller[None]:
"""Analyze Receipt.
Extract field text and semantic values from a given receipt document. The input document must
be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or
'image/tiff'. Alternatively, use 'application/json' type to specify the location (Uri) of the
document to be analyzed.
:param include_text_details: Include text lines and element references in the result.
:type include_text_details: bool
:param locale: Locale of the receipt. Supported locales include: en-AU, en-CA, en-GB, en-IN,
en-US(default).
:type locale: str
:param file_stream: .json, .pdf, .jpg, .png or .tiff type file stream.
:type file_stream: IO or ~azure.ai.formrecognizer.models.SourcePath
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_analyze_receipt_async')
if api_version == '2.0':
from ..v2_0.aio.operations import FormRecognizerClientOperationsMixin as OperationClass
elif api_version == '2.1-preview.1':
from ..v2_1_preview_1.aio.operations import FormRecognizerClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_analyze_receipt_async'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
# FIXME: this is handwritten
if api_version == '2.0':
return await mixin_instance.begin_analyze_receipt_async(include_text_details, file_stream, **kwargs)
elif api_version == '2.1-preview.1':
return await mixin_instance.begin_analyze_receipt_async(include_text_details, locale, file_stream, **kwargs)
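# Illustrative usage of the version-dependent dispatch above (a sketch, not part
# of this module): the 2.0 operation has no `locale` parameter, so only
# `include_text_details` and the stream are forwarded, while 2.1-preview.1 also
# receives `locale`. Assuming the multi-API async FormRecognizerClient that
# mixes in this class, and a hypothetical local file:
#
#     async with FormRecognizerClient(endpoint=endpoint, credential=credential,
#                                     api_version="2.1-preview.1") as client:
#         with open("receipt.jpg", "rb") as stream:
#             poller = await client.begin_analyze_receipt_async(
#                 include_text_details=True, locale="en-US", file_stream=stream)
#             await poller.wait()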
async def begin_analyze_with_custom_model(
self,
model_id: str,
include_text_details: Optional[bool] = False,
file_stream: Optional[Union[IO, "models.SourcePath"]] = None,
**kwargs
) -> AsyncLROPoller[None]:
"""Analyze Form.
Extract key-value pairs, tables, and semantic values from a given document. The input document
must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or
'image/tiff'. Alternatively, use 'application/json' type to specify the location (Uri or local
path) of the document to be analyzed.
:param model_id: Model identifier.
:type model_id: str
:param include_text_details: Include text lines and element references in the result.
:type include_text_details: bool
:param file_stream: .json, .pdf, .jpg, .png or .tiff type file stream.
:type file_stream: IO or ~azure.ai.formrecognizer.models.SourcePath
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_analyze_with_custom_model')
if api_version == '2.0':
from ..v2_0.aio.operations import FormRecognizerClientOperationsMixin as OperationClass
elif api_version == '2.1-preview.1':
from ..v2_1_preview_1.aio.operations import FormRecognizerClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_analyze_with_custom_model'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_analyze_with_custom_model(model_id, include_text_details, file_stream, **kwargs)
async def begin_compose_custom_models_async(
self,
compose_request: "models.ComposeRequest",
**kwargs
) -> AsyncLROPoller[None]:
"""Compose trained with labels models into one composed model.
Compose request would include list of models ids.
It would validate what all models either trained with labels model or composed model.
It would validate limit of models put together.
:param compose_request: Compose models.
:type compose_request: ~azure.ai.formrecognizer.models.ComposeRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
api_version = self._get_api_version('begin_compose_custom_models_async')
if api_version == '2.1-preview.1':
from ..v2_1_preview_1.aio.operations import FormRecognizerClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'begin_compose_custom_models_async'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return await mixin_instance.begin_compose_custom_models_async(compose_request, **kwargs)
async def | |
# pyethapp/jsonrpc.py
from decorator import decorator
from collections import Iterable
import inspect
from ethereum.utils import is_numeric, is_string, int_to_big_endian, encode_hex, decode_hex, sha3
import ethereum.slogging as slogging
from ethereum.transactions import Transaction
from ethereum import processblock
import gevent
import gevent.wsgi
import gevent.queue
import rlp
from tinyrpc.dispatch import RPCDispatcher
from tinyrpc.dispatch import public as public_
from tinyrpc.exc import BadRequestError, MethodNotFoundError
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol, JSONRPCInvalidParamsError
from tinyrpc.server.gevent import RPCServerGreenlets
from tinyrpc.transports.wsgi import WsgiServerTransport
from devp2p.service import BaseService
from eth_protocol import ETHProtocol
log = slogging.get_logger('jsonrpc')
slogging.configure(config_string=':debug')
# route logging messages
class WSGIServerLogger(object):
_log = slogging.get_logger('jsonrpc.wsgi')
@classmethod
def log(cls, msg):
cls._log.debug(msg.strip())
write = log
@classmethod
def log_error(cls, msg, *args):
cls._log.error(msg % args)
gevent.wsgi.WSGIHandler.log_error = WSGIServerLogger.log_error
# hack to return the correct json rpc error code if the param count is wrong
# (see https://github.com/mbr/tinyrpc/issues/19)
def public(f):
def new_f(*args, **kwargs):
try:
inspect.getcallargs(f, *args, **kwargs)
except TypeError:
raise JSONRPCInvalidParamsError()
else:
return f(*args, **kwargs)
new_f.func_name = f.func_name
new_f.func_doc = f.func_doc
return public_(new_f)
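# Illustrative effect of the wrapper above (a sketch, not executed here): a
# dispatcher method called with the wrong number of arguments now surfaces as
# the JSON-RPC "Invalid params" error (-32602 per the JSON-RPC 2.0 spec)
# instead of an unhandled TypeError, e.g.
#
#     @public
#     def three(self):          # hypothetical dispatcher method
#         return 3
#
#     three(obj, 'extra')       # raises JSONRPCInvalidParamsError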
class LoggingDispatcher(RPCDispatcher):
"""A dispatcher that logs every RPC method call."""
def __init__(self):
super(LoggingDispatcher, self).__init__()
self.logger = log.debug
def dispatch(self, request):
if isinstance(request, Iterable):
request_list = request
else:
request_list = [request]
for req in request_list:
self.logger('RPC call', method=req.method, args=req.args, kwargs=req.kwargs,
id=req.unique_id)
response = super(LoggingDispatcher, self).dispatch(request)
if isinstance(response, Iterable):
response_list = response
else:
response_list = [response]
for res in response_list:
try:
self.logger('RPC result', id=res.unique_id, result=res.result)
except AttributeError:
self.logger('RPC error', id=res.unique_id, error=res.error)
return response
class JSONRPCServer(BaseService):
"""Service providing an HTTP server with JSON RPC interface.
Other services can extend the JSON RPC interface by creating a
:class:`Subdispatcher` and registering it via
`Subdispatcher.register(self.app.services.json_rpc_server)`.
Alternatively :attr:`dispatcher` can be extended directly (see
https://tinyrpc.readthedocs.org/en/latest/dispatch.html).
"""
name = 'jsonrpc'
default_config = dict(jsonrpc=dict(listen_port=4000, listen_host='127.0.0.1'))
def __init__(self, app):
log.debug('initializing JSONRPCServer')
BaseService.__init__(self, app)
self.app = app
self.dispatcher = LoggingDispatcher()
# register sub dispatchers
for subdispatcher in (Web3, Net, Compilers, DB, Chain, Miner, FilterManager):
subdispatcher.register(self)
transport = WsgiServerTransport(queue_class=gevent.queue.Queue)
# start wsgi server as a background-greenlet
self.listen_port = app.config['jsonrpc']['listen_port']
self.listen_host = app.config['jsonrpc']['listen_host']
self.wsgi_server = gevent.wsgi.WSGIServer((self.listen_host, self.listen_port),
transport.handle, log=WSGIServerLogger)
self.rpc_server = RPCServerGreenlets(
transport,
JSONRPCProtocol(),
self.dispatcher
)
self.default_block = 'latest'
def _run(self):
log.info('starting JSONRPCServer', port=self.listen_port)
# in the main greenlet, run our rpc_server
self.wsgi_thread = gevent.spawn(self.wsgi_server.serve_forever)
self.rpc_server.serve_forever()
def stop(self):
log.info('stopping JSONRPCServer')
self.wsgi_thread.kill()
def get_block(self, chain, block_id=None):
"""Return the block identified by `block_id`.
This method also sets :attr:`default_block` to the value of `block_id`,
which will be used if `block_id` is not provided in later calls.
Subdispatchers using this function have to ensure that a
chainmanager is registered via :attr:`required_services`.
:param block_id: either the block number as integer or 'pending',
'earliest' or 'latest', or `None` for the default
block
:raises: `BadRequestError` if the block does not exist
"""
assert 'chain' in self.app.services
chain = self.app.services.chain.chain
if block_id is None:
block_id = self.default_block
else:
self.default_block = block_id
if block_id == 'pending':
return self.app.services.chain.chain.head_candidate
if block_id == 'latest':
return chain.head
if block_id == 'earliest':
return chain.genesis
try:
if is_numeric(block_id):
# by number
hash_ = chain.index.get_block_by_number(block_id)
else:
# by hash
assert is_string(block_id)
hash_ = block_id
return chain.get(hash_)
except KeyError:
raise BadRequestError('Unknown block')
class Subdispatcher(object):
"""A JSON RPC subdispatcher which can be registered at JSONRPCService.
:cvar prefix: common prefix shared by all rpc methods implemented by this
subdispatcher
:cvar required_services: a list of names of services the subdispatcher
is built on and will be made available as
instance variables
"""
prefix = ''
required_services = []
@classmethod
def register(cls, json_rpc_service):
"""Register a new instance at ``json_rpc_service.dispatcher``.
The subdispatcher will be able to access all required services as well
as the app object as attributes.
If one of the required services is not available, log this as warning
but don't fail.
"""
dispatcher = cls()
for service_name in cls.required_services:
try:
service = json_rpc_service.app.services[service_name]
except KeyError:
log.warning('No {} registered. Some RPC methods will not be '
'available'.format(service_name))
return
setattr(dispatcher, service_name, service)
dispatcher.app = json_rpc_service.app
dispatcher.json_rpc_server = json_rpc_service
json_rpc_service.dispatcher.register_instance(dispatcher, cls.prefix)
def quantity_decoder(data):
"""Decode `data` representing a quantity."""
if not is_string(data):
success = False
elif not data.startswith('0x'):
success = False # must start with 0x prefix
elif len(data) > 3 and data[2] == '0':
success = False # must not have leading zeros (except `0x0`)
else:
data = data[2:]
# ensure even length
if len(data) % 2 == 1:
data = '0' + data
try:
return int(data, 16)
except ValueError:
success = False
assert not success
raise BadRequestError('Invalid quantity encoding')
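# Examples of what the decoder above accepts and rejects (values chosen for
# illustration): '0x0' -> 0, '0x41' -> 65, '0x400' -> 1024, while '0x' (no
# digits), '0x0400' (leading zero) and 'ff' (missing prefix) all raise
# BadRequestError('Invalid quantity encoding').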
def quantity_encoder(i):
"""Encode interger quantity `data`."""
assert is_numeric(i)
data = int_to_big_endian(i)
return '0x' + (encode_hex(data).lstrip('0') or '0')
def data_decoder(data):
"""Decode `data` representing unformatted data."""
if not data.startswith('0x'):
data = '0x' + data
if len(data) % 2 != 0:
success = False # must be even length
else:
if len(data) % 2 != 0: # TODO: remove
data += '0' # TODO: remove
try:
return decode_hex(data[2:])
except TypeError:
success = False
assert not success
raise BadRequestError('Invalid data encoding')
def data_encoder(data):
"""Encode unformatted binary `data`."""
return '0x' + encode_hex(data)
def address_decoder(data):
"""Decode an address from hex with 0x prefix to 20 bytes."""
if not data.startswith('0x'):
data = '0x' + data
addr = data_decoder(data)
if len(addr) != 20:
raise BadRequestError('Addresses must be 20 bytes long')
return addr
def address_encoder(address):
assert len(address) == 20
return encode_hex(address)
def block_id_decoder(data):
"""Decode a block identifier as expected from :meth:`JSONRPCServer.get_block`."""
if data in (None, 'latest', 'earliest', 'pending'):
return data
else:
return quantity_decoder(data)
def block_hash_decoder(data):
"""Decode a block hash."""
decoded = data_decoder(data)
if len(decoded) != 32:
raise BadRequestError('Block hashes must be 32 bytes long')
return decoded
def tx_hash_decoder(data):
"""Decode a transaction hash."""
decoded = data_decoder(data)
if len(decoded) != 32:
raise BadRequestError('Transaction hashes must be 32 bytes long')
return decoded
def bool_decoder(data):
if not isinstance(data, bool):
raise BadRequestError('Parameter must be boolean')
return data
def block_encoder(block, include_transactions, pending=False):
"""Encode a block as JSON object.
:param block: a :class:`ethereum.blocks.Block`
:param include_transactions: if true transactions are included, otherwise
only their hashes
:returns: a json encodable dictionary
"""
d = {
'number': quantity_encoder(block.number),
'hash': data_encoder(block.hash),
'parentHash': data_encoder(block.prevhash),
'nonce': data_encoder(block.nonce),
'sha3Uncles': data_encoder(block.uncles_hash),
'logsBloom': data_encoder(int_to_big_endian(block.bloom)),
'transactionsRoot': data_encoder(block.tx_list_root),
'stateRoot': data_encoder(block.state_root),
'miner': data_encoder(block.coinbase),
'difficulty': quantity_encoder(block.difficulty),
'totalDifficulty': quantity_encoder(block.chain_difficulty()),
'extraData': data_encoder(block.extra_data),
'size': quantity_encoder(len(rlp.encode(block))),
'gasLimit': quantity_encoder(block.gas_limit),
'minGasPrice': quantity_encoder(0), # TODO quantity_encoder(block.gas_price),
'gasUsed': quantity_encoder(block.gas_used),
'timestamp': quantity_encoder(block.timestamp),
'uncles': [data_encoder(u.hash) for u in block.uncles]
}
if include_transactions:
d['transactions'] = []
for i, tx in enumerate(block.get_transactions()):
d['transactions'].append(tx_encoder(tx, block, i, pending))
else:
d['transactions'] = [data_encoder(tx.hash) for tx in block.get_transactions()]
return d
def tx_encoder(transaction, block, i, pending):
"""Encode a transaction as JSON object.
`transaction` is the `i`th transaction in `block`. `pending` specifies if
the block is pending or already mined.
"""
return {
'hash': data_encoder(transaction.hash),
'nonce': quantity_encoder(transaction.nonce),
'blockHash': data_encoder(block.hash),
'blockNumber': quantity_encoder(block.number) if not pending else None,
'transactionIndex': quantity_encoder(i),
'from': data_encoder(transaction.sender),
'to': data_encoder(transaction.to),
'value': quantity_encoder(transaction.value),
'gasPrice': quantity_encoder(transaction.gasprice),
'gas': quantity_encoder(transaction.startgas),
'input': data_encoder(transaction.data),
}
def loglist_encoder(loglist):
"""Encode a list of log"""
l = []
if len(loglist) > 0 and loglist[0] is None:
assert all(element is None for element in loglist)
return l
result = []
for log, index, block in loglist:
result.append({
'hash': data_encoder(log.hash),
'logIndex': quantity_encoder(index),
'transactionIndex': None,
'transactionHash': None,
'blockHash': data_encoder(block.hash),
'blockNumber': quantity_encoder(block.number),
'address': address_encoder(log.address),
'data': data_encoder(log.data),
'topics': [data_encoder(topic) for topic in log.topics]
})
return result
def decode_arg(name, decoder):
"""Create a decorator that applies `decoder` to argument `name`."""
@decorator
def new_f(f, *args, **kwargs):
call_args = inspect.getcallargs(f, *args, **kwargs)
call_args[name] = decoder(call_args[name])
return f(**call_args)
return new_f
def encode_res(encoder):
"""Create a decorator that applies `encoder` to the return value of the
decorated function.
"""
@decorator
def new_f(f, *args, **kwargs):
res = f(*args, **kwargs)
return encoder(res)
return new_f
class Web3(Subdispatcher):
"""Subdispatcher for some generic RPC methods."""
prefix = 'web3_'
@public
@decode_arg('data', data_decoder)
@encode_res(data_encoder)
def sha3(self, data):
return sha3(data)
@public
def clientVersion(self):
return self.app.client_version
class Net(Subdispatcher):
"""Subdispatcher for network related RPC methods."""
prefix = 'net_'
required_services = ['peermanager']
@public
def version(self):
return ETHProtocol.version
@public
def listening(self):
return self.peermanager.num_peers() < self.peermanager.config['p2p']['min_peers']
@public
@encode_res(quantity_encoder)
def peerCount(self):
return self.peermanager.num_peers()
class Compilers(Subdispatcher):
"""Subdispatcher for compiler related RPC methods."""
prefix = 'eth_'
required_services = []
def __init__(self):
super(Compilers, self).__init__()
self.compilers_ = None
@property
def compilers(self):
if self.compilers_ is None:
self.compilers_ = {}
try:
import serpent
self.compilers_['serpent'] = serpent.compile
self.compilers_['lll'] = serpent.compile_lll
except ImportError:
pass
try:
import solidity
self.compilers_['solidity'] = solidity.compile
except ImportError:
pass
return self.compilers_
@public
def getCompilers(self):
return self.compilers.keys()
@public
@encode_res(data_encoder)
def compileSolidity(self, code):
try:
return self.compilers['solidity'](code)
except KeyError:
raise MethodNotFoundError()
@public
@encode_res(data_encoder)
def compileSerpent(self, code):
try:
return self.compilers['serpent'](code)
| |
"""Parse the model definition into a MCX graph.
When the user defines a model in MCX she uses the symbol `<~` to denote random
variable assignment, and the decorator `@mcx.model` to denote the definition of
a multivariate distribution. The constructs do not exist in Python and we parse
them into SampleOps and GraphicalModels respectively. Other python constructs
are transformed into Ops using a surjective mapping.
The ensuing graphical model can then be manipulated at runtime, and used to compile
samplers and logpdfs.
"""
import inspect
import textwrap
from collections import defaultdict
from functools import partial
from types import FunctionType
from typing import Dict, List, Optional, Tuple, Union
import libcst as cst
import networkx as nx
import mcx
from mcx.core.graph import GraphicalModel
from mcx.core.nodes import (
Constant,
FunctionOp,
ModelOp,
Name,
Op,
Placeholder,
SampleModelOp,
SampleOp,
)
MODEL_BADLY_FORMED_ERROR = (
"a MCX model should be defined in a single function. This exception is completely unexpected."
" Please file an issue on https://github.com/rlouf/mcx"
)
# TODO: Allow random variable assignments from models that return multiple variables
MULTIPLE_RETURNED_VALUES_ERROR = (
"only one variable is allowed on the left-hand-side of a random variable assignment "
" , several were provided"
)
TRANSFORMED_RETURNED_VALUE_ERROR = (
"only random variables can be returned from the model, found a transformed variable instead. "
"If you are interested in the posterior value of such a transformed variable, first sample "
" from the posterior distribution of random variables and apply the transformation to the variables "
" in the trace.\n"
"If you are looking to condition on a transformed variable, however, this is not yet possible. Please "
"open an issue on https://github.com/rlouf/mcx to signal your interest in having this feature"
)
DUPLICATE_VARIABLE_NAME_ERROR = "you cannot reuse the name of random variables."
def parse(model_fn: FunctionType) -> Tuple[GraphicalModel, dict]:
"""Parse the model definition to build a graphical model.
Parameter
---------
model
A live function object that contains the model definition.
Returns
-------
The intermediate representation of the model.
"""
source = inspect.getsource(model_fn)
source = textwrap.dedent(source) # TODO: do we need this with libcst?
tree = cst.parse_module(source)
namespace = model_fn.__globals__
definition_visitor = ModelDefinitionParser(namespace)
tree.visit(definition_visitor)
graph = definition_visitor.graph
return graph, namespace
class ModelDefinitionParser(cst.CSTVisitor):
"""Parses the model definition's Concrete Syntax Tree.
MCX models are expressed in the form of a function with the `@mcx.model`
decorator. MCX then parses the definition's code into an intermediate
representation, which is a graph where sample and deterministic operations
as well as the function's arguments are named nodes while intermediate
operations are part of the graph but unnamed.
This approach is similar to that of Theano. But unlike Theano, we do not
build the graph at runtime using side-effects. Instead, when the model is
instantiated its code source is read and translated into a graph. Every
subsequent operation is an operation on this graph.
This class contains all the parsing and graph building logic. Whenever a
sample statement is encountered, we perform a recursive visit of the
variables used to instantiate the distribution. The recursion stops
whenever we encounter a constant or a named variable. We then add nodes in
reverse order to the graph; nodes contain a function that can
reconstruct the corresponding CST node when called with arguments.
Say the parser encounters the following line:
>>> var <~ Normal(0, x)
where `x` is an argument to the model. The recursion stops immediately
since both arguments are either a constant or a named node. So we create a
new `SampleOp` with name `var` and a cst_generator function defined as:
>>> def cst_generator(*args):
... return cst.Call(
... func='Normal',
... args=args
... )
And we create an edge between the constant `0` and this SampleOp, the
placeholder `x` and this SampleOp. Each edge is indexed by the position of
the argument. All the compiler has to do is to traverse the graph in
topological order, translate `0` and `x` to their equivalent CST nodes, and
pass these nodes to var's `cst_generator` function when translating it into its
AST equivalent.
When parsing the following:
>>> var <~ Normal(0, f(x))
The procedure is similar except we first add the unnamed `f` node to the
graph and the edge from `x` to `f` before adding the `var` node and the
edges 0 -> `var` and `f` -> `var`.
There are a few potential pitfalls that we need to be aware of:
1. When a random variable is distributed following another MCX model, the
model is merged with the current one. Since namespaces can overlap we add a
scope for variables, named after the model which introduced the variables.
Furthermore, since the same model can be used several times (this would be
common in deep learning), we append the model's current number of occurrences
to the scope name.
2. As discussed in the docstring of `visit_FunctionDef` below, functions are
to be treated with care as they can either be standard functions, models
that are called within the current model, or models that are defined via
a closure.
Attributes
----------
current_scope:
Name of the model being currently parsed.
scopes:
Counts the number of times each scope has been encountered. Important in
situations like deep learning where the same module can be called
several times.
namespace:
Dictionary that contains the global namespace in which the model is
called.
sample_this_op:
Whether the Op currently being traversed should appear in forward sampling
and whether we should include them in the posterior samples.
named_variables:
Dictionary that associates the name of each Op to its node.
"""
def __init__(self, namespace: Dict):
self.namespace = namespace
self.current_scope: Optional[str] = None
self.scopes: Dict = defaultdict(int)
self.named_variables: Dict = {}
self.sample_this_op = True
def visit_FunctionDef(self, node: cst.FunctionDef) -> Union[None, bool]:
"""Visit a function definition.
When we traverse the Concrete Syntax Tree of a MCX model, a function definition
can represent several objects.
The main model definition
~~~~~~~~~~~~~~~~~~~~~~~~~
>>> @mcx.model
... def my_model(*args):
... # do things
... return
A regular function
~~~~~~~~~~~~~~~~~~~
>>> @mcx.model
... def my_model(*args):
... x <~ Normal(0, 1)
... y <~ Normal(0, 1)
...
... def add(a, b):
... return a + b
...
... z = add(x, y)
... return z
A closure
~~~~~~~~~
It is perfectly reasonable (and is necessary to work with nonparametrics) to define
a model like the following:
>>> @mcx.model
... def mint_coin():
... p <~ Beta(1, 1)
...
... @mcx.model
... def coin():
... head <~ Bernoulli(p)
... return head
...
... return coin
A submodel
~~~~~~~~~~
>>> @mcx.model
... def linreg(x):
... scale <~ HalfCauchy(0, 1)
...
... @mcx.model
... def Horseshoe(mu=0, tau=1., s=1.):
... scale <~ HalfCauchy(0, s)
... noise <~ Normal(0, tau)
... res = scale * noise + mu
... return res
...
... coefs <~ Horseshoe(np.zeros(x.shape[1]))
... predictions <~ Normal(np.matmul(x, coefs), scale)
... return predictions
We can even have nested submodels.
"""
# Standard python functions defined within a model need to be included
# as is in the resulting source code. So do submodels.
if hasattr(self, "graph"):
if is_model_definition(node, self.namespace):
model_node = ModelOp(lambda: node, node.name.value)
self.graph.add(model_node)
self.named_variables[node.name] = model_node
else:
function_node = FunctionOp(lambda: node, node.name.value)
self.graph.add(function_node)
self.named_variables[node.name] = function_node
return False # don't visit the node's children
# Each time we enter a model definition we create a new GraphicalModel
# which is returned after the definition's children have been visited.
# The current version does not support nested models but will.
self.graph: GraphicalModel = GraphicalModel()
self.graph.name = node.name.value
self.scope = node.name.value
def argument_cst(name, default=None):
return cst.Param(cst.Name(name), default=default)
function_args = node.params.params
for argument in function_args:
name = argument.name.value
try:  # parse argument default value, if any
default = self.recursive_visit(argument.default)
argument_node = Placeholder(
partial(argument_cst, name), name, False, True
)
self.graph.add(argument_node, default)
except TypeError:
argument_node = Placeholder(partial(argument_cst, name), name)
self.graph.add(argument_node)
self.named_variables[name] = argument_node
return None
# ----------------------------------------------------------------
# PARSE COMMENTS
# ----------------------------------------------------------------
def visit_SimpleStatementLine(self, node: cst.SimpleStatementLine) -> None:
"""Read comments.
LibCST includes each assignment statement in a statement line, to which
comments are attached.
We include an experimental feature | |
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResNet50(nn.Module):
def __init__(self, num_classes = 1000):
super(ResNet50, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size = 7, stride = 2, padding = 3, bias = False) # 112x112x64, layer 1
self.bn = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size = 3, stride = 2, padding = 1) #56x56x64
self.conv_001 = nn.Conv2d(64, 64, kernel_size = 1, stride = 1, bias = False) #56x56x64, layer 2
self.bn_001 = nn.BatchNorm2d(64)
self.conv_002 = nn.Conv2d(64, 64, kernel_size = 3, stride = 1, padding = 1, bias = False) #56x56x64, layer 3
self.bn_002 = nn.BatchNorm2d(64)
self.conv_003 = nn.Conv2d(64, 256, kernel_size = 1, stride = 1,bias = False) #56x56x256, layer 4
self.bn_003 = nn.BatchNorm2d(256)
# shortcut position (in 64, out 256)
self.conv_short0 = nn.Conv2d(64, 256, kernel_size = 1, stride = 1, bias = False)
self.bn_short0 = nn.BatchNorm2d(256)
self.conv_011 = nn.Conv2d(256, 64, kernel_size = 1, stride = 1, bias = False) #56x56x256, layer 5
self.bn_011 = nn.BatchNorm2d(64)
self.conv_012 = nn.Conv2d(64, 64, kernel_size = 3, stride = 1, padding = 1, bias = False) #56x56x64, layer 6
self.bn_012 = nn.BatchNorm2d(64)
self.conv_013 = nn.Conv2d(64, 256, kernel_size = 1, stride = 1,bias = False) #56x56x256, layer 7
self.bn_013 = nn.BatchNorm2d(256)
# (in 256, out 256)
self.conv_021 = nn.Conv2d(256, 64, kernel_size = 1, stride = 1, bias = False) #56x56x256, layer 8
self.bn_021 = nn.BatchNorm2d(64)
self.conv_022 = nn.Conv2d(64, 64, kernel_size = 3, stride = 1, padding = 1, bias = False) #56x56x64, layer 9
self.bn_022 = nn.BatchNorm2d(64)
self.conv_023 = nn.Conv2d(64, 256, kernel_size = 1, stride = 1,bias = False) #56x56x256, layer 10
self.bn_023 = nn.BatchNorm2d(256)
# (in 256, out 256)
self.conv_031 = nn.Conv2d(256, 128, kernel_size = 1, stride = 1, bias = False) #56x56x128, layer 11
self.bn_031 = nn.BatchNorm2d(128)
self.conv_032 = nn.Conv2d(128, 128, kernel_size = 3, stride = 2, padding = 1, bias = False) #28x28x128, layer 12
self.bn_032 = nn.BatchNorm2d(128)
self.conv_033 = nn.Conv2d(128, 512, kernel_size = 1, stride = 1,bias = False) #28x28x512, layer 13
self.bn_033 = nn.BatchNorm2d(512)
# shortcut position (in 256, out 512) (stride == 2)
self.conv_short3 = nn.Conv2d(256, 512, kernel_size = 1, stride = 2, bias = False)
self.bn_short3 = nn.BatchNorm2d(512)
self.conv_041 = nn.Conv2d(512, 128, kernel_size = 1, stride = 1, bias = False) #28x28x128, layer 14
self.bn_041 = nn.BatchNorm2d(128)
self.conv_042 = nn.Conv2d(128, 128, kernel_size = 3, stride = 1, padding = 1, bias = False) #28x28x128, layer 15
self.bn_042 = nn.BatchNorm2d(128)
self.conv_043 = nn.Conv2d(128, 512, kernel_size = 1, stride = 1,bias = False) #28x28x512, layer 16
self.bn_043 = nn.BatchNorm2d(512)
# (in 512, out 512)
self.conv_051 = nn.Conv2d(512, 128, kernel_size = 1, stride = 1, bias = False) #28x28x128, layer 17
self.bn_051 = nn.BatchNorm2d(128)
self.conv_052 = nn.Conv2d(128, 128, kernel_size = 3, stride = 1, padding = 1, bias = False) #28x28x128, layer 18
self.bn_052 = nn.BatchNorm2d(128)
self.conv_053 = nn.Conv2d(128, 512, kernel_size = 1, stride = 1,bias = False) #28x28x512, layer 19
self.bn_053 = nn.BatchNorm2d(512)
# (in 512, out 512)
self.conv_061 = nn.Conv2d(512, 128, kernel_size = 1, stride = 1, bias = False) #28x28x128, layer 20
self.bn_061 = nn.BatchNorm2d(128)
self.conv_062 = nn.Conv2d(128, 128, kernel_size = 3, stride = 1, padding = 1, bias = False) #28x28x128, layer 21
self.bn_062 = nn.BatchNorm2d(128)
self.conv_063 = nn.Conv2d(128, 512, kernel_size = 1, stride = 1,bias = False) #28x28x512, layer 22
self.bn_063 = nn.BatchNorm2d(512)
# (in 512, out 512)
self.conv_071 = nn.Conv2d(512, 256, kernel_size = 1, stride = 1, bias = False) #28x28x256, layer 23
self.bn_071 = nn.BatchNorm2d(256)
self.conv_072 = nn.Conv2d(256, 256, kernel_size = 3, stride = 2, padding = 1, bias = False) #14x14x256, layer 24
self.bn_072 = nn.BatchNorm2d(256)
self.conv_073 = nn.Conv2d(256, 1024, kernel_size = 1, stride = 1,bias = False) #14x14x1024, layer 25
self.bn_073 = nn.BatchNorm2d(1024)
# shortcut position (in 512, out 1024) (stride == 2)
self.conv_short7 = nn.Conv2d(512, 1024, kernel_size = 1, stride = 2, bias = False)
self.bn_short7 = nn.BatchNorm2d(1024)
self.conv_081 = nn.Conv2d(1024, 256, kernel_size = 1, stride = 1, bias = False) #14x14x256, layer 26
self.bn_081 = nn.BatchNorm2d(256)
self.conv_082 = nn.Conv2d(256, 256, kernel_size = 3, stride = 1, padding = 1, bias = False) #14x14x256, layer 27
self.bn_082 = nn.BatchNorm2d(256)
self.conv_083 = nn.Conv2d(256, 1024, kernel_size = 1, stride = 1,bias = False) #14x14x1024, layer 28
self.bn_083 = nn.BatchNorm2d(1024)
# (in 1024, out 1024)
self.conv_091 = nn.Conv2d(1024, 256, kernel_size = 1, stride = 1, bias = False) #14x14x256, layer 29
self.bn_091 = nn.BatchNorm2d(256)
self.conv_092 = nn.Conv2d(256, 256, kernel_size = 3, stride = 1, padding = 1, bias = False) #14x14x256, layer 30
self.bn_092 = nn.BatchNorm2d(256)
self.conv_093 = nn.Conv2d(256, 1024, kernel_size = 1, stride = 1,bias = False) #14x14x1024, layer 31
self.bn_093 = nn.BatchNorm2d(1024)
# (in 1024, out 1024)
self.conv_101 = nn.Conv2d(1024, 256, kernel_size = 1, stride = 1, bias = False) #14x14x256, layer 32
self.bn_101 = nn.BatchNorm2d(256)
self.conv_102 = nn.Conv2d(256, 256, kernel_size = 3, stride = 1, padding = 1, bias = False) #14x14x256, layer 33
self.bn_102 = nn.BatchNorm2d(256)
self.conv_103 = nn.Conv2d(256, 1024, kernel_size = 1, stride = 1,bias = False) #14x14x1024, layer 34
self.bn_103 = nn.BatchNorm2d(1024)
# (in 1024, out 1024)
self.conv_111 = nn.Conv2d(1024, 256, kernel_size = 1, stride = 1, bias = False) #14x14x256, layer 35
self.bn_111 = nn.BatchNorm2d(256)
self.conv_112 = nn.Conv2d(256, 256, kernel_size = 3, stride = 1, padding = 1, bias = False) #14x14x256, layer 36
self.bn_112 = nn.BatchNorm2d(256)
self.conv_113 = nn.Conv2d(256, 1024, kernel_size = 1, stride = 1,bias = False) #14x14x1024, layer 37
self.bn_113 = nn.BatchNorm2d(1024)
# (in 1024, out 1024)
self.conv_121 = nn.Conv2d(1024, 256, kernel_size = 1, stride = 1, bias = False) #14x14x256, layer 38
self.bn_121 = nn.BatchNorm2d(256)
self.conv_122 = nn.Conv2d(256, 256, kernel_size = 3, stride = 1, padding = 1, bias = False) #14x14x256, layer 39
self.bn_122 = nn.BatchNorm2d(256)
self.conv_123 = nn.Conv2d(256, 1024, kernel_size = 1, stride = 1,bias = False) #14x14x1024, layer 40
self.bn_123 = nn.BatchNorm2d(1024)
# (in 1024, out 1024)
self.conv_131 = nn.Conv2d(1024, 512, kernel_size = 1, stride = 1, bias = False) #14x14x512, layer 41
self.bn_131 = nn.BatchNorm2d(512)
self.conv_132 = nn.Conv2d(512, 512, kernel_size = 3, stride = 2, padding = 1, bias = False) #7x7x512, layer 42
self.bn_132 = nn.BatchNorm2d(512)
self.conv_133 = nn.Conv2d(512, 2048, kernel_size = 1, stride = 1,bias = False) #7x7x2048, layer 43
self.bn_133 = nn.BatchNorm2d(2048)
# shortcut position (in 1024, out 2048) (stride == 2)
self.conv_short13 = nn.Conv2d(1024, 2048, kernel_size = 1, stride = 2, bias = False)
self.bn_short13 = nn.BatchNorm2d(2048)
self.conv_141 = nn.Conv2d(2048, 512, kernel_size = 1, stride = 1, bias = False) #7x7x512, layer 44
self.bn_141 = nn.BatchNorm2d(512)
self.conv_142 = nn.Conv2d(512, 512, kernel_size = 3, stride = 1, padding = 1, bias = False) #7x7x512, layer 45
self.bn_142 = nn.BatchNorm2d(512)
self.conv_143 = nn.Conv2d(512, 2048, kernel_size = 1, stride = 1,bias = False) #7x7x2048, layer 46
self.bn_143 = nn.BatchNorm2d(2048)
# (in 2048, out 2048)
self.conv_151 = nn.Conv2d(2048, 512, kernel_size = 1, stride = 1, bias = False) #7x7x512, layer 47
self.bn_151 = nn.BatchNorm2d(512)
self.conv_152 = nn.Conv2d(512, 512, kernel_size = 3, stride = 1, padding = 1, bias = False) #7x7x512, layer 48
self.bn_152 = nn.BatchNorm2d(512)
self.conv_153 = nn.Conv2d(512, 2048, kernel_size = 1, stride = 1,bias = False) #7x7x2048, layer 49
self.bn_153 = nn.BatchNorm2d(2048)
# (in 2048, out 2048)
self.linear = nn.Linear(2048, num_classes)# FC, layer 50
def forward(self, x):
print("[input]:",x.shape)
#224
out = self.conv1(x)
out = self.bn(out)
print("[7x7 conv]:",out.shape)
out = F.relu(out)
#112
out = self.maxpool(out)
print("[max pooled] :",out.shape)
x = out
#layer0 - 1
out = self.conv_001(out) #1
out = self.bn_001(out)
out = F.relu(out)
out = self.conv_002(out) #2
out = self.bn_002(out)
out = F.relu(out)
out = self.conv_003(out) #3
out = self.bn_003(out)
shortcut =self.conv_short0(x)
shortcut = self.bn_short0(shortcut)
out = out + shortcut
out = F.relu(out)
#layer0 - 2
out = self.conv_011(out) #1
out = self.bn_011(out)
out = F.relu(out)
out = self.conv_012(out) #2
out = self.bn_012(out)
out = F.relu(out)
out = self.conv_013(out) #3
out = self.bn_013(out)
#layer0 - 3
out = self.conv_021(out) #1
out = self.bn_021(out)
out = F.relu(out)
out = self.conv_022(out) #2
out = self.bn_022(out)
out = F.relu(out)
out = self.conv_023(out) #3
out = self.bn_023(out)
print("[layer 0]:",out.shape)
#layer1 - 1
out = self.conv_031(out) #1
out = self.bn_031(out)
out = F.relu(out)
out = self.conv_032(out) #2
out = self.bn_032(out)
out = F.relu(out)
out = | |
# Misc/functionalList.py
#=============================================
#=============================================
# A Functional-Paradigm List Implementation
# in Python 3
#
# <NAME>
#=============================================
#=============================================
"""
The following code implements the idea of a list
as would be experienced in a functional or logical
language such as Haskell or Prolog. Three List
classes inherit from the 'AbstractList' superclass
simply to avoid code duplication.
Framework:
AbstractList
|
|
|----------------|----------------|
| | |
EmptyList ConsList InfiniteList
An interface is enforced automatically towards the
bottom of this document. When interpreted, this
script imports runs `doctest` (also towards the
bottom), to demonstrate and exercise the dynamic
dispatch of shared functionality.
Index:
-- AbstractList --
> __repr__(self)
> isEmpty(self)
> __str__(self)
> takeWhile(self, predicate)
> do(self, function)
> detect(self, predicate)
> map(self, transformer)
> filter(self, predicate)
-- EmptyList --
> length(self)
> isEmpty(self)
> head(self)
> tail(self)
> concat(self, other)
> __add__(self, other)
> strChain(self)
> commaElements(self)
> takeWhile(self, predicate)
> dropWhile(self, predicate)
> do(self, function)
> detect(self, predicate)
-- ConsList --
> __init__(self, self, head, tail)
> length(self)
> head(self)
> tail(self)
> next(self)
> concat(self, other)
> __add__(self, other)
> strChain(self)
> commaElements(self)
> dropWhile(self, predicate)
-- InfiniteList --
> __init__(self, initial, nextFun)
> head(self)
> next(self)
> tail(self)
> length(self)
> concat(self, other)
> __add__(self, other)
> commaElements(self, ctr)
> __str__(self)
> dropWhile(self, predicate)
"""
#***********************
#***********************
# AbstractList
#***********************
#***********************
class AbstractList:
#~
def __repr__(self):
return self.__str__()
#~
def isEmpty(self):
return False
#~
def __str__(self):
"""
>>> str(EmptyList())
'[]'
"""
return '[' + self.commaElements() + ']'
#~
def takeWhile(self, predicate):
"""
returns a list containing the elements of self as far as
(but excluding) the first element for which predicate is
false; predicate is a function on the elements of self
that returns a Boolean.
>>> oneToFifty = nums(1, 51)
>>> oneToTwenty = oneToFifty.takeWhile(lambda x: x <= 20)
>>> oneToTwenty.length()
20
>>> oneToTwenty.head()
1
>>> oneToTwenty.tail().tail().head()
3
"""
return ConsList(self.head(), self.tail().takeWhile(predicate)) if predicate(self.head()) else EmptyList()
#~
def do(self, function):
"""
Applies function to every element of this list
Returns no result.
>>> ConsList(1, ConsList(2, ConsList(3, EmptyList()))).do(lambda each: print(each))
1
2
3
"""
function(self.head())
(self.tail()).do(function)
#~
def detect(self, predicate):
"""
Returns the first element of this list for which
predicate is true. If there is no such element,
raises the exception IndexError("No such object")
>>> ConsList(1, ConsList(2, ConsList(3, EmptyList()))).detect(lambda each: each > 2)
3
>>> (nums(1, 25)).detect(lambda each: each > 20)
21
"""
return self.head() if predicate(self.head()) else (self.tail()).detect(predicate)
#~
def __iter__(self):
"""
>>> for i in ConsList(1, ConsList(2, ConsList(3, ConsList(4, EmptyList())))): print(i)
1
2
3
4
>>> for i in (InfiniteList(0, lambda n: n + 1)).takeWhile(lambda m: m < 5): print(i)
0
1
2
3
4
"""
return ListIterator(self)
#~
def map(self, transformer):
"""
Returns a new list, the same length as self, whose
elements are obtained by applying the function transformer
to the elements of self.
>>> ConsList(1, ConsList(2, ConsList(3, EmptyList()))).map(lambda x: x * 2)
[2, 4, 6]
>>> EmptyList().map(lambda x: x * 2)
[]
"""
if self.length() == 0:
return EmptyList()
res = EmptyList()
for i in self:
res = res.concat(ConsList(transformer(i), EmptyList()))
return res
#~
def filter(self, predicate):
"""
returns a new list, no longer than self, that contains just
those elements of self for which the function predicate is true.
>>> ConsList(1, ConsList(2, ConsList(3, EmptyList()))).filter(lambda x: x%2 == 0)
[2]
"""
if self.length() == 0:
return EmptyList()
res = EmptyList()
for i in self:
if predicate(i):
res = res.concat(ConsList(i, EmptyList()))
return res
#***********************
# Empty List
#***********************
class EmptyList(AbstractList):
""" Represents an empty list. """
#~
def length(self):
"""
>>> EmptyList().length()
0
"""
return 0
#~
def isEmpty(self):
"""
>>> EmptyList().isEmpty()
True
"""
return True
#~
def head(self):
raise ValueError("Can't take `head` of the empty list")
#~
def tail(self):
raise ValueError("Can't take `tail` of the empty list")
#~
def concat(self, other):
"""
>>> EmptyList().concat(ConsList(1, ConsList(2, ConsList (3, EmptyList()))))
[1, 2, 3]
"""
return other
#~
def __add__(self, other):
"""
>>> EmptyList() + ConsList(1, ConsList(2, ConsList(3, EmptyList())))
[1, 2, 3]
>>> (EmptyList() + ConsList(8, ConsList(4, EmptyList()))).length()
2
"""
return other
#~
def strChain(self):
""" Utility function for commaElements(). """
return ''
#~
def commaElements(self):
""" Utility function for __str__(). """
return ''
#~
def takeWhile(self, predicate):
"""
>>> EmptyList().takeWhile(lambda n: n < 3)
[]
"""
return EmptyList()
#~
def dropWhile(self, predicate):
"""
>>> EmptyList().dropWhile(lambda n: n < 3)
[]
"""
return EmptyList()
#~
def do(self, function):
return
#~
def detect(self, predicate):
raise IndexError("No such object")
#***********************
# Cons List
#***********************
class ConsList(AbstractList):
""" Represents a non-empty list. """
#~
def __init__(self, head, tail):
self.__head = head
self.__tail = tail
#~
def length(self):
"""
>>> la = ConsList('a', EmptyList())
>>> la.length()
1
>>> ConsList(1, ConsList(2, EmptyList())).length()
2
"""
return 1 + self.tail().length()
#~
def head(self):
"""
>>> ConsList(1, ConsList(2, ConsList(3, EmptyList()))).head()
1
>>> (ConsList(90, ConsList(80, ConsList(70, EmptyList()))).tail()).head()
80
"""
return self.__head
#~
def tail(self):
"""
>>> ConsList(1, ConsList(2, ConsList(3, EmptyList()))).tail()
[2, 3]
"""
return self.__tail
#~
def next(self):
"""
>>> ConsList(90, ConsList(80, EmptyList())).next()
80
>>> (ConsList(90, ConsList(80, ConsList(70, EmptyList()))).tail()).next()
70
"""
return (self.tail()).head()
#~
def concat(self, other):
"""
>>> ConsList(3, ConsList(2, EmptyList())).concat(ConsList(1, ConsList(2, ConsList (3, EmptyList()))))
[3, 2, 1, 2, 3]
"""
return ConsList(self.head(), (self.tail()).concat(other))
#~
def __add__(self, other):
"""
>>> ConsList(3, ConsList(2, ConsList(1, EmptyList()))) + ConsList(2, ConsList(3, EmptyList()))
[3, 2, 1, 2, 3]
>>> (ConsList(16, EmptyList()) + ConsList(8, ConsList(4, EmptyList()))).length()
3
"""
return self.concat(other)
#~
def strChain(self):
return ', ' + str(self.head()) + (self.tail()).strChain()
#~
def commaElements(self):
return str(self.head()) + (self.tail()).strChain()
#~
def dropWhile(self, predicate):
"""
returns a list like self except that all of the leading
elements for which predicate is true have been removed.
Thus, predicate will be false on the first element of the
result; predicate is a function on the elements of self
that returns a Boolean.
>>> oneToFifty = nums(1, 51)
>>> twentyOneOn = oneToFifty.dropWhile(lambda x: x <= 20)
>>> twentyOneOn.length()
30
>>> twentyOneOn.head()
21
>>> twentyOneOn.tail().tail().head()
23
"""
return self.tail().dropWhile(predicate) if predicate(self.head()) else ConsList(self.head(), self.tail())
#***********************
# Infinite List
#***********************
class InfiniteList(AbstractList):
'''
Represents an infinite list, defined by an initial value, and a
function that generates the next value. So, for example, the Natural
numbers would be represented by the initial value 0, and the function
λn. n + 1.
'''
#~
def __init__(self, initial, nextFun):
self.__initial = initial
self.__nextFun = nextFun
#~
def head(self):
"""
>>> InfiniteList(0, lambda n: n + 1).head()
0
"""
return self.__initial
#~
def next(self):
return self.__nextFun(self.__initial)
#~
def tail(self):
"""
>>> InfiniteList(0, lambda n: n + 1).tail().head()
1
>>> InfiniteList(0, lambda n: n + 1).tail().tail().head()
2
>>> InfiniteList(0, lambda n: n + 1).tail().tail().tail().head()
3
"""
return InfiniteList(self.next(), self.__nextFun)
#~
def length(self):
"""
>>> InfiniteList(0, lambda n: n + 1).length()
inf
"""
return float('inf')
#~
def concat(self, other):
return self
#~
def __add__(self, other):
return self
#~
def commaElements(self, ctr):
return str(self.head()) + ", " + (self.tail()).commaElements(ctr+1) if (ctr < 7) else "..."
#~
def __str__(self):
"""
>>> InfiniteList(0, lambda n: n + 1)
[0, 1, 2, 3, 4, 5, 6, ...]
"""
return '[' + self.commaElements(0) + ']'
#~
def dropWhile(self, predicate):
"""
returns a list like self except that all of the leading
elements for which predicate is true have been removed.
Thus, predicate will be false on the first element of the
result; predicate is a function on the elements of self
that returns a Boolean.
>>> oneToFifty = InfiniteList(1, lambda n: n + 1)
>>> twentyOneOn = oneToFifty.dropWhile(lambda x: x <= 20)
>>> twentyOneOn.head()
21
>>> twentyOneOn.tail().tail().head()
23
"""
return self.tail().dropWhile(predicate) if predicate(self.head()) else InfiniteList(self.head(), self.__nextFun)
#***********************
# Iterator Engine
#***********************
""" allows for python list iterators such as `for i in iterable` """
class ListIterator:
def __init__(self, aList):
self.remainderOfList = aList
def __next__(self):
if self.remainderOfList.isEmpty():
raise StopIteration
result = self.remainderOfList.head()
self.remainderOfList = self.remainderOfList.tail()
return result
#=============================================
#=============================================
# Testing Engine
#=============================================
#=============================================
#***********************
# Interface Enforcement
#***********************
"""
>>> set(dir(EmptyList)).issuperset(listInterface)
True
"""
listInterface = {'head', 'tail', 'next', 'isEmpty', 'takeWhile',\
'dropWhile', 'length', 'do', 'detect', 'concat',\
'__add__', 'commaElements'}
def classImplements(c, ms):
"""
c is a class, and ms is a set of method names.
Returns True if c implements all the methods in ms.
Complains otherwise, and returns False
"""
result = True
for n in ms:
m = getattr(c, n, False)
if not (m and callable(m)):
print(c, "does not have method", n)
result = False
return result
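# Illustrative check (not executed here): classImplements(ConsList, listInterface)
# returns True, since ConsList defines or inherits every listed method, while
# classImplements(EmptyList, listInterface) prints a complaint about 'next'
# (which EmptyList does not define) and returns False.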
#***********************
# Generator Functions
#***********************
#~
def nums(lo, hi):
"""
returns a list containing the | |
#!/usr/bin/env python
#------------------------------
def usage() : return "Use command: python hexanode/examples/ex-10-sort-data.py"
#------------------------------
import os
import sys
import hexanode
import numpy as np
from time import time
from math import sqrt
from pyimgalgos.GlobalUtils import print_ndarr
from expmon.HexDataIO import HexDataIO, do_print
#from pyimgalgos.HBins import HBins
OSQRT3 = 1./sqrt(3.)
#------------------------------
def py_sort(**kwargs) :
SRCCHS = kwargs.get('srcchs', {'AmoETOF.0:Acqiris.0':(6,7,8,9,10,11),'AmoITOF.0:Acqiris.0':(0,)})
DSNAME = kwargs.get('dsname', 'exp=xpptut15:run=390:smd') # or h5 file: 'xpptut15-r0390-e300000-n32-mpi.h5'
EVSKIP = kwargs.get('evskip', 0)
EVENTS = kwargs.get('events', 100) + EVSKIP
NUM_CHANNELS = kwargs.get('numchs', 7)
NUM_HITS = kwargs.get('numhits', 16)
CALIBTAB = kwargs.get('calibtab', 'calibration_table_data.txt')
VERBOSE = kwargs.get('verbose', False)
tdc_ns = np.zeros((NUM_CHANNELS, NUM_HITS), dtype=np.float64)
number_of_hits = np.zeros((NUM_CHANNELS,), dtype=np.int32)
command = -1;
# The "command"-value is set in the first line of configuration file "sorter_data_cfg.txt"
# 1 = sort and write new file
# 2 = calibrate fv, fw, w_offset
# 3 = create calibration table files
# create the sorter object:
sorter = hexanode.py_sort_class()
fname_cfg = "sorter_data_cfg.txt"
status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y=\
hexanode.py_read_config_file(fname_cfg, sorter)
print 'read_config_file status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y=',\
status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y
if not status :
print "WARNING: can't read config file %s" % fname_cfg
del sorter
sys.exit(0)
print 'use_sum_correction', sorter.use_sum_correction
print 'use_pos_correction', sorter.use_pos_correction
if sorter is not None :
if sorter.use_sum_correction or sorter.use_pos_correction :
status = hexanode.py_read_calibration_tables(CALIBTAB, sorter)
if command == -1 :
print "no config file was read. Nothing to do."
if sorter is not None : del sorter
sys.exit(0)
Cu1 = sorter.cu1
Cu2 = sorter.cu2
Cv1 = sorter.cv1
Cv2 = sorter.cv2
Cw1 = sorter.cw1
Cw2 = sorter.cw2
Cmcp = sorter.cmcp
print "Numeration of channels - u1:%i u2:%i v1:%i v2:%i w1:%i w2:%i mcp:%i"%\
(Cu1, Cu2, Cv1, Cv2, Cw1, Cw2, Cmcp)
inds_of_channels = (Cu1, Cu2, Cv1, Cv2, Cw1, Cw2)
incr_of_consistence = ( 1, 2, 4, 8, 16, 32)
inds_incr = zip(inds_of_channels, incr_of_consistence)
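# Illustrative reading of the bit flags above: each anode channel contributes
# its increment to the consistence indicator filled later in the event loop,
# so hits on u1 and v1 alone give 1 + 4 = 5, and all six channels firing give
# 1 + 2 + 4 + 8 + 16 + 32 = 63.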
DIO = HexDataIO(srcchs=SRCCHS, numchs=NUM_CHANNELS, numhits=NUM_HITS)
#=====================
if '.h5' in DSNAME : DIO.open_input_h5file(DSNAME)
else :
DIO.open_input_dataset(DSNAME, pbits=0)
DIO.set_wf_hit_finder_parameters(**kwargs)
DIO.print_wf_hit_finder_parameters()
#=====================
print 'DIO experiment : %s' % DIO.experiment()
print 'DIO run : %s' % DIO.run()
print 'DIO start time : %s' % DIO.start_time()
print 'DIO stop time : %s' % DIO.stop_time()
print 'DIO tdc_resolution : %.3f' % DIO.tdc_resolution()
print "init sorter... "
#sorter.set_tdc_resolution_ns(0.025)
sorter.set_tdc_resolution_ns(DIO.tdc_resolution())
sorter.set_tdc_array_row_length(NUM_HITS)
sorter.set_count(number_of_hits)
sorter.set_tdc_pointer(tdc_ns)
#sorter.set_use_reflection_filter_on_u1(False) # Achim recommended False
#sorter.set_use_reflection_filter_on_u2(False)
if command >= 2 :
sorter.create_scalefactors_calibrator(True,\
sorter.runtime_u,\
sorter.runtime_v,\
sorter.runtime_w, 0.78,\
sorter.fu, sorter.fv, sorter.fw)
error_code = sorter.init_after_setting_parameters()
if error_code :
print "sorter could not be initialized\n"
error_text = sorter.get_error_text(error_code, 512)
print 'Error %d: %s' % (error_code, error_text)
sys.exit(0)
print "Calibration factors:\n f_U (mm/ns) =%f\n f_V (mm/ns) =%f\n f_W (mm/ns) =%f\n Offset on layer W (ns) =%f\n"%\
(2*sorter.fu, 2*sorter.fv, 2*sorter.fw, w_offset)
print "ok for sorter initialization\n"
print "reading event data... \n"
evnum = 0
t_sec = time()
t1_sec = time()
while DIO.read_next_event() :
evnum = DIO.event_number()
if evnum < EVSKIP : continue
if evnum > EVENTS : break
if do_print(evnum) :
t1 = time()
print 'Event: %06d, dt(sec): %.3f' % (evnum, t1 - t1_sec)
t1_sec = t1
#==================================
# TODO by end user:
# Here you must read in a data block from your data file
# and fill the array tdc_ns[][] and number_of_hits[]
#nhits = np.zeros((NUMBER_OF_CHANNELS,), dtype=np.int32)
DIO.get_number_of_hits_array(number_of_hits)
if DIO.error_flag() :
error_text = DIO.get_error_text(DIO.error_flag())
print "DIO Error %d: %s" % (DIO.error_flag(), error_text)
sys.exit(0)
if VERBOSE : print ' number_of_hits_array', number_of_hits[:8]
DIO.get_tdc_data_array(tdc_ns)
if DIO.error_flag() :
error_text = DIO.get_error_text(DIO.error_flag())
print "DIO Error %d: %s" % (DIO.error_flag(), error_text)
sys.exit(0)
if VERBOSE : print ' TDC data:\n', tdc_ns[0:8,0:5]
# apply conversion of times to ns
if False : # DIO returns tdc_ns already in [ns]
tdc_ns *= DIO.tdc_resolution()
#==================================
# NHITS - number of hits per channel
if True :
nhits_u1 = number_of_hits[Cu1]
nhits_u2 = number_of_hits[Cu2]
nhits_v1 = number_of_hits[Cv1]
nhits_v2 = number_of_hits[Cv2]
nhits_w1 = number_of_hits[Cw1]
nhits_w2 = number_of_hits[Cw2]
nhits_mcp= number_of_hits[Cmcp]
# TIME_CH - time of the 1-st hit
if True :
t0_u1 = tdc_ns[Cu1,0]
t0_u2 = tdc_ns[Cu2,0]
t0_v1 = tdc_ns[Cv1,0]
t0_v2 = tdc_ns[Cv2,0]
t0_w1 = tdc_ns[Cw1,0]
t0_w2 = tdc_ns[Cw2,0]
t0_mcp= tdc_ns[Cmcp,0]
# REFLECTIONS
if True :
if number_of_hits[Cu2]>1 : refl_u1= tdc_ns[Cu2,1] - tdc_ns[Cu1,0]
if number_of_hits[Cu1]>1 : refl_u2= tdc_ns[Cu1,1] - tdc_ns[Cu2,0]
if number_of_hits[Cv2]>1 : refl_v1= tdc_ns[Cv2,1] - tdc_ns[Cv1,0]
if number_of_hits[Cv1]>1 : refl_v2= tdc_ns[Cv1,1] - tdc_ns[Cv2,0]
if number_of_hits[Cw2]>1 : refl_w1= tdc_ns[Cw2,1] - tdc_ns[Cw1,0]
if number_of_hits[Cw1]>1 : refl_w2= tdc_ns[Cw1,1] - tdc_ns[Cw2,0]
# TIME_SUMS
time_sum_u = tdc_ns[Cu1,0] + tdc_ns[Cu2,0] - 2*tdc_ns[Cmcp,0]
time_sum_v = tdc_ns[Cv1,0] + tdc_ns[Cv2,0] - 2*tdc_ns[Cmcp,0]
time_sum_w = tdc_ns[Cw1,0] + tdc_ns[Cw2,0] - 2*tdc_ns[Cmcp,0]
# UVW
u_ns = tdc_ns[Cu1,0] - tdc_ns[Cu2,0]
v_ns = tdc_ns[Cv1,0] - tdc_ns[Cv2,0]
w_ns = tdc_ns[Cw1,0] - tdc_ns[Cw2,0]
u = u_ns * sorter.fu
v = v_ns * sorter.fv
w = (w_ns + w_offset) * sorter.fw
Xuv = u
Xuw = u
Xvw = v + w
Yuv = (u - 2*v)*OSQRT3
Yuw = (2*w - u)*OSQRT3
Yvw = (w - v)*OSQRT3
dX = Xuv - Xvw
dY = Yuv - Yvw
Deviation = sqrt(dX*dX + dY*dY)
if sorter.use_hex :
# shift the time sums to zero:
sorter.shift_sums(+1, offset_sum_u, offset_sum_v, offset_sum_w)
#shift layer w so that the middle lines of all layers intersect in one point:
sorter.shift_layer_w(+1, w_offset)
else :
# shift the time sums to zero:
sorter.shift_sums(+1, offset_sum_u, offset_sum_v)
# shift all signals from the anode so that the center of the detector is at x=y=0:
sorter.shift_position_origin(+1, pos_offset_x, pos_offset_y)
sorter.feed_calibration_data(True, w_offset) # for calibration of fv, fw, w_offset and correction tables
#DIO.get_tdc_data_array(tdc_ns)
time_sum_u_corr = tdc_ns[Cu1,0] + tdc_ns[Cu2,0] - 2*tdc_ns[Cmcp,0]
time_sum_v_corr = tdc_ns[Cv1,0] + tdc_ns[Cv2,0] - 2*tdc_ns[Cmcp,0]
time_sum_w_corr = tdc_ns[Cw1,0] + tdc_ns[Cw2,0] - 2*tdc_ns[Cmcp,0]
#print 'map_is_full_enough', hexanode.py_sorter_scalefactors_calibration_map_is_full_enough(sorter)
sfco = hexanode.py_scalefactors_calibration_class(sorter)
# break loop if statistics is enough
if sfco :
if sfco.map_is_full_enough() :
print 'sfo.map_is_full_enough(): %s event number: %06d' % (sfco.map_is_full_enough(), evnum)
break
# XY_RESOLUTION :
if True :
#print " binx: %d biny: %d resolution(FWHM): %.6f" % (sfco.binx, sfco.biny, sfco.detector_map_resol_FWHM_fill)
if sfco.binx>=0 and sfco.biny>=0 :
binx= sfco.binx
biny= sfco.biny
resol_fwhm= sfco.detector_map_resol_FWHM_fill
# Sort the TDC-Data and reconstruct missing signals and apply the sum- and NL-correction.
# number_of_particles is the number of reconstructed particles
#========================================================
number_of_particles = sorter.sort() if command == 1 else\
sorter.run_without_sorting()
#========================================================
if True :
print " Event %5i number_of_particles: %i" % (evnum, number_of_particles)
for i in range(number_of_particles) :
hco= hexanode.py_hit_class(sorter, i)
print " p:%1i x:%.3f y:%.3f t:%.3f met:%d" % (i, hco.x, hco.y, hco.time, hco.method)
print " part1 u:%.3f v:%.3f w:%.3f" % (u, v, w)
#-------------------------
# TODO by the end user...
if number_of_particles<1 : continue
hco= hexanode.py_hit_class(sorter, 0)
# MISC
if False :
# fill Consistence Indicator
consistenceIndicator = 0
for (ind, incr) in inds_incr :
if number_of_hits[ind]>0 : consistenceIndicator += incr
consist_indicator = consistenceIndicator
rec_method = hco.method
#print 'reconstruction method %d' % hco.method
# XY_2D :
if False :
# fill 2-d images
x1, y1 = hco.x, hco.y
x2, y2 = (-10,-10)
if number_of_particles > 1 :
hco2 = hexanode.py_hit_class(sorter, 1)
x2, y2 = hco2.x, hco2.y
ix1, ix2, ixuv, ixuw, ixvw = img_x_bins.bin_indexes((x1, x2, Xuv, Xuw, Xvw))
iy1, iy2, iyuv, iyuw, iyvw = img_y_bins.bin_indexes((y1, y2, Yuv, Yuw, Yvw))
img_xy_1 [iy1, ix1] += 1
img_xy_2 [iy2, ix2] += 1
img_xy_uv[iyuv, ixuv] += 1
img_xy_uw[iyuw, ixuw] += 1
img_xy_vw[iyvw, ixvw] += 1
# PHYSICS :
if False :
if number_of_hits[Cmcp]>1 :
t0, t1 = tdc_ns[Cmcp,:2]
it0, it1 = t_ns_bins.bin_indexes((t0, t1))
t1_vs_t0[it1, it0] += 1
ix, iy = x_mm_bins.bin_indexes((Xuv,Yuv))
#iy = y_mm_bins.bin_indexes((Yuv,))
x_vs_t0[ix, it0] += 1
y_vs_t0[iy, it0] += 1
# // write the results into a new data file.
# // the variable "number_of_particles" contains the number of reconstructed particles.
# // the x and y (in mm) and TOF (in ns) is stored in the array sorter->output_hit_array:
# // for the i-th particle (i starts from 0):
# // hco= hexanode.py_hit_class(sorter, i)
# // hco.x, hco.y, hco.time
# // for each particle you can also retrieve the information about how the particle
# // was reconstructed (to get some measure of the confidence):
# // hco.method
# end of the event loop
if | |
find CLR method
"""
IsRequiredForFormCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
required to be completed on a form. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsRequiredForForm.
Returns: A boolean that contains the value that is returned by
System.Windows.Automation.AutomationProperties.GetIsRequiredForForm(System.Windo
ws.DependencyObject), if it's set; otherwise false.
"""
pass
def PeerFromProvider(self, *args): #cannot find CLR method
"""
PeerFromProvider(self: AutomationPeer, provider: IRawElementProviderSimple) -> AutomationPeer
Gets an System.Windows.Automation.Peers.AutomationPeer for the specified
System.Windows.Automation.Provider.IRawElementProviderSimple proxy.
provider: The class that implements
System.Windows.Automation.Provider.IRawElementProviderSimple.
Returns: The System.Windows.Automation.Peers.AutomationPeer.
"""
pass
def ProviderFromPeer(self, *args): #cannot find CLR method
"""
ProviderFromPeer(self: AutomationPeer, peer: AutomationPeer) -> IRawElementProviderSimple
Gets the System.Windows.Automation.Provider.IRawElementProviderSimple for the
specified System.Windows.Automation.Peers.AutomationPeer.
peer: The automation peer.
Returns: The proxy.
"""
pass
def SetFocusCore(self, *args): #cannot find CLR method
"""
SetFocusCore(self: ComboBoxAutomationPeer)
Sets the keyboard input focus on the System.Windows.Controls.ComboBox control
that is associated with this
System.Windows.Automation.Peers.ComboBoxAutomationPeer object. This method is
called by System.Windows.Automation.Peers.AutomationPeer.SetFocus.
"""
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: ComboBox) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
IsVirtualized = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class ContentElementAutomationPeer(AutomationPeer):
"""
Exposes System.Windows.ContentElement types to UI Automation.
ContentElementAutomationPeer(owner: ContentElement)
"""
@staticmethod
def CreatePeerForElement(element):
"""
CreatePeerForElement(element: ContentElement) -> AutomationPeer
Creates a System.Windows.Automation.Peers.ContentElementAutomationPeer for the
specified System.Windows.ContentElement.
element: The System.Windows.ContentElement that is associated with this
System.Windows.Automation.Peers.ContentElementAutomationPeer.
Returns: The System.Windows.Automation.Peers.ContentElementAutomationPeer for the
specified System.Windows.ContentElement.
"""
pass
@staticmethod
def FromElement(element):
"""
FromElement(element: ContentElement) -> AutomationPeer
Gets the System.Windows.Automation.Peers.ContentElementAutomationPeer for the
specified System.Windows.ContentElement.
element: The System.Windows.ContentElement that is associated with this
System.Windows.Automation.Peers.ContentElementAutomationPeer.
Returns: The System.Windows.Automation.Peers.ContentElementAutomationPeer for the
specified System.Windows.ContentElement, or null if the
System.Windows.Automation.Peers.ContentElementAutomationPeer has not been
created by the
System.Windows.Automation.Peers.ContentElementAutomationPeer.CreatePeerForElemen
t(System.Windows.ContentElement) method.
"""
pass
def GetPattern(self, patternInterface):
"""
GetPattern(self: ContentElementAutomationPeer, patternInterface: PatternInterface) -> object
Gets the control pattern for the System.Windows.ContentElement that is
associated with this
System.Windows.Automation.Peers.ContentElementAutomationPeer.
patternInterface: One of the enumeration values.
Returns: An object that implements the
System.Windows.Automation.Provider.ISynchronizedInputProvider interface if
patternInterface is
System.Windows.Automation.Peers.PatternInterface.SynchronizedInput; otherwise,
null.
"""
pass
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: ContentElement) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
Owner = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the System.Windows.ContentElement that is associated with this System.Windows.Automation.Peers.ContentElementAutomationPeer.
Get: Owner(self: ContentElementAutomationPeer) -> ContentElement
"""
class FrameworkContentElementAutomationPeer(ContentElementAutomationPeer):
"""
Exposes System.Windows.FrameworkContentElement types to UI Automation.
FrameworkContentElementAutomationPeer(owner: FrameworkContentElement)
"""
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: FrameworkContentElement) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class ContentTextAutomationPeer(FrameworkContentElementAutomationPeer):
""" Represents a base class for exposing System.Windows.Automation.TextPattern types to UI Automation. """
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, owner: FrameworkContentElement) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class ContextMenuAutomationPeer(FrameworkElementAutomationPeer):
"""
Exposes System.Windows.Controls.ContextMenu types to UI Automation.
ContextMenuAutomationPeer(owner: ContextMenu)
"""
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: ContextMenu) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
class DataGridAutomationPeer(ItemsControlAutomationPeer, IItemContainerProvider, IGridProvider, ISelectionProvider, ITableProvider):
"""
Exposes System.Windows.Controls.DataGrid types to UI Automation.
DataGridAutomationPeer(owner: DataGrid)
"""
def CreateItemAutomationPeer(self, *args): #cannot find CLR method
""" CreateItemAutomationPeer(self: DataGridAutomationPeer, item: object) -> ItemAutomationPeer """
pass
def FindOrCreateItemAutomationPeer(self, *args): #cannot find CLR method
""" FindOrCreateItemAutomationPeer(self: ItemsControlAutomationPeer, item: object) -> ItemAutomationPeer """
pass
def GetAcceleratorKeyCore(self, *args): #cannot find CLR method
"""
GetAcceleratorKeyCore(self: UIElementAutomationPeer) -> str
Gets the accelerator key for the System.Windows.UIElement that is associated
with this System.Windows.Automation.Peers.UIElementAutomationPeer. This method
is called by System.Windows.Automation.Peers.AutomationPeer.GetAcceleratorKey.
Returns: The System.Windows.Automation.AutomationProperties.AcceleratorKey that is
returned by
System.Windows.Automation.AutomationProperties.GetAcceleratorKey(System.Windows.
DependencyObject).
"""
pass
def GetAccessKeyCore(self, *args): #cannot find CLR method
"""
GetAccessKeyCore(self: UIElementAutomationPeer) -> str
Gets the access key for the System.Windows.UIElement that is associated with
this System.Windows.Automation.Peers.UIElementAutomationPeer. This method is
called by System.Windows.Automation.Peers.AutomationPeer.GetAccessKey.
Returns: The access key for the System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer.
"""
pass
def GetAutomationControlTypeCore(self, *args): #cannot find CLR method
""" GetAutomationControlTypeCore(self: DataGridAutomationPeer) -> AutomationControlType """
pass
def GetAutomationIdCore(self, *args): #cannot find CLR method
"""
GetAutomationIdCore(self: FrameworkElementAutomationPeer) -> str
Gets the string that uniquely identifies the System.Windows.FrameworkElement
that is associated with this
System.Windows.Automation.Peers.FrameworkElementAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetAutomationId.
Returns: The automation identifier for the element associated with the
System.Windows.Automation.Peers.FrameworkElementAutomationPeer, or
System.String.Empty if there isn't an automation identifier.
"""
pass
def GetBoundingRectangleCore(self, *args): #cannot find CLR method
"""
GetBoundingRectangleCore(self: UIElementAutomationPeer) -> Rect
Gets the System.Windows.Rect that represents the bounding rectangle of the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetBoundingRectangle.
Returns: The System.Windows.Rect that contains the coordinates of the element.
Optionally, if the element is not both a System.Windows.Interop.HwndSource and
a System.Windows.PresentationSource, this method returns
System.Windows.Rect.Empty.
"""
pass
def GetChildrenCore(self, *args): #cannot find CLR method
""" GetChildrenCore(self: DataGridAutomationPeer) -> List[AutomationPeer] """
pass
def GetClassNameCore(self, *args): #cannot find CLR method
""" GetClassNameCore(self: DataGridAutomationPeer) -> str """
pass
def GetClickablePointCore(self, *args): #cannot find CLR method
"""
GetClickablePointCore(self: UIElementAutomationPeer) -> Point
Gets a System.Windows.Point that represents the clickable space that is on the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetClickablePoint.
Returns: The System.Windows.Point on the element that allows a click. The point values
are (System.Double.NaN, System.Double.NaN) if the element is not both a
System.Windows.Interop.HwndSource and a System.Windows.PresentationSource.
"""
pass
def GetHelpTextCore(self, *args): #cannot find CLR method
"""
GetHelpTextCore(self: FrameworkElementAutomationPeer) -> str
Gets the string that describes the functionality of the
System.Windows.ContentElement that is associated with this
System.Windows.Automation.Peers.ContentElementAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetHelpText.
Returns: The help text, usually from the System.Windows.Controls.ToolTip, or
System.String.Empty if there is no help text.
"""
pass
def GetHostRawElementProviderCore(self, *args): #cannot find CLR method
"""
GetHostRawElementProviderCore(self: AutomationPeer) -> HostedWindowWrapper
Tells UI Automation where in the UI Automation tree to place the hwnd being
hosted by a Windows Presentation Foundation (WPF) element.
Returns: This method returns the hosted hwnd to UI Automation for controls that host
hwnd objects.
"""
pass
def GetItemStatusCore(self, *args): #cannot find CLR method
"""
GetItemStatusCore(self: UIElementAutomationPeer) -> str
Gets a string that communicates the visual status of the
System.Windows.UIElement that is associated with this
System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
by System.Windows.Automation.Peers.AutomationPeer.GetItemStatus.
Returns: The string that contains the
System.Windows.Automation.AutomationProperties.ItemStatus that is returned by
System.Windows.Automation.AutomationProperties.GetItemStatus(System.Windows.Depe
ndencyObject).
"""
pass
def GetItemTypeCore(self, *args): #cannot find CLR method
"""
GetItemTypeCore(self: UIElementAutomationPeer) -> str
Gets a human-readable string that contains the item | |
3)
self.points_table = FCTable()
self.points_table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
# self.points_table.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
grid_lay.addWidget(self.points_table, 16, 0, 1, 3)
self.points_table.setColumnCount(4)
self.points_table.setHorizontalHeaderLabels(
[
'#',
_("Name"),
_("Target"),
_("Found Delta")
]
)
self.points_table.setRowCount(8)
row = 0
# BOTTOM LEFT
id_item_1 = QtWidgets.QTableWidgetItem('%d' % 1)
flags = QtCore.Qt.ItemIsEnabled
id_item_1.setFlags(flags)
self.points_table.setItem(row, 0, id_item_1) # Tool name/id
self.bottom_left_coordx_lbl = QtWidgets.QLabel('%s' % _('Bot Left X'))
self.points_table.setCellWidget(row, 1, self.bottom_left_coordx_lbl)
self.bottom_left_coordx_tgt = EvalEntry()
self.points_table.setCellWidget(row, 2, self.bottom_left_coordx_tgt)
self.bottom_left_coordx_tgt.setReadOnly(True)
self.bottom_left_coordx_found = EvalEntry()
self.points_table.setCellWidget(row, 3, self.bottom_left_coordx_found)
row += 1
self.bottom_left_coordy_lbl = QtWidgets.QLabel('%s' % _('Bot Left Y'))
self.points_table.setCellWidget(row, 1, self.bottom_left_coordy_lbl)
self.bottom_left_coordy_tgt = EvalEntry()
self.points_table.setCellWidget(row, 2, self.bottom_left_coordy_tgt)
self.bottom_left_coordy_tgt.setReadOnly(True)
self.bottom_left_coordy_found = EvalEntry()
self.points_table.setCellWidget(row, 3, self.bottom_left_coordy_found)
self.bottom_left_coordx_found.setDisabled(True)
self.bottom_left_coordy_found.setDisabled(True)
row += 1
# BOTTOM RIGHT
id_item_2 = QtWidgets.QTableWidgetItem('%d' % 2)
flags = QtCore.Qt.ItemIsEnabled
id_item_2.setFlags(flags)
self.points_table.setItem(row, 0, id_item_2) # Tool name/id
self.bottom_right_coordx_lbl = QtWidgets.QLabel('%s' % _('Bot Right X'))
self.points_table.setCellWidget(row, 1, self.bottom_right_coordx_lbl)
self.bottom_right_coordx_tgt = EvalEntry()
self.points_table.setCellWidget(row, 2, self.bottom_right_coordx_tgt)
self.bottom_right_coordx_tgt.setReadOnly(True)
self.bottom_right_coordx_found = EvalEntry()
self.points_table.setCellWidget(row, 3, self.bottom_right_coordx_found)
row += 1
self.bottom_right_coordy_lbl = QtWidgets.QLabel('%s' % _('Bot Right Y'))
self.points_table.setCellWidget(row, 1, self.bottom_right_coordy_lbl)
self.bottom_right_coordy_tgt = EvalEntry()
self.points_table.setCellWidget(row, 2, self.bottom_right_coordy_tgt)
self.bottom_right_coordy_tgt.setReadOnly(True)
self.bottom_right_coordy_found = EvalEntry()
self.points_table.setCellWidget(row, 3, self.bottom_right_coordy_found)
row += 1
# TOP LEFT
id_item_3 = QtWidgets.QTableWidgetItem('%d' % 3)
flags = QtCore.Qt.ItemIsEnabled
id_item_3.setFlags(flags)
self.points_table.setItem(row, 0, id_item_3) # Tool name/id
self.top_left_coordx_lbl = QtWidgets.QLabel('%s' % _('Top Left X'))
self.points_table.setCellWidget(row, 1, self.top_left_coordx_lbl)
self.top_left_coordx_tgt = EvalEntry()
self.points_table.setCellWidget(row, 2, self.top_left_coordx_tgt)
self.top_left_coordx_tgt.setReadOnly(True)
self.top_left_coordx_found = EvalEntry()
self.points_table.setCellWidget(row, 3, self.top_left_coordx_found)
row += 1
self.top_left_coordy_lbl = QtWidgets.QLabel('%s' % _('Top Left Y'))
self.points_table.setCellWidget(row, 1, self.top_left_coordy_lbl)
self.top_left_coordy_tgt = EvalEntry()
self.points_table.setCellWidget(row, 2, self.top_left_coordy_tgt)
self.top_left_coordy_tgt.setReadOnly(True)
self.top_left_coordy_found = EvalEntry()
self.points_table.setCellWidget(row, 3, self.top_left_coordy_found)
row += 1
# TOP RIGHT
id_item_4 = QtWidgets.QTableWidgetItem('%d' % 4)
flags = QtCore.Qt.ItemIsEnabled
id_item_4.setFlags(flags)
self.points_table.setItem(row, 0, id_item_4) # Tool name/id
self.top_right_coordx_lbl = QtWidgets.QLabel('%s' % _('Top Right X'))
self.points_table.setCellWidget(row, 1, self.top_right_coordx_lbl)
self.top_right_coordx_tgt = EvalEntry()
self.points_table.setCellWidget(row, 2, self.top_right_coordx_tgt)
self.top_right_coordx_tgt.setReadOnly(True)
self.top_right_coordx_found = EvalEntry()
self.top_right_coordx_found.setDisabled(True)
self.points_table.setCellWidget(row, 3, self.top_right_coordx_found)
row += 1
self.top_right_coordy_lbl = QtWidgets.QLabel('%s' % _('Top Right Y'))
self.points_table.setCellWidget(row, 1, self.top_right_coordy_lbl)
self.top_right_coordy_tgt = EvalEntry()
self.points_table.setCellWidget(row, 2, self.top_right_coordy_tgt)
self.top_right_coordy_tgt.setReadOnly(True)
self.top_right_coordy_found = EvalEntry()
self.top_right_coordy_found.setDisabled(True)
self.points_table.setCellWidget(row, 3, self.top_right_coordy_found)
vertical_header = self.points_table.verticalHeader()
vertical_header.hide()
self.points_table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
horizontal_header = self.points_table.horizontalHeader()
horizontal_header.setMinimumSectionSize(10)
horizontal_header.setDefaultSectionSize(70)
self.points_table.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
# for x in range(4):
# self.points_table.resizeColumnToContents(x)
self.points_table.resizeColumnsToContents()
self.points_table.resizeRowsToContents()
horizontal_header.setSectionResizeMode(0, QtWidgets.QHeaderView.Fixed)
horizontal_header.resizeSection(0, 20)
horizontal_header.setSectionResizeMode(1, QtWidgets.QHeaderView.Fixed)
horizontal_header.setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)
horizontal_header.setSectionResizeMode(3, QtWidgets.QHeaderView.Stretch)
self.points_table.setMinimumHeight(self.points_table.getHeight() + 2)
self.points_table.setMaximumHeight(self.points_table.getHeight() + 3)
# ## Get Points Button
self.start_button = QtWidgets.QPushButton(_("Get Points"))
self.start_button.setToolTip(
_("Pick four points by clicking on canvas if the source choice\n"
"is 'free' or inside the object geometry if the source is 'object'.\n"
"Those four points should be in the four squares of\n"
"the object.")
)
self.start_button.setStyleSheet("""
QPushButton
{
font-weight: bold;
}
""")
grid_lay.addWidget(self.start_button, 17, 0, 1, 3)
separator_line = QtWidgets.QFrame()
separator_line.setFrameShape(QtWidgets.QFrame.HLine)
separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
grid_lay.addWidget(separator_line, 18, 0, 1, 3)
grid_lay.addWidget(QtWidgets.QLabel(''), 19, 0)
# STEP 2 #
step_2 = QtWidgets.QLabel('<b>%s</b>' % _("STEP 2: Verification GCode"))
step_2.setToolTip(
_("Generate GCode file to locate and align the PCB by using\n"
"the four points acquired above.\n"
"The points sequence is:\n"
"- first point -> set the origin\n"
"- second point -> alignment point. Can be: top-left or bottom-right.\n"
"- third point -> check point. Can be: top-left or bottom-right.\n"
"- forth point -> final verification point. Just for evaluation.")
)
grid_lay.addWidget(step_2, 20, 0, 1, 3)
# ## GCode Button
self.gcode_button = QtWidgets.QPushButton(_("Generate GCode"))
self.gcode_button.setToolTip(
_("Generate GCode file to locate and align the PCB by using\n"
"the four points acquired above.\n"
"The points sequence is:\n"
"- first point -> set the origin\n"
"- second point -> alignment point. Can be: top-left or bottom-right.\n"
"- third point -> check point. Can be: top-left or bottom-right.\n"
"- forth point -> final verification point. Just for evaluation.")
)
self.gcode_button.setStyleSheet("""
QPushButton
{
font-weight: bold;
}
""")
grid_lay.addWidget(self.gcode_button, 21, 0, 1, 3)
separator_line1 = QtWidgets.QFrame()
separator_line1.setFrameShape(QtWidgets.QFrame.HLine)
separator_line1.setFrameShadow(QtWidgets.QFrame.Sunken)
grid_lay.addWidget(separator_line1, 22, 0, 1, 3)
grid_lay.addWidget(QtWidgets.QLabel(''), 23, 0, 1, 3)
# STEP 3 #
step_3 = QtWidgets.QLabel('<b>%s</b>' % _("STEP 3: Adjustments"))
step_3.setToolTip(
_("Calculate Scale and Skew factors based on the differences (delta)\n"
"found when checking the PCB pattern. The differences must be filled\n"
"in the fields Found (Delta).")
)
grid_lay.addWidget(step_3, 24, 0, 1, 3)
# ## Factors Button
self.generate_factors_button = QtWidgets.QPushButton(_("Calculate Factors"))
self.generate_factors_button.setToolTip(
_("Calculate Scale and Skew factors based on the differences (delta)\n"
"found when checking the PCB pattern. The differences must be filled\n"
"in the fields Found (Delta).")
)
self.generate_factors_button.setStyleSheet("""
QPushButton
{
font-weight: bold;
}
""")
grid_lay.addWidget(self.generate_factors_button, 25, 0, 1, 3)
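# Illustrative sketch only (not FlatCAM's actual implementation) of how scale
# and skew factors could be derived from the Target vs. Found (Delta) values in
# the table above, assuming the bottom-left point is taken as the origin:
#   # scale_x ~ (found_bot_right_x - found_bot_left_x) / (target_bot_right_x - target_bot_left_x)
#   # scale_y ~ (found_top_left_y - found_bot_left_y) / (target_top_left_y - target_bot_left_y)
#   # skew_x  ~ degrees(atan2(found_top_left_x - found_bot_left_x, found_top_left_y - found_bot_left_y))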
separator_line1 = QtWidgets.QFrame()
separator_line1.setFrameShape(QtWidgets.QFrame.HLine)
separator_line1.setFrameShadow(QtWidgets.QFrame.Sunken)
grid_lay.addWidget(separator_line1, 26, 0, 1, 3)
grid_lay.addWidget(QtWidgets.QLabel(''), 27, 0, 1, 3)
# STEP 4 #
step_4 = QtWidgets.QLabel('<b>%s</b>' % _("STEP 4: Adjusted GCode"))
step_4.setToolTip(
_("Generate verification GCode file adjusted with\n"
"the factors above.")
)
grid_lay.addWidget(step_4, 28, 0, 1, 3)
self.scalex_label = QtWidgets.QLabel(_("Scale Factor X:"))
self.scalex_label.setToolTip(
_("Factor for Scale action over X axis.")
)
self.scalex_entry = FCDoubleSpinner(callback=self.confirmation_message)
self.scalex_entry.set_range(0, 10000.0000)
self.scalex_entry.set_precision(self.decimals)
self.scalex_entry.setSingleStep(0.1)
grid_lay.addWidget(self.scalex_label, 29, 0)
grid_lay.addWidget(self.scalex_entry, 29, 1, 1, 2)
self.scaley_label = QtWidgets.QLabel(_("Scale Factor Y:"))
self.scaley_label.setToolTip(
_("Factor for Scale action over Y axis.")
)
self.scaley_entry = FCDoubleSpinner(callback=self.confirmation_message)
self.scaley_entry.set_range(0, 10000.0000)
self.scaley_entry.set_precision(self.decimals)
self.scaley_entry.setSingleStep(0.1)
grid_lay.addWidget(self.scaley_label, 30, 0)
grid_lay.addWidget(self.scaley_entry, 30, 1, 1, 2)
self.scale_button = QtWidgets.QPushButton(_("Apply Scale Factors"))
self.scale_button.setToolTip(
_("Apply Scale factors on the calibration points.")
)
self.scale_button.setStyleSheet("""
QPushButton
{
font-weight: bold;
}
""")
grid_lay.addWidget(self.scale_button, 31, 0, 1, 3)
self.skewx_label = QtWidgets.QLabel(_("Skew Angle X:"))
self.skewx_label.setToolTip(
_("Angle, in degrees.\n"
"Float number between -360 and 359.")
)
self.skewx_entry = FCDoubleSpinner(callback=self.confirmation_message)
self.skewx_entry.set_range(-360, 360)
self.skewx_entry.set_precision(self.decimals)
self.skewx_entry.setSingleStep(0.1)
grid_lay.addWidget(self.skewx_label, 32, 0)
grid_lay.addWidget(self.skewx_entry, 32, 1, 1, 2)
self.skewy_label = QtWidgets.QLabel(_("Skew Angle Y:"))
self.skewy_label.setToolTip(
_("Angle, in degrees.\n"
"Float number between -360 and 359.")
)
self.skewy_entry = FCDoubleSpinner(callback=self.confirmation_message)
self.skewy_entry.set_range(-360, 360)
self.skewy_entry.set_precision(self.decimals)
self.skewy_entry.setSingleStep(0.1)
grid_lay.addWidget(self.skewy_label, 33, 0)
grid_lay.addWidget(self.skewy_entry, 33, 1, 1, 2)
self.skew_button = QtWidgets.QPushButton(_("Apply Skew Factors"))
self.skew_button.setToolTip(
_("Apply Skew factors on the calibration points.")
)
self.skew_button.setStyleSheet("""
QPushButton
{
font-weight: bold;
}
""")
grid_lay.addWidget(self.skew_button, 34, 0, 1, 3)
# final_factors_lbl = QtWidgets.QLabel('<b>%s</b>' % _("Final Factors"))
# final_factors_lbl.setToolTip(
# _("Generate verification GCode file adjusted with\n"
# "the factors above.")
# )
# grid_lay.addWidget(final_factors_lbl, 27, 0, 1, 3)
#
# self.fin_scalex_label = QtWidgets.QLabel(_("Scale Factor X:"))
# self.fin_scalex_label.setToolTip(
# _("Final factor for Scale action over X axis.")
# )
# self.fin_scalex_entry = FCDoubleSpinner(callback=self.confirmation_message)
# self.fin_scalex_entry.set_range(0, 10000.0000)
# self.fin_scalex_entry.set_precision(self.decimals)
# self.fin_scalex_entry.setSingleStep(0.1)
#
# grid_lay.addWidget(self.fin_scalex_label, 28, 0)
# grid_lay.addWidget(self.fin_scalex_entry, 28, 1, 1, 2)
#
# self.fin_scaley_label = QtWidgets.QLabel(_("Scale Factor Y:"))
# self.fin_scaley_label.setToolTip(
# _("Final factor for Scale action over Y axis.")
# )
# self.fin_scaley_entry = FCDoubleSpinner(callback=self.confirmation_message)
# self.fin_scaley_entry.set_range(0, 10000.0000)
# self.fin_scaley_entry.set_precision(self.decimals)
# self.fin_scaley_entry.setSingleStep(0.1)
#
# grid_lay.addWidget(self.fin_scaley_label, 29, 0)
# grid_lay.addWidget(self.fin_scaley_entry, 29, 1, 1, 2)
#
# self.fin_skewx_label = QtWidgets.QLabel(_("Skew Angle X:"))
# self.fin_skewx_label.setToolTip(
# _("Final value for angle for Skew action, in degrees.\n"
# "Float number between -360 and 359.")
# )
# self.fin_skewx_entry = FCDoubleSpinner(callback=self.confirmation_message)
# self.fin_skewx_entry.set_range(-360, 360)
# self.fin_skewx_entry.set_precision(self.decimals)
# self.fin_skewx_entry.setSingleStep(0.1)
#
# grid_lay.addWidget(self.fin_skewx_label, 30, 0)
# grid_lay.addWidget(self.fin_skewx_entry, 30, 1, 1, 2)
#
# self.fin_skewy_label = QtWidgets.QLabel(_("Skew Angle Y:"))
# self.fin_skewy_label.setToolTip(
# _("Final value for angle for Skew action, in degrees.\n"
# "Float number between -360 and 359.")
# )
# self.fin_skewy_entry = FCDoubleSpinner(callback=self.confirmation_message)
# self.fin_skewy_entry.set_range(-360, 360)
# self.fin_skewy_entry.set_precision(self.decimals)
# self.fin_skewy_entry.setSingleStep(0.1)
#
# grid_lay.addWidget(self.fin_skewy_label, 31, 0)
# grid_lay.addWidget(self.fin_skewy_entry, 31, 1, 1, 2)
# ## Adjusted GCode Button
self.adj_gcode_button = QtWidgets.QPushButton(_("Generate Adjusted GCode"))
self.adj_gcode_button.setToolTip(
_("Generate verification GCode file adjusted with\n"
"the factors set above.\n"
"The GCode parameters can be readjusted\n"
"before clicking this button.")
)
self.adj_gcode_button.setStyleSheet("""
QPushButton
{
font-weight: bold;
}
""")
grid_lay.addWidget(self.adj_gcode_button, 42, 0, 1, 3)
separator_line1 = QtWidgets.QFrame()
separator_line1.setFrameShape(QtWidgets.QFrame.HLine)
separator_line1.setFrameShadow(QtWidgets.QFrame.Sunken)
grid_lay.addWidget(separator_line1, 43, 0, 1, 3)
grid_lay.addWidget(QtWidgets.QLabel(''), 44, 0, 1, 3)
# STEP 5 #
step_5 = QtWidgets.QLabel('<b>%s</b>' % _("STEP 5: Calibrate FlatCAM Objects"))
step_5.setToolTip(
_("Adjust the FlatCAM objects\n"
"with the factors determined and verified above.")
)
grid_lay.addWidget(step_5, 45, 0, 1, 3)
self.adj_object_type_combo = FCComboBox()
self.adj_object_type_combo.addItems([_("Gerber"), _("Excellon"), _("Geometry")])
self.adj_object_type_combo.setItemIcon(0, QtGui.QIcon(self.app.resource_location + "/flatcam_icon16.png"))
self.adj_object_type_combo.setItemIcon(1, QtGui.QIcon(self.app.resource_location + "/drill16.png"))
self.adj_object_type_combo.setItemIcon(2, QtGui.QIcon(self.app.resource_location + "/geometry16.png"))
self.adj_object_type_label = QtWidgets.QLabel("%s:" % _("Adjusted object type"))
self.adj_object_type_label.setToolTip(_("Type of the FlatCAM Object to be adjusted."))
grid_lay.addWidget(self.adj_object_type_label, 46, 0, 1, 3)
grid_lay.addWidget(self.adj_object_type_combo, 47, 0, 1, 3)
self.adj_object_combo = FCComboBox()
self.adj_object_combo.setModel(self.app.collection)
self.adj_object_combo.setRootModelIndex(self.app.collection.index(0, 0, QtCore.QModelIndex()))
self.adj_object_combo.is_last = True
self.adj_object_combo.obj_type = {
_("Gerber"): "Gerber", _("Excellon"): "Excellon", _("Geometry"): "Geometry"
}[self.adj_object_type_combo.get_value()]
self.adj_object_label = QtWidgets.QLabel("%s:" % _("Adjusted object selection"))
self.adj_object_label.setToolTip(
_("The FlatCAM Object to be adjusted.")
)
grid_lay.addWidget(self.adj_object_label, 48, 0, 1, 3)
| |
<filename>src/vrf.py
from src import common_ops
import json
def get_all_vrfs(**kwargs):
"""
Perform a GET call to get a list (or dictionary) of all entries in VRF table
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: List/dict of all VRFs in the table
"""
target_url = kwargs["url"] + "system/vrfs"
response = kwargs["s"].get(target_url, verify=False)
if not common_ops._response_ok(response, "GET"):
print("FAIL: Getting list/dict of all VRF table entries failed with status code %d"
% response.status_code)
vrfs = []
else:
print("SUCCESS: Getting list/dict of all VRF table entries succeeded")
vrfs = response.json()
return vrfs
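# Example usage (hypothetical addresses; assumes an authenticated requests.Session
# whose cookie jar was loaded by a prior login call, and a base URL in the form
# expected by these helpers):
#   import requests
#   s = requests.Session()
#   base_url = "https://10.0.0.1/rest/v1/"  # or ".../rest/v10.04/"
#   vrfs = get_all_vrfs(s=s, url=base_url)
#   add_vrf("red", route_distinguisher="65001:1", s=s, url=base_url)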
def add_vrf(vrf_name, route_distinguisher=None, vrf_type="user", **kwargs):
"""
Perform a POST call to create a new VRF, and add a route distinguisher if desired.
:param vrf_name: Alphanumeric name of VRF
:param route_distinguisher: Optional route distinguisher to add. Defaults to nothing if not specified.
:param vrf_type: Optional VRF type. Defaults to "user" if not specified.
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: Nothing
"""
if kwargs["url"].endswith("/v1/"):
add_vrf_v1(vrf_name, route_distinguisher, vrf_type, **kwargs)
else: # Updated else for when version is v10.04
_add_vrf(vrf_name, route_distinguisher, vrf_type, **kwargs)
def add_vrf_v1(vrf_name, route_distinguisher=None, vrf_type="user", **kwargs):
"""
Perform a POST call to create a new VRF, and add a route distinguisher if desired.
:param vrf_name: name of VRF
:param route_distinguisher: Optional route distinguisher to add. Defaults to nothing if not specified.
:param vrf_type: Optional VRF type. Defaults to "user" if not specified.
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: Nothing
"""
vrfs_list = get_all_vrfs(**kwargs)
if "/rest/v1/system/vrfs/%s" % vrf_name not in vrfs_list:
vrf_data = {"name": vrf_name, "type": vrf_type}
if route_distinguisher is not None:
vrf_data["rd"] = route_distinguisher
target_url = kwargs["url"] + "system/vrfs"
post_data = json.dumps(vrf_data, sort_keys=True, indent=4)
response = kwargs["s"].post(target_url, data=post_data, verify=False)
if not common_ops._response_ok(response, "POST"):
print("FAIL: Creating new VRF '%s' failed with status code %d" % (vrf_name, response.status_code))
else:
print("SUCCESS: Creating new VRF '%s' succeeded" % vrf_name)
else:
print("SUCCESS: No need to create VRF '%s' since it already exists" % vrf_name)
def _add_vrf(vrf_name, route_distinguisher=None, vrf_type="user", **kwargs):
"""
Perform a POST call to create a new VRF, and add a route distinguisher if desired.
:param vrf_name: name of VRF
:param route_distinguisher: Optional route distinguisher to add. Defaults to nothing if not specified.
:param vrf_type: Optional VRF type. Defaults to "user" if not specified.
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: Nothing
"""
vrfs_dict = get_all_vrfs(**kwargs)
if vrf_name not in vrfs_dict:
vrf_data = {"name": vrf_name, "type": vrf_type}
if route_distinguisher is not None:
vrf_data["rd"] = route_distinguisher
target_url = kwargs["url"] + "system/vrfs"
post_data = json.dumps(vrf_data, sort_keys=True, indent=4)
response = kwargs["s"].post(target_url, data=post_data, verify=False)
if not common_ops._response_ok(response, "POST"):
print("FAIL: Creating new VRF '%s' failed with status code %d" % (vrf_name, response.status_code))
else:
print("SUCCESS: Creating new VRF '%s' succeeded" % vrf_name)
else:
print("SUCCESS: No need to create VRF '%s' since it already exists" % vrf_name)
def get_vrf(vrf_name, depth=0, selector=None, **kwargs):
"""
Perform a GET call to get data for a VRF table entry
:param vrf_name: Alphanumeric name of the VRF
:param depth: Integer deciding how many levels into the API JSON that references will be returned.
:param selector: Alphanumeric option to select specific information to return. The options are 'configuration',
'status', 'statistics' or 'writable'.
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: Dictionary containing the VRF data
"""
if kwargs["url"].endswith("/v1/"):
return _get_vrf_v1(vrf_name, depth, selector, **kwargs)
else: # Updated else for when version is v10.04
return _get_vrf(vrf_name, depth, selector, **kwargs)
def _get_vrf_v1(vrf_name, depth=0, selector=None, **kwargs):
"""
Perform a GET call to get data for a VRF table entry
:param vrf_name: Alphanumeric name of the VRF
:param depth: Integer deciding how many levels into the API JSON that references will be returned.
:param selector: Alphanumeric option to select specific information to return. The options are 'configuration',
'status', 'statistics' or 'writable'.
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: Dictionary containing the VRF data
"""
if selector not in ['configuration', 'status', 'statistics', None]:
raise Exception("ERROR: Selector should be 'configuration', 'status', or 'statistics'")
target_url = kwargs["url"] + "system/vrfs/%s" % vrf_name
payload = {
"depth": depth,
"selector": selector
}
response = kwargs["s"].get(target_url, verify=False, params=payload, timeout=2)
if not common_ops._response_ok(response, "GET"):
print("FAIL: Getting VRF table entry '%s' failed with status code %d" % (vrf_name, response.status_code))
vrf = []
else:
print("SUCCESS: Getting VRF table entry '%s' succeeded" % vrf_name)
vrf = response.json()
return vrf
def _get_vrf(vrf_name, depth=1, selector=None, **kwargs):
"""
Perform a GET call to get data for a VRF table entry
:param vrf_name: Alphanumeric name of the VRF
:param depth: Integer deciding how many levels into the API JSON that references will be returned.
:param selector: Alphanumeric option to select specific information to return. The options are 'configuration',
'status', 'statistics' or 'writable'.
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: Dictionary containing the VRF data
"""
if selector not in ['configuration', 'status', 'statistics', 'writable', None]:
raise Exception("ERROR: Selector should be 'configuration', 'status', 'statistics', or 'writable'")
target_url = kwargs["url"] + "system/vrfs/%s" % vrf_name
payload = {
"depth": depth,
"selector": selector
}
response = kwargs["s"].get(target_url, verify=False, params=payload, timeout=2)
if not common_ops._response_ok(response, "GET"):
print("FAIL: Getting VRF table entry '%s' failed with status code %d" % (vrf_name, response.status_code))
vrf = []
else:
print("SUCCESS: Getting VRF table entry '%s' succeeded" % vrf_name)
vrf = response.json()
return vrf
def delete_vrf(vrf_name, **kwargs):
"""
Perform a DELETE call to delete a VRF.
Note that this function has logic that works for both v1 and v10.04
:param vrf_name: Alphanumeric name of VRF
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: Nothing
"""
vrf_list = get_all_vrfs(**kwargs)
if kwargs["url"].endswith("/v1/"):
vrf_check = "/rest/v1/system/vrfs/%s" % vrf_name
else:
# Else logic designed for v10.04 and later
vrf_check = vrf_name
if vrf_check in vrf_list:
target_url = kwargs["url"] + "system/vrfs/%s" % vrf_name
response = kwargs["s"].delete(target_url, verify=False)
if not common_ops._response_ok(response, "DELETE"):
print("FAIL: Deleting VRF '%s' failed with status code %d" % (vrf_name, response.status_code))
else:
print("SUCCESS: Deleting VRF '%s' succeeded" % vrf_name)
else:
print("SUCCESS: No need to delete VRF '%s' since it doesn't exist"
% vrf_name)
def add_vrf_address_family(vrf_name, family_type="ipv4_unicast", export_target=[], import_targets=[], **kwargs):
"""
Perform a POST call to add an Address Family to an existing VRF.
Note that this function has logic that works for both v1 and v10.04
:param vrf_name: Alphanumeric name of VRF
:param family_type: Alphanumeric type of the Address Family. The options are 'ipv4_unicast' and 'ipv6_unicast'.
The default value is set to 'ipv4_unicast'.
:param export_target: Optional list of export route targets.
:param import_targets: Optional list of import route targets
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: Nothing
"""
vrf_list = get_all_vrfs(**kwargs)
if family_type == "ipv4-unicast":
family_type = "ipv4_unicast"
elif family_type == "ipv6-unicast":
family_type = "ipv6_unicast"
if family_type not in ['ipv4_unicast', 'ipv6_unicast']:
raise Exception("ERROR: family_type should be 'ipv4_unicast', or 'ipv6_unicast'")
if kwargs["url"].endswith("/v1/"):
vrf_check = "/rest/v1/system/vrfs/%s" % vrf_name
else:
# Else logic designed for v10.04 and later
vrf_check = vrf_name
if vrf_check in vrf_list:
address_family_data = {
"address_family": family_type,
"export_route_targets": export_target,
"import_route_targets": import_targets,
"route_map": {}
}
target_url = kwargs["url"] + "system/vrfs/%s/vrf_address_families" % vrf_name
post_data = json.dumps(address_family_data, sort_keys=True, indent=4)
response = kwargs["s"].post(target_url, data=post_data, verify=False)
if not common_ops._response_ok(response, "POST"):
print("FAIL: Creating '%s' Address Family on VRF '%s' failed with status code %d" % (family_type, vrf_name,
response.status_code))
else:
print("SUCCESS: Creating '%s' Address Family on VRF '%s' succeeded" % (family_type, vrf_name))
else:
print("FAIL: Cannot add Address Family to VRF '%s' since the VRF has not been created yet" % vrf_name)
def delete_vrf_address_family(vrf_name, family_type="ipv4_unicast", **kwargs):
"""
Perform a DELETE call to remove a VRF address family.
Note that this function has logic that works for both v1 and v10.04
:param vrf_name: Alphanumeric name of | |
from .explorers import *
from .jericho_env import *
from .utils import *
# stdlib imports used below; copyfield, infinity, ZorkPos, use_seed, TimedPickle, DoNothingExplorer are assumed to come from the wildcard imports above
import copy, random, typing, multiprocessing
from collections import defaultdict
from dataclasses import dataclass
import numpy as np
import time
from tqdm import tqdm
@dataclass
class GridDimension:
attr: str
div: int
@dataclass()
class ChainLink:
__slots__ = ['start_cell', 'end_cell', 'seed']
start_cell: typing.Any
end_cell: typing.Any
seed: int
@dataclass
class Cell:
# The list of ChainLink that can take us to this place
chain: typing.List[ChainLink] = copyfield([])
seen: list = copyfield({})
score: int = -infinity
# Number of times this was chosen and seen
seen_times: int = 0
chosen_times: int = 0
chosen_since_new: int = 0
action_times: int = 0 # This is the number of action that led to this cell
# Length of the trajectory
trajectory_len: int = infinity
# Saved restore state. In a purely deterministic environment,
# this allows us to fast-forward to the end state instead
# of replaying.
restore: typing.Any = None
# TODO: JH: This should not refer to a Montezuma-only data-structure
exact_pos: ZorkPos = None
trajectory: list = copyfield([])
real_cell: ZorkPos = None
@dataclass
class PosInfo:
__slots__ = ['exact', 'cell', 'state', 'restore']
exact: tuple
cell: tuple
state: typing.Any
restore: typing.Any
@dataclass
class TrajectoryElement:
__slots__ = ['from_', 'to', 'action', 'reward', 'done', 'real_pos']
from_: PosInfo
to: PosInfo
action: int
reward: float
done: bool
real_pos: ZorkPos
# ### Main
POOL = None
ENV = None
def get_env():
return ENV
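# The Explore class below implements a Go-Explore style loop adapted to a
# text-adventure environment (a summary inferred from the code, not an official
# description):
# - self.grid is the archive: it maps a discretized "cell" of the state space to
#   a Cell record holding the best known trajectory, score and restore state;
# - each cycle the selector picks a batch of cells, the environment is restored
#   to each chosen cell (or reset if no restore exists), the explorer runs for
#   explore_steps, and new or improved cells found along the trajectory are
#   merged back into the archive.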
class Explore:
def __init__(
self, explorer_policy, cell_selector, env,
grid_info: tuple,
explore_steps=50,
ignore_death: int = 1,
n_cpus=None,
optimize_score=True,
use_real_pos=False,
prob_override=0.0,
pool_class=multiprocessing.Pool,
reset_pool=False,
batch_size=100,
reset_cell_on_update=False,
qbert_params=None
):
global POOL, ENV
self.env_info = env
self.qbert_params = qbert_params
self.make_env()
self.pool_class = pool_class
self.reset_pool = reset_pool
if self.reset_pool:
POOL = self.pool_class(multiprocessing.cpu_count() * 2)
else:
POOL = self.pool_class(multiprocessing.cpu_count() * 2, maxtasksperchild=100)
self.use_real_pos = use_real_pos
self.n_cpus = n_cpus
self.batch_size = batch_size
self.explore_steps = explore_steps
self.explorer = explorer_policy
self.selector = cell_selector
self.grid_info = grid_info
self.grid = defaultdict(Cell)
self.ignore_death = ignore_death
self.frames_true = 0
self.frames_compute = 0
self.start = None
self.cycles = 0
self.seen_level_1 = False
self.optimize_score = optimize_score
self.prob_override = prob_override
self.state = None
self.reset()
self.grid[self.get_cell()].trajectory_len = 0
self.grid[self.get_cell()].score = 0
self.grid[self.get_cell()].exact_pos = self.get_pos()
self.grid[self.get_cell()].real_cell = self.get_real_cell()
self.real_grid = set()
self.pos_cache = None
self.reset_cell_on_update = reset_cell_on_update
self.max_score = 0
def make_env(self):
global ENV
if ENV is None:
ENV = self.env_info[0](self.qbert_params)
def reset(self):
self.pos_cache = None
self.make_env()
return ENV.reset()
# def step(self, action):
# self.pos_cache = None
# return ENV.step(action)
# def step(self):
# self.pos_cache = None
# return ENV.step()
def get_pos(self):
if self.use_real_pos:
return self.get_real_pos()
else:
if not self.pos_cache:
self.pos_cache = (ENV.state[-1].reshape((ENV.state[-1].size,)).tobytes(),)
return self.pos_cache
def get_real_pos(self):
return ENV.get_pos()
def get_pos_info(self, include_restore=True):
return PosInfo(self.get_pos() if self.use_real_pos else None, self.get_cell(), None, self.get_restore() if include_restore else None)
def get_restore(self):
x = ENV.get_restore()
return x
def restore(self, val):
self.make_env()
ENV.restore(val)
def get_real_cell(self):
pos = self.get_real_pos()
res = {}
for dimension in self.grid_info:
value = getattr(pos, dimension.attr)
if dimension.div == 1:
res[dimension.attr] = value
else:
res[dimension.attr] = (int(value / dimension.div))
return pos.__class__(**res)
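# Example of the discretization above (hypothetical attributes for ZorkPos):
# with grid_info = (GridDimension(attr='x', div=10), GridDimension(attr='y', div=10)),
# a real position of x=123, y=47 maps to the cell x=12, y=4, so nearby positions
# collapse into the same archive cell.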
def get_cell(self):
if self.use_real_pos:
return self.get_real_cell()
else:
pos = self.get_pos()
return pos
def run_explorer(self, explorer, start_cell=None, max_steps=1):
import sys
np.set_printoptions(threshold=sys.maxsize)
explorer.init_trajectory(start_cell, self.grid)
trajectory = []
# while True:
initial_pos_info = self.get_pos_info(include_restore=True)
# if ((max_steps > 0 and len(trajectory) >= max_steps) or
# initial_pos_info.cell == start_cell):
# break
# action = explorer.get_action(self.state, ENV)
# # self.state, reward, done, _ = self.step(action)
# print ("right before:{}".format(ENV.trainer.vec_env.get_score()[0]))
# print ("moves before:{}".format(ENV.trainer.vec_env.get_moves()[0]))
obs, rewards, dones, infos, graph_infos, scores, chosen_actions, IM = ENV.step(max_steps=32)
print (infos)
self.frames_true += 1
self.frames_compute += 1
trajectory.append(
TrajectoryElement(
initial_pos_info,
self.get_pos_info(),
chosen_actions[0], scores[0], dones[0],
self.get_real_cell()
)
)
# print (trajectory)
# explorer.seen_state(trajectory[-1])
# print (obs)
# print (rewards)
# print (dones)
# print (infos)
# print (graph_infos)
# print (scores)
# print (chosen_actions)
# if dones[0]:
# break
# print (trajectory)
# print (trajectory)
return trajectory
# def run_explorer(self, explorer, start_cell=None, max_steps=-1):
# explorer.init_trajectory(start_cell, self.grid)
# trajectory = explorer.get_trajectory()
# for action in trajectory:
# explorer.seen_state(action)
# return trajectory
def run_seed(self, seed, start_cell=None, max_steps=1):
with use_seed(seed):
self.explorer.init_seed()
return self.run_explorer(self.explorer, start_cell, max_steps)
def process_cell(self, info):
# This function runs in a SUBPROCESS, and processes a single cell.
cell_key, cell, seed= info
# self.env_info[0].TARGET_SHAPE = target_shape
# self.env_info[0].MAX_PIX_VALUE = max_pix
self.frames_true = 0
self.frames_compute = 0
#print (cell_key)
#time.sleep(300)
if cell.restore is not None:
self.restore(cell.restore)
self.frames_true += cell.trajectory_len
else:
# TODO: implement recovering the restore from, say, the trajectory on the cell, so that this
# isn't a problem anymore when recovering from a checkpoint.
# assert cell.trajectory_len == 0, 'Cells must have a restore unless they are the initial state'
self.reset()
start_cell = self.get_cell()
end_trajectory = self.run_seed(seed, start_cell=cell, max_steps=self.explore_steps)
# print (end_trajectory)
# We are not done, check that doing nothing for self.ignore_death steps won't kill us.
if self.ignore_death > 0:
if not end_trajectory[-1].done:
end_trajectory += self.run_explorer(DoNothingExplorer(), max_steps=self.ignore_death)
end_trajectory = end_trajectory[:-self.ignore_death]
seen_to = set()
#print (end_trajectory)
for e in end_trajectory:
e.from_.restore = None
e.from_.state = None
if e.to.cell in seen_to:
e.to.restore = None
e.to.state = None
seen_to.add(e.to.cell)
# known_room_data = {}
# if len(ENV.rooms) > known_rooms:
# known_room_data = ENV.rooms
return (start_cell, end_trajectory)
# Unreachable leftover return path (it references known_room_data, which is only set in the commented-out block above); kept for reference:
# return TimedPickle((start_cell, end_trajectory, self.frames_true, self.frames_compute, known_room_data), 'ret', enabled=info.enabled)
def run_cycle(self):
# Choose a bunch of cells, send them to the workers for processing, then combine the results.
# A lot of what this function does is only aimed at minimizing the amount of data that needs
# to be pickled to the workers, which is why it sets a lot of variables to None only to restore
# them later.
global POOL
if self.start is None:
self.start = time.time()
self.cycles += 1
chosen_cells = []
# print ('grid length: {}'.format(len(self.grid)))
# for i, s in enumerate(self.grid):
# print ('cell {}: {}'.format(i, self.grid[s].restore is not None))
cell_keys = self.selector.choose_cell(self.grid, size=self.batch_size)
old_trajectories = []
for i, cell_key in enumerate(cell_keys):
cell = self.grid[cell_key]
old_trajectories.append((cell.trajectory, cell.seen, cell.chain))
cell.trajectory = None
cell.seen = None
cell.chain = None
seed = random.randint(0, 2 ** 31)
#chosen_cells.append(TimedPickle((cell_key, cell, seed), 'args', enabled=(i == 0 and False)))
chosen_cells.append((cell_key, cell, seed))
# NB: self.grid is unnecessary for process_cell, and might be
# VERY large. We temporarily replace it with None so it doesn't
# need to be serialized by the pool.
old_grid = self.grid
self.grid = None
#print ("STARTING PROCESS_CELLS")
#trajectories = [e.data for e in POOL.map(self.process_cell, chosen_cells)]
#print (chosen_cells)
trajectories = [self.process_cell(e) for e in chosen_cells]
# if self.reset_pool and (self.cycles + 1) % 100 == 0:
# POOL.close()
# POOL.join()
# POOL = None
# gc.collect()
# POOL = self.pool_class(self.n_cpus)
#chosen_cells = [e for e in chosen_cells]
self.grid = old_grid
for ((_, cell, _), (old_traj, old_seen, old_chain)) in zip(chosen_cells, old_trajectories):
if old_traj is not None:
cell.trajectory = old_traj
if old_seen is not None:
cell.seen = old_seen
if old_chain is not None:
cell.chain = old_chain
# Note: we do this now because starting here we're going to be concatenating the trajectories
# of these cells, and they need to remain the same!
chosen_cells = [(k, copy.copy(c), s) for k, c, s in chosen_cells]
cells_to_reset = set()
for ((cell_key, cell, seed), (start_cell, end_trajectory)) in zip(chosen_cells, trajectories):
# self.frames_true += ft
# self.frames_compute += fc
if cell.seen is None:
continue
seen_cells = {}
#print ("END_TRAJECTORY: " + str(end_trajectory))
# Note(adrien): this changes the behavior of seen_times and action_times,
# but it makes the whole code slower and it isn't clear that the behavior
# implied by these next few lines is better anyway.
# for e in cell.seen:
# if e not in seen_cells:
# seen_cells[e] = cell.seen[e]
# self.grid[e].seen_times += 1
# self.grid[e].action_times += cell.seen[e]
# for k in known_rooms:
# if k not in ENV.rooms:
# ENV.rooms[k] = known_rooms[k]
self.grid[cell_key].chosen_times += 1
self.grid[cell_key].chosen_since_new += 1
cur_score = cell.score
self.max_score = max(cur_score, self.max_score)
#print ("CUR_SCORE: " + str(cur_score))
# tqdm.write(f'CUR_SCORE: {cur_score}')
# tqdm.write(f'length of grid: {len(self.grid)}')
for i, elem in enumerate(end_trajectory):
potential_cell_key = elem.to.cell
self.selector.reached_state(elem)
self.real_grid.add(elem.real_pos)
# if not isinstance(potential_cell_key, tuple) and potential_cell_key.level > 0:
# self.seen_level_1 = True
potential_cell = self.grid[potential_cell_key]
full_traj_len = cell.trajectory_len + i + 1
cur_score += elem.reward
for p in [potential_cell_key, elem.from_.cell]:
if p not in seen_cells:
seen_cells[p] = 0
| |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Functions and classes for array-like objects, implementing common numpy
array features for datasets or nested sequences, while trying to avoid copying
data.
Classes:
- :class:`DatasetView`: Similar to a numpy view, to access
a h5py dataset as if it was transposed, without casting it into a
numpy array (this lets h5py handle reading the data from the
file into memory, as needed).
- :class:`ListOfImages`: Similar to a numpy view, to access
a list of 2D numpy arrays as if it was a 3D array (possibly transposed),
without casting it into a numpy array.
Functions:
- :func:`is_array`
- :func:`is_list_of_arrays`
- :func:`is_nested_sequence`
- :func:`get_shape`
- :func:`get_dtype`
- :func:`get_concatenated_dtype`
"""
from __future__ import absolute_import, print_function, division
import sys
import numpy
import six
import numbers
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "26/04/2017"
def is_array(obj):
"""Return True if object implements necessary attributes to be
considered similar to a numpy array.
Attributes needed are "shape", "dtype", "__getitem__"
and "__array__".
:param obj: Array-like object (numpy array, h5py dataset...)
:return: boolean
"""
# add more required attribute if necessary
for attr in ("shape", "dtype", "__array__", "__getitem__"):
if not hasattr(obj, attr):
return False
return True
def is_list_of_arrays(obj):
"""Return True if object is a sequence of numpy arrays,
e.g. a list of images as 2D arrays.
:param obj: list of arrays
:return: boolean"""
# object must not be a numpy array
if is_array(obj):
return False
# object must have a __len__ method
if not hasattr(obj, "__len__"):
return False
# all elements in sequence must be arrays
for arr in obj:
if not is_array(arr):
return False
return True
def is_nested_sequence(obj):
"""Return True if object is a nested sequence.
A simple 1D sequence is considered to be a nested sequence.
Numpy arrays and h5py datasets are not considered to be nested sequences.
To test if an object is a nested sequence in a more general sense,
including arrays and datasets, use::
is_nested_sequence(obj) or is_array(obj)
:param obj: nested sequence (numpy array, h5py dataset...)
:return: boolean"""
# object must not be a numpy array
if is_array(obj):
return False
if not hasattr(obj, "__len__"):
return False
# obj must not be a list of (lists of) numpy arrays
subsequence = obj
while hasattr(subsequence, "__len__"):
if is_array(subsequence):
return False
# strings cause infinite loops
if isinstance(subsequence, six.string_types + (six.binary_type, )):
return True
subsequence = subsequence[0]
# object has __len__ and is not an array
return True
def get_shape(array_like):
"""Return shape of an array like object.
In case the object is a nested sequence but not an array or dataset
(list of lists, tuples...), the size of each dimension is assumed to be
uniform, and is deduced from the length of the first sequence.
:param array_like: Array like object: numpy array, hdf5 dataset,
multi-dimensional sequence
:return: Shape of array, as a tuple of integers
"""
if hasattr(array_like, "shape"):
return array_like.shape
shape = []
subsequence = array_like
while hasattr(subsequence, "__len__"):
shape.append(len(subsequence))
# strings cause infinite loops
if isinstance(subsequence, six.string_types + (six.binary_type, )):
break
subsequence = subsequence[0]
return tuple(shape)
def get_dtype(array_like):
"""Return dtype of an array like object.
In the case of a nested sequence, the type of the first value
is inspected.
:param array_like: Array like object: numpy array, hdf5 dataset,
multi-dimensional nested sequence
:return: numpy dtype of object
"""
if hasattr(array_like, "dtype"):
return array_like.dtype
subsequence = array_like
while hasattr(subsequence, "__len__"):
# strings cause infinite loops
if isinstance(subsequence, six.string_types + (six.binary_type, )):
break
subsequence = subsequence[0]
return numpy.dtype(type(subsequence))
def get_concatenated_dtype(arrays):
"""Return dtype of array resulting of concatenation
of a list of arrays (without actually concatenating
them).
:param arrays: list of numpy arrays
:return: resulting dtype after concatenating arrays
"""
dtypes = {a.dtype for a in arrays}
dummy = []
for dt in dtypes:
dummy.append(numpy.zeros((1, 1), dtype=dt))
return numpy.array(dummy).dtype
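# Illustrative sketch, not part of the original helpers: the shape/dtype utilities on a
# plain nested list versus a list of arrays.
def _demo_shape_and_dtype():
    nested = [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]   # 2 rows of 3 floats
    assert get_shape(nested) == (2, 3)
    assert get_dtype(nested) == numpy.dtype(float)
    arrays = [numpy.zeros(4, dtype=numpy.int32), numpy.zeros(4, dtype=numpy.float64)]
    # concatenating int32 and float64 data promotes the result to float64
    assert get_concatenated_dtype(arrays) == numpy.dtype(numpy.float64)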
class ListOfImages(object):
"""This class provides a way to access values and slices in a stack of
images stored as a list of 2D numpy arrays, without creating a 3D numpy
array first.
A transposition can be specified, as a 3-tuple of dimensions in the wanted
order. For example, to transpose from ``xyz`` ``(0, 1, 2)`` into ``yzx``,
the transposition tuple is ``(1, 2, 0)``
All the 2D arrays in the list must have the same shape.
The global dtype of the stack of images is the one that would be obtained
by casting the list of 2D arrays into a 3D numpy array.
:param images: list of 2D numpy arrays, or :class:`ListOfImages` object
:param transposition: Tuple of dimension numbers in the wanted order
"""
def __init__(self, images, transposition=None):
"""
"""
super(ListOfImages, self).__init__()
# if images is a ListOfImages instance, get the underlying data
# as a list of 2D arrays
if isinstance(images, ListOfImages):
images = images.images
# test stack of images is as expected
assert is_list_of_arrays(images), \
"Image stack must be a list of arrays"
image0_shape = images[0].shape
for image in images:
assert image.ndim == 2, \
"Images must be 2D numpy arrays"
assert image.shape == image0_shape, \
"All images must have the same shape"
self.images = images
"""List of images"""
self.shape = (len(images), ) + image0_shape
"""Tuple of array dimensions"""
self.dtype = get_concatenated_dtype(images)
"""Data-type of the global array"""
self.ndim = 3
"""Number of array dimensions"""
self.size = len(images) * image0_shape[0] * image0_shape[1]
"""Number of elements in the array."""
self.transposition = list(range(self.ndim))
"""List of dimension indices, in an order depending on the
specified transposition. By default this is simply
        [0, ..., self.ndim - 1], but it can be changed by specifying a different
``transposition`` parameter at initialization.
Use :meth:`transpose`, to create a new :class:`ListOfImages`
with a different :attr:`transposition`.
"""
if transposition is not None:
assert len(transposition) == self.ndim
assert set(transposition) == set(list(range(self.ndim))), \
"Transposition must be a sequence containing all dimensions"
self.transposition = transposition
self.__sort_shape()
def __sort_shape(self):
"""Sort shape in the order defined in :attr:`transposition`
"""
new_shape = tuple(self.shape[dim] for dim in self.transposition)
self.shape = new_shape
def __sort_indices(self, indices):
"""Return array indices sorted in the order needed
to access data in the original non-transposed images.
:param indices: Tuple of ndim indices, in the order needed
to access the transposed view
:return: Sorted tuple of indices, to access original data
"""
assert len(indices) == self.ndim
sorted_indices = tuple(idx for (_, idx) in
sorted(zip(self.transposition, indices)))
return sorted_indices
def __array__(self, dtype=None):
"""Cast the images into a numpy array, and return it.
If a transposition has been done on this images, return
a transposed view of a numpy array."""
return numpy.transpose(numpy.array(self.images, dtype=dtype),
self.transposition)
def __len__(self):
return self.shape[0]
def transpose(self, transposition=None):
"""Return a re-ordered (dimensions permutated)
:class:`ListOfImages`.
The returned object refers to
the same images but with a different :attr:`transposition`.
:param List[int] transposition: List/tuple of dimension numbers in the
wanted order.
If ``None`` (default), reverse the dimensions.
:return: new :class:`ListOfImages` object
"""
# by default, reverse the dimensions
if transposition is None:
transposition = list(reversed(self.transposition))
# If this ListOfImages is already transposed, sort new transposition
# relative to old transposition
elif list(self.transposition) != list(range(self.ndim)):
transposition = [self.transposition[i] | |
            elif "ring" in finger_jnt:
self.ring_list.append(finger_jnt)
elif "pinky" in finger_jnt:
self.pinkey_list.append(finger_jnt)
# Move the hand_cc, move the pivot to the wrist, and rename it
if self.type_flag == "hand":
self.hand_cc = cmds.duplicate(self.hand_cc, n="hand_" + self.side + "_cc")[0]
elif self.type_flag == "foot":
self.hand_cc = cmds.duplicate(self.hand_cc, n="foot_" + self.side + "_cc")[0]
self.hand_cc_grp = cmds.group(self.hand_cc, n=self.hand_cc + "_grp")
if self.type_flag == "hand":
cmds.parent(self.hand_cc_grp, self.hand_jnt)
cmds.setAttr(self.hand_cc_grp + ".translate", 0, 0, 0, type="double3")
cmds.parent(self.hand_cc_grp, w=True)
wrist_jnt_pos = cmds.xform(wrist_jnt, ws=True, t=True, q=True)
cmds.move(wrist_jnt_pos[0], wrist_jnt_pos[1], wrist_jnt_pos[2],
self.hand_cc_grp + ".rotatePivot", self.hand_cc_grp + ".scalePivot",
ws=True, a=True)
cmds.move(wrist_jnt_pos[0], wrist_jnt_pos[1], wrist_jnt_pos[2],
self.hand_cc + ".rotatePivot", self.hand_cc + ".scalePivot",
ws=True, a=True)
elif self.type_flag == "foot":
cmds.parent(self.hand_cc_grp, wrist_jnt)
cmds.setAttr(self.hand_cc_grp + ".translate", 0, 0, 0, type="double3")
cmds.parent(self.hand_cc_grp, w=True)
# Add the IK_FK_Switch attribute
cmds.select(self.hand_cc, r=True)
cmds.addAttr(ln="_____", at="float", k=True,)
cmds.addAttr(at="float", min=0.0, max=1.0, k=True,
ln="IK_FK_Switch", sn="IKFKSw")
# This will create the CC and its group while in the loop because we want the
# names to be consistent.
for curr_finger in self.fingers_list:
above_jnt = self.hand_jnt
for curr_jnt in curr_finger:
if "_tip_" in curr_jnt:
continue
else:
finger_cc = cmds.curve(n=curr_jnt.replace("_jnt", "_cc"), d=1,
p=[(0, 0.4, 0), (0, 0.4, 0.2), (0, 0.8, 0.2),
(0, 0.8, -0.2),
(0, 0.4, -0.2), (0, 0.4, 0), (0, 0, 0)],
k=[0, 1, 2, 3, 4, 5, 6])
if self.type_flag == "foot":
cmds.rotate(90, finger_cc, y=True)
cmds.makeIdentity(finger_cc, a=True, n=0, pn=True,
t=True, r=True, s=True)
finger_cc_grp = cmds.group(finger_cc,
n=curr_jnt.replace("_jnt", "_grp"), r=True)
cmds.move(0, 0, 0, finger_cc_grp + ".rotatePivot",
finger_cc_grp + ".scalePivot", ws=True)
cmds.parent(finger_cc_grp, curr_jnt)
cmds.setAttr(finger_cc_grp + ".translate", 0, 0, 0, type="double3")
cmds.setAttr(finger_cc_grp + ".rotate", 0, 0, 0, type="double3")
cmds.parent(finger_cc_grp, above_jnt)
cmds.parent(curr_jnt, finger_cc)
above_jnt = curr_jnt
# Parent the hand joint under the hand_cc, or the footRoot under the CC then
# parent the ball to the footRoot
if self.type_flag == "hand":
cmds.parent(self.hand_jnt, self.hand_cc)
elif self.type_flag == "foot":
footRoot_jnt = "footRoot_" + self.side + "_jnt"
cmds.parent(footRoot_jnt, self.hand_cc)
class CreateFoot(object):
"""
    This class will create a foot rig.
"""
def __init__(self, footRoot_jnt, foot_cc, side, IK_flag):
self.footRoot_jnt = footRoot_jnt
self.ball_jnt = ""
self.heel_jnt = ""
self.foot_cc = foot_cc
self.side = side
self.IK_flag = IK_flag
self.extra_indicator = ""
def create_foot(self):
if self.IK_flag:
self.extra_indicator = "1"
# Find the ball, heel, and longest toe joint.
find_ball_list = cmds.listRelatives(self.footRoot_jnt, type="joint")
ball_index = find_ball_list.index("ball_" + self.side + "_jnt" +
self.extra_indicator)
heel_index = find_ball_list.index("heel_" + self.side + "_jnt" +
self.extra_indicator)
self.ball_jnt = find_ball_list[ball_index]
self.heel_jnt = find_ball_list[heel_index]
# Get the big toe joint and the pinky joint.
find_toes_list = cmds.listRelatives(self.ball_jnt, type="transform")
bigToe_index = find_toes_list.index("big_01_toe_" + self.side + "_grp" +
self.extra_indicator)
pinkey_index = find_toes_list.index("pinky_01_toe_" + self.side + "_grp" +
self.extra_indicator)
bigToe_pos = cmds.xform(find_toes_list[bigToe_index], ws=True, t=True, q=True)
pinky_pos = cmds.xform(find_toes_list[pinkey_index], ws=True, t=True, q=True)
# Get the position of the ball joint and heel joint position.
ball_pos = cmds.xform(self.ball_jnt, ws=True, t=True, q=True)
heel_pos = cmds.xform(self.heel_jnt, ws=True, t=True, q=True)
# Find the maximum Z length of the foot.
# calc_foot_list = cmds.xform(self.ball_jnt, bb=True, ws=True, r=True, q=True)
# z_difference = calc_foot_list[5] - calc_foot_list[2]
# max_toe_pos = [ball_pos[0], ball_pos[1] - .1, ball_pos[2]+z_difference]
max_toe_pos = cmds.xform("mid_tip_toe_" + self.side + "_jnt" +
self.extra_indicator,
ws=True, t=True, q=True)
max_toe_pos[2] = max_toe_pos[2] + 0.05
#inBall_pos = cmds.xform()
# Create the reverse foot groups, parented under each other.
revFoot = ["revBallFix", "revBall", "revToe", "revHeel",
"revOutBank", "revInBank"]
revFoot_grp_list = []
above_grp = ""
for curr_grp in revFoot:
if curr_grp == "revBallFix":
revFoot_grp_list.append(cmds.group(em=True,
n=curr_grp + "_" + self.side + "_grp" +
self.extra_indicator))
above_grp = curr_grp + "_" + self.side + "_grp" + self.extra_indicator
else:
revFoot_grp_list.append(cmds.group(above_grp,
n=curr_grp + "_" + self.side + "_grp" +
self.extra_indicator))
above_grp = curr_grp + "_" + self.side + "_grp" + self.extra_indicator
# Loop through the group list and move them to their respective positions.
# Reversed group because the list starts with the ball fix grp to the in bank grp.
for curr_grp in reversed(revFoot_grp_list):
if "revInBank" in curr_grp:
cmds.move(bigToe_pos[0], bigToe_pos[1], bigToe_pos[2], curr_grp,
a=True, ws=True)
elif "revOutBank" in curr_grp:
cmds.move(pinky_pos[0], pinky_pos[1], pinky_pos[2], curr_grp,
a=True, ws=True)
elif "revHeel" in curr_grp:
cmds.move(heel_pos[0], heel_pos[1], heel_pos[2], curr_grp,
a=True, ws=True)
elif "revToe" in curr_grp:
cmds.move(max_toe_pos[0], max_toe_pos[1], max_toe_pos[2], curr_grp,
a=True, ws=True)
elif "revBall" in curr_grp:
cmds.move(ball_pos[0], ball_pos[1], ball_pos[2], curr_grp,
a=True, ws=True)
# Parent the foot under the rev ball grp and the ball under the rev ball fix grp.
cmds.parent(revFoot_grp_list[5], self.foot_cc)
cmds.parent(self.footRoot_jnt, revFoot_grp_list[1])
cmds.parent(revFoot_grp_list[0], self.footRoot_jnt)
cmds.parent(self.ball_jnt, revFoot_grp_list[0])
# Create the controls on the foot.
cmds.select(self.foot_cc, r=True)
cmds.addAttr(ln="______", at="float", k=True,)
cmds.addAttr(ln="ballRoll", at="float", k=True)
cmds.addAttr(ln="toeRoll", at="float", k=True)
cmds.addAttr(ln="heelRoll", at="float", k=True)
cmds.addAttr(ln="bank", at="float", k=True)
cmds.addAttr(ln="toePivot", at="float", k=True)
cmds.addAttr(ln="heelPivot", at="float", k=True)
foot_cc_attrs = ["ballRoll", "toeRoll", "heelRoll", "bank",
"toePivot", "heelPivot"]
# Create the nodes to turn on and off the controls, these are only meant for IKs.
IK_switch_node1 = cmds.shadingNode("multiplyDivide",
n="foot_01_" + self.side + "_IKSwitch_multdiv", au=True)
IK_switch_node2 = cmds.shadingNode("multiplyDivide",
n="foot_01_" + self.side + "_IKSwitch_multdiv", au=True)
IK_switch_nodes = [IK_switch_node1, IK_switch_node2]
self.attach_foot_controls(IK_switch_nodes, foot_cc_attrs, revFoot_grp_list)
# Now duplicate the foot for the IK leg.
# foot_cc_grp = cmds.listRelatives(self.foot_cc, p=True)[0]
# selected = cmds.select("foot_" + self.side + "_cc", r=True)
# cmds.duplicate(rr=True, un=True, rc=True,
# ic=True, st=True,
# n="footDup_" + self.side + "_grp")
# dup_foot_cc = cmds.listRelatives(duplicated_foot_grp)[0]
# dup_foot_grp_children = cmds.listRelatives(duplicated_foot_grp, ad=True)
# Find revBall fix and parent it under the revBall to not get deleted.
# dup_revBall_index = dup_foot_grp_children.index(revFoot[1] + "_" +
# self.side + "_grp1")
# dup_revBallFix_index = dup_foot_grp_children.index(revFoot[0] + "_" +
# self.side + "_grp1")
# cmds.parent(dup_foot_grp_children[dup_revBallFix_index],
# dup_foot_grp_children[dup_revBall_index])
# Find all the joints and delete it, will also delete the toes.
# dup_foot_jnts = cmds.listRelatives(dup_foot_grp_children, ad=True, type="joint")
# cmds.delete(dup_foot_jnts)
#
# new_dup_cc_children = cmds.listRelatives(dup_foot_cc, ad=True)
# Find the rpIK object in the IK_cc
# rp_IK = cmds.listRelatives(IK_cc, ad=True, type="ikHandle")[0]
# Parent to the IK_cc
# cmds.parent(duplicated_foot_grp, IK_cc)
# cmds.parent(rp_IK, new_dup_cc_children[4])
# Connect all the attributes from the original foot cc to the duplicated one.
# cmds.connectAttr(self.foot_cc + ".translate", dup_foot_cc + ".translate")
# cmds.connectAttr(self.foot_cc + ".rotate", dup_foot_cc + ".rotate")
# cmds.connectAttr(self.foot_cc + ".IK_FK_Switch", dup_foot_cc + ".IK_FK_Switch")
# cmds.connectAttr(self.foot_cc + ".ballRoll", dup_foot_cc + ".ballRoll")
# cmds.connectAttr(self.foot_cc + ".toeRoll", dup_foot_cc + ".toeRoll")
# cmds.connectAttr(self.foot_cc + ".heelRoll", dup_foot_cc + ".heelRoll")
# cmds.connectAttr(self.foot_cc + ".bank", dup_foot_cc + ".bank")
# cmds.connectAttr(self.foot_cc + ".toePivot", dup_foot_cc + ".toePivot")
# cmds.connectAttr(self.foot_cc + ".heelPivot", dup_foot_cc + ".heelPivot")
def attach_foot_controls(self, IK_switch_nodes, foot_attrs, revFoot_grp_list):
transforms = ["X", "Y", "Z"]
negative_math_node = cmds.shadingNode("multiplyDivide",
n="foot_01_" + self.side + "_footRev_multdiv", au=True)
for curr_trans in transforms:
cmds.setAttr(negative_math_node + ".input2" + curr_trans, -1)
# Attach the IK_FK_Switch to the newly created nodes, for all of the first input.
# IK_switch_nodes = [IK_switch_node1, IK_switch_node2]
for curr_trans in transforms:
cmds.connectAttr(self.foot_cc + ".IK_FK_Switch",
IK_switch_nodes[0] + ".input1" + curr_trans, f=True)
for curr_trans in transforms:
cmds.connectAttr(self.foot_cc + ".IK_FK_Switch",
IK_switch_nodes[1] + ".input1" + curr_trans, f=True)
# Attach the foot attrs to the second inputs of the math nodes.
# foot_attrs = ["ballRoll", "toeRoll", "heelRoll", "bank",
# "toePivot", "heelPivot"]
cmds.connectAttr(self.foot_cc + "." + foot_attrs[0],
IK_switch_nodes[0] + ".input2X", f=True)
cmds.connectAttr(self.foot_cc + "." + foot_attrs[1],
IK_switch_nodes[0] + ".input2Y", f=True)
cmds.connectAttr(self.foot_cc + "." + foot_attrs[2],
IK_switch_nodes[0] + ".input2Z", f=True)
cmds.connectAttr(self.foot_cc + "." + foot_attrs[3],
IK_switch_nodes[1] + ".input2X", f=True)
cmds.connectAttr(self.foot_cc + "." + foot_attrs[4],
IK_switch_nodes[1] + ".input2Y", f=True)
cmds.connectAttr(self.foot_cc + "." + foot_attrs[5],
IK_switch_nodes[1] + ".input2Z", f=True)
# revFoot_grp_list = ["revBallFix_*SIDE*_grp", "revBall_*SIDE*_grp,
# "revToe_*SIDE*_grp, "revHeel_*SIDE*_grp,
# "revOutBank_*SIDE*_grp, "revInBank_*SIDE*_grp]
# The ball fix group, roll
cmds.connectAttr(IK_switch_nodes[0] + ".outputX",
negative_math_node + ".input1X", f=True)
cmds.connectAttr(negative_math_node + ".outputX",
revFoot_grp_list[0] + ".rx", f=True)
# The ball group, roll
cmds.connectAttr(IK_switch_nodes[0] + ".outputX",
revFoot_grp_list[1] + ".rx", f=True)
# The toe group, roll
cmds.connectAttr(IK_switch_nodes[0] + ".outputY",
revFoot_grp_list[2] + ".rx", f=True)
# The heel group, roll
cmds.connectAttr(IK_switch_nodes[0] + ".outputZ",
revFoot_grp_list[3] + ".rx", f=True)
# The out bank group
cmds.connectAttr(IK_switch_nodes[1] + ".outputX",
revFoot_grp_list[4] + ".rz", f=True)
# The in bank group
cmds.connectAttr(IK_switch_nodes[1] + ".outputX",
revFoot_grp_list[5] + ".rz", f=True)
if self.side == "l":
cmds.transformLimits(revFoot_grp_list[5], rz=[0, 45], erz=[1, 0])
cmds.transformLimits(revFoot_grp_list[4], rz=[0, 0], erz=[0, 1])
elif self.side == "r":
cmds.transformLimits(revFoot_grp_list[5], rz=[0, 0], erz=[0, 1])
cmds.transformLimits(revFoot_grp_list[4], rz=[0, 45], erz=[1, 0])
# The toe group, pivot
cmds.connectAttr(IK_switch_nodes[1] + ".outputY",
revFoot_grp_list[2] + ".ry", f=True)
# The heel group, roll
cmds.connectAttr(IK_switch_nodes[1] + ".outputZ",
revFoot_grp_list[3] + ".ry", f=True)
def clean_IK_foot(self, IK_cc):
# Delete all the joints of the IK cc. When creating the foot, | |
return Component('Multiply', arguments={'left': self, 'right': Component.of(other)})
def __rmul__(self, other) -> "Component":
return Component('Multiply', arguments={'left': Component.of(other), 'right': self})
def __floordiv__(self, other) -> "Component":
return Component('Divide', arguments={'left': self, 'right': Component.of(other)})
def __rfloordiv__(self, other) -> "Component":
return Component('Divide', arguments={'left': Component.of(other), 'right': self})
def __truediv__(self, other) -> "Component":
return Component('Divide', arguments={
'left': Component('Cast', arguments={'data': self}, options={"atomic_type": "float"}),
'right': Component('Cast', arguments={'data': Component.of(other)}, options={"atomic_type": "float"})})
def __rtruediv__(self, other) -> "Component":
return Component('Divide', arguments={
'left': Component('Cast', arguments={'data': Component.of(other)}, options={"atomic_type": "float"}),
'right': Component('Cast', arguments={'data': self}, options={"atomic_type": "float"})})
def __mod__(self, other) -> "Component":
return Component('Modulo', arguments={'left': self, 'right': Component.of(other)})
def __rmod__(self, other) -> "Component":
return Component('Modulo', arguments={'left': Component.of(other), 'right': self})
def __pow__(self, power, modulo=None) -> "Component":
return Component('Power', arguments={'data': self, 'radical': Component.of(power)})
    def __rpow__(self, other) -> "Component":
        return Component('Power', arguments={'data': Component.of(other), 'radical': self})
def __or__(self, other) -> "Component":
return Component('Or', arguments={'left': self, 'right': Component.of(other)})
def __ror__(self, other) -> "Component":
return Component('Or', arguments={'left': Component.of(other), 'right': self})
def __and__(self, other) -> "Component":
return Component('And', arguments={'left': self, 'right': Component.of(other)})
def __rand__(self, other) -> "Component":
return Component('And', arguments={'left': Component.of(other), 'right': self})
def __invert__(self) -> "Component":
return Component('Negate', arguments={'data': self})
def __xor__(self, other) -> "Component":
return (self | other) & ~(self & other)
def __gt__(self, other) -> "Component":
return Component('GreaterThan', arguments={'left': self, 'right': Component.of(other)})
    def __ge__(self, other) -> "Component":
        # combine the comparisons with an 'Or' node; Python's `or` on two Components
        # would simply return the left operand and drop the equality test
        return Component('GreaterThan', arguments={'left': self, 'right': Component.of(other)}) \
            | Component('Equal', arguments={'left': self, 'right': Component.of(other)})
def __lt__(self, other) -> "Component":
return Component('LessThan', arguments={'left': self, 'right': Component.of(other)})
    def __le__(self, other) -> "Component":
        # see __ge__: combine with an 'Or' node rather than Python's `or`
        return Component('LessThan', arguments={'left': self, 'right': Component.of(other)}) \
            | Component('Equal', arguments={'left': self, 'right': Component.of(other)})
def __eq__(self, other) -> "Component":
return Component('Equal', arguments={'left': self, 'right': Component.of(other)})
def __ne__(self, other) -> "Component":
return ~(self == other)
def __abs__(self) -> "Component":
return Component('Abs', arguments={'data': self})
def __getitem__(self, identifier) -> "Component":
return Component('Index', arguments={'names': Component.of(identifier), 'data': self})
def __hash__(self):
return id(self)
def __str__(self, depth=0):
value = self.analysis.release_values.get(self.component_id, {"value": None})["value"]
if value is not None and depth != 0:
return str(value).replace("\n", "")
inner = []
if self.arguments:
inner.append(",\n".join([
f'{(" " * (depth + 1))}{name}={value.__str__(depth + 1)}'
for name, value in self.arguments.items() if value is not None
]))
if self.options:
inner.append(",\n".join([
f'{(" " * (depth + 1))}{name}={str(value).replace(chr(10), "")}'
for name, value in self.options.items() if value is not None
]))
if self.name == "Literal":
inner = "released value: " + str(self.value).replace("\n", "")
elif inner:
inner = f'\n{("," + chr(10)).join(inner)}\n{(" " * depth)}'
else:
inner = ""
return f'{self.name}({inner})'
def __repr__(self):
return f'<{self.component_id}: {self.name} Component>'
@staticmethod
def of(value, value_format=None, public=True) -> typing.Optional["Component"]:
"""
Given an array, list of lists, or dictionary, attempt to wrap it in a component and place the value in the release.
Loose literals are by default public.
        This is an alternative constructor for a Literal; the value is not wrapped in a Literal component if it is None or already a Component.
:param value: The value to be wrapped.
:param value_format: must be one of `array`, `indexmap`, `jagged`
:param public: Loose literals are by default public.
:return: A Literal component with the value attached to the parent analysis' release.
"""
if value is None:
return
# the dataset class
if type(value) == Dataset:
value = value.component
if type(value) == Component:
return value
return Component('Literal', value=value, value_format=value_format, value_public=public)
@staticmethod
def _expand_constraints(arguments, constraints):
"""
Helper function to insert additional nodes when _lower, _n, etc constraints are passed through to the component constructor utilities' kwargs.
:param arguments: {[argument name]: [component]}
:param constraints: restrictions on the data that will be translated into additional nodes components
:return: a modified argument set for the current component
"""
if not constraints:
return arguments
for argument in arguments.keys():
filtered = [i[len(argument) + 1:] for i in constraints.keys()
if i.startswith(argument)]
filtered = [i for i in filtered
if i in ALL_CONSTRAINTS]
if 'columns' in filtered:
if 'upper' in filtered and 'lower' in filtered:
arguments[argument] = Component('Resize', arguments={
"data": arguments[argument],
"upper": Component.of(constraints[argument + '_upper']),
"lower": Component.of(constraints[argument + '_lower']),
"number_columns": Component.of(constraints.get(argument + '_columns'))
})
elif 'categories' in filtered:
arguments[argument] = Component('Resize', arguments={
"data": arguments[argument],
"categories": Component.of(constraints[argument + '_categories']),
"number_columns": Component.of(constraints.get(argument + '_columns'))
})
else:
arguments[argument] = Component('Resize', arguments={
"data": arguments[argument],
"number_columns": Component.of(constraints.get(argument + '_columns'))
})
del constraints[argument + '_columns']
if 'upper' in filtered and 'lower' in filtered:
min_component = Component.of(constraints[argument + '_lower'])
max_component = Component.of(constraints[argument + '_upper'])
arguments[argument] = Component('Clamp', arguments={
"data": arguments[argument],
"lower": min_component,
"upper": max_component
})
# TODO: imputation on ints is unnecessary
arguments[argument] = Component('Impute', arguments={
"data": arguments[argument]
})
del constraints[argument + '_lower']
del constraints[argument + '_upper']
else:
if 'upper' in filtered:
arguments[argument] = Component('RowMax', arguments={
"left": arguments[argument],
"right": Component.of(constraints[argument + '_upper'])
})
del constraints[argument + '_upper']
if 'lower' in filtered:
arguments[argument] = Component('RowMin', arguments={
"left": arguments[argument],
"right": Component.of(constraints[argument + '_lower'])
})
del constraints[argument + '_lower']
if 'categories' in filtered:
arguments[argument] = Component('Clamp', arguments={
"data": arguments[argument],
"categories": Component.of(constraints[argument + '_categories'])
})
del constraints[argument + '_categories']
if 'n' in filtered:
warnings.warn("The `_n` constraint is deprecated. Use `_rows` or `_columns` instead.")
arguments[argument] = Component('Resize', arguments={
"data": arguments[argument],
"number_rows": Component.of(constraints[argument + '_n'])
})
del constraints[argument + '_n']
if 'rows' in filtered:
arguments[argument] = Component('Resize', arguments={
"data": arguments[argument],
"number_rows": Component.of(constraints.get(argument + '_rows'))
})
del constraints[argument + '_rows']
if constraints:
raise ValueError(f"unrecognized constraints: {list(constraints.keys())}")
return arguments
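# Illustrative sketch, not part of the library: how the operator overloads and
# _expand_constraints compose. It assumes an active Analysis context in which Component
# construction is valid, and that ALL_CONSTRAINTS (defined outside this excerpt)
# includes 'lower' and 'upper'; `data_component` is a stand-in for any existing node.
def _sketch_component_composition(data_component):
    # arithmetic on components builds new graph nodes instead of computing values:
    # Multiply(data_component, Literal(2.0))
    scaled = data_component * 2.0
    # constraint kwargs such as data_lower/data_upper are expanded into Clamp + Impute
    # nodes wrapped around the original argument
    arguments = Component._expand_constraints(
        {'data': scaled},
        {'data_lower': 0.0, 'data_upper': 10.0})
    return arguments['data']   # an Impute(Clamp(scaled, lower=0.0, upper=10.0)) node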
class Analysis(object):
"""
Top-level class that contains a definition of privacy and collection of statistics.
This class tracks cumulative privacy usage for all components within.
The dynamic flag makes the library easier to use, because multiple batches may be strung together before calling release().
However, it opens the execution up to potential side channel timing attacks. Disable this if side channels are a concern.
The eager flag makes the library easier to debug, because stack traces pass through malformed components.
As a library user, it may be useful to enable eager and find a small, similar public dataset to help shape your analysis.
Building an analysis with a large dataset and eager enabled is not recommended, because the analysis is re-executed for each additional node.
`filter_level` determines what data is included in the release:
- `public` only newly released public data is included in the release
- `public_and_prior` will also retain private values previously included in the release
- `all` for including all evaluations from all nodes, which is useful for system debugging
There are several arguments for enabling/disabling individual protections.
- `protect_floating_point` (enabled by default):
- if enabled, disables the runtime if the runtime was not compiled against mpfr
- if enabled, prevents the usage of the laplace and gaussian mechanisms
- if enabled, noise-addition statistics on floating point numbers default to the snapping mechanism
- `protect_sensitivity` (enabled by default):
- if enabled, users may not pass custom sensitivities to mechanisms
- `protect_elapsed_time` (disabled by default):
- if enabled, forces all computations to run in constant time, regardless of the private dataset
- WARNING: this feature is still in development. Some components (like resize) may still have different execution times on neighboring datasets.
- `strict_parameter_checks` (enabled by default):
        - if enabled, analyses may not consume more than epsilon=1, or delta greater than a value proportional to the number of records
- `stack_traces` (enabled by default):
- Disable stack traces to limit the amount of private information leaked should an error be encountered.
        - This only turns off stack traces from the runtime; the rest of the library is not affected.
- The library does not take epsilon consumed from errors into account
:param dynamic: flag for enabling dynamic validation
:param eager: release every time a component is added
:param neighboring: may be `substitute` or `add_remove`
:param group_size: number of individuals to protect simultaneously
:param filter_level: may be `public`, `public_and_prior` or `all`
:param protect_floating_point: enable for protection against floating point attacks
:param protect_elapsed_time: enable for protection against side-channel timing attacks
:param protect_sensitivity: disable to pass custom sensitivities
:param stack_traces: set to False to suppress potentially sensitive stack traces
:param strict_parameter_checks: enable this to fail when some soft privacy | |
content_cell = content_cell['content']
# spec. align property
SPEC_PROPS = ['align',]
if 'align' in cell_spec_style:
align = celstyle[i]['align']
# any property for cell, by OOXML specification
for cs, attrs in cell_spec_style.items():
if cs in SPEC_PROPS:
continue
cell_prop = makeelement(cs, attributes=attrs)
cellprops.append(cell_prop)
cell.append(cellprops)
# Paragraph (Content)
if not isinstance(content_cell, (list, tuple)):
content_cell = [content_cell,]
for c in content_cell:
# cell.append(cellprops)
if isinstance(c, etree._Element):
cell.append(c)
else:
cell.append(paragraph(c, jc=align))
row.append(cell)
i += 1
table.append(row)
return table
def picture(relationshiplist, picname, picdescription, pixelwidth=None,
pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
temp_dir=None):
'''Take a relationshiplist, picture file name, and return a paragraph containing the image
and an updated relationshiplist'''
# http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will be based on the
    # pixel size of the image. Return a paragraph containing the picture.
# Copy the file into the media dir
assert temp_dir
media_dir = join(temp_dir, _DOCX_DIR_NAME, 'word', 'media')
if not os.path.isdir(media_dir):
os.makedirs(media_dir)
shutil.copyfile(picname, join(media_dir,picname))
# Check if the user has specified a size
if not pixelwidth or not pixelheight:
# If not, get info from the picture itself
pixelwidth,pixelheight = Image.open(picname).size[0:2]
# OpenXML measures on-screen objects in English Metric Units
    # 1 cm = 360000 EMUs; the constant below assumes a screen resolution of roughly 72 dpi
emuperpixel = 12667
width = str(pixelwidth * emuperpixel)
height = str(pixelheight * emuperpixel)
# Set relationship ID to the first available
picid = '2'
picrelid = 'rId'+str(len(relationshiplist)+1)
relationshiplist.append([
'http://schemas.openxmlformats.org/officeDocument/2006/relationships/image',
'media/'+picname])
# There are 3 main elements inside a picture
# 1. The Blipfill - specifies how the image fills the picture area (stretch, tile, etc.)
blipfill = makeelement('blipFill',nsprefix='pic')
blipfill.append(makeelement('blip',nsprefix='a',attrnsprefix='r',attributes={'embed':picrelid}))
stretch = makeelement('stretch',nsprefix='a')
stretch.append(makeelement('fillRect',nsprefix='a'))
blipfill.append(makeelement('srcRect',nsprefix='a'))
blipfill.append(stretch)
# 2. The non visual picture properties
nvpicpr = makeelement('nvPicPr',nsprefix='pic')
cnvpr = makeelement('cNvPr',nsprefix='pic',
attributes={'id':'0','name':'Picture 1','descr':picname})
nvpicpr.append(cnvpr)
cnvpicpr = makeelement('cNvPicPr',nsprefix='pic')
cnvpicpr.append(makeelement('picLocks', nsprefix='a',
attributes={'noChangeAspect':str(int(nochangeaspect)),
'noChangeArrowheads':str(int(nochangearrowheads))}))
nvpicpr.append(cnvpicpr)
# 3. The Shape properties
sppr = makeelement('spPr',nsprefix='pic',attributes={'bwMode':'auto'})
xfrm = makeelement('xfrm',nsprefix='a')
xfrm.append(makeelement('off',nsprefix='a',attributes={'x':'0','y':'0'}))
xfrm.append(makeelement('ext',nsprefix='a',attributes={'cx':width,'cy':height}))
prstgeom = makeelement('prstGeom',nsprefix='a',attributes={'prst':'rect'})
prstgeom.append(makeelement('avLst',nsprefix='a'))
sppr.append(xfrm)
sppr.append(prstgeom)
# Add our 3 parts to the picture element
pic = makeelement('pic',nsprefix='pic')
pic.append(nvpicpr)
pic.append(blipfill)
pic.append(sppr)
# Now make the supporting elements
# The following sequence is just: make element, then add its children
graphicdata = makeelement('graphicData',nsprefix='a',
attributes={'uri':'http://schemas.openxmlformats.org/drawingml/2006/picture'})
graphicdata.append(pic)
graphic = makeelement('graphic',nsprefix='a')
graphic.append(graphicdata)
framelocks = makeelement('graphicFrameLocks',nsprefix='a',attributes={'noChangeAspect':'1'})
framepr = makeelement('cNvGraphicFramePr',nsprefix='wp')
framepr.append(framelocks)
docpr = makeelement('docPr',nsprefix='wp',
attributes={'id':picid,'name':'Picture 1','descr':picdescription})
effectextent = makeelement('effectExtent',nsprefix='wp',
attributes={'l':'25400','t':'0','r':'0','b':'0'})
extent = makeelement('extent',nsprefix='wp',attributes={'cx':width,'cy':height})
inline = makeelement('inline',
attributes={'distT':"0",'distB':"0",'distL':"0",'distR':"0"},nsprefix='wp')
inline.append(extent)
inline.append(effectextent)
inline.append(docpr)
inline.append(framepr)
inline.append(graphic)
drawing = makeelement('drawing')
drawing.append(inline)
run = makeelement('r')
run.append(drawing)
paragraph = makeelement('p')
paragraph.append(run)
return relationshiplist,paragraph
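# Illustrative sketch, not part of the original module: embedding an image. It assumes a
# relationship list of [type, target] pairs built elsewhere (e.g. by this module's
# relationshiplist() helper) and a temp_dir already holding the unpacked document
# skeleton that picture() expects; 'image1.png' is a placeholder file name.
def _sketch_embed_picture(relationships, temp_dir):
    relationships, pic_para = picture(relationships, 'image1.png',
                                      'A chart rendered to PNG',
                                      pixelwidth=400, pixelheight=300,
                                      temp_dir=temp_dir)
    # the returned paragraph can then be appended to the document body, and the updated
    # relationship list serialized into word/_rels/document.xml.rels
    return relationships, pic_para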
def search(document,search):
'''Search a document for a regex, return success / fail result'''
result = False
searchre = re.compile(search)
for element in document.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
if searchre.search(element.text):
result = True
return result
def replace(document,search,replace):
    '''Replace all occurrences of string with a different string, return updated document'''
newdocument = document
searchre = re.compile(search)
for element in newdocument.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
if searchre.search(element.text):
element.text = re.sub(search,replace,element.text)
return newdocument
def clean(document):
""" Perform misc cleaning operations on documents.
Returns cleaned document.
"""
newdocument = document
# Clean empty text and r tags
for t in ('t', 'r'):
rmlist = []
for element in newdocument.iter():
if element.tag == '{%s}%s' % (nsprefixes['w'], t):
if not element.text and not len(element):
rmlist.append(element)
for element in rmlist:
element.getparent().remove(element)
return newdocument
def findTypeParent(element, tag):
""" Finds fist parent of element of the given type
@param object element: etree element
@param string the tag parent to search for
@return object element: the found parent or None when not found
"""
p = element
while True:
p = p.getparent()
if p.tag == tag:
return p
# Not found
return None
def AdvSearch(document, search, bs=3):
'''Return set of all regex matches
This is an advanced version of python-docx.search() that takes into
account blocks of <bs> elements at a time.
What it does:
It searches the entire document body for text blocks.
    Since the text to search could be spread across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The search is attempted against every run of up to <bs> consecutive
    text blocks, and the smallest matching run is adopted.
Examples:
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search : 'Hello,'
output blocks : [ 'Hello,' ]
original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
search : '(__[a-z]+__)'
output blocks : [ '__name__' ]
@param instance document: The original document
    @param str search: The text to search for (regexp)
@param int bs: See above
    @return set All occurrences of search string
'''
# Compile the search regexp
searchre = re.compile(search)
matches = []
# Will match against searchels. Searchels is a list that contains last
# n text elements found in the document. 1 < n < bs
searchels = []
for element in document.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
# Add this element to searchels
searchels.append(element)
if len(searchels) > bs:
                    # If searchels is too long, remove first elements
searchels.pop(0)
# Search all combinations, of searchels, starting from
# smaller up to bigger ones
                # l = search length
# s = search start
# e = element IDs to merge
found = False
for l in range(1,len(searchels)+1):
if found:
break
for s in range(len(searchels)):
if found:
break
if s+l <= len(searchels):
e = range(s,s+l)
txtsearch = ''
for k in e:
txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
match = searchre.search(txtsearch)
if match:
matches.append(match.group())
found = True
return set(matches)
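# Illustrative sketch, not part of the original module: collect every distinct
# __name__-style placeholder, even when one is split across up to three consecutive
# w:t runs. `document` is the lxml body element the other helpers operate on.
def _sketch_find_placeholders(document):
    return AdvSearch(document, r'__[a-z]+__', bs=3)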
def advReplace(document,search,replace,bs=3):
    '''Replace all occurrences of string with a different string, return updated document
This is a modified version of python-docx.replace() that takes into
account blocks of <bs> elements at a time. The replace element can also
be a string or an xml etree element.
What it does:
It searches the entire document body for text blocks.
    Then it scans those text blocks for the search pattern.
    Since the text to search could be spread across multiple text blocks,
we need to adopt some sort of algorithm to handle this situation.
The smaller matching group of blocks (up to bs) is then adopted.
If the matching group has more than one block, blocks other than first
are cleared and all the replacement text is put on first block.
Examples:
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search / replace: 'Hello,' / 'Hi!'
output blocks : [ 'Hi!', '', ' world!' ]
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search / replace: 'Hello, world' / 'Hi!'
output blocks : [ 'Hi!!', '', '' ]
original text blocks : [ 'Hel', 'lo,', ' world!' ]
search / replace: 'Hel' / 'Hal'
output blocks : [ 'Hal', 'lo,', ' world!' ]
@param instance document: The original document
@param str search: The text to search for (regexp)
@param mixed replace: The replacement text or lxml.etree element to
append, or a list of etree elements
@param int bs: See above
@return instance The document with replacement applied
'''
# Enables debug output
DEBUG = False
newdocument = document
# Compile the search regexp
searchre = re.compile(search)
# Will match against searchels. Searchels is a list that contains last
# n text elements found in the document. 1 < n < bs
searchels = []
for element in newdocument.iter():
if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
if element.text:
# Add this element to searchels
searchels.append(element)
if len(searchels) > bs:
                    # If searchels is too long, remove first elements
searchels.pop(0)
# Search all combinations, of searchels, starting from
# smaller up to bigger ones
                # l = search length
# s = search start
# e = element IDs to merge
found = False
for l in | |
# Source: Askinkaty/text-readability
# -*- coding: utf-8 -*-
from process_gram import process_grammar
import codecs
first_pp = ['мы', 'я', 'наш', 'мой']
second_pp = ['ты', 'вы', 'ваш', 'твой']
third_pp = ['он', 'она', 'они', 'оно', 'их', 'ee', 'его', 'ихний', 'ихним', 'ихнем']
indef_pron = ['некто', 'некого', 'некому', 'некем', 'нечто', 'нечего', 'нечему', 'нечем', 'некоторый', 'некий', 'любой',
'никто', 'ничто', 'никакой', 'нисколько', 'нигде', 'негде', 'некуда', 'никуда', 'неоткуда', 'ниоткуда',
              'некогда', 'никогда', 'никак', 'незачем']
place_adverbs = ['близко', 'ближе', 'вблизи', 'вверх', 'вверху', 'ввысь', 'вглубь', 'вдали', 'вдаль', 'везде', 'взад',
'влево', 'вне', 'вниз', 'внизу', 'внутри', 'внутрь', 'вовне', 'вовнутрь', 'вокруг', 'вперед',
'впереди', 'вправо', 'всюду', 'высоко', 'выше', 'глубоко', 'глубже', 'далеко', 'дальше', 'донизу',
'дома', 'здесь', 'издалека', 'издалече', 'издали', 'изнутри', 'кверху', 'книзу', 'кругом', 'левее',
'наверх', 'наверху', 'наискосок', 'налево', 'направо', 'напротив', 'наружно', 'наружу', 'невысоко',
'неглубоко', 'недалеко', 'неподалеку', 'низко', 'ниже', 'одаль', 'около', 'окрест', 'особняком',
'отдельно', 'откуда', 'отсюда', 'поближе', 'поверх', 'повсеместно', 'повсюду', 'повыше', 'поглубже',
'подальше', 'позади', 'пониже', 'понизу', 'посередке', 'посередине', 'посреди', 'посредине', 'поодаль',
'правее', 'рядом', 'сбоку', 'сверху', 'свыше', 'сзади', 'слева', 'снизу', 'снаружи', 'спереди',
'справа', 'стороной', 'супротив']
time_adverbs = ['бесконечно', 'беспрерывно', 'ввек', 'весной', 'вечно', 'вмиг', 'вначале', 'вовек', 'вовремя', 'впору',
'впоследствии',
'впредь', 'враз', 'временно', 'всечасно', 'вскоре', 'встарь', 'вчера', 'вчерась', 'давеча', 'давно',
'давненько', 'денно', 'длительно', 'днесь', 'доколе', 'долго', 'дольше', 'доныне',
'досветла', 'доселе', 'досрочно', 'дотемна', 'доутра', 'единовременно', 'ежеквартально', 'ежеминутно',
'еженощно', 'ежесекундно', 'ежечасно', 'еще', 'заблаговременно', 'завсегда', 'завтра', 'задолго',
'загодя', 'заранее', 'зараз', 'засим', 'затем', 'зимой', 'извечно', 'издревле', 'изначально', 'иногда',
'исконно', 'испокон', 'исстари', 'круглосуточно', 'кряду', 'летом', 'мимолетно', 'навек', 'навеки',
'навсегда', 'надолго', 'назавтра', 'накануне', 'наконец', 'намедни', 'наперед', 'напоследок',
'напролет', 'насовсем', 'наутро', 'недавно', 'недолго', 'незадолго', 'незамедлительно', 'ненадолго',
'нескоро', 'неоднократно', 'нонче', 'непрерывно', 'непродолжительно', 'нощно', 'ныне', 'нынче',
'однажды', 'одновременно', 'осенью', 'отколе', 'отныне', 'отродясь', 'первоначально', 'позавчера',
'позднее', 'поздно', 'поздновато', 'позже', 'подолгу', 'подряд', 'пожизненно', 'пока', 'покамест',
'поныне', 'поначалу', 'попозже', 'пораньше', 'после', 'послезавтра',
'поспешно', 'поскорее', 'постоянно', 'поутру', 'прежде', 'преждевременно', 'присно',
'продолжительно', 'редко', 'реже', 'ранее', 'рано', 'рановато', 'раньше', 'редко', 'своевременно',
'сегодня', 'скорее', 'скорей', 'скоро', 'смолоду', 'сначала', 'сперва', 'сразу', 'срочно', 'сроду',
'теперича', 'часто', 'уже', 'ужо']
interrogative_pronoun = ['кто', 'что', 'какой', 'каков', 'чей', 'который', 'почему', 'зачем', 'где', 'куда', 'откуда',
'отчего']
def is_have_grammar(e):
    """Return True if the token tuple carries a non-empty grammar tag."""
    return e[1] != ''
# 1
# test that the current word is a first person pronoun
def first_person_pronoun(t):
fpp1 = 0
for el in t:
if el[2] in first_pp:
fpp1 += 1
return fpp1
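# Illustrative sketch, not part of the original module, of the token format these
# feature functions assume: each token is a (surface form, grammar tag, lemma) triple,
# so el[1] feeds process_grammar() and el[2] is matched against the word lists above.
# The grammar tag strings below are made up for the example; only el[2] matters here.
def _demo_first_person_count():
    tokens = [
        ('Мы', 'P--1-pnn', 'мы'),
        ('читаем', 'Vmip1p-a-e', 'читать'),
        ('книгу', 'Ncfsan', 'книга'),
    ]
    assert first_person_pronoun(tokens) == 1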
# 2
# test that the current word is a second person pronoun
def second_person_pronoun(t):
spp2 = 0
for el in t:
if el[2] in second_pp:
spp2 += 1
return spp2
# 3
# test that the current word is a third person pronoun
def third_person_pronoun(t):
tpp3 = 0
for el in t:
if el[2] in third_pp:
tpp3 += 1
return tpp3
# 4
# test that the current word is a pronoun
def is_pronoun(t):
pron = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'P':
pron += 1
else:
continue
return pron
# 5
# test that the current word is a finite verb
def is_finite_verb(t):
finite = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'V' and d_el.get('vform') == 'i':
finite += 1
else:
continue
return finite
# 6
# test that the current word is an adjective or a participle
# maybe we should leave only the test for adjectives and add a test that they are modifiers and not parts of predicates
def is_modifier(t):
mod = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'A' or (d_el.get('pos') == 'V' and d_el.get('vform') == 'p'):
mod += 1
else:
continue
return mod
# 7
# test that the current word has a past tense form
def past_tense(t):
past = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'V' and d_el.get('tense') == 's':
past += 1
else:
continue
return past
# 8
# test that the current word has a perfect aspect form
def perf_aspect(t):
perf = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'V' and d_el.get('aspect') == 'p':
perf += 1
else:
continue
return perf
# 9
# test that the current word has a present tense form
def present_tense(t):
pres = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'V' and d_el.get('tense') == 'p':
pres += 1
else:
continue
return pres
# 10
# test that the current word is an adverb
def total_adverb(t):
total_adv = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'R':
total_adv += 1
else:
continue
return total_adv
# nouns
# 11
# 12
# test that the current word a verbal noun (отглагольное сущ.) or not verbal noun
def is_nominalization(t):
    nomz = 0
    nouns = 0
    # read the nominalization lemma dictionary once, instead of re-opening it for every noun
    with codecs.open('dictionaries/final_lemmas_nominalizations.txt', mode='r', encoding='utf-8') as f:
        read_lines = set([s.strip() for s in f.readlines()])
    for i, el in enumerate(t):
        if is_have_grammar(el):
            d_el = process_grammar(el)
            if d_el.get('pos') == 'N':
                if el[2].lower() in read_lines:
                    nomz += 1
                else:
                    nouns += 1
        else:
            continue
    return nomz, nouns
# 13
# test that the current word has a genitive case form
def is_genitive(t):
gen = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if (d_el.get('pos') == 'N' or d_el.get('pos') == 'P' or d_el.get('pos') == 'A') and d_el.get('case') == 'g':
gen += 1
else:
continue
return gen
# 14
# test that the current word has a neuter gender form
def is_neuter(t):
neuter = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if ((d_el.get('pos') == 'N' or d_el.get('pos') == 'P' or d_el.get('pos') == 'A')
and d_el.get('gender') == 'n'):
neuter += 1
else:
continue
return neuter
# 15
# test that the current word has a passive form
def is_passive(t):
passive = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'V' and 'p' == d_el.get('voice'):
passive += 1
else:
continue
return passive
# 16
# test that the current verb is an infinitive
def infinitives(t):
infin = 0
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'V' and d_el.get('vform') == 'n':
infin += 1
else:
continue
return infin
# 17
# test that the current word is a speech verb
def speech_verb(t):
sp_verb = 0
with codecs.open(r'dictionaries/all_lemmas_verb_speech.txt', mode='r', encoding='utf-8') as f:
read_lines = set([s.strip() for s in f.readlines()])
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'V':
if el[2].lower() in read_lines:
sp_verb += 1
else:
continue
return sp_verb
# 18
# test that the current word is a mental verb
def mental_verb(t):
mntl_verb = 0
with codecs.open(r'dictionaries/all_lemmas_verb_mental.txt', mode='r', encoding='utf-8') as f:
read_lines = set([s.strip() for s in f.readlines()])
for i, el in enumerate(t):
if is_have_grammar(el):
d_el = process_grammar(el)
if d_el.get('pos') == 'V':
if el[2].lower() in read_lines:
mntl_verb += 1
else:
continue
return mntl_verb
# 19
# test that the current sentence includes that-complement clause
def that_complement(t):
that_compl = 0
l = len(t)
for i, el in enumerate(t):
if is_have_grammar(el):
if t[l - 1][0] != '?':
d_el = process_grammar(el)
d_next_el = {}
if i + 1 < len(t):
next_el = t[i + 1]
d_next_el = process_grammar(next_el)
d_next_el = d_next_el if d_next_el is not None else {}
# test that current word is verb or short-form adjective and the next word is not a verb or
                # short-form adjective because of sentences like 'Я был счастлив, что она пришла' ('I was happy that she came').
if d_el.get('pos') == 'V' or (d_el.get('pos') == 'A' and d_el.get('definiteness') == 's'):
if is_have_grammar(next_el):
if (d_next_el.get('pos') != 'V' and
(d_next_el.get('pos') != 'A' or d_next_el.get('definiteness') != 's')):
for j in range(4):
                                # test that there are no pronouns like 'то, это, такой' between the current word and comma
                                # because of sentences like 'Я не предвидел того, что вы приедете' ('I did not foresee that you would come'),
# which has relative meaning.
# test that conjunction like 'что', 'чтобы' directly follow after comma
if (i + j + 1 < len(t) and
t[i + j][2] not in ['весь', 'все', 'такой', 'то', 'это', 'тот',
'этот'] and
t[i + j + 1][0] == ',' and i + j + 2 < len(t) and
t[i + j + 2][2] in ['что', 'чтобы']):
if i + j + 3 < len(t):
# test that if the conjunction is 'чтобы', there's no infinitive verb after it
# to | |
or rev_big == 20 ):
            version = 20.0
        elif ( rev_little == 21 or rev_big == 21 ):
            version = 21.0
        elif ( rev_little == 23 or rev_big == 23 ):
            version = 21.0
        elif ( rev_little == 24 or rev_big == 24 ):
            version = 24.0
        elif ( rev_little == 26 or rev_big == 26 ):
            version = 26.0
        else:
            raise UnknownPfile("Unknown header structure for revision %s" % rev_little)
        return version
def dump_header_strarr(self):
dumped = self._dump_struct(self.hdr)
strarr = []
for info in dumped:
if (info.label.find("pad") == 0):
continue
# needed this because Gregor's UID values had some odd chars which
# caused errors on the import of Probep data
try:
val = info.value
# needed this because Pom's UID values were causing errors
# when VIFF was read back in
if info.label == 'rhe_study_uid': val = ' '
if info.label == 'rhs_series_uid': val = ' '
if info.label == 'rhs_landmark_uid': val = ' '
if info.label == 'rhi_image_uid': val = ' '
val = str(val)
except:
val = ' '
strarr.append(str(info.label)+' '+val)
return strarr
def dump_header(self):
dumped = self._dump_struct(self.hdr)
writer = csv.writer(sys.stdout, delimiter="\t")
writer.writerow(["\n\nHeader All - field", "value"])
for info in dumped:
if (info.label.find("pad") == 0):
continue
writer.writerow([info.label, str(info.value)])
def _dump_struct(self, struct, include_structs=False):
"""
Recursively travels through a ctypes.Structure and returns a list of
namedtuples, containing label, depth, value, size, and offset.
If include_structs is true, output will include lines for individual
structures and their sizes and offsets -- not just non-structure fields.
"""
output = []
self._dump_struct_rec(struct, output, include_structs)
return output
def _dump_struct_rec(self, struct, output, include_structs=False, prefix='', depth=0, base_offset=0):
"""
Internal recursive method for dumping structures.
Appends to the "output" parameter.
"""
struct_class = type(struct)
if include_structs:
output.append(StructInfo(
"%s (%s)" % (prefix, struct_class.__name__),
depth, '', str(struct_class), ctypes.sizeof(struct_class), base_offset))
for f in struct._fields_:
name = f[0]
field_type = f[1]
field_meta = getattr(struct_class, name)
field = getattr(struct, name)
cur_prefix = "%s%s." % (prefix, name)
field_offset = base_offset + field_meta.offset
if isinstance(field, ctypes.Structure):
self._dump_struct_rec(field, output, include_structs, cur_prefix,depth+1, field_offset)
else:
label = prefix+name
output.append(StructInfo(label, depth, field, field_type.__name__, field_meta.size, field_offset))
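# Illustrative sketch, not part of the original reader: the ctypes introspection the two
# _dump_struct* methods rely on. Every entry in _fields_ has a class-level descriptor
# exposing its byte offset and size; _ToyHeader is a made-up structure for the example.
def _demo_ctypes_field_walk():
    import ctypes

    class _ToyHeader(ctypes.Structure):
        _fields_ = [("rev", ctypes.c_float),
                    ("npts", ctypes.c_int32),
                    ("label", ctypes.c_char * 8)]

    rows = []
    for name, _ftype in _ToyHeader._fields_:
        meta = getattr(_ToyHeader, name)   # ctypes field descriptor
        rows.append((name, meta.offset, meta.size))
    return rows   # [('rev', 0, 4), ('npts', 4, 4), ('label', 8, 8)]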
#------------------------------------------------------------------------------
# originally ge_pfile_mapper.py
#------------------------------------------------------------------------------
class PfileMapper(object):
def __init__(self, file_name, hdr, version, endian):
"""
Given a file name, its header, version number and endianess, this
class will parse the data section of the file for the suppressed and
unsuppressed data.
All 'timePts' (aka. FID data arrays) are stored in the raw_data
attribute. It is a numpy ndarray with shape of:
[cols, rows, slices, numTimePts, numCoils, numSpecPts], np.complex64
For SVS data, cols, rows and slices are all equal to 1.
- raw_suppressed is a view onto the water suppressed fids data
- raw_unsuppressed is a view onto the water unsuppressed fids data
- avg_suppressed and avg_unsuppressed are numpy arrays where the
relevant raw_ views have been summed along the numTimePts
dimension. shape = [cols, rows, slices, numCoils, numSpecPts]
For non-SVS data, only the raw_data attribute has data in it.
History:
Derived from SIVIC file svkGEPFileMapper.cc which was used to map data
from PROBE-P and PRESSCSI P-files. SIVIC has other mapper classes for
other types of P-file data. I will plan on using this model here, too.
"""
self.file_name = file_name
self.hdr = hdr
self.version = version
self.endian = endian
self.is_svs = False
self.raw_data = None
self.raw_suppressed = None
self.avg_suppressed = None
self.raw_unsuppressed = None
self.avg_unsuppressed = None
@property
def get_select_box_center(self):
"""
Center position is taken from user variables. The Z "slice"
position used to be taken from the image header "image.loc",
but with the LX architecture, this held the table position only,
so if Graphic RX was used to introduce an offset, it wouldn't
be successfully extracted.
"""
center0 = -1 * self.hdr.rhi_user11
center1 = -1 * self.hdr.rhi_user12
center2 = self.hdr.rhi_user13
return np.array([center0, center1, center2])
@property
def get_select_box_size(self):
boxsize = np.array([0.0, 0.0, 0.0])
dcos = self.get_dcos
if self.version > 9:
lMax = 0
pMax = 0
sMax = 0
lIndex = 0
pIndex = 0
sIndex = 0
for i in range(3):
if abs( dcos[i][0] ) > lMax:
lIndex = i
lMax = abs( dcos[i][0] )
if abs( dcos[i][1] ) > pMax:
pIndex = i
pMax = abs( dcos[i][1] )
if abs( dcos[i][2] ) > sMax:
sIndex = i
sMax = abs( dcos[i][2] )
boxsize[ lIndex ] = self.hdr.rhi_user8
boxsize[ pIndex ] = self.hdr.rhi_user9
boxsize[ sIndex ] = self.hdr.rhi_user10
else:
boxsize[0] = self.hdr.rhr_roilenx
boxsize[1] = self.hdr.rhr_roileny
boxsize[2] = self.hdr.rhr_roilenz
if self.is_swap_on:
ftemp = boxsize[0]
boxsize[0] = boxsize[1]
boxsize[1] = ftemp
return boxsize
@property
def get_voxel_spacing(self):
"""
Get the voxel spacing in 3D. Note that the slice spacing may include
a skip.
Swaps the FOV if necessary based on freq_dir setting.
"""
user19 = self.hdr.rhi_user19
voxspace = np.array([0.0, 0.0, 0.0])
if (user19 > 0) and (self.version > 9):
voxspace[0] = user19
voxspace[1] = user19
voxspace[2] = user19
else:
fov = self.get_fov
nvox = self.get_num_voxels
voxspace[0] = fov[0]/nvox[0]
voxspace[1] = fov[1]/nvox[1]
voxspace[2] = fov[2]/nvox[2]
return voxspace
@property
def get_fov(self):
fov = np.array([0.0, 0.0, 0.0])
nvox = self.get_num_voxels
dfov = self.hdr.rhi_dfov
if self.version > 9:
fov[0] = dfov
fov[1] = dfov
# 2D case vs 3D cases
if self.is_2d:
fov[2] = self.hdr.rhi_user10
else:
fov[2] = self.hdr.rhi_scanspacing * self.hdr.rhr_zcsi
else:
fov[0] = self.hdr.rhr_rh_user7
fov[1] = self.hdr.rhr_rh_user8
fov[2] = self.hdr.rhr_rh_user9
# Anisotropic voxels:
if (self.version > 9) and (nvox[0] != nvox[1]):
# CSI has already been reordered if needed - so fov calculated
# with this CSI will not need reordering, need next power of 2:
xdim = int(pow(2, math.ceil(math.log(nvox[0], 2))))
ydim = int(pow(2, math.ceil(math.log(nvox[1], 2))))
if( ydim > xdim ):
fov_spatial_resolution = dfov/ydim
else:
fov_spatial_resolution = dfov/xdim
fov[1] = fov_spatial_resolution * ydim
fov[0] = fov_spatial_resolution * xdim
elif self.is_swap_on:
# Swap the FOV if necessary based on freq dir:
temp = fov[0]
fov[0] = fov[1]
fov[1] = temp
return fov
@property
def get_num_voxels(self):
"""
Get the 3D spatial dimensionality of the data set
Returns an int array with 3 dimensions. Swaps
if necessary based on freq_dir setting.
"""
nvox = np.array([0, 0, 0])
if self.hdr.rhr_rh_file_contents == 0:
nvox[0] = 1
nvox[1] = 1
nvox[2] = 1
else:
nvox[0] = int(self.hdr.rhr_xcsi)
nvox[1] = int(self.hdr.rhr_ycsi)
nvox[2] = int(self.hdr.rhr_zcsi)
# Swap dimensions if necessary:
if self.is_swap_on:
temp = nvox[0]
nvox[0] = nvox[1]
nvox[1] = temp
return nvox
@property
def get_dcos(self):
dcos = np.zeros([3, 3], float)
dcos[0][0] = -( self.hdr.rhi_trhc_R - self.hdr.rhi_tlhc_R )
dcos[0][1] = -( self.hdr.rhi_trhc_A - self.hdr.rhi_tlhc_A )
dcos[0][2] = ( self.hdr.rhi_trhc_S - self.hdr.rhi_tlhc_S )
dcosLengthX = np.sqrt( dcos[0][0] * dcos[0][0]
+ dcos[0][1] * dcos[0][1]
+ dcos[0][2] * dcos[0][2] )
dcos[0][0] /= dcosLengthX
dcos[0][1] /= dcosLengthX
dcos[0][2] /= dcosLengthX
dcos[1][0] = -( self.hdr.rhi_brhc_R - self.hdr.rhi_trhc_R )
dcos[1][1] = -( self.hdr.rhi_brhc_A - self.hdr.rhi_trhc_A )
dcos[1][2] = ( self.hdr.rhi_brhc_S - self.hdr.rhi_trhc_S )
dcosLengthY = np.sqrt( dcos[1][0] * dcos[1][0]
+ dcos[1][1] * dcos[1][1]
+ dcos[1][2] * dcos[1][2] )
dcos[1][0] /= dcosLengthY
dcos[1][1] /= dcosLengthY
dcos[1][2] /= dcosLengthY
# third row is the vector product of the first two rows
# actually, -1* vector product, at least for the axial and axial oblique
# which is all that we support now
dcos[2][0] = - dcos[0][1] * dcos[1][2] + dcos[0][2] * dcos[1][1]
dcos[2][1] = - dcos[0][2] * dcos[1][0] + dcos[0][0] * dcos[1][2]
dcos[2][2] = - dcos[0][0] * dcos[1][1] + dcos[0][1] * dcos[1][0]
return dcos
@property
def is_swap_on(self):
""" Is frequency direction swapped? """
if self.hdr.rhi_freq_dir != 1:
return True
else:
return False
@property
def is_2d(self):
""" Is this a 2D or 3D data set (spatial dimensions)? """
is2D = False
ndims = self.hdr.rhr_csi_dims
if ndims == | |
"""A basic extended attributes (xattr) implementation for Linux, FreeBSD and MacOS X."""
import errno
import os
import re
import subprocess
import sys
import tempfile
from ctypes import CDLL, create_string_buffer, c_ssize_t, c_size_t, c_char_p, c_int, c_uint32, get_errno
from ctypes.util import find_library
from distutils.version import LooseVersion
from .helpers import Buffer
try:
ENOATTR = errno.ENOATTR
except AttributeError:
# on some platforms, ENOATTR is missing, use ENODATA there
ENOATTR = errno.ENODATA
buffer = Buffer(create_string_buffer, limit=2**24)
def is_enabled(path=None):
"""Determine if xattr is enabled on the filesystem
"""
with tempfile.NamedTemporaryFile(dir=path, prefix='borg-tmp') as fd:
try:
setxattr(fd.fileno(), 'user.name', b'value')
except OSError:
return False
return getxattr(fd.fileno(), 'user.name') == b'value'
def get_all(path, follow_symlinks=True):
try:
result = {}
names = listxattr(path, follow_symlinks=follow_symlinks)
for name in names:
try:
result[name] = getxattr(path, name, follow_symlinks=follow_symlinks)
except OSError as e:
# if we get ENOATTR, a race has happened: xattr names were deleted after list.
# we just ignore the now missing ones. if you want consistency, do snapshots.
if e.errno != ENOATTR:
raise
return result
except OSError as e:
if e.errno in (errno.ENOTSUP, errno.EPERM):
return {}
libc_name = find_library('c')
if libc_name is None:
# find_library didn't work, maybe we are on some minimal system that misses essential
# tools used by find_library, like ldconfig, gcc/cc, objdump.
# so we can only try some "usual" names for the C library:
if sys.platform.startswith('linux'):
libc_name = 'libc.so.6'
elif sys.platform.startswith(('freebsd', 'netbsd')):
libc_name = 'libc.so'
elif sys.platform == 'darwin':
libc_name = 'libc.dylib'
else:
msg = "Can't find C library. No fallback known. Try installing ldconfig, gcc/cc or objdump."
print(msg, file=sys.stderr) # logger isn't initialized at this stage
raise Exception(msg)
# If we are running with fakeroot on Linux, then use the xattr functions of fakeroot. This is needed by
# the 'test_extract_capabilities' test, but also allows xattrs to work with fakeroot on Linux in normal use.
# TODO: Check whether fakeroot supports xattrs on all platforms supported below.
# TODO: If that's the case then we can make Borg fakeroot-xattr-compatible on these as well.
XATTR_FAKEROOT = False
if sys.platform.startswith('linux'):
LD_PRELOAD = os.environ.get('LD_PRELOAD', '')
preloads = re.split("[ :]", LD_PRELOAD)
for preload in preloads:
if preload.startswith("libfakeroot"):
fakeroot_version = LooseVersion(subprocess.check_output(['fakeroot', '-v']).decode('ascii').split()[-1])
if fakeroot_version >= LooseVersion("1.20.2"):
# 1.20.2 has been confirmed to have xattr support
# 1.18.2 has been confirmed not to have xattr support
# Versions in-between are unknown
libc_name = preload
XATTR_FAKEROOT = True
break
try:
libc = CDLL(libc_name, use_errno=True)
except OSError as e:
msg = "Can't find C library [%s]. Try installing ldconfig, gcc/cc or objdump." % e
raise Exception(msg)
def split_string0(buf):
"""split a list of zero-terminated strings into python not-zero-terminated bytes"""
return buf.split(b'\0')[:-1]
def split_lstring(buf):
"""split a list of length-prefixed strings into python not-length-prefixed bytes"""
result = []
mv = memoryview(buf)
while mv:
length = mv[0]
result.append(bytes(mv[1:1 + length]))
mv = mv[1 + length:]
return result
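# Illustration (added): what the two splitters above return. Linux's listxattr
# reports names as zero-terminated strings, FreeBSD's extattr_list_* reports them
# length-prefixed, so:
#   split_string0(b'user.foo\x00user.bar\x00')  -> [b'user.foo', b'user.bar']
#   split_lstring(b'\x08user.foo\x08user.bar')  -> [b'user.foo', b'user.bar']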
class BufferTooSmallError(Exception):
"""the buffer given to an xattr function was too small for the result."""
def _check(rv, path=None, detect_buffer_too_small=False):
if rv < 0:
e = get_errno()
if detect_buffer_too_small and e == errno.ERANGE:
# listxattr and getxattr signal with ERANGE that they need a bigger result buffer.
            # setxattr signals this way that e.g. an xattr key name is too long / unacceptable.
raise BufferTooSmallError
else:
try:
msg = os.strerror(e)
except ValueError:
msg = ''
if isinstance(path, int):
path = '<FD %d>' % path
raise OSError(e, msg, path)
if detect_buffer_too_small and rv >= len(buffer):
# freebsd does not error with ERANGE if the buffer is too small,
# it just fills the buffer, truncates and returns.
        # so, we play it safe and just assume that the result is truncated if
# it happens to be a full buffer.
raise BufferTooSmallError
return rv
def _listxattr_inner(func, path):
if isinstance(path, str):
path = os.fsencode(path)
size = len(buffer)
while True:
buf = buffer.get(size)
try:
n = _check(func(path, buf, size), path, detect_buffer_too_small=True)
except BufferTooSmallError:
size *= 2
else:
return n, buf.raw
def _getxattr_inner(func, path, name):
if isinstance(path, str):
path = os.fsencode(path)
name = os.fsencode(name)
size = len(buffer)
while True:
buf = buffer.get(size)
try:
n = _check(func(path, name, buf, size), path, detect_buffer_too_small=True)
except BufferTooSmallError:
size *= 2
else:
return n, buf.raw
def _setxattr_inner(func, path, name, value):
if isinstance(path, str):
path = os.fsencode(path)
name = os.fsencode(name)
value = value and os.fsencode(value)
size = len(value) if value else 0
_check(func(path, name, value, size), path, detect_buffer_too_small=False)
if sys.platform.startswith('linux'): # pragma: linux only
libc.llistxattr.argtypes = (c_char_p, c_char_p, c_size_t)
libc.llistxattr.restype = c_ssize_t
libc.flistxattr.argtypes = (c_int, c_char_p, c_size_t)
libc.flistxattr.restype = c_ssize_t
libc.lsetxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_int)
libc.lsetxattr.restype = c_int
libc.fsetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_int)
libc.fsetxattr.restype = c_int
libc.lgetxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t)
libc.lgetxattr.restype = c_ssize_t
libc.fgetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t)
libc.fgetxattr.restype = c_ssize_t
def listxattr(path, *, follow_symlinks=True):
def func(path, buf, size):
if isinstance(path, int):
return libc.flistxattr(path, buf, size)
else:
if follow_symlinks:
return libc.listxattr(path, buf, size)
else:
return libc.llistxattr(path, buf, size)
n, buf = _listxattr_inner(func, path)
return [os.fsdecode(name) for name in split_string0(buf[:n])
if name and not name.startswith(b'system.posix_acl_')]
def getxattr(path, name, *, follow_symlinks=True):
def func(path, name, buf, size):
if isinstance(path, int):
return libc.fgetxattr(path, name, buf, size)
else:
if follow_symlinks:
return libc.getxattr(path, name, buf, size)
else:
return libc.lgetxattr(path, name, buf, size)
n, buf = _getxattr_inner(func, path, name)
return buf[:n] or None
def setxattr(path, name, value, *, follow_symlinks=True):
def func(path, name, value, size):
flags = 0
if isinstance(path, int):
return libc.fsetxattr(path, name, value, size, flags)
else:
if follow_symlinks:
return libc.setxattr(path, name, value, size, flags)
else:
return libc.lsetxattr(path, name, value, size, flags)
_setxattr_inner(func, path, name, value)
elif sys.platform == 'darwin': # pragma: darwin only
libc.listxattr.argtypes = (c_char_p, c_char_p, c_size_t, c_int)
libc.listxattr.restype = c_ssize_t
libc.flistxattr.argtypes = (c_int, c_char_p, c_size_t)
libc.flistxattr.restype = c_ssize_t
libc.setxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.setxattr.restype = c_int
libc.fsetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.fsetxattr.restype = c_int
libc.getxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.getxattr.restype = c_ssize_t
libc.fgetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.fgetxattr.restype = c_ssize_t
XATTR_NOFLAGS = 0x0000
XATTR_NOFOLLOW = 0x0001
def listxattr(path, *, follow_symlinks=True):
def func(path, buf, size):
if isinstance(path, int):
return libc.flistxattr(path, buf, size, XATTR_NOFLAGS)
else:
if follow_symlinks:
return libc.listxattr(path, buf, size, XATTR_NOFLAGS)
else:
return libc.listxattr(path, buf, size, XATTR_NOFOLLOW)
n, buf = _listxattr_inner(func, path)
return [os.fsdecode(name) for name in split_string0(buf[:n]) if name]
def getxattr(path, name, *, follow_symlinks=True):
def func(path, name, buf, size):
if isinstance(path, int):
return libc.fgetxattr(path, name, buf, size, 0, XATTR_NOFLAGS)
else:
if follow_symlinks:
return libc.getxattr(path, name, buf, size, 0, XATTR_NOFLAGS)
else:
return libc.getxattr(path, name, buf, size, 0, XATTR_NOFOLLOW)
n, buf = _getxattr_inner(func, path, name)
return buf[:n] or None
def setxattr(path, name, value, *, follow_symlinks=True):
def func(path, name, value, size):
if isinstance(path, int):
return libc.fsetxattr(path, name, value, size, 0, XATTR_NOFLAGS)
else:
if follow_symlinks:
return libc.setxattr(path, name, value, size, 0, XATTR_NOFLAGS)
else:
return libc.setxattr(path, name, value, size, 0, XATTR_NOFOLLOW)
_setxattr_inner(func, path, name, value)
elif sys.platform.startswith('freebsd'): # pragma: freebsd only
libc.extattr_list_fd.argtypes = (c_int, c_int, c_char_p, c_size_t)
libc.extattr_list_fd.restype = c_ssize_t
libc.extattr_list_link.argtypes = (c_char_p, c_int, c_char_p, c_size_t)
libc.extattr_list_link.restype = c_ssize_t
libc.extattr_list_file.argtypes = (c_char_p, c_int, c_char_p, c_size_t)
libc.extattr_list_file.restype = c_ssize_t
libc.extattr_get_fd.argtypes = (c_int, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_get_fd.restype = c_ssize_t
libc.extattr_get_link.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_get_link.restype = c_ssize_t
libc.extattr_get_file.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_get_file.restype = c_ssize_t
libc.extattr_set_fd.argtypes = (c_int, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_set_fd.restype = c_int
libc.extattr_set_link.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_set_link.restype = c_int
libc.extattr_set_file.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_set_file.restype = c_int
ns = EXTATTR_NAMESPACE_USER = 0x0001
def listxattr(path, *, follow_symlinks=True):
def func(path, buf, size):
if isinstance(path, int):
return libc.extattr_list_fd(path, ns, buf, size)
else:
if follow_symlinks:
return libc.extattr_list_file(path, ns, buf, size)
else:
return libc.extattr_list_link(path, ns, buf, size)
n, buf = _listxattr_inner(func, path)
return [os.fsdecode(name) for name in split_lstring(buf[:n]) if name]
def getxattr(path, name, *, follow_symlinks=True):
def func(path, name, buf, size):
if isinstance(path, int):
return libc.extattr_get_fd(path, ns, name, buf, size)
else:
if follow_symlinks:
return libc.extattr_get_file(path, ns, name, buf, size)
else:
return libc.extattr_get_link(path, ns, name, buf, size)
n, buf = _getxattr_inner(func, path, name)
return buf[:n] or None
def setxattr(path, name, value, *, follow_symlinks=True):
def func(path, name, value, size):
if isinstance(path, int):
return libc.extattr_set_fd(path, ns, name, value, size)
else:
if follow_symlinks:
return libc.extattr_set_file(path, ns, name, value, size)
else:
                    return libc.extattr_set_link(path, ns, name, value, size)
        _setxattr_inner(func, path, name, value)
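# Illustrative usage sketch (added, not part of the original module), showing how
# the platform-independent helpers above combine; the path is a hypothetical
# example and the file is assumed to exist on an xattr-capable filesystem.
def _example_xattr_roundtrip(path='/tmp/example-file'):
    if not is_enabled(os.path.dirname(path) or '.'):
        return None
    setxattr(path, 'user.comment', b'hello')
    return get_all(path)  # e.g. {'user.comment': b'hello'} on Linux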
numbers.
portstr: does exactly that.
Example:
dfm r:logs service=port:port,proto
        The above will create/overwrite column 'service' with a port string
constructed from the port number and protocol number:
service,port,proto
80/tcp,"80","6"
53/udp,53,17
'''
# sanity check lhs, rhs
errors = []
if len(lhs) != 1:
errors.append('need exactly 1 lhs field')
        if len(rhs) != 2:
errors.append('need exactly 2 rhs fields')
dst = lhs[0]
self.check_fields(errors, rhs)
self.fatal(errors, lhs, rhs)
fport, fproto = rhs
def safe_port(row):
try:
port, proto = row[fport], row[fproto]
return Ival.port_proto(port, proto).port()
except ValueError:
return np.nan
try:
self.dfm[dst] = self.dfm.apply(safe_port, axis=1)
except Exception as e:
self.fatal(['runtime error: {!r}'.format(e)], lhs, rhs)
return self
def cmd_servicename(self, lhs, rhs):
'''
syntax: fx=service:fportstr
info: fx := iana service name via portstring
descr:
        Looks up the portstring (eg 80/tcp) or port, protocol nrs (eg 80, 6)
in a table and returns the iana assigned name and/or description.
Example:
dfm r:logs application,descr=portname:service
dfm r:logs ,descr=portname:service
dfm r:logs application,descr=portname:port,proto
dfm r:logs ,descr=portname:port,proto
The first command assigns the iana service name and its description
to (possibly) new fields application,descr using the df-column
service which should contain portstrings like '80/tcp'. The second
command only assigns the description.
The 3rd and 4th commands do the same, but using port nr and protocol
nr columns instead (where port, proto would refer to eg 80, 6).
'''
# sanity check lhs, rhs
errors = []
if len(lhs) > 2:
errors.append('need 1 or 2 lhs fields')
if len(rhs) != 1:
errors.append('need 1 rhs field')
self.check_fields(errors, rhs)
self.fatal(errors, lhs, rhs)
log.info('loading services ...')
ipp = Ip4Service()
log.info('... done!')
fdst, fport = lhs[0], rhs[0]
def get_name(row):
try:
portstr = row[fport]
if portstr is not np.nan and len(portstr):
name = ipp.getservbyport(portstr)
if name is None or len(name) == 0:
return portstr
return name
return 'unknown'
except ValueError:
return 'err'
try:
self.dfm[fdst] = self.dfm.apply(get_name, axis=1)
except Exception as e:
self.fatal(['runtime error: {!r}'.format(e)], lhs, rhs)
return self
def cmd_join(self, lhs, rhs):
'''
syntax: fx=join:sep,fy,fz,..
info: join 2+ fields using sep
descr:
Create new column fx (or overwrite existing one) by joining the
string values of columns fy,fz,.. using the string <sep>.
Only 1 lhs-field is allowed and a minimum of 3 rhs-fields are
        required. All rhs-fields, except the <sep>-string, must be existing
fields in the dataframe.
Example:
| a num
| a 1
| a 2
Using b=join:\\:a,num will get you
| a num b
| a 1 a:1
| a 2 a:2
Usually you'll need to double the escape '\'-char on the command
line. (note: ':~=' are special characters for the command parser).
'''
# sanity check lhs, rhs
errors = []
if len(rhs) < 3:
errors.append('need 3+ fields in rhs: sep,f1,f2,...')
if len(lhs) != 1:
errors.append('need exactly 1 lhs field')
dst = lhs[0]
sep, srcs = rhs[0], rhs[1:]
self.check_fields(errors, srcs)
self.fatal(errors, lhs, rhs)
try:
self.dfm[dst] = self.dfm[srcs].apply(lambda x: sep.join(str(f) for f in x), axis=1)
except Exception as e:
self.fatal(['runtime error: {!r}'.format(e)], lhs, rhs)
return self
def cmd_add(self, lhs, rhs):
'''
syntax: fx=add:fy,fz,..
info: add fields after mapping them to numbers
descr:
        Create new column fx (or overwrite existing one) by adding the
        values of columns fy,fz,.. after converting them to int(s).
        A field value that fails to convert is skipped (it contributes 0).
        Only 1 lhs-field is allowed and a minimum of 2 rhs-fields are
        required. All rhs-fields must be existing fields in the dataframe.
'''
# sanity check lhs, rhs
errors = []
if len(rhs) < 2:
            errors.append('need 2+ fields in rhs: f1,f2,...')
if len(lhs) != 1:
errors.append('need exactly 1 lhs field')
dst = lhs[0]
srcs = rhs
self.check_fields(errors, srcs)
self.fatal(errors, lhs, rhs)
# helper to safely convert & sum fields
def intsum(fields):
val = 0
log.debug('adding %s', fields)
for field in fields:
try:
val += int(float(field))
log.debug('val is %s', val)
except ValueError:
pass
return val
try:
self.dfm[dst] = self.dfm[srcs].apply(
lambda x: intsum(f for f in x), axis=1)
except Exception as e:
self.fatal(['runtime error: {!r}'.format(e)], lhs, rhs)
return self
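    # Illustration (added): intsum above converts each field with int(float(x)) and
    # silently skips values that do not convert, e.g.
    #   intsum(['1', '2.5', 'x']) -> 3   (1 + 2; 'x' raises ValueError and is skipped)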
def cmd_map(self, lhs, rhs):
'''
syntax: fx,..=map:fy
info: create (fy,fx)-map and apply to existing fx,..
descr:
'map:' is a sort of forced forward/backward fill, using column fy to
create a dictionary of fy->fx valid-values-mapping (retaining first mapping
        found) and then apply that to column fx. The process is repeated for
any additional lhs-fields, which must all exist.
Only fx-nan-values are replaced by a known fx-valid-value given the
value of fy in that row.
Example:
| hostname ip count
| nan 172.16.17.32 10
| www.ietf.com 172.16.17.32 12
Using hostname=map:ip, will get you:
| hostname ip count
| www.ietf.com 172.16.17.32 10
| www.ietf.com 172.16.17.32 12
Mostly useful when the dataset is derived from events with common
fields, but where not all events have all the fields all the time.
'''
# sanity check lhs, rhs
errors = []
if len(lhs) < 1:
errors.append('need at least 1 lhs field to assign to')
if len(rhs) != 1:
errors.append('need exactly 1 rhs field as map source')
self.check_fields(errors, lhs + rhs)
self.fatal(errors, lhs, rhs)
src = rhs[0]
dst = [c for c in lhs if c != src] # avoid source control column
fix = self.dfm.set_index(src) # src value mappings to other columns
log.info('- control column {}'.format(src))
for col in dst:
dct = fix[[col]].dropna().to_dict()[col] # null's should be NaNs!
log.info('- mapping to {!r} with {} unique maps'.format(col,
len(dct)))
self.dfm[col] = self.dfm[src].map(dct)
return self
def cmd_regex(self, lhs, rhs):
'''
syntax: [fx=]fy~/abc/[ABC/][i]
info: create/modify fx or filter by fy
descr:
'regex:' can be used to either:
- filter rows by matching fy to a regular expression
- perform a substitution on fy via a regular expression, or
- assign the value of the substitution to a new/existing column.
Example:
status~/up/down/ - flip 'up' to 'down' in status column
host=name~/-[^-]+$// - bare hostname with last part stripped
status~/up/i - keep rows where status contains 'up' (case-insensitive)
The following flags are picked up on:
/i = re.I - case insensitive
/a = re.A - ascii-only matching instead of full unicode for \w, \W ..
/s = re.S - make '.' match newline as well
/m = re.M - make '^','$' also match at beginning/end of each line
/r = reverse meaning in case of matching/filtering
'''
        # Regexps work on strings, not numbers. At the moment, str(x) is used to
        # ensure a column field value is a string, which is not needed when it is
        # already string-like. A possible speed-up is to first check whether the
        # field being matched/searched is already string-like, saving the str(x)
        # call on every value in a column.
# sanity check lhs, rhs
errors = []
if len(lhs) < 1:
errors.append('need at least 1 field to work with')
if len(rhs) < 1:
errors.append('missing field or regexp')
self.check_fields(errors, rhs[:-1])
self.fatal(errors, lhs, rhs)
expression = rhs.pop()
parts = re.split('((?<!\\\)/)', expression) # keep delim / in parts
delim = parts[1::2] # either 2 or 3 /'s are valid!
terms = list(parts[::2])
rgx_inverse = False
flags = 0
for f in terms[-1]:
f = f.lower()
if f == 'i':
flags |= re.I
elif f == 'a':
flags |= re.A
elif f == 's':
flags |= re.S
elif f == 'm':
flags |= re.M
elif f == 'r':
rgx_inverse = True
else:
errors.append('regexp, unknown flag in {!r}'.format(f))
if len(errors):
self.fatal(errors, lhs, rhs)
try:
#pdh:new replace any escaped forward slash
rgx = re.compile(terms[1].replace('\/', '/'), flags)
except Exception as e:
            errors.append('Failed to compile expression {!r}'.format(expression))
errors.append(' - error: {}'.format(repr(e)))
self.fatal(errors, lhs, rhs)
log.info('- {!r}'.format(rgx))
if len(delim) == 2:
if len(rhs) == 0:
# f1[,f2,..]~/expr/ -> rows where expr matches 1 of f1[f2,..]
self.check_fields(errors, lhs) # ensure lhs-fields exist
self.fatal(errors, lhs, rhs)
log.info("- filter rows by re.search on '{}'".format(lhs))
n1 = len(self.dfm.index)
if rgx_inverse:
match = lambda r: any(not rgx.search(str(f)) for f in r)
else:
match = lambda r: any(rgx.search(str(f)) for f in r)
self.dfm = self.dfm[self.dfm[lhs].apply(match, axis=1)]
n2 = len(self.dfm.index)
                # Assumed completion (the source is truncated here): log the pattern
                # and how many rows the filter kept.
                fmt = 'filtering {!r}: {} -> {} rows'
                log.info(fmt.format(rgx.pattern, n1, n2))
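# Illustrative sketch (added, standalone): the row-filter core of cmd_regex above,
# i.e. keep rows where the compiled pattern matches any of the selected columns.
# Column names and the pattern in the usage note are hypothetical; pandas is
# assumed to be available, as it is for the surrounding DataFrame code.
import re
import pandas as pd

def _example_regex_filter(df, columns, pattern, flags=0, inverse=False):
    rgx = re.compile(pattern, flags)
    if inverse:
        match = lambda row: any(not rgx.search(str(f)) for f in row)
    else:
        match = lambda row: any(rgx.search(str(f)) for f in row)
    return df[df[columns].apply(match, axis=1)]

# e.g. _example_regex_filter(pd.DataFrame({'status': ['up', 'down']}), ['status'], 'up')
# keeps only the row whose status contains 'up'.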
# Na2CuCl4/latex2sympy: gen/PSLexer.py
# Generated from PS.g4 by ANTLR 4.7.2
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2")
buf.write(u"\u0087\u06c3\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6")
buf.write(u"\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t")
buf.write(u"\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4")
buf.write(u"\22\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27")
buf.write(u"\t\27\4\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t")
buf.write(u"\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"")
buf.write(u"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4")
buf.write(u"+\t+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62")
buf.write(u"\t\62\4\63\t\63\4\64\t\64\4\65\t\65\4\66\t\66\4\67\t")
buf.write(u"\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4")
buf.write(u"@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH")
buf.write(u"\4I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\t")
buf.write(u"Q\4R\tR\4S\tS\4T\tT\4U\tU\4V\tV\4W\tW\4X\tX\4Y\tY\4Z")
buf.write(u"\tZ\4[\t[\4\\\t\\\4]\t]\4^\t^\4_\t_\4`\t`\4a\ta\4b\t")
buf.write(u"b\4c\tc\4d\td\4e\te\4f\tf\4g\tg\4h\th\4i\ti\4j\tj\4k")
buf.write(u"\tk\4l\tl\4m\tm\4n\tn\4o\to\4p\tp\4q\tq\4r\tr\4s\ts\4")
buf.write(u"t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4y\ty\4z\tz\4{\t{\4|\t|")
buf.write(u"\4}\t}\4~\t~\4\177\t\177\4\u0080\t\u0080\4\u0081\t\u0081")
buf.write(u"\4\u0082\t\u0082\4\u0083\t\u0083\4\u0084\t\u0084\4\u0085")
buf.write(u"\t\u0085\4\u0086\t\u0086\4\u0087\t\u0087\4\u0088\t\u0088")
buf.write(u"\4\u0089\t\u0089\4\u008a\t\u008a\4\u008b\t\u008b\4\u008c")
buf.write(u"\t\u008c\4\u008d\t\u008d\4\u008e\t\u008e\4\u008f\t\u008f")
buf.write(u"\4\u0090\t\u0090\4\u0091\t\u0091\3\2\3\2\3\2\3\3\3\3")
buf.write(u"\3\4\6\4\u012a\n\4\r\4\16\4\u012b\3\4\3\4\3\5\3\5\3\5")
buf.write(u"\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13")
buf.write(u"\3\13\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r")
buf.write(u"\3\r\3\r\3\r\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\20\3")
buf.write(u"\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22")
buf.write(u"\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3")
buf.write(u"\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27")
buf.write(u"\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\31\3")
buf.write(u"\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32")
buf.write(u"\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3")
buf.write(u"\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35")
buf.write(u"\3\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3")
buf.write(u"\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write(u"\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!")
buf.write(u"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#")
buf.write(u"\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3")
buf.write(u"%\3%\3&\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3\'")
buf.write(u"\3\'\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)")
buf.write(u"\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3")
buf.write(u")\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)")
buf.write(u"\3)\3)\3)\3)\3)\3)\3)\3)\3)\5)\u0233\n)\3*\3*\3*\3*\3")
buf.write(u"*\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3.")
buf.write(u"\3.\3.\3.\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\60\3\61")
buf.write(u"\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\63\3")
buf.write(u"\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\65\3\65")
buf.write(u"\3\65\3\65\3\65\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3")
buf.write(u"\66\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\38\38\38")
buf.write(u"\38\38\38\38\38\39\39\39\39\39\39\39\39\3:\3:\3:\3:\3")
buf.write(u":\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3<\3<\3<\3<")
buf.write(u"\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3?\3?\3?\3?\3?\3")
buf.write(u"?\3?\3?\3@\3@\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3A")
buf.write(u"\3A\3B\3B\3B\3B\3B\3B\3B\3B\3B\3C\3C\3C\3C\3C\3C\3C\3")
buf.write(u"C\3C\3D\3D\3D\3D\3D\3D\3D\3D\3D\3E\3E\3E\3E\3E\3E\3E")
buf.write(u"\3F\3F\3F\3F\3F\3F\3F\3F\3G\3G\3G\3G\3G\3G\3G\3H\3H\3")
buf.write(u"H\3H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3J")
buf.write(u"\3J\3J\3J\3K\3K\3K\3K\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3")
buf.write(u"N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3P\3Q\3Q")
buf.write(u"\3Q\3Q\3Q\3R\3R\3R\3R\3R\3R\3R\3S\3S\3S\3S\3S\3S\3T\3")
buf.write(u"T\3T\3T\3T\3U\3U\3U\3U\3U\3V\3V\3V\3V\3V\3V\3V\3W\3W")
buf.write(u"\3W\3W\3W\3W\3X\3X\3X\3X\3X\3Y\3Y\3Y\3Y\3Y\3Y\3Z\3Z\3")
buf.write(u"Z\3Z\3Z\3Z\3Z\3[\3[\3[\3[\3[\3[\3[\3[\3\\\3\\\3\\\3\\")
buf.write(u"\3\\\3]\3]\3]\3]\3]\3]\3]\3]\3^\3^\3^\3^\3^\3^\3^\3^")
buf.write(u"\3^\3^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3_\3`\3`\3`\3`\3")
buf.write(u"`\3`\3`\3`\3a\3a\3a\3a\3a\3a\3a\3a\3b\3b\3b\3b\3b\3b")
buf.write(u"\3b\3b\3c\3c\3c\5c\u03b1\nc\3d\3d\3d\3d\3d\3d\3d\3d\3")
buf.write(u"d\3d\3d\3e\3e\3e\3e\3e\3e\3e\3e\3e\3f\3f\3f\3f\3f\3f")
buf.write(u"\3f\3f\3f\3f\3f\3g\3g\3g\3g\3g\3g\3g\3g\3g\3h\3h\3i\3")
buf.write(u"i\3i\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j")
buf.write(u"\3j\3j\3j\3j\3j\3j\3j\3j\5j\u03f8\nj\3k\3k\3k\3k\3k\3")
buf.write(u"k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k")
buf.write(u"\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\5k\u041e\nk\3")
buf.write(u"l\3l\3m\3m\3m\3m\3m\3m\3m\3m\3m\3m\3n\3n\3n\3n\3n\3o")
buf.write(u"\3o\3p\3p\3q\3q\3r\3r\3s\3s\3t\3t\3u\3u\3v\3v\7v\u0441")
buf.write(u"\nv\fv\16v\u0444\13v\3v\3v\3v\6v\u0449\nv\rv\16v\u044a")
buf.write(u"\5v\u044d\nv\3w\3w\3w\3w\3w\3w\3w\3w\3w\3w\3w\3w\3w\3")
buf.write(u"w\5w\u045d\nw\3x\3x\3y\3y\3z\3z\3{\3{\3|\6|\u0468\n|")
buf.write(u"\r|\16|\u0469\3|\3|\3|\3|\3|\7|\u0471\n|\f|\16|\u0474")
buf.write(u"\13|\3|\7|\u0477\n|\f|\16|\u047a\13|\3|\3|\3|\3|\3|\7")
buf.write(u"|\u0481\n|\f|\16|\u0484\13|\3|\3|\6|\u0488\n|\r|\16|")
buf.write(u"\u0489\5|\u048c\n|\3}\3}\3}\3}\5}\u0492\n}\3}\6}\u0495")
buf.write(u"\n}\r}\16}\u0496\3~\3~\3\177\3\177\3\177\3\177\3\177")
buf.write(u"\3\177\3\177\3\177\5\177\u04a3\n\177\3\u0080\3\u0080")
buf.write(u"\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081")
buf.write(u"\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081")
buf.write(u"\3\u0081\3\u0081\5\u0081\u04b7\n\u0081\3\u0082\3\u0082")
buf.write(u"\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083")
buf.write(u"\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083")
buf.write(u"\3\u0083\3\u0083\5\u0083\u04cb\n\u0083\3\u0084\3\u0084")
buf.write(u"\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084")
buf.write(u"\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084")
buf.write(u"\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\5\u0084")
buf.write(u"\u04e3\n\u0084\3\u0085\3\u0085\3\u0086\3\u0086\3\u0086")
buf.write(u"\3\u0087\3\u0087\3\u0087\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\5\u0088\u066e\n\u0088\3\u0089")
buf.write(u"\3\u0089\5\u0089\u0672\n\u0089\3\u008a\3\u008a\3\u008a")
buf.write(u"\3\u008a\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b")
buf.write(u"\3\u008b\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c")
buf.write(u"\3\u008c\5\u008c\u0686\n\u008c\3\u008d\3\u008d\3\u008d")
buf.write(u"\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d")
buf.write(u"\3\u008e\3\u008e\3\u008e\5\u008e\u0695\n\u008e\3\u008f")
buf.write(u"\3\u008f\3\u008f\3\u008f\3\u008f\3\u008f\3\u008f\3\u008f")
buf.write(u"\3\u008f\3\u008f\3\u0090\3\u0090\3\u0090\6\u0090\u06a4")
buf.write(u"\n\u0090\r\u0090\16\u0090\u06a5\3\u0090\3\u0090\3\u0090")
buf.write(u"\3\u0090\3\u0090\3\u0090\6\u0090\u06ae\n\u0090\r\u0090")
buf.write(u"\16\u0090\u06af\3\u0090\3\u0090\3\u0090\3\u0090\3\u0090")
buf.write(u"\5\u0090\u06b7\n\u0090\5\u0090\u06b9\n\u0090\5\u0090")
buf.write(u"\u06bb\n\u0090\3\u0091\3\u0091\3\u0091\3\u0091\3\u0091")
buf.write(u"\5\u0091\u06c2\n\u0091\3\u0442\2\u0092\3\3\5\4\7\5\t")
buf.write(u"\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35")
buf.write(u"\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33")
buf.write(u"\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.")
buf.write(u"[/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y>{?}@")
buf.write(u"\177A\u0081B\u0083C\u0085D\u0087E\u0089F\u008bG\u008d")
buf.write(u"H\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009d")
buf.write(u"P\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00ad")
buf.write(u"X\u00afY\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd")
buf.write(u"`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9f\u00cbg\u00cd")
buf.write(u"h\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9n\u00dbo\u00dd")
buf.write(u"p\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9\2\u00ebv\u00ed")
buf.write(u"w\u00efx\u00f1y\u00f3\2\u00f5\2\u00f7z\u00f9{\u00fb|")
buf.write(u"\u00fd}\u00ff~\u0101\177\u0103\u0080\u0105\u0081\u0107")
buf.write(u"\u0082\u0109\u0083\u010b\2\u010d\u0084\u010f\2\u0111")
buf.write(u"\u0085\u0113\2\u0115\2\u0117\2\u0119\2\u011b\u0086\u011d")
buf.write(u"\2\u011f\2\u0121\u0087\3\2\b\5\2\13\f\17\17\"\"\4\2e")
buf.write(u"ett\4\2C\\c|\6\2CFH\\cfh|\3\2\62;\3\2\"\"\2\u071d\2\3")
buf.write(u"\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2")
buf.write(u"\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2")
buf.write(u"\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2")
buf.write(u"\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2")
buf.write(u"%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2")
buf.write(u"\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2")
buf.write(u"\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2")
buf.write(u"\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3")
buf.write(u"\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2")
buf.write(u"S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2")
buf.write(u"\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2")
buf.write(u"\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2")
buf.write(u"\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3")
buf.write(u"\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3")
buf.write(u"\2\2\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2")
buf.write(u"\2\2\u0089\3\2\2\2\2\u008b\3\2\2\2\2\u008d\3\2\2\2\2")
buf.write(u"\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2\2\2\u0095")
buf.write(u"\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b\3\2")
buf.write(u"\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2")
buf.write(u"\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9")
buf.write(u"\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2")
buf.write(u"\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2")
buf.write(u"\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd")
buf.write(u"\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2")
buf.write(u"\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2")
buf.write(u"\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1")
buf.write(u"\3\2\2\2\2\u00d3\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2")
buf.write(u"\2\2\2\u00d9\3\2\2\2\2\u00db\3\2\2\2\2\u00dd\3\2\2\2")
buf.write(u"\2\u00df\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5")
buf.write(u"\3\2\2\2\2\u00e7\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2")
buf.write(u"\2\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f7\3\2\2\2")
buf.write(u"\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff")
buf.write(u"\3\2\2\2\2\u0101\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2")
buf.write(u"\2\2\2\u0107\3\2\2\2\2\u0109\3\2\2\2\2\u010d\3\2\2\2")
buf.write(u"\2\u0111\3\2\2\2\2\u011b\3\2\2\2\2\u0121\3\2\2\2\3\u0123")
buf.write(u"\3\2\2\2\5\u0126\3\2\2\2\7\u0129\3\2\2\2\t\u012f\3\2")
buf.write(u"\2\2\13\u0134\3\2\2\2\r\u0136\3\2\2\2\17\u0138\3\2\2")
buf.write(u"\2\21\u013a\3\2\2\2\23\u013c\3\2\2\2\25\u013e\3\2\2\2")
buf.write(u"\27\u0140\3\2\2\2\31\u0148\3\2\2\2\33\u0150\3\2\2\2\35")
buf.write(u"\u0152\3\2\2\2\37\u0154\3\2\2\2!\u0157\3\2\2\2#\u015a")
buf.write(u"\3\2\2\2%\u0162\3\2\2\2\'\u016a\3\2\2\2)\u016c\3\2\2")
buf.write(u"\2+\u016e\3\2\2\2-\u0176\3\2\2\2/\u017e\3\2\2\2\61\u0180")
buf.write(u"\3\2\2\2\63\u0187\3\2\2\2\65\u018e\3\2\2\2\67\u0194\3")
buf.write(u"\2\2\29\u019c\3\2\2\2;\u01a4\3\2\2\2=\u01ae\3\2\2\2?")
buf.write(u"\u01b8\3\2\2\2A\u01bf\3\2\2\2C\u01c6\3\2\2\2E\u01d0\3")
buf.write(u"\2\2\2G\u01da\3\2\2\2I\u01e0\3\2\2\2K\u01e7\3\2\2\2M")
buf.write(u"\u01ee\3\2\2\2O\u01f6\3\2\2\2Q\u0232\3\2\2\2S\u0234\3")
buf.write(u"\2\2\2U\u0239\3\2\2\2W\u023e\3\2\2\2Y\u0244\3\2\2\2[")
buf.write(u"\u0249\3\2\2\2]\u024d\3\2\2\2_\u0252\3\2\2\2a\u0257\3")
buf.write(u"\2\2\2c\u025c\3\2\2\2e\u0261\3\2\2\2g\u0266\3\2\2\2i")
buf.write(u"\u026b\3\2\2\2k\u0270\3\2\2\2m\u0278\3\2\2\2o\u0280\3")
buf.write(u"\2\2\2q\u0288\3\2\2\2s\u0290\3\2\2\2u\u0298\3\2\2\2w")
buf.write(u"\u02a0\3\2\2\2y\u02a6\3\2\2\2{\u02ac\3\2\2\2}\u02b2\3")
buf.write(u"\2\2\2\177\u02ba\3\2\2\2\u0081\u02c2\3\2\2\2\u0083\u02ca")
buf.write(u"\3\2\2\2\u0085\u02d3\3\2\2\2\u0087\u02dc\3\2\2\2\u0089")
buf.write(u"\u02e5\3\2\2\2\u008b\u02ec\3\2\2\2\u008d\u02f4\3\2\2")
buf.write(u"\2\u008f\u02fb\3\2\2\2\u0091\u0303\3\2\2\2\u0093\u030a")
buf.write(u"\3\2\2\2\u0095\u0312\3\2\2\2\u0097\u0316\3\2\2\2\u0099")
buf.write(u"\u031a\3\2\2\2\u009b\u0320\3\2\2\2\u009d\u0325\3\2\2")
buf.write(u"\2\u009f\u032b\3\2\2\2\u00a1\u0330\3\2\2\2\u00a3\u0335")
buf.write(u"\3\2\2\2\u00a5\u033c\3\2\2\2\u00a7\u0342\3\2\2\2\u00a9")
buf.write(u"\u0347\3\2\2\2\u00ab\u034c\3\2\2\2\u00ad\u0353\3\2\2")
buf.write(u"\2\u00af\u0359\3\2\2\2\u00b1\u035e\3\2\2\2\u00b3\u0364")
buf.write(u"\3\2\2\2\u00b5\u036b\3\2\2\2\u00b7\u0373\3\2\2\2\u00b9")
buf.write(u"\u0378\3\2\2\2\u00bb\u0380\3\2\2\2\u00bd\u038e\3\2\2")
buf.write(u"\2\u00bf\u0395\3\2\2\2\u00c1\u039d\3\2\2\2\u00c3\u03a5")
buf.write(u"\3\2\2\2\u00c5\u03b0\3\2\2\2\u00c7\u03b2\3\2\2\2\u00c9")
buf.write(u"\u03bd\3\2\2\2\u00cb\u03c6\3\2\2\2\u00cd\u03d1\3\2\2")
buf.write(u"\2\u00cf\u03da\3\2\2\2\u00d1\u03dc\3\2\2\2\u00d3\u03f7")
buf.write(u"\3\2\2\2\u00d5\u041d\3\2\2\2\u00d7\u041f\3\2\2\2\u00d9")
buf.write(u"\u0421\3\2\2\2\u00db\u042b\3\2\2\2\u00dd\u0430\3\2\2")
buf.write(u"\2\u00df\u0432\3\2\2\2\u00e1\u0434\3\2\2\2\u00e3\u0436")
buf.write(u"\3\2\2\2\u00e5\u0438\3\2\2\2\u00e7\u043a\3\2\2\2\u00e9")
buf.write(u"\u043c\3\2\2\2\u00eb\u043e\3\2\2\2\u00ed\u045c\3\2\2")
buf.write(u"\2\u00ef\u045e\3\2\2\2\u00f1\u0460\3\2\2\2\u00f3\u0462")
buf.write(u"\3\2\2\2\u00f5\u0464\3\2\2\2\u00f7\u048b\3\2\2\2\u00f9")
buf.write(u"\u048d\3\2\2\2\u00fb\u0498\3\2\2\2\u00fd\u04a2\3\2\2")
buf.write(u"\2\u00ff\u04a4\3\2\2\2\u0101\u04b6\3\2\2\2\u0103\u04b8")
buf.write(u"\3\2\2\2\u0105\u04ca\3\2\2\2\u0107\u04e2\3\2\2\2\u0109")
buf.write(u"\u04e4\3\2\2\2\u010b\u04e6\3\2\2\2\u010d\u04e9\3\2\2")
buf.write(u"\2\u010f\u066d\3\2\2\2\u0111\u066f\3\2\2\2\u0113\u0673")
buf.write(u"\3\2\2\2\u0115\u0677\3\2\2\2\u0117\u0685\3\2\2\2\u0119")
buf.write(u"\u0687\3\2\2\2\u011b\u0694\3\2\2\2\u011d\u0696\3\2\2")
buf.write(u"\2\u011f\u06a3\3\2\2\2\u0121\u06bc\3\2\2\2\u0123\u0124")
buf.write(u"\7`\2\2\u0124\u0125\7V\2\2\u0125\4\3\2\2\2\u0126\u0127")
buf.write(u"\7)\2\2\u0127\6\3\2\2\2\u0128\u012a\t\2\2\2\u0129\u0128")
buf.write(u"\3\2\2\2\u012a\u012b\3\2\2\2\u012b\u0129\3\2\2\2\u012b")
buf.write(u"\u012c\3\2\2\2\u012c\u012d\3\2\2\2\u012d\u012e\b\4\2")
buf.write(u"\2\u012e\b\3\2\2\2\u012f\u0130\7^\2\2\u0130\u0131\7&")
buf.write(u"\2\2\u0131\u0132\3\2\2\2\u0132\u0133\b\5\2\2\u0133\n")
buf.write(u"\3\2\2\2\u0134\u0135\7-\2\2\u0135\f\3\2\2\2\u0136\u0137")
buf.write(u"\7/\2\2\u0137\16\3\2\2\2\u0138\u0139\7,\2\2\u0139\20")
buf.write(u"\3\2\2\2\u013a\u013b\7\61\2\2\u013b\22\3\2\2\2\u013c")
buf.write(u"\u013d\7*\2\2\u013d\24\3\2\2\2\u013e\u013f\7+\2\2\u013f")
buf.write(u"\26\3\2\2\2\u0140\u0141\7^\2\2\u0141\u0142\7n\2\2\u0142")
buf.write(u"\u0143\7i\2\2\u0143\u0144\7t\2\2\u0144\u0145\7q\2\2\u0145")
buf.write(u"\u0146\7w\2\2\u0146\u0147\7r\2\2\u0147\30\3\2\2\2\u0148")
buf.write(u"\u0149\7^\2\2\u0149\u014a\7t\2\2\u014a\u014b\7i\2\2\u014b")
buf.write(u"\u014c\7t\2\2\u014c\u014d\7q\2\2\u014d\u014e\7w\2\2\u014e")
buf.write(u"\u014f\7r\2\2\u014f\32\3\2\2\2\u0150\u0151\7}\2\2\u0151")
buf.write(u"\34\3\2\2\2\u0152\u0153\7\177\2\2\u0153\36\3\2\2\2\u0154")
buf.write(u"\u0155\7^\2\2\u0155\u0156\7}\2\2\u0156 \3\2\2\2\u0157")
buf.write(u"\u0158\7^\2\2\u0158\u0159\7\177\2\2\u0159\"\3\2\2\2\u015a")
buf.write(u"\u015b\7^\2\2\u015b\u015c\7n\2\2\u015c\u015d\7d\2\2\u015d")
buf.write(u"\u015e\7t\2\2\u015e\u015f\7c\2\2\u015f\u0160\7e\2\2\u0160")
buf.write(u"\u0161\7g\2\2\u0161$\3\2\2\2\u0162\u0163\7^\2\2\u0163")
buf.write(u"\u0164\7t\2\2\u0164\u0165\7d\2\2\u0165\u0166\7t\2\2\u0166")
buf.write(u"\u0167\7c\2\2\u0167\u0168\7e\2\2\u0168\u0169\7g\2\2\u0169")
buf.write(u"&\3\2\2\2\u016a\u016b\7]\2\2\u016b(\3\2\2\2\u016c\u016d")
buf.write(u"\7_\2\2\u016d*\3\2\2\2\u016e\u016f\7^\2\2\u016f\u0170")
buf.write(u"\7n\2\2\u0170\u0171\7d\2\2\u0171\u0172\7t\2\2\u0172\u0173")
buf.write(u"\7c\2\2\u0173\u0174\7e\2\2\u0174\u0175\7m\2\2\u0175,")
buf.write(u"\3\2\2\2\u0176\u0177\7^\2\2\u0177\u0178\7t\2\2\u0178")
buf.write(u"\u0179\7d\2\2\u0179\u017a\7t\2\2\u017a\u017b\7c\2\2\u017b")
buf.write(u"\u017c\7e\2\2\u017c\u017d\7m\2\2\u017d.\3\2\2\2\u017e")
buf.write(u"\u017f\7~\2\2\u017f\60\3\2\2\2\u0180\u0181\7^\2\2\u0181")
buf.write(u"\u0182\7n\2\2\u0182\u0183\7x\2\2\u0183\u0184\7g\2\2\u0184")
buf.write(u"\u0185\7t\2\2\u0185\u0186\7v\2\2\u0186\62\3\2\2\2\u0187")
buf.write(u"\u0188\7^\2\2\u0188\u0189\7t\2\2\u0189\u018a\7x\2\2\u018a")
buf.write(u"\u018b\7g\2\2\u018b\u018c\7t\2\2\u018c\u018d\7v\2\2\u018d")
buf.write(u"\64\3\2\2\2\u018e\u018f\7^\2\2\u018f\u0190\7x\2\2\u0190")
buf.write(u"\u0191\7g\2\2\u0191\u0192\7t\2\2\u0192\u0193\7v\2\2\u0193")
buf.write(u"\66\3\2\2\2\u0194\u0195\7^\2\2\u0195\u0196\7n\2\2\u0196")
buf.write(u"\u0197\7h\2\2\u0197\u0198\7n\2\2\u0198\u0199\7q\2\2\u0199")
buf.write(u"\u019a\7q\2\2\u019a\u019b\7t\2\2\u019b8\3\2\2\2\u019c")
buf.write(u"\u019d\7^\2\2\u019d\u019e\7t\2\2\u019e\u019f\7h\2\2\u019f")
buf.write(u"\u01a0\7n\2\2\u01a0\u01a1\7q\2\2\u01a1\u01a2\7q\2\2\u01a2")
buf.write(u"\u01a3\7t\2\2\u01a3:\3\2\2\2\u01a4\u01a5\7^\2\2\u01a5")
buf.write(u"\u01a6\7n\2\2\u01a6\u01a7\7n\2\2\u01a7\u01a8\7e\2\2\u01a8")
buf.write(u"\u01a9\7q\2\2\u01a9\u01aa\7t\2\2\u01aa\u01ab\7p\2\2\u01ab")
buf.write(u"\u01ac\7g\2\2\u01ac\u01ad\7t\2\2\u01ad<\3\2\2\2\u01ae")
buf.write(u"\u01af\7^\2\2\u01af\u01b0\7n\2\2\u01b0\u01b1\7t\2\2\u01b1")
buf.write(u"\u01b2\7e\2\2\u01b2\u01b3\7q\2\2\u01b3\u01b4\7t\2\2\u01b4")
buf.write(u"\u01b5\7p\2\2\u01b5\u01b6\7g\2\2\u01b6\u01b7\7t\2\2\u01b7")
buf.write(u">\3\2\2\2\u01b8\u01b9\7^\2\2\u01b9\u01ba\7n\2\2\u01ba")
buf.write(u"\u01bb\7e\2\2\u01bb\u01bc\7g\2\2\u01bc\u01bd\7k\2\2\u01bd")
buf.write(u"\u01be\7n\2\2\u01be@\3\2\2\2\u01bf\u01c0\7^\2\2\u01c0")
buf.write(u"\u01c1\7t\2\2\u01c1\u01c2\7e\2\2\u01c2\u01c3\7g\2\2\u01c3")
buf.write(u"\u01c4\7k\2\2\u01c4\u01c5\7n\2\2\u01c5B\3\2\2\2\u01c6")
buf.write(u"\u01c7\7^\2\2\u01c7\u01c8\7w\2\2\u01c8\u01c9\7n\2\2\u01c9")
buf.write(u"\u01ca\7e\2\2\u01ca\u01cb\7q\2\2\u01cb\u01cc\7t\2\2\u01cc")
buf.write(u"\u01cd\7p\2\2\u01cd\u01ce\7g\2\2\u01ce\u01cf\7t\2\2\u01cf")
buf.write(u"D\3\2\2\2\u01d0\u01d1\7^\2\2\u01d1\u01d2\7w\2\2\u01d2")
buf.write(u"\u01d3\7t\2\2\u01d3\u01d4\7e\2\2\u01d4\u01d5\7q\2\2\u01d5")
buf.write(u"\u01d6\7t\2\2\u01d6\u01d7\7p\2\2\u01d7\u01d8\7g\2\2\u01d8")
buf.write(u"\u01d9\7t\2\2\u01d9F\3\2\2\2\u01da\u01db\7^\2\2\u01db")
buf.write(u"\u01dc\7n\2\2\u01dc\u01dd\7g\2\2\u01dd\u01de\7h\2\2\u01de")
buf.write(u"\u01df\7v\2\2\u01dfH\3\2\2\2\u01e0\u01e1\7^\2\2\u01e1")
buf.write(u"\u01e2\7t\2\2\u01e2\u01e3\7k\2\2\u01e3\u01e4\7i\2\2\u01e4")
buf.write(u"\u01e5\7j\2\2\u01e5\u01e6\7v\2\2\u01e6J\3\2\2\2\u01e7")
buf.write(u"\u01e8\7^\2\2\u01e8\u01e9\7o\2\2\u01e9\u01ea\7n\2\2\u01ea")
buf.write(u"\u01eb\7g\2\2\u01eb\u01ec\7h\2\2\u01ec\u01ed\7v\2\2\u01ed")
buf.write(u"L\3\2\2\2\u01ee\u01ef\7^\2\2\u01ef\u01f0\7o\2\2\u01f0")
buf.write(u"\u01f1\7t\2\2\u01f1\u01f2\7k\2\2\u01f2\u01f3\7i\2\2\u01f3")
buf.write(u"\u01f4\7j\2\2\u01f4\u01f5\7v\2\2\u01f5N\3\2\2\2\u01f6")
buf.write(u"\u01f7\7^\2\2\u01f7\u01f8\7n\2\2\u01f8\u01f9\7k\2\2\u01f9")
buf.write(u"\u01fa\7o\2\2\u01faP\3\2\2\2\u01fb\u01fc\7^\2\2\u01fc")
buf.write(u"\u01fd\7v\2\2\u01fd\u0233\7q\2\2\u01fe\u01ff\7^\2\2\u01ff")
buf.write(u"\u0200\7t\2\2\u0200\u0201\7k\2\2\u0201\u0202\7i\2\2\u0202")
buf.write(u"\u0203\7j\2\2\u0203\u0204\7v\2\2\u0204\u0205\7c\2\2\u0205")
buf.write(u"\u0206\7t\2\2\u0206\u0207\7t\2\2\u0207\u0208\7q\2\2\u0208")
buf.write(u"\u0233\7y\2\2\u0209\u020a\7^\2\2\u020a\u020b\7T\2\2\u020b")
buf.write(u"\u020c\7k\2\2\u020c\u020d\7i\2\2\u020d\u020e\7j\2\2\u020e")
buf.write(u"\u020f\7v\2\2\u020f\u0210\7c\2\2\u0210\u0211\7t\2\2\u0211")
buf.write(u"\u0212\7t\2\2\u0212\u0213\7q\2\2\u0213\u0233\7y\2\2\u0214")
buf.write(u"\u0215\7^\2\2\u0215\u0216\7n\2\2\u0216\u0217\7q\2\2\u0217")
buf.write(u"\u0218\7p\2\2\u0218\u0219\7i\2\2\u0219\u021a\7t\2\2\u021a")
buf.write(u"\u021b\7k\2\2\u021b\u021c\7i\2\2\u021c\u021d\7j\2\2\u021d")
buf.write(u"\u021e\7v\2\2\u021e\u021f\7c\2\2\u021f\u0220\7t\2\2\u0220")
buf.write(u"\u0221\7t\2\2\u0221\u0222\7q\2\2\u0222\u0233\7y\2\2\u0223")
buf.write(u"\u0224\7^\2\2\u0224\u0225\7N\2\2\u0225\u0226\7q\2\2\u0226")
buf.write(u"\u0227\7p\2\2\u0227\u0228\7i\2\2\u0228\u0229\7t\2\2\u0229")
buf.write(u"\u022a\7k\2\2\u022a\u022b\7i\2\2\u022b\u022c\7j\2\2\u022c")
buf.write(u"\u022d\7v\2\2\u022d\u022e\7c\2\2\u022e\u022f\7t\2\2\u022f")
buf.write(u"\u0230\7t\2\2\u0230\u0231\7q\2\2\u0231\u0233\7y\2\2\u0232")
buf.write(u"\u01fb\3\2\2\2\u0232\u01fe\3\2\2\2\u0232\u0209\3\2\2")
buf.write(u"\2\u0232\u0214\3\2\2\2\u0232\u0223\3\2\2\2\u0233R\3\2")
buf.write(u"\2\2\u0234\u0235\7^\2\2\u0235\u0236\7k\2\2\u0236\u0237")
buf.write(u"\7p\2\2\u0237\u0238\7v\2\2\u0238T\3\2\2\2\u0239\u023a")
buf.write(u"\7^\2\2\u023a\u023b\7u\2\2\u023b\u023c\7w\2\2\u023c\u023d")
buf.write(u"\7o\2\2\u023dV\3\2\2\2\u023e\u023f\7^\2\2\u023f\u0240")
buf.write(u"\7r\2\2\u0240\u0241\7t\2\2\u0241\u0242\7q\2\2\u0242\u0243")
buf.write(u"\7f\2\2\u0243X\3\2\2\2\u0244\u0245\7^\2\2\u0245\u0246")
buf.write(u"\7n\2\2\u0246\u0247\7q\2\2\u0247\u0248\7i\2\2\u0248Z")
buf.write(u"\3\2\2\2\u0249\u024a\7^\2\2\u024a\u024b\7n\2\2\u024b")
buf.write(u"\u024c\7p\2\2\u024c\\\3\2\2\2\u024d\u024e\7^\2\2\u024e")
buf.write(u"\u024f\7g\2\2\u024f\u0250\7z\2\2\u0250\u0251\7r\2\2\u0251")
buf.write(u"^\3\2\2\2\u0252\u0253\7^\2\2\u0253\u0254\7u\2\2\u0254")
buf.write(u"\u0255\7k\2\2\u0255\u0256\7p\2\2\u0256`\3\2\2\2\u0257")
buf.write(u"\u0258\7^\2\2\u0258\u0259\7e\2\2\u0259\u025a\7q\2\2\u025a")
buf.write(u"\u025b\7u\2\2\u025bb\3\2\2\2\u025c\u025d\7^\2\2\u025d")
buf.write(u"\u025e\7v\2\2\u025e\u025f\7c\2\2\u025f\u0260\7p\2\2\u0260")
buf.write(u"d\3\2\2\2\u0261\u0262\7^\2\2\u0262\u0263\7e\2\2\u0263")
buf.write(u"\u0264\7u\2\2\u0264\u0265\7e\2\2\u0265f\3\2\2\2\u0266")
buf.write(u"\u0267\7^\2\2\u0267\u0268\7u\2\2\u0268\u0269\7g\2\2\u0269")
buf.write(u"\u026a\7e\2\2\u026ah\3\2\2\2\u026b\u026c\7^\2\2\u026c")
buf.write(u"\u026d\7e\2\2\u026d\u026e\7q\2\2\u026e\u026f\7v\2\2\u026f")
buf.write(u"j\3\2\2\2\u0270\u0271\7^\2\2\u0271\u0272\7c\2\2\u0272")
buf.write(u"\u0273\7t\2\2\u0273\u0274\7e\2\2\u0274\u0275\7u\2\2\u0275")
buf.write(u"\u0276\7k\2\2\u0276\u0277\7p\2\2\u0277l\3\2\2\2\u0278")
buf.write(u"\u0279\7^\2\2\u0279\u027a\7c\2\2\u027a\u027b\7t\2\2\u027b")
buf.write(u"\u027c\7e\2\2\u027c\u027d\7e\2\2\u027d\u027e\7q\2\2\u027e")
buf.write(u"\u027f\7u\2\2\u027fn\3\2\2\2\u0280\u0281\7^\2\2\u0281")
buf.write(u"\u0282\7c\2\2\u0282\u0283\7t\2\2\u0283\u0284\7e\2\2\u0284")
buf.write(u"\u0285\7v\2\2\u0285\u0286\7c\2\2\u0286\u0287\7p\2\2\u0287")
buf.write(u"p\3\2\2\2\u0288\u0289\7^\2\2\u0289\u028a\7c\2\2\u028a")
buf.write(u"\u028b\7t\2\2\u028b\u028c\7e\2\2\u028c\u028d\7e\2\2\u028d")
buf.write(u"\u028e\7u\2\2\u028e\u028f\7e\2\2\u028fr\3\2\2\2\u0290")
buf.write(u"\u0291\7^\2\2\u0291\u0292\7c\2\2\u0292\u0293\7t\2\2\u0293")
buf.write(u"\u0294\7e\2\2\u0294\u0295\7u\2\2\u0295\u0296\7g\2\2\u0296")
buf.write(u"\u0297\7e\2\2\u0297t\3\2\2\2\u0298\u0299\7^\2\2\u0299")
buf.write(u"\u029a\7c\2\2\u029a\u029b\7t\2\2\u029b\u029c\7e\2\2\u029c")
buf.write(u"\u029d\7e\2\2\u029d\u029e\7q\2\2\u029e\u029f\7v\2\2\u029f")
buf.write(u"v\3\2\2\2\u02a0\u02a1\7^\2\2\u02a1\u02a2\7u\2\2\u02a2")
buf.write(u"\u02a3\7k\2\2\u02a3\u02a4\7p\2\2\u02a4\u02a5\7j\2\2\u02a5")
buf.write(u"x\3\2\2\2\u02a6\u02a7\7^\2\2\u02a7\u02a8\7e\2\2\u02a8")
buf.write(u"\u02a9\7q\2\2\u02a9\u02aa\7u\2\2\u02aa\u02ab\7j\2\2\u02ab")
buf.write(u"z\3\2\2\2\u02ac\u02ad\7^\2\2\u02ad\u02ae\7v\2\2\u02ae")
buf.write(u"\u02af\7c\2\2\u02af\u02b0\7p\2\2\u02b0\u02b1\7j\2\2\u02b1")
buf.write(u"|\3\2\2\2\u02b2\u02b3\7^\2\2\u02b3\u02b4\7c\2\2\u02b4")
buf.write(u"\u02b5\7t\2\2\u02b5\u02b6\7u\2\2\u02b6\u02b7\7k\2\2\u02b7")
buf.write(u"\u02b8\7p\2\2\u02b8\u02b9\7j\2\2\u02b9~\3\2\2\2\u02ba")
buf.write(u"\u02bb\7^\2\2\u02bb\u02bc\7c\2\2\u02bc\u02bd\7t\2\2\u02bd")
buf.write(u"\u02be\7e\2\2\u02be\u02bf\7q\2\2\u02bf\u02c0\7u\2\2\u02c0")
buf.write(u"\u02c1\7j\2\2\u02c1\u0080\3\2\2\2\u02c2\u02c3\7^\2\2")
buf.write(u"\u02c3\u02c4\7c\2\2\u02c4\u02c5\7t\2\2\u02c5\u02c6\7")
buf.write(u"v\2\2\u02c6\u02c7\7c\2\2\u02c7\u02c8\7p\2\2\u02c8\u02c9")
buf.write(u"\7j\2\2\u02c9\u0082\3\2\2\2\u02ca\u02cb\7^\2\2\u02cb")
buf.write(u"\u02cc\7c\2\2\u02cc\u02cd\7t\2\2\u02cd\u02ce\7e\2\2\u02ce")
buf.write(u"\u02cf\7u\2\2\u02cf\u02d0\7k\2\2\u02d0\u02d1\7p\2\2\u02d1")
buf.write(u"\u02d2\7j\2\2\u02d2\u0084\3\2\2\2\u02d3\u02d4\7^\2\2")
buf.write(u"\u02d4\u02d5\7c\2\2\u02d5\u02d6\7t\2\2\u02d6\u02d7\7")
buf.write(u"e\2\2\u02d7\u02d8\7e\2\2\u02d8\u02d9\7q\2\2\u02d9\u02da")
buf.write(u"\7u\2\2\u02da\u02db\7j\2\2\u02db\u0086\3\2\2\2\u02dc")
buf.write(u"\u02dd\7^\2\2\u02dd\u02de\7c\2\2\u02de\u02df\7t\2\2\u02df")
buf.write(u"\u02e0\7e\2\2\u02e0\u02e1\7v\2\2\u02e1\u02e2\7c\2\2\u02e2")
buf.write(u"\u02e3\7p\2\2\u02e3\u02e4\7j\2\2\u02e4\u0088\3\2\2\2")
buf.write(u"\u02e5\u02e6\7c\2\2\u02e6\u02e7\7t\2\2\u02e7\u02e8\7")
buf.write(u"u\2\2\u02e8\u02e9\7k\2\2\u02e9\u02ea\7p\2\2\u02ea\u02eb")
buf.write(u"\7j\2\2\u02eb\u008a\3\2\2\2\u02ec\u02ed\7c\2\2\u02ed")
buf.write(u"\u02ee\7t\2\2\u02ee\u02ef\7e\2\2\u02ef\u02f0\7u\2\2\u02f0")
buf.write(u"\u02f1\7k\2\2\u02f1\u02f2\7p\2\2\u02f2\u02f3\7j\2\2\u02f3")
buf.write(u"\u008c\3\2\2\2\u02f4\u02f5\7c\2\2\u02f5\u02f6\7t\2\2")
buf.write(u"\u02f6\u02f7\7e\2\2\u02f7\u02f8\7q\2\2\u02f8\u02f9\7")
buf.write(u"u\2\2\u02f9\u02fa\7j\2\2\u02fa\u008e\3\2\2\2\u02fb\u02fc")
buf.write(u"\7c\2\2\u02fc\u02fd\7t\2\2\u02fd\u02fe\7e\2\2\u02fe\u02ff")
buf.write(u"\7e\2\2\u02ff\u0300\7q\2\2\u0300\u0301\7u\2\2\u0301\u0302")
buf.write(u"\7j\2\2\u0302\u0090\3\2\2\2\u0303\u0304\7c\2\2\u0304")
buf.write(u"\u0305\7t\2\2\u0305\u0306\7v\2\2\u0306\u0307\7c\2\2\u0307")
buf.write(u"\u0308\7p\2\2\u0308\u0309\7j\2\2\u0309\u0092\3\2\2\2")
buf.write(u"\u030a\u030b\7c\2\2\u030b\u030c\7t\2\2\u030c\u030d\7")
buf.write(u"e\2\2\u030d\u030e\7v\2\2\u030e\u030f\7c\2\2\u030f\u0310")
buf.write(u"\7p\2\2\u0310\u0311\7j\2\2\u0311\u0094\3\2\2\2\u0312")
buf.write(u"\u0313\7i\2\2\u0313\u0314\7e\2\2\u0314\u0315\7f\2\2\u0315")
buf.write(u"\u0096\3\2\2\2\u0316\u0317\7n\2\2\u0317\u0318\7e\2\2")
buf.write(u"\u0318\u0319\7o\2\2\u0319\u0098\3\2\2\2\u031a\u031b\7")
buf.write(u"h\2\2\u031b\u031c\7n\2\2\u031c\u031d\7q\2\2\u031d\u031e")
buf.write(u"\7q\2\2\u031e\u031f\7t\2\2\u031f\u009a\3\2\2\2\u0320")
buf.write(u"\u0321\7e\2\2\u0321\u0322\7g\2\2\u0322\u0323\7k\2\2\u0323")
buf.write(u"\u0324\7n\2\2\u0324\u009c\3\2\2\2\u0325\u0326\7^\2\2")
buf.write(u"\u0326\u0327\7u\2\2\u0327\u0328\7s\2\2\u0328\u0329\7")
buf.write(u"t\2\2\u0329\u032a\7v\2\2\u032a\u009e\3\2\2\2\u032b\u032c")
buf.write(u"\7^\2\2\u032c\u032d\7i\2\2\u032d\u032e\7e\2\2\u032e\u032f")
buf.write(u"\7f\2\2\u032f\u00a0\3\2\2\2\u0330\u0331\7^\2\2\u0331")
buf.write(u"\u0332\7n\2\2\u0332\u0333\7e\2\2\u0333\u0334\7o\2\2\u0334")
buf.write(u"\u00a2\3\2\2\2\u0335\u0336\7^\2\2\u0336\u0337\7h\2\2")
buf.write(u"\u0337\u0338\7n\2\2\u0338\u0339\7q\2\2\u0339\u033a\7")
buf.write(u"q\2\2\u033a\u033b\7t\2\2\u033b\u00a4\3\2\2\2\u033c\u033d")
buf.write(u"\7^\2\2\u033d\u033e\7e\2\2\u033e\u033f\7g\2\2\u033f\u0340")
buf.write(u"\7k\2\2\u0340\u0341\7n\2\2\u0341\u00a6\3\2\2\2\u0342")
buf.write(u"\u0343\7^\2\2\u0343\u0344\7o\2\2\u0344\u0345\7c\2\2\u0345")
buf.write(u"\u0346\7z\2\2\u0346\u00a8\3\2\2\2\u0347\u0348\7^\2\2")
buf.write(u"\u0348\u0349\7o\2\2\u0349\u034a\7k\2\2\u034a\u034b\7")
buf.write(u"p\2\2\u034b\u00aa\3\2\2\2\u034c\u034d\7^\2\2\u034d\u034e")
buf.write(u"\7v\2\2\u034e\u034f\7k\2\2\u034f\u0350\7o\2\2\u0350\u0351")
buf.write(u"\7g\2\2\u0351\u0352\7u\2\2\u0352\u00ac\3\2\2\2\u0353")
buf.write(u"\u0354\7^\2\2\u0354\u0355\7e\2\2\u0355\u0356\7f\2\2\u0356")
buf.write(u"\u0357\7q\2\2\u0357\u0358\7v\2\2\u0358\u00ae\3\2\2\2")
buf.write(u"\u0359\u035a\7^\2\2\u035a\u035b\7f\2\2\u035b\u035c\7")
buf.write(u"k\2\2\u035c\u035d\7x\2\2\u035d\u00b0\3\2\2\2\u035e\u035f")
buf.write(u"\7^\2\2\u035f\u0360\7h\2\2\u0360\u0361\7t\2\2\u0361\u0362")
buf.write(u"\7c\2\2\u0362\u0363\7e\2\2\u0363\u00b2\3\2\2\2\u0364")
buf.write(u"\u0365\7^\2\2\u0365\u0366\7d\2\2\u0366\u0367\7k\2\2\u0367")
buf.write(u"\u0368\7p\2\2\u0368\u0369\7q\2\2\u0369\u036a\7o\2\2\u036a")
buf.write(u"\u00b4\3\2\2\2\u036b\u036c\7^\2\2\u036c\u036d\7e\2\2")
buf.write(u"\u036d\u036e\7j\2\2\u036e\u036f\7q\2\2\u036f\u0370\7")
buf.write(u"q\2\2\u0370\u0371\7u\2\2\u0371\u0372\7g\2\2\u0372\u00b6")
buf.write(u"\3\2\2\2\u0373\u0374\7^\2\2\u0374\u0375\7o\2\2\u0375")
buf.write(u"\u0376\7q\2\2\u0376\u0377\7f\2\2\u0377\u00b8\3\2\2\2")
buf.write(u"\u0378\u0379\7^\2\2\u0379\u037a\7o\2\2\u037a\u037b\7")
buf.write(u"c\2\2\u037b\u037c\7v\2\2\u037c\u037d\7j\2\2\u037d\u037e")
buf.write(u"\7k\2\2\u037e\u037f\7v\2\2\u037f\u00ba\3\2\2\2\u0380")
buf.write(u"\u0381\7^\2\2\u0381\u0382\7q\2\2\u0382\u0383\7r\2\2\u0383")
buf.write(u"\u0384\7g\2\2\u0384\u0385\7t\2\2\u0385\u0386\7c\2\2\u0386")
buf.write(u"\u0387\7v\2\2\u0387\u0388\7q\2\2\u0388\u0389\7t\2\2\u0389")
buf.write(u"\u038a\7p\2\2\u038a\u038b\7c\2\2\u038b\u038c\7o\2\2\u038c")
buf.write(u"\u038d\7g\2\2\u038d\u00bc\3\2\2\2\u038e\u038f\7o\2\2")
buf.write(u"\u038f\u0390\7c\2\2\u0390\u0391\7v\2\2\u0391\u0392\7")
buf.write(u"t\2\2\u0392\u0393\7k\2\2\u0393\u0394\7z\2\2\u0394\u00be")
buf.write(u"\3\2\2\2\u0395\u0396\7r\2\2\u0396\u0397\7o\2\2\u0397")
buf.write(u"\u0398\7c\2\2\u0398\u0399\7v\2\2\u0399\u039a\7t\2\2\u039a")
buf.write(u"\u039b\7k\2\2\u039b\u039c\7z\2\2\u039c\u00c0\3\2\2\2")
buf.write(u"\u039d\u039e\7d\2\2\u039e\u039f\7o\2\2\u039f\u03a0\7")
buf.write(u"c\2\2\u03a0\u03a1\7v\2\2\u03a1\u03a2\7t\2\2\u03a2\u03a3")
buf.write(u"\7k\2\2\u03a3\u03a4\7z\2\2\u03a4\u00c2\3\2\2\2\u03a5")
buf.write(u"\u03a6\7x\2\2\u03a6\u03a7\7o\2\2\u03a7\u03a8\7c\2\2\u03a8")
buf.write(u"\u03a9\7v\2\2\u03a9\u03aa\7t\2\2\u03aa\u03ab\7k\2\2\u03ab")
buf.write(u"\u03ac\7z\2\2\u03ac\u00c4\3\2\2\2\u03ad\u03b1\5\u00bd")
buf.write(u"_\2\u03ae\u03b1\5\u00bf`\2\u03af\u03b1\5\u00c1a\2\u03b0")
buf.write(u"\u03ad\3\2\2\2\u03b0\u03ae\3\2\2\2\u03b0\u03af\3\2\2")
buf.write(u"\2\u03b1\u00c6\3\2\2\2\u03b2\u03b3\7^\2\2\u03b3\u03b4")
buf.write(u"\7d\2\2\u03b4\u03b5\7g\2\2\u03b5\u03b6\7i\2\2\u03b6\u03b7")
buf.write(u"\7k\2\2\u03b7\u03b8\7p\2\2\u03b8\u03b9\3\2\2\2\u03b9")
buf.write(u"\u03ba\5\33\16\2\u03ba\u03bb\5\u00c5c\2\u03bb\u03bc\5")
buf.write(u"\35\17\2\u03bc\u00c8\3\2\2\2\u03bd\u03be\7^\2\2\u03be")
buf.write(u"\u03bf\7g\2\2\u03bf\u03c0\7p\2\2\u03c0\u03c1\7f\2\2\u03c1")
buf.write(u"\u03c2\3\2\2\2\u03c2\u03c3\5\33\16\2\u03c3\u03c4\5\u00c5")
buf.write(u"c\2\u03c4\u03c5\5\35\17\2\u03c5\u00ca\3\2\2\2\u03c6\u03c7")
buf.write(u"\7^\2\2\u03c7\u03c8\7d\2\2\u03c8\u03c9\7g\2\2\u03c9\u03ca")
buf.write(u"\7i\2\2\u03ca\u03cb\7k\2\2\u03cb\u03cc\7p\2\2\u03cc\u03cd")
buf.write(u"\3\2\2\2\u03cd\u03ce\5\33\16\2\u03ce\u03cf\5\u00c3b\2")
buf.write(u"\u03cf\u03d0\5\35\17\2\u03d0\u00cc\3\2\2\2\u03d1\u03d2")
buf.write(u"\7^\2\2\u03d2\u03d3\7g\2\2\u03d3\u03d4\7p\2\2\u03d4\u03d5")
buf.write(u"\7f\2\2\u03d5\u03d6\3\2\2\2\u03d6\u03d7\5\33\16\2\u03d7")
buf.write(u"\u03d8\5\u00c3b\2\u03d8\u03d9\5\35\17\2\u03d9\u00ce\3")
buf.write(u"\2\2\2\u03da\u03db\7(\2\2\u03db\u00d0\3\2\2\2\u03dc\u03dd")
buf.write(u"\7^\2\2\u03dd\u03de\7^\2\2\u03de\u00d2\3\2\2\2\u03df")
buf.write(u"\u03e0\7^\2\2\u03e0\u03e1\7z\2\2\u03e1\u03e2\7t\2\2\u03e2")
buf.write(u"\u03e3\7k\2\2\u03e3\u03e4\7i\2\2\u03e4\u03e5\7j\2\2\u03e5")
buf.write(u"\u03e6\7v\2\2\u03e6\u03e7\7c\2\2\u03e7\u03e8\7t\2\2\u03e8")
buf.write(u"\u03e9\7t\2\2\u03e9\u03ea\7q\2\2\u03ea\u03f8\7y\2\2\u03eb")
buf.write(u"\u03ec\7^\2\2\u03ec\u03ed\7z\2\2\u03ed\u03ee\7T\2\2\u03ee")
buf.write(u"\u03ef\7k\2\2\u03ef\u03f0\7i\2\2\u03f0\u03f1\7j\2\2\u03f1")
buf.write(u"\u03f2\7v\2\2\u03f2\u03f3\7c\2\2\u03f3\u03f4\7t\2\2\u03f4")
buf.write(u"\u03f5\7t\2\2\u03f5\u03f6\7q\2\2\u03f6\u03f8\7y\2\2\u03f7")
buf.write(u"\u03df\3\2\2\2\u03f7\u03eb\3\2\2\2\u03f8\u00d4\3\2\2")
buf.write(u"\2\u03f9\u03fa\7>\2\2\u03fa\u03fb\7/\2\2\u03fb\u041e")
buf.write(u"\7@\2\2\u03fc\u03fd\7>\2\2\u03fd\u03fe\7?\2\2\u03fe\u041e")
buf.write(u"\7@\2\2\u03ff\u0400\7^\2\2\u0400\u0401\7n\2\2\u0401\u0402")
buf.write(u"\7g\2\2\u0402\u0403\7h\2\2\u0403\u0404\7v\2\2\u0404\u0405")
buf.write(u"\7t\2\2\u0405\u0406\7k\2\2\u0406\u0407\7i\2\2\u0407\u0408")
buf.write(u"\7j\2\2\u0408\u0409\7v\2\2\u0409\u040a\7c\2\2\u040a\u040b")
buf.write(u"\7t\2\2\u040b\u040c\7t\2\2\u040c\u040d\7q\2\2\u040d\u041e")
buf.write(u"\7y\2\2\u040e\u040f\7^\2\2\u040f\u0410\7N\2\2\u0410\u0411")
buf.write(u"\7g\2\2\u0411\u0412\7h\2\2\u0412\u0413\7v\2\2\u0413\u0414")
buf.write(u"\7t\2\2\u0414\u0415\7k\2\2\u0415\u0416\7i\2\2\u0416\u0417")
buf.write(u"\7j\2\2\u0417\u0418\7v\2\2\u0418\u0419\7c\2\2\u0419\u041a")
buf.write(u"\7t\2\2\u041a\u041b\7t\2\2\u041b\u041c\7q\2\2\u041c\u041e")
buf.write(u"\7y\2\2\u041d\u03f9\3\2\2\2\u041d\u03fc\3\2\2\2\u041d")
buf.write(u"\u03ff\3\2\2\2\u041d\u040e\3\2\2\2\u041e\u00d6\3\2\2")
buf.write(u"\2\u041f\u0420\t\3\2\2\u0420\u00d8\3\2\2\2\u0421\u0422")
buf.write(u"\7^\2\2\u0422\u0423\7q\2\2\u0423\u0424\7x\2\2\u0424\u0425")
buf.write(u"\7g\2\2\u0425\u0426\7t\2\2\u0426\u0427\7n\2\2\u0427\u0428")
buf.write(u"\7k\2\2\u0428\u0429\7p\2\2\u0429\u042a\7g\2\2\u042a\u00da")
buf.write(u"\3\2\2\2\u042b\u042c\7^\2\2\u042c\u042d\7d\2\2\u042d")
buf.write(u"\u042e\7c\2\2\u042e\u042f\7t\2\2\u042f\u00dc\3\2\2\2")
buf.write(u"\u0430\u0431\7a\2\2\u0431\u00de\3\2\2\2\u0432\u0433\7")
buf.write(u"`\2\2\u0433\u00e0\3\2\2\2\u0434\u0435\7<\2\2\u0435\u00e2")
buf.write(u"\3\2\2\2\u0436\u0437\7=\2\2\u0437\u00e4\3\2\2\2\u0438")
buf.write(u"\u0439\7.\2\2\u0439\u00e6\3\2\2\2\u043a\u043b\7\60\2")
buf.write(u"\2\u043b\u00e8\3\2\2\2\u043c\u043d\t\2\2\2\u043d\u00ea")
buf.write(u"\3\2\2\2\u043e\u0442\7f\2\2\u043f\u0441\5\u00e9u\2\u0440")
buf.write(u"\u043f\3\2\2\2\u0441\u0444\3\2\2\2\u0442\u0443\3\2\2")
buf.write(u"\2\u0442\u0440\3\2\2\2\u0443\u044c\3\2\2\2\u0444\u0442")
buf.write(u"\3\2\2\2\u0445\u044d\t\4\2\2\u0446\u0448\7^\2\2\u0447")
buf.write(u"\u0449\t\4\2\2\u0448\u0447\3\2\2\2\u0449\u044a\3\2\2")
buf.write(u"\2\u044a\u0448\3\2\2\2\u044a\u044b\3\2\2\2\u044b\u044d")
buf.write(u"\3\2\2\2\u044c\u0445\3\2\2\2\u044c\u0446\3\2\2\2\u044d")
buf.write(u"\u00ec\3\2\2\2\u044e\u045d\7g\2\2\u044f\u0450\7^\2\2")
buf.write(u"\u0450\u0451\7g\2\2\u0451\u0452\7z\2\2\u0452\u0453\7")
buf.write(u"r\2\2\u0453\u0454\7q\2\2\u0454\u0455\7p\2\2\u0455\u0456")
buf.write(u"\7g\2\2\u0456\u0457\7p\2\2\u0457\u0458\7v\2\2\u0458\u0459")
buf.write(u"\7k\2\2\u0459\u045a\7c\2\2\u045a\u045b\7n\2\2\u045b\u045d")
buf.write(u"\7G\2\2\u045c\u044e\3\2\2\2\u045c\u044f\3\2\2\2\u045d")
buf.write(u"\u00ee\3\2\2\2\u045e\u045f\7G\2\2\u045f\u00f0\3\2\2\2")
buf.write(u"\u0460\u0461\t\5\2\2\u0461\u00f2\3\2\2\2\u0462\u0463")
buf.write(u"\t\4\2\2\u0463\u00f4\3\2\2\2\u0464\u0465\t\6\2\2\u0465")
buf.write(u"\u00f6\3\2\2\2\u0466\u0468\5\u00f5{\2\u0467\u0466\3\2")
buf.write(u"\2\2\u0468\u0469\3\2\2\2\u0469\u0467\3\2\2\2\u0469\u046a")
buf.write(u"\3\2\2\2\u046a\u0472\3\2\2\2\u046b\u046c\5\u00e5s\2\u046c")
buf.write(u"\u046d\5\u00f5{\2\u046d\u046e\5\u00f5{\2\u046e\u046f")
buf.write(u"\5\u00f5{\2\u046f\u0471\3\2\2\2\u0470\u046b\3\2\2\2\u0471")
buf.write(u"\u0474\3\2\2\2\u0472\u0470\3\2\2\2\u0472\u0473\3\2\2")
buf.write(u"\2\u0473\u048c\3\2\2\2\u0474\u0472\3\2\2\2\u0475\u0477")
buf.write(u"\5\u00f5{\2\u0476\u0475\3\2\2\2\u0477\u047a\3\2\2\2\u0478")
buf.write(u"\u0476\3\2\2\2\u0478\u0479\3\2\2\2\u0479\u0482\3\2\2")
buf.write(u"\2\u047a\u0478\3\2\2\2\u047b\u047c\5\u00e5s\2\u047c\u047d")
buf.write(u"\5\u00f5{\2\u047d\u047e\5\u00f5{\2\u047e\u047f\5\u00f5")
buf.write(u"{\2\u047f\u0481\3\2\2\2\u0480\u047b\3\2\2\2\u0481\u0484")
buf.write(u"\3\2\2\2\u0482\u0480\3\2\2\2\u0482\u0483\3\2\2\2\u0483")
buf.write(u"\u0485\3\2\2\2\u0484\u0482\3\2\2\2\u0485\u0487\5\u00e7")
buf.write(u"t\2\u0486\u0488\5\u00f5{\2\u0487\u0486\3\2\2\2\u0488")
buf.write(u"\u0489\3\2\2\2\u0489\u0487\3\2\2\2\u0489\u048a\3\2\2")
buf.write(u"\2\u048a\u048c\3\2\2\2\u048b\u0467\3\2\2\2\u048b\u0478")
buf.write(u"\3\2\2\2\u048c\u00f8\3\2\2\2\u048d\u048e\5\u00f7|\2\u048e")
buf.write(u"\u0491\5\u00efx\2\u048f\u0492\5\r\7\2\u0490\u0492\5\13")
buf.write(u"\6\2\u0491\u048f\3\2\2\2\u0491\u0490\3\2\2\2\u0491\u0492")
buf.write(u"\3\2\2\2\u0492\u0494\3\2\2\2\u0493\u0495\5\u00f5{\2\u0494")
buf.write(u"\u0493\3\2\2\2\u0495\u0496\3\2\2\2\u0496\u0494\3\2\2")
buf.write(u"\2\u0496\u0497\3\2\2\2\u0497\u00fa\3\2\2\2\u0498\u0499")
buf.write(u"\7?\2\2\u0499\u00fc\3\2\2\2\u049a\u049b\7?\2\2\u049b")
buf.write(u"\u04a3\7?\2\2\u049c\u049d\7^\2\2\u049d\u049e\7g\2\2\u049e")
buf.write(u"\u049f\7s\2\2\u049f\u04a0\7w\2\2\u04a0\u04a1\7k\2\2\u04a1")
buf.write(u"\u04a3\7x\2\2\u04a2\u049a\3\2\2\2\u04a2\u049c\3\2\2\2")
buf.write(u"\u04a3\u00fe\3\2\2\2\u04a4\u04a5\7>\2\2\u04a5\u0100\3")
buf.write(u"\2\2\2\u04a6\u04a7\7^\2\2\u04a7\u04a8\7n\2\2\u04a8\u04a9")
buf.write(u"\7g\2\2\u04a9\u04b7\7s\2\2\u04aa\u04ab\7^\2\2\u04ab\u04ac")
buf.write(u"\7n\2\2\u04ac\u04b7\7g\2\2\u04ad\u04ae\7^\2\2\u04ae\u04af")
buf.write(u"\7n\2\2\u04af\u04b0\7g\2\2\u04b0\u04b1\7s\2\2\u04b1\u04b2")
buf.write(u"\7u\2\2\u04b2\u04b3\7n\2\2\u04b3\u04b4\7c\2\2\u04b4\u04b5")
buf.write(u"\7p\2\2\u04b5\u04b7\7v\2\2\u04b6\u04a6\3\2\2\2\u04b6")
buf.write(u"\u04aa\3\2\2\2\u04b6\u04ad\3\2\2\2\u04b7\u0102\3\2\2")
buf.write(u"\2\u04b8\u04b9\7@\2\2\u04b9\u0104\3\2\2\2\u04ba\u04bb")
buf.write(u"\7^\2\2\u04bb\u04bc\7i\2\2\u04bc\u04bd\7g\2\2\u04bd\u04cb")
buf.write(u"\7s\2\2\u04be\u04bf\7^\2\2\u04bf\u04c0\7i\2\2\u04c0\u04cb")
buf.write(u"\7g\2\2\u04c1\u04c2\7^\2\2\u04c2\u04c3\7i\2\2\u04c3\u04c4")
buf.write(u"\7g\2\2\u04c4\u04c5\7s\2\2\u04c5\u04c6\7u\2\2\u04c6\u04c7")
buf.write(u"\7n\2\2\u04c7\u04c8\7c\2\2\u04c8\u04c9\7p\2\2\u04c9\u04cb")
buf.write(u"\7v\2\2\u04ca\u04ba\3\2\2\2\u04ca\u04be\3\2\2\2\u04ca")
buf.write(u"\u04c1\3\2\2\2\u04cb\u0106\3\2\2\2\u04cc\u04cd\7#\2\2")
buf.write(u"\u04cd\u04e3\7?\2\2\u04ce\u04cf\7#\2\2\u04cf\u04d0\7")
buf.write(u"?\2\2\u04d0\u04e3\7?\2\2\u04d1\u04d2\7^\2\2\u04d2\u04d3")
buf.write(u"\7p\2\2\u04d3\u04e3\7g\2\2\u04d4\u04d5\7^\2\2\u04d5\u04d6")
buf.write(u"\7p\2\2\u04d6\u04d7\7g\2\2\u04d7\u04e3\7s\2\2\u04d8\u04d9")
buf.write(u"\7^\2\2\u04d9\u04da\7p\2\2\u04da\u04db\7q\2\2\u04db\u04dc")
buf.write(u"\7v\2\2\u04dc\u04dd\7^\2\2\u04dd\u04de\7g\2\2\u04de\u04df")
buf.write(u"\7s\2\2\u04df\u04e0\7w\2\2\u04e0\u04e1\7k\2\2\u04e1\u04e3")
buf.write(u"\7x\2\2\u04e2\u04cc\3\2\2\2\u04e2\u04ce\3\2\2\2\u04e2")
buf.write(u"\u04d1\3\2\2\2\u04e2\u04d4\3\2\2\2\u04e2\u04d8\3\2\2")
buf.write(u"\2\u04e3\u0108\3\2\2\2\u04e4\u04e5\7#\2\2\u04e5\u010a")
buf.write(u"\3\2\2\2\u04e6\u04e7\7^\2\2\u04e7\u04e8\7\'\2\2\u04e8")
buf.write(u"\u010c\3\2\2\2\u04e9\u04ea\5\u00f7|\2\u04ea\u04eb\5\u010b")
buf.write(u"\u0086\2\u04eb\u010e\3\2\2\2\u04ec\u04ed\7^\2\2\u04ed")
buf.write(u"\u04ee\7e\2\2\u04ee\u04ef\7j\2\2\u04ef\u04f0\7c\2\2\u04f0")
buf.write(u"\u04f1\7t\2\2\u04f1\u04f2\7$\2\2\u04f2\u04f3\7\62\2\2")
buf.write(u"\u04f3\u04f4\7\62\2\2\u04f4\u04f5\7\62\2\2\u04f5\u04f6")
buf.write(u"\7\65\2\2\u04f6\u04f7\7;\2\2\u04f7\u066e\7\63\2\2\u04f8")
buf.write(u"\u04f9\7^\2\2\u04f9\u04fa\7c\2\2\u04fa\u04fb\7n\2\2\u04fb")
buf.write(u"\u04fc\7r\2\2\u04fc\u04fd\7j\2\2\u04fd\u066e\7c\2\2\u04fe")
buf.write(u"\u04ff\7^\2\2\u04ff\u0500\7e\2\2\u0500\u0501\7j\2\2\u0501")
buf.write(u"\u0502\7c\2\2\u0502\u0503\7t\2\2\u0503\u0504\7$\2\2\u0504")
buf.write(u"\u0505\7\62\2\2\u0505\u0506\7\62\2\2\u0506\u0507\7\62")
buf.write(u"\2\2\u0507\u0508\7\65\2\2\u0508\u0509\7;\2\2\u0509\u066e")
buf.write(u"\7\64\2\2\u050a\u050b\7^\2\2\u050b\u050c\7d\2\2\u050c")
buf.write(u"\u050d\7g\2\2\u050d\u050e\7v\2\2\u050e\u066e\7c\2\2\u050f")
buf.write(u"\u0510\7^\2\2\u0510\u0511\7I\2\2\u0511\u0512\7c\2\2\u0512")
buf.write(u"\u0513\7o\2\2\u0513\u0514\7o\2\2\u0514\u066e\7c\2\2\u0515")
buf.write(u"\u0516\7^\2\2\u0516\u0517\7i\2\2\u0517\u0518\7c\2\2\u0518")
buf.write(u"\u0519\7o\2\2\u0519\u051a\7o\2\2\u051a\u066e\7c\2\2\u051b")
buf.write(u"\u051c\7^\2\2\u051c\u051d\7F\2\2\u051d\u051e\7g\2\2\u051e")
buf.write(u"\u051f\7n\2\2\u051f\u0520\7v\2\2\u0520\u066e\7c\2\2\u0521")
buf.write(u"\u0522\7^\2\2\u0522\u0523\7f\2\2\u0523\u0524\7g\2\2\u0524")
buf.write(u"\u0525\7n\2\2\u0525\u0526\7v\2\2\u0526\u066e\7c\2\2\u0527")
buf.write(u"\u0528\7^\2\2\u0528\u0529\7e\2\2\u0529\u052a\7j\2\2\u052a")
buf.write(u"\u052b\7c\2\2\u052b\u052c\7t\2\2\u052c\u052d\7$\2\2\u052d")
buf.write(u"\u052e\7\62\2\2\u052e\u052f\7\62\2\2\u052f\u0530\7\62")
buf.write(u"\2\2\u0530\u0531\7\63\2\2\u0531\u0532\7;\2\2\u0532\u066e")
buf.write(u"\7\62\2\2\u0533\u0534\7^\2\2\u0534\u0535\7g\2\2\u0535")
buf.write(u"\u0536\7r\2\2\u0536\u0537\7u\2\2\u0537\u0538\7k\2\2\u0538")
buf.write(u"\u0539\7n\2\2\u0539\u053a\7q\2\2\u053a\u066e\7p\2\2\u053b")
buf.write(u"\u053c\7^\2\2\u053c\u053d\7x\2\2\u053d\u053e\7c\2\2\u053e")
buf.write(u"\u053f\7t\2\2\u053f\u0540\7g\2\2\u0540\u0541\7r\2\2\u0541")
buf.write(u"\u0542\7u\2\2\u0542\u0543\7k\2\2\u0543\u0544\7n\2\2\u0544")
buf.write(u"\u0545\7q\2\2\u0545\u066e\7p\2\2\u0546\u0547\7^\2\2\u0547")
buf.write(u"\u0548\7e\2\2\u0548\u0549\7j\2\2\u0549\u054a\7c\2\2\u054a")
buf.write(u"\u054b\7t\2\2\u054b\u054c\7$\2\2\u054c\u054d\7\62\2\2")
buf.write(u"\u054d\u054e\7\62\2\2\u054e\u054f\7\62\2\2\u054f\u0550")
buf.write(u"\7\65\2\2\u0550\u0551\7;\2\2\u0551\u066e\78\2\2\u0552")
buf.write(u"\u0553\7^\2\2\u0553\u0554\7|\2\2\u0554\u0555\7g\2\2\u0555")
buf.write(u"\u0556\7v\2\2\u0556\u066e\7c\2\2\u0557\u0558\7^\2\2\u0558")
buf.write(u"\u0559\7e\2\2\u0559\u055a\7j\2\2\u055a\u055b\7c\2\2\u055b")
buf.write(u"\u055c\7t\2\2\u055c\u055d\7$\2\2\u055d\u055e\7\62\2\2")
buf.write(u"\u055e\u055f\7\62\2\2\u055f\u0560\7\62\2\2\u0560\u0561")
buf.write(u"\7\65\2\2\u0561\u0562\7;\2\2\u0562\u066e\79\2\2\u0563")
buf.write(u"\u0564\7^\2\2\u0564\u0565\7g\2\2\u0565\u0566\7v\2\2\u0566")
buf.write(u"\u066e\7c\2\2\u0567\u0568\7^\2\2\u0568\u0569\7V\2\2\u0569")
buf.write(u"\u056a\7j\2\2\u056a\u056b\7g\2\2\u056b\u056c\7v\2\2\u056c")
buf.write(u"\u066e\7c\2\2\u056d\u056e\7^\2\2\u056e\u056f\7v\2\2\u056f")
buf.write(u"\u0570\7j\2\2\u0570\u0571\7g\2\2\u0571\u0572\7v\2\2\u0572")
buf.write(u"\u066e\7c\2\2\u0573\u0574\7^\2\2\u0574\u0575\7x\2\2\u0575")
buf.write(u"\u0576\7c\2\2\u0576\u0577\7t\2\2\u0577\u0578\7v\2\2\u0578")
buf.write(u"\u0579\7j\2\2\u0579\u057a\7g\2\2\u057a\u057b\7v\2\2\u057b")
buf.write(u"\u066e\7c\2\2\u057c\u057d\7^\2\2\u057d\u057e\7e\2\2\u057e")
buf.write(u"\u057f\7j\2\2\u057f\u0580\7c\2\2\u0580\u0581\7t\2\2\u0581")
buf.write(u"\u0582\7$\2\2\u0582\u0583\7\62\2\2\u0583\u0584\7\62\2")
buf.write(u"\2\u0584\u0585\7\62\2\2\u0585\u0586\7\65\2\2\u0586\u0587")
buf.write(u"\7;\2\2\u0587\u066e\7;\2\2\u0588\u0589\7^\2\2\u0589\u058a")
buf.write(u"\7k\2\2\u058a\u058b\7q\2\2\u058b\u058c\7v\2\2\u058c\u066e")
buf.write(u"\7c\2\2\u058d\u058e\7^\2\2\u058e\u058f\7e\2\2\u058f\u0590")
buf.write(u"\7j\2\2\u0590\u0591\7c\2\2\u0591\u0592\7t\2\2\u0592\u0593")
buf.write(u"\7$\2\2\u0593\u0594\7\62\2\2\u0594\u0595\7\62\2\2\u0595")
buf.write(u"\u0596\7\62\2\2\u0596\u0597\7\65\2\2\u0597\u0598\7;\2")
buf.write(u"\2\u0598\u066e\7C\2\2\u0599\u059a\7^\2\2\u059a\u059b")
buf.write(u"\7m\2\2\u059b\u059c\7c\2\2\u059c\u059d\7r\2\2\u059d\u059e")
buf.write(u"\7r\2\2\u059e\u066e\7c\2\2\u059f\u05a0\7^\2\2\u05a0\u05a1")
buf.write(u"\7N\2\2\u05a1\u05a2\7c\2\2\u05a2\u05a3\7o\2\2\u05a3\u05a4")
buf.write(u"\7d\2\2\u05a4\u05a5\7f\2\2\u05a5\u066e\7c\2\2\u05a6\u05a7")
buf.write(u"\7^\2\2\u05a7\u05a8\7n\2\2\u05a8\u05a9\7c\2\2\u05a9\u05aa")
buf.write(u"\7o\2\2\u05aa\u05ab\7d\2\2\u05ab\u05ac\7f\2\2\u05ac\u066e")
buf.write(u"\7c\2\2\u05ad\u05ae\7^\2\2\u05ae\u05af\7e\2\2\u05af\u05b0")
buf.write(u"\7j\2\2\u05b0\u05b1\7c\2\2\u05b1\u05b2\7t\2\2\u05b2\u05b3")
buf.write(u"\7$\2\2\u05b3\u05b4\7\62\2\2\u05b4\u05b5\7\62\2\2\u05b5")
buf.write(u"\u05b6\7\62\2\2\u05b6\u05b7\7\65\2\2\u05b7\u05b8\7;\2")
buf.write(u"\2\u05b8\u066e\7E\2\2\u05b9\u05ba\7^\2\2\u05ba\u05bb")
buf.write(u"\7o\2\2\u05bb\u066e\7w\2\2\u05bc\u05bd\7^\2\2\u05bd\u05be")
buf.write(u"\7e\2\2\u05be\u05bf\7j\2\2\u05bf\u05c0\7c\2\2\u05c0\u05c1")
buf.write(u"\7t\2\2\u05c1\u05c2\7$\2\2\u05c2\u05c3\7\62\2\2\u05c3")
buf.write(u"\u05c4\7\62\2\2\u05c4\u05c5\7\62\2\2\u05c5\u05c6\7\65")
buf.write(u"\2\2\u05c6\u05c7\7;\2\2\u05c7\u066e\7F\2\2\u05c8\u05c9")
buf.write(u"\7^\2\2\u05c9\u05ca\7p\2\2\u05ca\u066e\7w\2\2\u05cb\u05cc")
buf.write(u"\7^\2\2\u05cc\u05cd\7Z\2\2\u05cd\u066e\7k\2\2\u05ce\u05cf")
buf.write(u"\7^\2\2\u05cf\u05d0\7z\2\2\u05d0\u066e\7k\2\2\u05d1\u05d2")
buf.write(u"\7^\2\2\u05d2\u05d3\7e\2\2\u05d3\u05d4\7j\2\2\u05d4\u05d5")
buf.write(u"\7c\2\2\u05d5\u05d6\7t\2\2\u05d6\u05d7\7$\2\2\u05d7\u05d8")
buf.write(u"\7\62\2\2\u05d8\u05d9\7\62\2\2\u05d9\u05da\7\62\2\2\u05da")
buf.write(u"\u05db\7\65\2\2\u05db\u05dc\7;\2\2\u05dc\u066e\7H\2\2")
buf.write(u"\u05dd\u05de\7^\2\2\u05de\u05df\7q\2\2\u05df\u05e0\7")
buf.write(u"o\2\2\u05e0\u05e1\7k\2\2\u05e1\u05e2\7e\2\2\u05e2\u05e3")
buf.write(u"\7t\2\2\u05e3\u05e4\7q\2\2\u05e4\u066e\7p\2\2\u05e5\u05e6")
buf.write(u"\7^\2\2\u05e6\u05e7\7R\2\2\u05e7\u066e\7k\2\2\u05e8\u05e9")
buf.write(u"\7^\2\2\u05e9\u05ea\7x\2\2\u05ea\u05eb\7c\2\2\u05eb\u05ec")
buf.write(u"\7t\2\2\u05ec\u05ed\7r\2\2\u05ed\u066e\7k\2\2\u05ee\u05ef")
buf.write(u"\7^\2\2\u05ef\u05f0\7e\2\2\u05f0\u05f1\7j\2\2\u05f1\u05f2")
buf.write(u"\7c\2\2\u05f2\u05f3\7t\2\2\u05f3\u05f4\7$\2\2\u05f4\u05f5")
buf.write(u"\7\62\2\2\u05f5\u05f6\7\62\2\2\u05f6\u05f7\7\62\2\2\u05f7")
buf.write(u"\u05f8\7\65\2\2\u05f8\u05f9\7C\2\2\u05f9\u066e\7\63\2")
buf.write(u"\2\u05fa\u05fb\7^\2\2\u05fb\u05fc\7t\2\2\u05fc\u05fd")
buf.write(u"\7j\2\2\u05fd\u066e\7q\2\2\u05fe\u05ff\7^\2\2\u05ff\u0600")
buf.write(u"\7x\2\2\u0600\u0601\7c\2\2\u0601\u0602\7t\2\2\u0602\u0603")
buf.write(u"\7t\2\2\u0603\u0604\7j\2\2\u0604\u066e\7q\2\2\u0605\u0606")
buf.write(u"\7^\2\2\u0606\u0607\7U\2\2\u0607\u0608\7k\2\2\u0608\u0609")
buf.write(u"\7i\2\2\u0609\u060a\7o\2\2\u060a\u066e\7c\2\2\u060b\u060c")
buf.write(u"\7^\2\2\u060c\u060d\7u\2\2\u060d\u060e\7k\2\2\u060e\u060f")
buf.write(u"\7i\2\2\u060f\u0610\7o\2\2\u0610\u066e\7c\2\2\u0611\u0612")
buf.write(u"\7^\2\2\u0612\u0613\7x\2\2\u0613\u0614\7c\2\2\u0614\u0615")
buf.write(u"\7t\2\2\u0615\u0616\7u\2\2\u0616\u0617\7k\2\2\u0617\u0618")
buf.write(u"\7i\2\2\u0618\u0619\7o\2\2\u0619\u066e\7c\2\2\u061a\u061b")
buf.write(u"\7^\2\2\u061b\u061c\7e\2\2\u061c\u061d\7j\2\2\u061d\u061e")
buf.write(u"\7c\2\2\u061e\u061f\7t\2\2\u061f\u0620\7$\2\2\u0620\u0621")
buf.write(u"\7\62\2\2\u0621\u0622\7\62\2\2\u0622\u0623\7\62\2\2\u0623")
buf.write(u"\u0624\7\65\2\2\u0624\u0625\7C\2\2\u0625\u066e\7\66\2")
buf.write(u"\2\u0626\u0627\7^\2\2\u0627\u0628\7v\2\2\u0628\u0629")
buf.write(u"\7c\2\2\u0629\u066e\7w\2\2\u062a\u062b\7^\2\2\u062b\u062c")
buf.write(u"\7W\2\2\u062c\u062d\7r\2\2\u062d\u062e\7u\2\2\u062e\u062f")
buf.write(u"\7k\2\2\u062f\u0630\7n\2\2\u0630\u0631\7q\2\2\u0631\u066e")
buf.write(u"\7p\2\2\u0632\u0633\7^\2\2\u0633\u0634\7w\2\2\u0634\u0635")
buf.write(u"\7r\2\2\u0635\u0636\7u\2\2\u0636\u0637\7k\2\2\u0637\u0638")
buf.write(u"\7n\2\2\u0638\u0639\7q\2\2\u0639\u066e\7p\2\2\u063a\u063b")
buf.write(u"\7^\2\2\u063b\u063c\7R\2\2\u063c\u063d\7j\2\2\u063d\u066e")
buf.write(u"\7k\2\2\u063e\u063f\7^\2\2\u063f\u0640\7r\2\2\u0640\u0641")
buf.write(u"\7j\2\2\u0641\u066e\7k\2\2\u0642\u0643\7^\2\2\u0643\u0644")
buf.write(u"\7x\2\2\u0644\u0645\7c\2\2\u0645\u0646\7t\2\2\u0646\u0647")
buf.write(u"\7r\2\2\u0647\u0648\7j\2\2\u0648\u066e\7k\2\2\u0649\u064a")
buf.write(u"\7^\2\2\u064a\u064b\7e\2\2\u064b\u064c\7j\2\2\u064c\u064d")
buf.write(u"\7c\2\2\u064d\u064e\7t\2\2\u064e\u064f\7$\2\2\u064f\u0650")
buf.write(u"\7\62\2\2\u0650\u0651\7\62\2\2\u0651\u0652\7\62\2\2\u0652")
buf.write(u"\u0653\7\65\2\2\u0653\u0654\7C\2\2\u0654\u066e\79\2\2")
buf.write(u"\u0655\u0656\7^\2\2\u0656\u0657\7e\2\2\u0657\u0658\7")
buf.write(u"j\2\2\u0658\u066e\7k\2\2\u0659\u065a\7^\2\2\u065a\u065b")
buf.write(u"\7R\2\2\u065b\u065c\7u\2\2\u065c\u066e\7k\2\2\u065d\u065e")
buf.write(u"\7^\2\2\u065e\u065f\7r\2\2\u065f\u0660\7u\2\2\u0660\u066e")
buf.write(u"\7k\2\2\u0661\u0662\7^\2\2\u0662\u0663\7Q\2\2\u0663\u0664")
buf.write(u"\7o\2\2\u0664\u0665\7g\2\2\u0665\u0666\7i\2\2\u0666\u066e")
buf.write(u"\7c\2\2\u0667\u0668\7^\2\2\u0668\u0669\7q\2\2\u0669\u066a")
buf.write(u"\7o\2\2\u066a\u066b\7g\2\2\u066b\u066c\7i\2\2\u066c\u066e")
buf.write(u"\7c\2\2\u066d\u04ec\3\2\2\2\u066d\u04f8\3\2\2\2\u066d")
buf.write(u"\u04fe\3\2\2\2\u066d\u050a\3\2\2\2\u066d\u050f\3\2\2")
buf.write(u"\2\u066d\u0515\3\2\2\2\u066d\u051b\3\2\2\2\u066d\u0521")
buf.write(u"\3\2\2\2\u066d\u0527\3\2\2\2\u066d\u0533\3\2\2\2\u066d")
buf.write(u"\u053b\3\2\2\2\u066d\u0546\3\2\2\2\u066d\u0552\3\2\2")
buf.write(u"\2\u066d\u0557\3\2\2\2\u066d\u0563\3\2\2\2\u066d\u0567")
buf.write(u"\3\2\2\2\u066d\u056d\3\2\2\2\u066d\u0573\3\2\2\2\u066d")
buf.write(u"\u057c\3\2\2\2\u066d\u0588\3\2\2\2\u066d\u058d\3\2\2")
buf.write(u"\2\u066d\u0599\3\2\2\2\u066d\u059f\3\2\2\2\u066d\u05a6")
buf.write(u"\3\2\2\2\u066d\u05ad\3\2\2\2\u066d\u05b9\3\2\2\2\u066d")
buf.write(u"\u05bc\3\2\2\2\u066d\u05c8\3\2\2\2\u066d\u05cb\3\2\2")
buf.write(u"\2\u066d\u05ce\3\2\2\2\u066d\u05d1\3\2\2\2\u066d\u05dd")
buf.write(u"\3\2\2\2\u066d\u05e5\3\2\2\2\u066d\u05e8\3\2\2\2\u066d")
buf.write(u"\u05ee\3\2\2\2\u066d\u05fa\3\2\2\2\u066d\u05fe\3\2\2")
buf.write(u"\2\u066d\u0605\3\2\2\2\u066d\u060b\3\2\2\2\u066d\u0611")
buf.write(u"\3\2\2\2\u066d\u061a\3\2\2\2\u066d\u0626\3\2\2\2\u066d")
buf.write(u"\u062a\3\2\2\2\u066d\u0632\3\2\2\2\u066d\u063a\3\2\2")
buf.write(u"\2\u066d\u063e\3\2\2\2\u066d\u0642\3\2\2\2\u066d\u0649")
buf.write(u"\3\2\2\2\u066d\u0655\3\2\2\2\u066d\u0659\3\2\2\2\u066d")
buf.write(u"\u065d\3\2\2\2\u066d\u0661\3\2\2\2\u066d\u0667\3\2\2")
buf.write(u"\2\u066e\u0110\3\2\2\2\u066f\u0671\5\u010f\u0088\2\u0670")
buf.write(u"\u0672\t\7\2\2\u0671\u0670\3\2\2\2\u0671\u0672\3\2\2")
buf.write(u"\2\u0672\u0112\3\2\2\2\u0673\u0674\7^\2\2\u0674\u0675")
buf.write(u"\7r\2\2\u0675\u0676\7k\2\2\u0676\u0114\3\2\2\2\u0677")
buf.write(u"\u0678\7^\2\2\u0678\u0679\7k\2\2\u0679\u067a\7p\2\2\u067a")
buf.write(u"\u067b\7h\2\2\u067b\u067c\7v\2\2\u067c\u067d\7{\2\2\u067d")
buf.write(u"\u0116\3\2\2\2\u067e\u0686\5\u0115\u008b\2\u067f\u0680")
buf.write(u"\5\t\5\2\u0680\u0681\5\u0115\u008b\2\u0681\u0686\3\2")
buf.write(u"\2\2\u0682\u0683\5\u0115\u008b\2\u0683\u0684\5\u010b")
buf.write(u"\u0086\2\u0684\u0686\3\2\2\2\u0685\u067e\3\2\2\2\u0685")
buf.write(u"\u067f\3\2\2\2\u0685\u0682\3\2\2\2\u0686\u0118\3\2\2")
buf.write(u"\2\u0687\u0688\7^\2\2\u0688\u0689\7g\2\2\u0689\u068a")
buf.write(u"\7o\2\2\u068a\u068b\7r\2\2\u068b\u068c\7v\2\2\u068c\u068d")
buf.write(u"\7{\2\2\u068d\u068e\7u\2\2\u068e\u068f\7g\2\2\u068f\u0690")
buf.write(u"\7v\2\2\u0690\u011a\3\2\2\2\u0691\u0695\5\u0113\u008a")
buf.write(u"\2\u0692\u0695\5\u0117\u008c\2\u0693\u0695\5\u0119\u008d")
buf.write(u"\2\u0694\u0691\3\2\2\2\u0694\u0692\3\2\2\2\u0694\u0693")
buf.write(u"\3\2\2\2\u0695\u011c\3\2\2\2\u0696\u0697\7^\2\2\u0697")
buf.write(u"\u0698\7x\2\2\u0698\u0699\7c\2\2\u0699\u069a\7t\2\2\u069a")
buf.write(u"\u069b\7k\2\2\u069b\u069c\7c\2\2\u069c\u069d\7d\2\2\u069d")
buf.write(u"\u069e\7n\2\2\u069e\u069f\7g\2\2\u069f\u011e\3\2\2\2")
buf.write(u"\u06a0\u06a4\5\u0111\u0089\2\u06a1\u06a4\5\u00f3z\2\u06a2")
buf.write(u"\u06a4\5\u00f5{\2\u06a3\u06a0\3\2\2\2\u06a3\u06a1\3\2")
buf.write(u"\2\2\u06a3\u06a2\3\2\2\2\u06a4\u06a5\3\2\2\2\u06a5\u06a3")
buf.write(u"\3\2\2\2\u06a5\u06a6\3\2\2\2\u06a6\u06ba\3\2\2\2\u06a7")
buf.write(u"\u06b8\5\u00ddo\2\u06a8\u06ad\5\33\16\2\u06a9\u06ae\5")
buf.write(u"\u0111\u0089\2\u06aa\u06ae\5\u00f3z\2\u06ab\u06ae\5\u00f5")
buf.write(u"{\2\u06ac\u06ae\5\u00e5s\2\u06ad\u06a9\3\2\2\2\u06ad")
buf.write(u"\u06aa\3\2\2\2\u06ad\u06ab\3\2\2\2\u06ad\u06ac\3\2\2")
buf.write(u"\2\u06ae\u06af\3\2\2\2\u06af\u06ad\3\2\2\2\u06af\u06b0")
buf.write(u"\3\2\2\2\u06b0\u06b1\3\2\2\2\u06b1\u06b2\5\35\17\2\u06b2")
buf.write(u"\u06b9\3\2\2\2\u06b3\u06b7\5\u0111\u0089\2\u06b4\u06b7")
buf.write(u"\5\u00f3z\2\u06b5\u06b7\5\u00f5{\2\u06b6\u06b3\3\2\2")
buf.write(u"\2\u06b6\u06b4\3\2\2\2\u06b6\u06b5\3\2\2\2\u06b7\u06b9")
buf.write(u"\3\2\2\2\u06b8\u06a8\3\2\2\2\u06b8\u06b6\3\2\2\2\u06b9")
buf.write(u"\u06bb\3\2\2\2\u06ba\u06a7\3\2\2\2\u06ba\u06bb\3\2\2")
buf.write(u"\2\u06bb\u0120\3\2\2\2\u06bc\u06bd\5\u011d\u008f\2\u06bd")
buf.write(u"\u06be\5\33\16\2\u06be\u06bf\5\u011f\u0090\2\u06bf\u06c1")
buf.write(u"\5\35\17\2\u06c0\u06c2\5\u010b\u0086\2\u06c1\u06c0\3")
buf.write(u"\2\2\2\u06c1\u06c2\3\2\2\2\u06c2\u0122\3\2\2\2$\2\u012b")
buf.write(u"\u0232\u03b0\u03f7\u041d\u0442\u044a\u044c\u045c\u0469")
buf.write(u"\u0472\u0478\u0482\u0489\u048b\u0491\u0496\u04a2\u04b6")
buf.write(u"\u04ca\u04e2\u066d\u0671\u0685\u0694\u06a3\u06a5\u06ad")
buf.write(u"\u06af\u06b6\u06b8\u06ba\u06c1\3\b\2\2")
return buf.getvalue()
class PSLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
WS = 3
DOLLAR_SIGN = 4
ADD = 5
SUB = 6
MUL = 7
DIV = 8
L_PAREN = 9
R_PAREN = 10
L_GROUP = 11
R_GROUP = 12
L_BRACE = 13
R_BRACE = 14
L_BRACE_VISUAL = 15
R_BRACE_VISUAL = 16
L_BRACE_CMD = 17
R_BRACE_CMD = 18
L_BRACKET = 19
R_BRACKET = 20
L_BRACK = 21
R_BRACK = 22
BAR = 23
L_VERT = 24
R_VERT = 25
VERT = 26
L_FLOOR = 27
R_FLOOR = 28
LL_CORNER = 29
LR_CORNER = 30
L_CEIL = 31
R_CEIL = 32
UL_CORNER = 33
UR_CORNER = 34
L_LEFT = 35
R_RIGHT = 36
ML_LEFT = 37
MR_RIGHT = 38
FUNC_LIM = 39
LIM_APPROACH_SYM = 40
FUNC_INT = 41
FUNC_SUM = 42
FUNC_PROD = 43
FUNC_LOG = 44
FUNC_LN = 45
FUNC_EXP = 46
FUNC_SIN = 47
FUNC_COS = 48
FUNC_TAN = 49
FUNC_CSC = 50
FUNC_SEC = 51
FUNC_COT = 52
FUNC_ARCSIN = 53
FUNC_ARCCOS = 54
FUNC_ARCTAN = 55
FUNC_ARCCSC = 56
FUNC_ARCSEC = 57
FUNC_ARCCOT = 58
FUNC_SINH = 59
FUNC_COSH = 60
FUNC_TANH = 61
FUNC_ARSINH = 62
FUNC_ARCOSH = 63
FUNC_ARTANH = 64
FUNC_ARCSINH = 65
FUNC_ARCCOSH = 66
FUNC_ARCTANH = 67
FUNC_ARSINH_NAME = 68
FUNC_ARCSINH_NAME = 69
FUNC_ARCOSH_NAME = 70
FUNC_ARCCOSH_NAME = 71
FUNC_ARTANH_NAME = 72
FUNC_ARCTANH_NAME = 73
FUNC_GCD_NAME = 74
FUNC_LCM_NAME = 75
FUNC_FLOOR_NAME = 76
FUNC_CEIL_NAME = 77
FUNC_SQRT = 78
FUNC_GCD = 79
FUNC_LCM = 80
FUNC_FLOOR = 81
FUNC_CEIL = 82
FUNC_MAX = 83
FUNC_MIN = 84
CMD_TIMES = 85
CMD_CDOT = 86
CMD_DIV = 87
CMD_FRAC = 88
CMD_BINOM = 89
CMD_CHOOSE = 90
CMD_MOD = 91
CMD_MATHIT = 92
CMD_OPERATORNAME = 93
MATRIX_TYPE_MATRIX = 94
MATRIX_TYPE_PMATRIX = 95
MATRIX_TYPE_BMATRIX = 96
MATRIX_TYPE_DET = 97
MATRIX_TYPES = 98
CMD_MATRIX_START = 99
CMD_MATRIX_END = 100
CMD_DET_START = 101
CMD_DET_END = 102
MATRIX_DEL_COL = 103
MATRIX_DEL_ROW = 104
MATRIX_XRIGHTARROW = 105
TRANSFORM_EXCHANGE = 106
ROW_OR_COL = 107
ACCENT_OVERLINE = 108
ACCENT_BAR = 109
UNDERSCORE = 110
CARET = 111
COLON = 112
SEMICOLON = 113
COMMA = 114
PERIOD = 115
DIFFERENTIAL = 116
EXP_E = 117
E_NOTATION_E = 118
LETTER_NO_E = 119
NUMBER = 120
E_NOTATION = 121
ASSIGNMENT = 122
EQUAL = 123
LT = 124
LTE = 125
GT = 126
GTE = 127
UNEQUAL = 128
BANG = 129
PERCENT_NUMBER = 130
GREEK_CMD = 131
SYMBOL = 132
VARIABLE = 133
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ u"DEFAULT_MODE" ]
literalNames = [ u"<INVALID>",
u"'^T'", u"'''", | |
json.load(setup)
else:
with open("POSCAR", "r") as poscar:
for line_ind, line in enumerate(poscar):
if line_ind in [5, 6]:
no_of_atoms = sum([int(atom_no) for atom_no in re.findall("[0-9]+", line)])
if no_of_atoms > 0: break
raw_argv_dict = {key.lower(): value for key, value in [argv.split(":") for argv in argv_list[1:]]}
argv_dict = {}
try:
argv_dict["NL_start"] = int(raw_argv_dict.get("--nl_start", 10))
except:
print(__doc__)
raise Exception("The value passed to --NL_start should be an integer. See more in the above document")
try:
argv_dict["NL_end"] = int(raw_argv_dict.get("--nl_end", 60))
except:
print(__doc__)
raise Exception("The value passed to --NL_end should be an integer. See more in the above document")
try:
argv = raw_argv_dict.get("--dn", "2")
items = re.findall("[0-9]+", argv)
if len(items) == 1:
assert int(items[0]) > 0
if argv == items[0]:
argv = [int(argv)] * 3
elif argv.lower() == ("any_" + items[0]):
argv = {"any": int(items[0])}
else:
assert False, "unrecognized format for --dN"
elif len(items) == 3:
assert argv == "_".join(items)
argv = [int(item) for item in items]
assert False not in [item >= 0 for item in argv]
assert True in [item > 0 for item in argv]
else:
assert False, "unrecognized format for --dN"
argv_dict["dN"] = argv
except:
print(__doc__)
raise Exception("Fail to parse --dN. See the above document to ensure it is set properly")
try:
argv_dict["max_no_of_points"] = int(raw_argv_dict.get("--max_no_of_points", 10))
assert argv_dict["max_no_of_points"] > 0
except:
print(__doc__)
raise Exception("Fail to parse --max_no_of_points or its value is not positive. See more in the above document.")
try:
argv_dict["opt_nl_if_conv_failed"] = int(raw_argv_dict.get("--opt_nl_if_conv_failed", 60))
assert argv_dict["opt_nl_if_conv_failed"] >= 60
except:
print(__doc__)
raise Exception("The value passed to --opt_nl_if_conv_failed should be an integer >= 60 (default: 60). See more in the above document.")
convergence = raw_argv_dict.get("--convergence", "1meV/atom").lower()
argv_dict["convergence_unit"] = "ev"
if "mev/atom" in convergence:
argv_dict["convergence"] = float(convergence.split("mev/atom")[0])*no_of_atoms/1000.
elif "ev/atom" in convergence:
argv_dict["convergence"] = float(convergence.split("ev/atom")[0])*no_of_atoms
elif "mev" in convergence:
argv_dict["convergence"] = float(convergence.split("mev")[0])/1000.
elif "ev" in convergence:
argv_dict["convergence"] = float(convergence.split("ev"))
else:
print(__doc__)
raise Exception("The energy convergence criterion should be set by '--convergence=AB', where A is a number and B should be ev, mev, ev/atom or mev/atom.\nSee more in the above document")
if "--convergence_type" in raw_argv_dict.keys():
convergence_type = raw_argv_dict["--convergence_type"].lower()
if convergence_type.startswith("chg"):
argv_dict["convergence_type"] = "chg"
elif convergence_type.startswith("aver"):
argv_dict["convergence_type"] = "aver"
else:
print(__doc__)
raise Exception("The value passed to --convergence_type should be either 'chg' or 'aver'. See the above document for more details.")
else:
argv_dict["convergence_type"] = "aver"
try:
argv_dict["no_of_consecutive_convergences"] = int(raw_argv_dict.get("--no_of_consecutive_convergences", 3))
assert argv_dict["no_of_consecutive_convergences"] >= 2
except:
print(__doc__)
raise Exception("Fail to parse --no_of_consecutive_convergences or its value is incorrect. See the above document to ensure that it is set properly.")
try:
argv_dict["which"] = int(raw_argv_dict.get("--which", 2))
assert argv_dict["which"] <= argv_dict["no_of_consecutive_convergences"] + 1
except:
print(__doc__)
raise Exception("Fail to parse --which or its value is invalid. See the above document to ensure it is set properly.")
try:
argv_dict["max_vacuum_thickness"] = raw_argv_dict["--max_vacuum_thickness"]
argv_dict["max_vacuum_thickness"] = [int(thickness) for thickness in argv_dict["max_vacuum_thickness"].split("_")[:3]]
except:
print(__doc__)
raise Exception("You must set --max_vacuum_thickness. See the above document to ensure that it is set properly.")
kmesh_type = raw_argv_dict.get("--kmesh_type", "Auto").lower()
if kmesh_type.startswith("g"):
kmesh_type = "Gamma"
elif kmesh_type.startswith("m"):
kmesh_type = "Monkhorst-Pack"
elif kmesh_type.startswith("a"):
kmesh_type = "Auto"
else:
print(__doc__)
raise Exception("Fail to parse --kmesh_type. See the above document to ensure that it is set properly..")
argv_dict["kmesh_type"] = kmesh_type
try:
argv_dict["shift"] = [int(st) if st=="0" else float(st) for st in raw_argv_dict.get("--shift", "0_0_0").split("_")[:3]]
except:
print(__doc__)
raise Exception("Fail to parse --shift. See the above document to ensure that it is set properly.")
try:
argv_dict["symprec_latt_const"] = float(raw_argv_dict.get("--symprec_latt_const", 0.1))
except:
print(__doc__)
raise Exception("Fail to parse --symprec_latt_const. See the above document to ensure that it is set properly.")
try:
argv_dict["symprec_angle"] = float(raw_argv_dict.get("--symprec_angle", 1))
except:
print(__doc__)
raise Exception("Fail to parse --symprec_angle. See the above document to ensure that it is set properly.")
argv_dict["extra_copy"] = [file for file in raw_argv_dict.get("--extra_copy", "").split("+") if file]
for file in argv_dict["extra_copy"]:
assert os.path.isfile(file), "{} doesn't exist under {}".format(file, os.getcwd())
for std_vasp_input in ["INCAR", "POTCAR", "POSCAR", "KPOINTS"]:
assert not file.endswith(std_vasp_input), "INCAR, POTCAR, POSCAR and KPOINTS will be copied implicitly. Don't set them via --extra_copy"
with open("kpoints_convergence_setup.json", "w") as setup:
json.dump(argv_dict, setup, indent=4)
input_kwargvs = {}
for key in ["kmesh_type", "shift", "max_vacuum_thickness", "symprec_latt_const", "symprec_angle"]:
input_kwargvs[key] = argv_dict[key]
input_kwargvs["cal_loc"] = "."
NL_list, kpoints_setup_list, NL, dN, NL_end = [], [], argv_dict["NL_start"], argv_dict["dN"], argv_dict["NL_end"]
is_it_0D = False
while NL <= NL_end:
automatic_kmesh = VaspAutomaticKMesh(NL=NL, **input_kwargvs)#.get_kpoints_setup()
kpoints_setup = automatic_kmesh.get_kpoints_setup()
if automatic_kmesh.pbc_type_of_xyz == [False, False, False]:
#If it is 0D, KPOINTS must be Gamma point only.
is_it_0D = True
break
optimal_NL = list(kpoints_setup["optimal_NL"].keys())[0]
pbc_subdivision = VaspAutomaticKMesh.get_pbc_sublist(kpoints_setup["subdivisions"], kpoints_setup["pbc_type_of_xyz"])
is_NL_unique = False
if NL_list == []:
is_NL_unique = True
if isinstance(argv_dict["dN"], list):
pbc_dN_list = VaspAutomaticKMesh.get_pbc_sublist(argv_dict["dN"], kpoints_setup["pbc_type_of_xyz"])
elif isinstance(argv_dict["dN"], list):
if False not in [dN <= (pbc_div_1 - pbc_div_0) for dN, pbc_div_0, pbc_div_1 in zip(pbc_dN_list, pbc_subdivision_0, pbc_subdivision)]:
is_NL_unique = True
elif isinstance(argv_dict["dN"], dict):
if True in [argv_dict["dN"]["any"] <= (pbc_div_1 - pbc_div_0) for pbc_div_0, pbc_div_1 in zip(pbc_subdivision_0, pbc_subdivision)]:
is_NL_unique = True
#For test only
if False and NL_list == [] and is_NL_unique:
print(kpoints_setup["subdivisions"], NL, optimal_NL, kpoints_setup["equivalent_NL"])
elif False and is_NL_unique:
print(kpoints_setup["subdivisions"], NL, optimal_NL, kpoints_setup["equivalent_NL"], end=" ")
if isinstance(argv_dict["dN"], list):
print([dN <= (pbc_div_1 - pbc_div_0) for dN, pbc_div_0, pbc_div_1 in zip(pbc_dN_list, pbc_subdivision_0, pbc_subdivision)])
else:
print([argv_dict["dN"]["any"] <= (pbc_div_1 - pbc_div_0) for pbc_div_0, pbc_div_1 in zip(pbc_subdivision_0, pbc_subdivision)])
NL = max(kpoints_setup["equivalent_NL"]) + 1
if is_NL_unique:
pbc_subdivision_0 = pbc_subdivision
kpoints_setup["NL"] = optimal_NL
kpoints_setup_list.append(kpoints_setup)
NL_list.append(optimal_NL)
argv_dict["opt_kpoints_setup_if_conv_failed"] = VaspAutomaticKMesh(NL=argv_dict["opt_nl_if_conv_failed"], **input_kwargvs).get_kpoints_setup()
argv_dict["NL_list"] = NL_list[:min([len(NL_list), argv_dict["max_no_of_points"]])]
argv_dict["kpoints_setup_list"] = kpoints_setup_list[:min([len(NL_list), argv_dict["max_no_of_points"]])]
argv_dict["is_it_0D"] = is_it_0D
assert is_it_0D == False, "Given the parameters in kpoints_convergence_setup.json, this is zero-dimensional material. No need to test total energy convergence w.r.t. KPOINTS."
argv_dict["is_nl_end_included"] = (argv_dict["NL_list"][-1] == NL_list[-1])
sub_dir_creation_summary_dict = {"extra_copy_to_sub_dir": [os.path.split(file)[1] for file in argv_dict["extra_copy"]]}
sub_dir_creation_summary_dict["sub_dir_name_list"] = ["NL_" + str(NL) for NL in argv_dict["NL_list"]]
with open("sub_dir_creation_summary.json", "w") as summary_df:
json.dump(sub_dir_creation_summary_dict, summary_df, indent=4)
return argv_dict
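# Worked example of the convergence conversion above (the command-line value and
# atom count are illustrative assumptions, not taken from this file): passing
# --convergence:1meV/atom for a POSCAR containing 8 atoms yields
#   argv_dict["convergence"] = 1 * 8 / 1000. = 0.008   (total-energy criterion in eV)
# while --convergence:5meV gives 5 / 1000. = 0.005 eV regardless of the atom count.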
# In[6]:
def prepare_cal_files(argv_dict):
if argv_dict["opt_nl_if_conv_failed"] not in argv_dict["NL_list"]:
kpoints_setup_list = argv_dict["kpoints_setup_list"] + [argv_dict["opt_kpoints_setup_if_conv_failed"]]
NL_list = argv_dict["NL_list"] + [argv_dict["opt_nl_if_conv_failed"]]
is_opt_nl_if_conv_failed_appended = True
else:
kpoints_setup_list = argv_dict["kpoints_setup_list"]
NL_list = argv_dict["NL_list"]
is_opt_nl_if_conv_failed_appended = False
for kpoints_setup, NL in zip(kpoints_setup_list, NL_list):
is_preparation_needed = True
sub_dir_name = "NL_" + str(NL)
if not os.path.isdir(sub_dir_name):
os.mkdir(sub_dir_name)
else:
file_list = os.listdir(sub_dir_name)
for filename in file_list:
if filename.startswith("__") and filename.endswith("__"):
#The presence of any HTC signal file indicates that the sub-dir VASP calculation input files were prepared.
is_preparation_needed = False
break
if is_preparation_needed:
if os.path.isfile(os.path.join(sub_dir_name, "opt_nl_if_conv_failed")):
if is_opt_nl_if_conv_failed_appended:
pass
else:
open(os.path.join(sub_dir_name, "__ready__"), "w").close()
print("{}: The VASP input files are already ready. Just create __ready__".format(sub_dir_name))
else:
shutil.copy("POSCAR", os.path.join(sub_dir_name, "POSCAR"))
shutil.copy("KPOINTS", os.path.join(sub_dir_name, "KPOINTS"))
shutil.copy("POTCAR", os.path.join(sub_dir_name, "POTCAR"))
shutil.copy("INCAR", os.path.join(sub_dir_name, "INCAR"))
if argv_dict["extra_copy"]:
for file in argv_dict["extra_copy"]:
shutil.copy2(file, sub_dir_name)
print("Create sub-dir {} and copy the following files to it: INCAR, POSCAR, POTCAR, KPOINTS, ".format(sub_dir_name), end=" ")
[print(extra_file, end=" ") for extra_file in argv_dict["extra_copy"]]
print(" && write KPOINTS under {}: ".format(sub_dir_name), end=" ")
# The details of KPOINTS will be printed by the function below.
VaspAutomaticKMesh.write_KPOINTS(kpoints_setup=kpoints_setup, cal_loc=sub_dir_name)
if is_opt_nl_if_conv_failed_appended and NL_list[-1] == NL:
open(os.path.join(sub_dir_name, "opt_nl_if_conv_failed"), "w").close()
else:
open(os.path.join(sub_dir_name, "__ready__"), "w").close()
if not is_opt_nl_if_conv_failed_appended and argv_dict["opt_nl_if_conv_failed"] == NL:
open(os.path.join(sub_dir_name, "opt_nl_if_conv_failed"), "w").close()
# In[2]:
def read_and_write_no_to(filename, read_mode=True, no=0):
if read_mode:
if not os.path.isfile(filename):
return 0
with open(filename, "r") as f:
no = list(f)[0].strip("\n").strip()
return int(no)
else:
with open(filename, "w") as f:
f.write(str(no))
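# Usage sketch of read_and_write_no_to with the counter file used below:
#   read_and_write_no_to("_no_of_incomplete_OSZICAR_", read_mode=True)           # -> 0 when the file is absent
#   read_and_write_no_to("_no_of_incomplete_OSZICAR_", read_mode=False, no=3)    # overwrites the file with "3"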
# In[3]:
def are_all_sub_dir_cal_finished(argv_dict):
for NL in argv_dict["NL_list"]:
sub_dir_name = "NL_" + str(NL)
if True not in [os.path.isfile(os.path.join(sub_dir_name, target_file)) for target_file in
["__done__", "__skipped__", "__done_cleaned_analyzed__", "__done_failed_to_clean_analyze__"]]:
return False
with open(os.path.join(sub_dir_name, "OSZICAR"), "r") as oszicar:
for line in oszicar:
pass
incomplete_oszicar_counter = os.path.join(sub_dir_name, "_no_of_incomplete_OSZICAR_")
if "F=" not in line or "E0=" not in line:
no = read_and_write_no_to(filename=incomplete_oszicar_counter, read_mode=True)
if no <= 5:
read_and_write_no_to(filename=incomplete_oszicar_counter, read_mode=False, no=no+1)
else:
open("__manual__", "w").close()
print("Although the calculation under {} finished, OSZICAR is found incomplete 5 times. Please check.".format(sub_dir_name))
print("Create __manual__")
return False
else:
if os.path.isfile(incomplete_oszicar_counter):
os.remove(incomplete_oszicar_counter)
return True
# In[15]:
def find_converged_NL(argv_dict):
""" Find the converged Nk_IRBZ w.r.t. the total Energy E0 in OSZICAR.
Two different cases:
1. convergence_type:chg,
If there are NCC consecutive absolute changes in the
# Source file: xls/contrib/xlscc/build_rules/xlscc_rules.bzl (repository: RobSpringer/xls)
# Copyright 2021 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build rules to compile with xlscc"""
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load(
"//xls/build_rules:xls_common_rules.bzl",
"append_default_to_args",
"args_to_string",
"get_output_filename_value",
"is_args_valid",
)
load(
"//xls/build_rules:xls_config_rules.bzl",
"CONFIG",
"enable_generated_file_wrapper",
)
load("//xls/build_rules:xls_providers.bzl", "ConvIRInfo")
load(
"//xls/build_rules:xls_ir_rules.bzl",
"append_xls_ir_opt_ir_generated_files",
"get_xls_ir_opt_ir_generated_files",
"xls_ir_opt_ir_attrs",
"xls_ir_opt_ir_impl",
)
load(
"//xls/build_rules:xls_codegen_rules.bzl",
"append_xls_ir_verilog_generated_files",
"get_xls_ir_verilog_generated_files",
"xls_ir_verilog_attrs",
"xls_ir_verilog_impl",
)
load("//xls/build_rules:xls_toolchains.bzl", "xls_toolchain_attr")
_CC_FILE_EXTENSION = ".cc"
_H_FILE_EXTENSION = ".h"
_INC_FILE_EXTENSION = ".inc"
_IR_FILE_EXTENSION = ".ir"
_PROTOBIN_FILE_EXTENSION = ".protobin"
_BINARYPB_FILE_EXTENSION = ".binarypb"
_DEFAULT_XLSCC_ARGS = {
"dump_ir_only": "True",
"top": "Run",
}
def _append_xls_cc_ir_generated_files(args, basename):
"""Returns a dictionary of arguments appended with filenames generated by the 'xls_cc_ir' rule.
Args:
args: A dictionary of arguments.
basename: The file basename.
Returns:
Returns a dictionary of arguments appended with filenames generated by the 'xls_cc_ir' rule.
"""
args.setdefault("ir_file", basename + _IR_FILE_EXTENSION)
return args
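# For example, _append_xls_cc_ir_generated_files({}, "a_ir") returns {"ir_file": "a_ir.ir"};
# an "ir_file" entry already present in args is left untouched by setdefault.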
def _get_xls_cc_ir_generated_files(args):
"""Returns a list of filenames generated by the 'xls_cc_ir' rule found in 'args'.
Args:
args: A dictionary of arguments.
Returns:
Returns a list of files generated by the 'xls_cc_ir' rule found in 'args'.
"""
return [args.get("ir_file")]
def _get_runfiles_for_xls_cc_ir(ctx):
"""Returns the runfiles from a 'xls_cc_ir' ctx.
Args:
ctx: The current rule's context object.
Returns:
The runfiles from a 'xls_cc_ir' ctx.
"""
transitive_runfiles = []
runfiles = ctx.runfiles(files = [ctx.file.src] + [ctx.file.block] +
ctx.files._default_cc_header_files +
ctx.files._default_synthesis_header_files +
ctx.files.src_deps)
transitive_runfiles.append(ctx.attr
._xlscc_tool[DefaultInfo].default_runfiles)
transitive_runfiles.append(ctx.attr
._default_cc_header_files[DefaultInfo].default_runfiles)
transitive_runfiles.append(ctx.attr
._default_synthesis_header_files[DefaultInfo].default_runfiles)
for dep in ctx.attr.src_deps:
transitive_runfiles.append(dep[DefaultInfo].default_runfiles)
runfiles = runfiles.merge_all(transitive_runfiles)
return runfiles
def _get_transitive_built_files_for_xls_cc_ir(ctx):
"""Returns the transitive built files from a 'xls_cc_ir' ctx.
Args:
ctx: The current rule's context object.
Returns:
The transitive built files from a 'xls_cc_ir' ctx.
"""
transitive_built_files = []
transitive_built_files.append(ctx.attr.src[DefaultInfo].files)
transitive_built_files.append(ctx.attr.block[DefaultInfo].files)
transitive_built_files.append(ctx.attr._xlscc_tool[DefaultInfo].files)
transitive_built_files.append(ctx.attr
._default_cc_header_files[DefaultInfo].files)
transitive_built_files.append(ctx.attr
._default_synthesis_header_files[DefaultInfo].files)
for dep in ctx.attr.src_deps:
transitive_built_files.append(dep[DefaultInfo].files)
if not transitive_built_files:
return None
return transitive_built_files
def _xls_cc_ir_impl(ctx):
"""The implementation of the 'xls_cc_ir' rule.
Converts a C/C++ source file to an IR file.
Args:
ctx: The current rule's context object.
Returns:
A tuple with the following elements in the order presented:
1. The ConvIRInfo provider
1. The list of built files.
1. The runfiles.
"""
XLSCC_FLAGS = (
"module_name",
"block_pb",
"top",
"package",
"clang_args_file",
"defines",
"include_dirs",
"meta_out",
"dump_ir_only",
)
xlscc_args = append_default_to_args(
ctx.attr.xlscc_args,
_DEFAULT_XLSCC_ARGS,
)
# Append to user paths.
xlscc_args["include_dirs"] = (
xlscc_args.get("include_dirs", "") + ",${PWD},./," +
ctx.genfiles_dir.path + "," + ctx.bin_dir.path + "," +
"xls/contrib/xlscc/synth_only," +
"xls/contrib/xlscc/synth_only/ac_compat," +
ctx.attr._default_cc_header_files.label.workspace_root # This must be the last directory in the list.
)
# Append to user defines.
xlscc_args["defines"] = (
xlscc_args.get("defines", "") + "__SYNTHESIS__," +
"__AC_OVERRIDE_OVF_UPDATE_BODY=,__AC_OVERRIDE_OVF_UPDATE2_BODY="
)
is_args_valid(xlscc_args, XLSCC_FLAGS)
my_args = args_to_string(xlscc_args)
ir_filename = get_output_filename_value(
ctx,
"ir_file",
ctx.attr.name + _IR_FILE_EXTENSION,
)
ir_file = ctx.actions.declare_file(ir_filename)
# Get runfiles
runfiles = _get_runfiles_for_xls_cc_ir(ctx)
ctx.actions.run_shell(
outputs = [ir_file],
# The IR converter executable is a tool needed by the action.
tools = [ctx.executable._xlscc_tool],
# The files required for converting the C/C++ source file.
inputs = runfiles.files,
command = "{} {} --block_pb {} {} > {}".format(
ctx.executable._xlscc_tool.path,
ctx.file.src.path,
ctx.file.block.path,
my_args,
ir_file.path,
),
mnemonic = "ConvertXLSCC",
progress_message = "Converting XLSCC file: %s" % (ctx.file.src.path),
)
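# The action above runs a command of roughly this shape (paths are illustrative):
#   <xlscc> a.cc --block_pb a_block.protobin <flags built from xlscc_args, by default
#   top=Run and dump_ir_only=True> > a_ir.ir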
return [ConvIRInfo(conv_ir_file = ir_file), [ir_file], runfiles]
_xls_cc_ir_attrs = {
"src": attr.label(
doc = "The C/C++ source file containing the top level block. A " +
"single source file must be provided. The file must have a '" +
_CC_FILE_EXTENSION + "' extension.",
mandatory = True,
allow_single_file = [_CC_FILE_EXTENSION],
),
"block": attr.label(
doc = "Protobuf describing top-level block interface. A single " +
"source file single source file must be provided. The file " +
"must have a '" + _PROTOBIN_FILE_EXTENSION + "' or a '" +
_BINARYPB_FILE_EXTENSION + "' extension.",
mandatory = True,
allow_single_file = [
_PROTOBIN_FILE_EXTENSION,
_BINARYPB_FILE_EXTENSION,
],
),
"src_deps": attr.label_list(
doc = "Additional source files for the rule. The file must have a " +
_CC_FILE_EXTENSION + ", " + _H_FILE_EXTENSION + " or " +
_INC_FILE_EXTENSION + " extension.",
allow_files = [
_CC_FILE_EXTENSION,
_H_FILE_EXTENSION,
_INC_FILE_EXTENSION,
],
),
"xlscc_args": attr.string_dict(
doc = "Arguments of the XLSCC conversion tool.",
),
"ir_file": attr.output(
doc = "Filename of the generated IR. If not specified, the " +
"target name of the bazel rule followed by an " +
_IR_FILE_EXTENSION + " extension is used.",
),
"_xlscc_tool": attr.label(
doc = "The target of the XLSCC executable.",
default = Label("//xls/contrib/xlscc:xlscc"),
allow_single_file = True,
executable = True,
cfg = "exec",
),
"_default_cc_header_files": attr.label(
doc = "Default C/C++ header files for xlscc.",
default = Label("@com_github_hlslibs_ac_types//:ac_types_as_data"),
cfg = "target",
),
"_default_synthesis_header_files": attr.label(
doc = "Default synthesis header files for xlscc.",
default = Label("//xls/contrib/xlscc:synth_only_headers"),
cfg = "target",
),
}
def _xls_cc_ir_impl_wrapper(ctx):
"""The implementation of the 'xls_cc_ir' rule.
Wrapper for xls_cc_ir_impl. See: xls_cc_ir_impl.
Args:
ctx: The current rule's context object.
Returns:
ConvIRInfo provider
DefaultInfo provider
"""
ir_conv_info, built_files, runfiles = _xls_cc_ir_impl(ctx)
return [
ir_conv_info,
DefaultInfo(
files = depset(
direct = built_files,
transitive = _get_transitive_built_files_for_xls_cc_ir(ctx),
),
runfiles = runfiles,
),
]
xls_cc_ir = rule(
doc = """A build rule that converts a C/C++ source file to an IR file.
Examples:
1) A simple IR conversion example. Assume target 'a_block_pb' is
defined.
```
xls_cc_ir(
name = "a_ir",
src = "a.cc",
block = ":a_block_pb",
)
```
""",
implementation = _xls_cc_ir_impl_wrapper,
attrs = dicts.add(
_xls_cc_ir_attrs,
CONFIG["xls_outs_attrs"],
),
)
def xls_cc_ir_macro(
name,
src,
block,
src_deps = [],
xlscc_args = {},
enable_generated_file = True,
enable_presubmit_generated_file = False,
**kwargs):
"""A macro that instantiates a build rule generating an IR file from a C/C++ source file.
The macro instantiates a rule that converts a C/C++ source file to an IR
file and the 'enable_generated_file_wrapper' function. The generated files
are listed in the outs attribute of the rule.
Examples:
1) A simple IR conversion example. Assume target 'a_block_pb' is defined.
```
xls_cc_ir(
name = "a_ir",
src = "a.cc",
block = ":a_block_pb",
)
```
Args:
name: The name of the rule.
src: The C/C++ source file containing the top level block. A single source
file must be provided. The file must have a '.cc' extension.
block: Protobuf describing top-level block interface. A single source file
must be provided. The file must have a '.protobin'
or a '.binarypb' extension.
src_deps: Additional source files for the rule. The file must have a
'.cc', '.h' or '.inc' extension.
xlscc_args: Arguments of the XLSCC conversion tool.
enable_generated_file: See 'enable_generated_file' from
'enable_generated_file_wrapper' function.
enable_presubmit_generated_file: See 'enable_presubmit_generated_file'
from 'enable_generated_file_wrapper' function.
**kwargs: Keyword arguments. Named arguments.
"""
# Type check input
if type(name) != type(""):
fail("Argument 'name' must be of string type.")
if type(src) != type(""):
fail("Argument 'src' must be of string type.")
if type(block) != type(""):
fail("Argument 'block' must be of string type.")
if type(src_deps) != type([]):
fail("Argument 'src_deps' must be of list type.")
if type(xlscc_args) != type({}):
fail("Argument 'xlscc_args' must be of dictionary type.")
if type(enable_generated_file) != type(True):
fail("Argument 'enable_generated_file' must be of boolean type.")
if type(enable_presubmit_generated_file) != type(True):
fail("Argument 'enable_presubmit_generated_file' must be " +
"of boolean type.")
# Append output files to arguments.
kwargs = _append_xls_cc_ir_generated_files(kwargs, name)
xls_cc_ir(
name = name,
src = src,
block = block,
src_deps = src_deps,
xlscc_args = xlscc_args,
outs = _get_xls_cc_ir_generated_files(kwargs),
**kwargs
)
enable_generated_file_wrapper(
wrapped_target = name,
enable_generated_file = enable_generated_file,
enable_presubmit_generated_file = enable_presubmit_generated_file,
**kwargs
)
def _xls_cc_verilog_impl(ctx):
"""The implementation of the 'xls_cc_verilog' rule.
Converts a C/C++ file to an IR, optimizes the IR, and generates a verilog
file from the optimized IR.
Args:
ctx: The current rule's context object.
Returns:
ConvIRInfo provider.
OptIRInfo provider.
CodegenInfo provider.
DefaultInfo provider.
"""
ir_conv_info, ir_conv_built_files, ir_conv_runfiles = _xls_cc_ir_impl(ctx)
ir_opt_info, opt_ir_built_files, opt_ir_runfiles = xls_ir_opt_ir_impl(
ctx,
ir_conv_info.conv_ir_file,
)
codegen_info, verilog_built_files, verilog_runfiles = xls_ir_verilog_impl(
ctx,
ir_opt_info.opt_ir_file,
)
runfiles = ir_conv_runfiles.merge_all([opt_ir_runfiles, verilog_runfiles])
return [
ir_conv_info,
ir_opt_info,
codegen_info,
DefaultInfo(
files = depset(
direct = ir_conv_built_files + opt_ir_built_files +
verilog_built_files,
transitive = _get_transitive_built_files_for_xls_cc_ir(ctx),
),
runfiles = runfiles,
),
]
_cc_verilog_attrs = dicts.add(
_xls_cc_ir_attrs,
xls_ir_opt_ir_attrs,
xls_ir_verilog_attrs,
CONFIG["xls_outs_attrs"],
xls_toolchain_attr,
)
xls_cc_verilog = rule(
doc = """A build rule that generates a Verilog file from a C/C++ | |
retrieve the preferred
lifetime value of a DHCP IPv6 Network object.
range_templates: The list of IPv6 address range templates assigned
to this IPv6 network template object. When you create an IPv6
network based on an IPv6 network template object that contains
IPv6 range templates, the IPv6 address ranges are created based
on the associated IPv6 address range templates.
recycle_leases: If the field is set to True, the leases are kept in
the Recycle Bin until one week after expiration. Otherwise, the
leases are permanently deleted.
rir: The registry (RIR) that allocated the IPv6 network address
space.
rir_organization: The RIR organization associated with the IPv6
network.
rir_registration_action: The action for the RIR registration.
rir_registration_status: The registration status of the IPv6 network
in RIR.
send_rir_request: Determines whether to send the RIR registration
request.
update_dns_on_lease_renewal: This field controls whether the DHCP
server updates DNS when a DHCP lease is renewed.
use_ddns_domainname: Use flag for: ddns_domainname
use_ddns_enable_option_fqdn: Use flag for: ddns_enable_option_fqdn
use_ddns_generate_hostname: Use flag for: ddns_generate_hostname
use_ddns_ttl: Use flag for: ddns_ttl
use_domain_name: Use flag for: domain_name
use_domain_name_servers: Use flag for: domain_name_servers
use_enable_ddns: Use flag for: enable_ddns
use_options: Use flag for: options
use_preferred_lifetime: Use flag for: preferred_lifetime
use_recycle_leases: Use flag for: recycle_leases
use_update_dns_on_lease_renewal: Use flag for:
update_dns_on_lease_renewal
use_valid_lifetime: Use flag for: valid_lifetime
valid_lifetime: Use this method to set or retrieve the valid
lifetime value of a DHCP IPv6 Network object.
"""
_infoblox_type = 'ipv6networktemplate'
_fields = ['allow_any_netmask', 'auto_create_reversezone', 'cidr',
'cloud_api_compatible', 'comment', 'ddns_domainname',
'ddns_enable_option_fqdn', 'ddns_generate_hostname',
'ddns_server_always_updates', 'ddns_ttl', 'delegated_member',
'domain_name', 'domain_name_servers', 'enable_ddns', 'extattrs',
'fixed_address_templates', 'ipv6prefix', 'members', 'name',
'options', 'preferred_lifetime', 'range_templates',
'recycle_leases', 'rir', 'rir_organization',
'rir_registration_action', 'rir_registration_status',
'send_rir_request', 'update_dns_on_lease_renewal',
'use_ddns_domainname', 'use_ddns_enable_option_fqdn',
'use_ddns_generate_hostname', 'use_ddns_ttl', 'use_domain_name',
'use_domain_name_servers', 'use_enable_ddns', 'use_options',
'use_preferred_lifetime', 'use_recycle_leases',
'use_update_dns_on_lease_renewal', 'use_valid_lifetime',
'valid_lifetime']
_search_for_update_fields = ['name']
_updateable_search_fields = ['comment', 'ipv6prefix', 'name',
'rir_organization']
_all_searchable_fields = ['comment', 'ipv6prefix', 'name', 'rir',
'rir_organization']
_return_fields = ['comment', 'extattrs', 'name']
_remap = {}
_shadow_fields = ['_ref']
_ip_version = 6
_custom_field_processing = {
'members': Dhcpmember.from_dict,
'options': Dhcpoption.from_dict,
}
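# Hedged usage sketch for IPv6NetworkTemplate (the connector construction below follows
# the usual infoblox-client pattern and is an assumption, not defined in this module):
#   from infoblox_client import connector
#   conn = connector.Connector({'host': 'gm.example.com',
#                               'username': 'admin', 'password': '...'})
#   tmpl = IPv6NetworkTemplate.create(conn, name='lab-v6-template', cidr=64,
#                                     comment='template for lab IPv6 networks')
# 'name' is listed in _search_for_update_fields above, so the library can look up an
# existing template with the same name instead of duplicating it.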
class IPRange(InfobloxObject):
@classmethod
def get_v4_class(cls):
return IPRangeV4
@classmethod
def get_v6_class(cls):
return IPRangeV6
class IPRangeV4(IPRange):
""" IPRangeV4: DHCP Range object.
Corresponds to WAPI object 'range'
A DHCP range defines the specified range of IP addresses in a
network. A DHCP range should be added for a network so the Infoblox
appliance can assign IP addresses within that specified range to
DHCP clients. If the client is on a network that is assigned a DHCP
range, the device distributes an available IP address from that
range to the DHCP client, or to a DHCP relay agent if the request
came through an agent. The DHCP range should also be assigned with a
device. If devices are in a grid, the particular member serving DHCP
for the DHCP range must be specified. If the server is an
independent device, this device must be specified as the member that
serves the DHCP range.
Fields:
always_update_dns: This field controls whether only the DHCP server
is allowed to update DNS, regardless of the DHCP clients
requests.
bootfile: The bootfile name for the range. You can configure the
DHCP server to support clients that use the boot file name
option in their DHCPREQUEST messages.
bootserver: The bootserver address for the range. You can specify
the name and/or IP address of the boot server that the host
needs to boot.The boot server IPv4 Address or name in FQDN
format.
cloud_info: Structure containing all cloud API related information
for this object.
comment: Comment for the range; maximum 256 characters.
ddns_domainname: The dynamic DNS domain name the appliance uses
specifically for DDNS updates for this range.
ddns_generate_hostname: If this field is set to True, the DHCP
server generates a hostname and updates DNS with it when the
DHCP client request does not contain a hostname.
deny_all_clients: If True, send NAK forcing the client to take the
new address.
deny_bootp: If set to true, BOOTP settings are disabled and BOOTP
requests will be denied.
dhcp_utilization: The percentage of the total DHCP utilization of
the range multiplied by 1000. This is the percentage of the
total number of available IP addresses belonging to the range
versus the total number of all IP addresses in the range.
dhcp_utilization_status: A string describing the utilization level
of the range.
disable: Determines whether a range is disabled or not. When this is
set to False, the range is enabled.
discover_now_status: Discover now status for this range.
discovery_basic_poll_settings: The discovery basic poll settings for
this range.
discovery_blackout_setting: The discovery blackout setting for this
range.
discovery_member: The member that will run discovery for this range.
dynamic_hosts: The total number of DHCP leases issued for the range.
email_list: The e-mail lists to which the appliance sends DHCP
threshold alarm e-mail messages.
enable_ddns: The dynamic DNS updates flag of a DHCP range object. If
set to True, the DHCP server sends DDNS updates to DNS servers
in the same Grid, and to external DNS servers.
enable_dhcp_thresholds: Determines if DHCP thresholds are enabled
for the range.
enable_discovery: Determines whether a discovery is enabled or not
for this range. When this is set to False, the discovery for
this range is disabled.
enable_email_warnings: Determines if DHCP threshold warnings are
sent through email.
enable_ifmap_publishing: Determines if IFMAP publishing is enabled
for the range.
enable_immediate_discovery: Determines if the discovery for the
range should be immediately enabled.
enable_pxe_lease_time: Set this to True if you want the DHCP server
to use a different lease time for PXE clients.
enable_snmp_warnings: Determines if DHCP threshold warnings are send
through SNMP.
end_addr: The IPv4 Address end address of the range.
endpoint_sources: The endpoints that provides data for the DHCP
Range object.
exclude: These are ranges of IP addresses that the appliance does
not use to assign to clients. You can use these exclusion
addresses as static IP addresses. They contain the start and end
addresses of the exclusion range, and optionally, information
about this exclusion range.
extattrs: Extensible attributes associated with the object. For valid
values for extensible attributes, see the following information.
failover_association: The name of the failover association: the
server in this failover association will serve the IPv4 range in
case the main server is out of service.
fingerprint_filter_rules: This field contains the fingerprint
filters for this DHCP range.The appliance uses matching rules in
these filters to select the address range from which it assigns
a lease.
high_water_mark: The percentage of DHCP range usage threshold above
which range usage is not expected and may warrant your
attention. When the high watermark is reached, the Infoblox
appliance generates a syslog message and sends a warning (if
enabled). A number that specifies the percentage of allocated
addresses. The range is from 1 to 100.
high_water_mark_reset: The percentage of DHCP range usage below
which the corresponding SNMP trap is reset. A number that
specifies the percentage of allocated addresses. The range is
from 1 to 100. The high watermark reset value must be lower than
the high watermark value.
ignore_dhcp_option_list_request: If this field is set to False, the
appliance returns all DHCP options the client is eligible to
receive, rather than only the list of options the client has
requested.
ignore_id: Indicates whether the appliance will ignore DHCP client
IDs or MAC addresses. Valid values are "NONE", "CLIENT", or
"MACADDR". The default is "NONE".
ignore_mac_addresses: A list of MAC addresses the appliance will
ignore.
is_split_scope: This field will be 'true' if this particular range
is part of a split scope.
known_clients: Permission for known clients. This can be 'Allow' or
'Deny'. If set to 'Deny' known clients will be denied IP
addresses. Known clients include roaming hosts and clients with
fixed addresses or DHCP host entries. Unknown clients include
clients that are not roaming hosts and clients that do not have
fixed addresses or DHCP host entries.
lease_scavenge_time: An integer that specifies the period of time
(in seconds) that frees and backs up leases remained | |
return vimagemodule.VImage_tan(*args)
def andimage(*args): return vimagemodule.VImage_andimage(*args)
def orimage(*args): return vimagemodule.VImage_orimage(*args)
def eorimage(*args): return vimagemodule.VImage_eorimage(*args)
def shiftleft(*args): return vimagemodule.VImage_shiftleft(*args)
def shiftright(*args): return vimagemodule.VImage_shiftright(*args)
def greyc(*args): return vimagemodule.VImage_greyc(*args)
def greyc_mask(*args): return vimagemodule.VImage_greyc_mask(*args)
def LCh2Lab(*args): return vimagemodule.VImage_LCh2Lab(*args)
def LCh2UCS(*args): return vimagemodule.VImage_LCh2UCS(*args)
def Lab2LCh(*args): return vimagemodule.VImage_Lab2LCh(*args)
def Lab2LabQ(*args): return vimagemodule.VImage_Lab2LabQ(*args)
def Lab2LabS(*args): return vimagemodule.VImage_Lab2LabS(*args)
def Lab2UCS(*args): return vimagemodule.VImage_Lab2UCS(*args)
def Lab2XYZ(*args): return vimagemodule.VImage_Lab2XYZ(*args)
def Lab2XYZ_temp(*args): return vimagemodule.VImage_Lab2XYZ_temp(*args)
def Lab2disp(*args): return vimagemodule.VImage_Lab2disp(*args)
def LabQ2LabS(*args): return vimagemodule.VImage_LabQ2LabS(*args)
def LabQ2Lab(*args): return vimagemodule.VImage_LabQ2Lab(*args)
def LabQ2XYZ(*args): return vimagemodule.VImage_LabQ2XYZ(*args)
def LabQ2disp(*args): return vimagemodule.VImage_LabQ2disp(*args)
def LabS2LabQ(*args): return vimagemodule.VImage_LabS2LabQ(*args)
def LabS2Lab(*args): return vimagemodule.VImage_LabS2Lab(*args)
def UCS2LCh(*args): return vimagemodule.VImage_UCS2LCh(*args)
def UCS2Lab(*args): return vimagemodule.VImage_UCS2Lab(*args)
def UCS2XYZ(*args): return vimagemodule.VImage_UCS2XYZ(*args)
def XYZ2Lab(*args): return vimagemodule.VImage_XYZ2Lab(*args)
def XYZ2Lab_temp(*args): return vimagemodule.VImage_XYZ2Lab_temp(*args)
def XYZ2UCS(*args): return vimagemodule.VImage_XYZ2UCS(*args)
def XYZ2Yxy(*args): return vimagemodule.VImage_XYZ2Yxy(*args)
def XYZ2disp(*args): return vimagemodule.VImage_XYZ2disp(*args)
def XYZ2sRGB(*args): return vimagemodule.VImage_XYZ2sRGB(*args)
def Yxy2XYZ(*args): return vimagemodule.VImage_Yxy2XYZ(*args)
def dE00_fromLab(*args): return vimagemodule.VImage_dE00_fromLab(*args)
def dECMC_fromLab(*args): return vimagemodule.VImage_dECMC_fromLab(*args)
def dECMC_fromdisp(*args): return vimagemodule.VImage_dECMC_fromdisp(*args)
def dE_fromLab(*args): return vimagemodule.VImage_dE_fromLab(*args)
def dE_fromXYZ(*args): return vimagemodule.VImage_dE_fromXYZ(*args)
def dE_fromdisp(*args): return vimagemodule.VImage_dE_fromdisp(*args)
def disp2Lab(*args): return vimagemodule.VImage_disp2Lab(*args)
def disp2XYZ(*args): return vimagemodule.VImage_disp2XYZ(*args)
def icc_ac2rc(*args): return vimagemodule.VImage_icc_ac2rc(*args)
def icc_export(*args): return vimagemodule.VImage_icc_export(*args)
def icc_export_depth(*args): return vimagemodule.VImage_icc_export_depth(*args)
def icc_import(*args): return vimagemodule.VImage_icc_import(*args)
def icc_import_embedded(*args): return vimagemodule.VImage_icc_import_embedded(*args)
def icc_transform(*args): return vimagemodule.VImage_icc_transform(*args)
def lab_morph(*args): return vimagemodule.VImage_lab_morph(*args)
def sRGB2XYZ(*args): return vimagemodule.VImage_sRGB2XYZ(*args)
def bandjoin(*args): return vimagemodule.VImage_bandjoin(*args)
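    # SWIG exposes class-level (static) operations twice: they are registered in
    # __swig_getmethods__ and, on new-style classes, re-bound as staticmethods.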
__swig_getmethods__["black"] = lambda x: vimagemodule.VImage_black
if _newclass:black = staticmethod(vimagemodule.VImage_black)
def c2amph(*args): return vimagemodule.VImage_c2amph(*args)
def c2imag(*args): return vimagemodule.VImage_c2imag(*args)
def c2ps(*args): return vimagemodule.VImage_c2ps(*args)
def c2real(*args): return vimagemodule.VImage_c2real(*args)
def c2rect(*args): return vimagemodule.VImage_c2rect(*args)
def clip2c(*args): return vimagemodule.VImage_clip2c(*args)
def clip2cm(*args): return vimagemodule.VImage_clip2cm(*args)
def clip2d(*args): return vimagemodule.VImage_clip2d(*args)
def clip2dcm(*args): return vimagemodule.VImage_clip2dcm(*args)
def clip2f(*args): return vimagemodule.VImage_clip2f(*args)
def clip2fmt(*args): return vimagemodule.VImage_clip2fmt(*args)
def clip2i(*args): return vimagemodule.VImage_clip2i(*args)
def clip2s(*args): return vimagemodule.VImage_clip2s(*args)
def clip2ui(*args): return vimagemodule.VImage_clip2ui(*args)
def clip2us(*args): return vimagemodule.VImage_clip2us(*args)
def clip(*args): return vimagemodule.VImage_clip(*args)
def copy(*args): return vimagemodule.VImage_copy(*args)
def copy_morph(*args): return vimagemodule.VImage_copy_morph(*args)
def copy_swap(*args): return vimagemodule.VImage_copy_swap(*args)
def copy_set(*args): return vimagemodule.VImage_copy_set(*args)
__swig_getmethods__["csv2vips"] = lambda x: vimagemodule.VImage_csv2vips
if _newclass:csv2vips = staticmethod(vimagemodule.VImage_csv2vips)
def extract_area(*args): return vimagemodule.VImage_extract_area(*args)
def extract_areabands(*args): return vimagemodule.VImage_extract_areabands(*args)
def extract_band(*args): return vimagemodule.VImage_extract_band(*args)
def extract_bands(*args): return vimagemodule.VImage_extract_bands(*args)
def extract(*args): return vimagemodule.VImage_extract(*args)
def falsecolour(*args): return vimagemodule.VImage_falsecolour(*args)
def fliphor(*args): return vimagemodule.VImage_fliphor(*args)
def flipver(*args): return vimagemodule.VImage_flipver(*args)
__swig_getmethods__["gbandjoin"] = lambda x: vimagemodule.VImage_gbandjoin
if _newclass:gbandjoin = staticmethod(vimagemodule.VImage_gbandjoin)
def grid(*args): return vimagemodule.VImage_grid(*args)
def insert(*args): return vimagemodule.VImage_insert(*args)
def insert_noexpand(*args): return vimagemodule.VImage_insert_noexpand(*args)
__swig_getmethods__["jpeg2vips"] = lambda x: vimagemodule.VImage_jpeg2vips
if _newclass:jpeg2vips = staticmethod(vimagemodule.VImage_jpeg2vips)
def lrjoin(*args): return vimagemodule.VImage_lrjoin(*args)
__swig_getmethods__["magick2vips"] = lambda x: vimagemodule.VImage_magick2vips
if _newclass:magick2vips = staticmethod(vimagemodule.VImage_magick2vips)
__swig_getmethods__["mask2vips"] = lambda x: vimagemodule.VImage_mask2vips
if _newclass:mask2vips = staticmethod(vimagemodule.VImage_mask2vips)
def msb(*args): return vimagemodule.VImage_msb(*args)
def msb_band(*args): return vimagemodule.VImage_msb_band(*args)
__swig_getmethods__["png2vips"] = lambda x: vimagemodule.VImage_png2vips
if _newclass:png2vips = staticmethod(vimagemodule.VImage_png2vips)
__swig_getmethods__["exr2vips"] = lambda x: vimagemodule.VImage_exr2vips
if _newclass:exr2vips = staticmethod(vimagemodule.VImage_exr2vips)
__swig_getmethods__["ppm2vips"] = lambda x: vimagemodule.VImage_ppm2vips
if _newclass:ppm2vips = staticmethod(vimagemodule.VImage_ppm2vips)
__swig_getmethods__["analyze2vips"] = lambda x: vimagemodule.VImage_analyze2vips
if _newclass:analyze2vips = staticmethod(vimagemodule.VImage_analyze2vips)
def recomb(*args): return vimagemodule.VImage_recomb(*args)
def replicate(*args): return vimagemodule.VImage_replicate(*args)
def ri2c(*args): return vimagemodule.VImage_ri2c(*args)
def rot180(*args): return vimagemodule.VImage_rot180(*args)
def rot270(*args): return vimagemodule.VImage_rot270(*args)
def rot90(*args): return vimagemodule.VImage_rot90(*args)
def scale(*args): return vimagemodule.VImage_scale(*args)
def scaleps(*args): return vimagemodule.VImage_scaleps(*args)
def rightshift_size(*args): return vimagemodule.VImage_rightshift_size(*args)
def slice(*args): return vimagemodule.VImage_slice(*args)
def subsample(*args): return vimagemodule.VImage_subsample(*args)
def system(*args): return vimagemodule.VImage_system(*args)
def tbjoin(*args): return vimagemodule.VImage_tbjoin(*args)
__swig_getmethods__["text"] = lambda x: vimagemodule.VImage_text
if _newclass:text = staticmethod(vimagemodule.VImage_text)
def thresh(*args): return vimagemodule.VImage_thresh(*args)
__swig_getmethods__["tiff2vips"] = lambda x: vimagemodule.VImage_tiff2vips
if _newclass:tiff2vips = staticmethod(vimagemodule.VImage_tiff2vips)
def vips2csv(*args): return vimagemodule.VImage_vips2csv(*args)
def vips2jpeg(*args): return vimagemodule.VImage_vips2jpeg(*args)
def vips2mask(*args): return vimagemodule.VImage_vips2mask(*args)
def vips2mimejpeg(*args): return vimagemodule.VImage_vips2mimejpeg(*args)
def vips2png(*args): return vimagemodule.VImage_vips2png(*args)
def vips2ppm(*args): return vimagemodule.VImage_vips2ppm(*args)
def vips2tiff(*args): return vimagemodule.VImage_vips2tiff(*args)
def wrap(*args): return vimagemodule.VImage_wrap(*args)
def zoom(*args): return vimagemodule.VImage_zoom(*args)
def addgnoise(*args): return vimagemodule.VImage_addgnoise(*args)
def compass(*args): return vimagemodule.VImage_compass(*args)
def contrast_surface(*args): return vimagemodule.VImage_contrast_surface(*args)
def contrast_surface_raw(*args): return vimagemodule.VImage_contrast_surface_raw(*args)
def conv(*args): return vimagemodule.VImage_conv(*args)
def conv_raw(*args): return vimagemodule.VImage_conv_raw(*args)
def convf(*args): return vimagemodule.VImage_convf(*args)
def convf_raw(*args): return vimagemodule.VImage_convf_raw(*args)
def convsep(*args): return vimagemodule.VImage_convsep(*args)
def convsep_raw(*args): return vimagemodule.VImage_convsep_raw(*args)
def convsepf(*args): return vimagemodule.VImage_convsepf(*args)
def convsepf_raw(*args): return vimagemodule.VImage_convsepf_raw(*args)
def convsub(*args): return vimagemodule.VImage_convsub(*args)
def embed(*args): return vimagemodule.VImage_embed(*args)
def fastcor(*args): return vimagemodule.VImage_fastcor(*args)
def fastcor_raw(*args): return vimagemodule.VImage_fastcor_raw(*args)
__swig_getmethods__["gaussnoise"] = lambda x: vimagemodule.VImage_gaussnoise
if _newclass:gaussnoise = staticmethod(vimagemodule.VImage_gaussnoise)
def grad_x(*args): return vimagemodule.VImage_grad_x(*args)
def grad_y(*args): return vimagemodule.VImage_grad_y(*args)
def gradcor(*args): return vimagemodule.VImage_gradcor(*args)
def gradcor_raw(*args): return vimagemodule.VImage_gradcor_raw(*args)
def gradient(*args): return vimagemodule.VImage_gradient(*args)
__swig_getmethods__["rank_image"] = lambda x: vimagemodule.VImage_rank_image
if _newclass:rank_image = staticmethod(vimagemodule.VImage_rank_image)
def lindetect(*args): return vimagemodule.VImage_lindetect(*args)
__swig_getmethods__["maxvalue"] = lambda x: vimagemodule.VImage_maxvalue
if _newclass:maxvalue = staticmethod(vimagemodule.VImage_maxvalue)
def mpercent(*args): return vimagemodule.VImage_mpercent(*args)
def phasecor_fft(*args): return vimagemodule.VImage_phasecor_fft(*args)
def rank(*args): return vimagemodule.VImage_rank(*args)
def rank_raw(*args): return vimagemodule.VImage_rank_raw(*args)
def resize_linear(*args): return vimagemodule.VImage_resize_linear(*args)
def sharpen(*args): return vimagemodule.VImage_sharpen(*args)
def shrink(*args): return vimagemodule.VImage_shrink(*args)
def spcor(*args): return vimagemodule.VImage_spcor(*args)
def spcor_raw(*args): return vimagemodule.VImage_spcor_raw(*args)
def stretch3(*args): return vimagemodule.VImage_stretch3(*args)
def zerox(*args): return vimagemodule.VImage_zerox(*args)
__swig_getmethods__["create_fmask"] = lambda x: vimagemodule.VImage_create_fmask
if _newclass:create_fmask = staticmethod(vimagemodule.VImage_create_fmask)
def disp_ps(*args): return vimagemodule.VImage_disp_ps(*args)
def flt_image_freq(*args): return vimagemodule.VImage_flt_image_freq(*args)
__swig_getmethods__["fractsurf"] = lambda x: vimagemodule.VImage_fractsurf
if _newclass:fractsurf = staticmethod(vimagemodule.VImage_fractsurf)
def freqflt(*args): return vimagemodule.VImage_freqflt(*args)
def fwfft(*args): return vimagemodule.VImage_fwfft(*args)
def rotquad(*args): return vimagemodule.VImage_rotquad(*args)
def invfft(*args): return vimagemodule.VImage_invfft(*args)
def invfftr(*args): return vimagemodule.VImage_invfftr(*args)
def gammacorrect(*args): return vimagemodule.VImage_gammacorrect(*args)
def heq(*args): return vimagemodule.VImage_heq(*args)
def hist(*args): return vimagemodule.VImage_hist(*args)
def histcum(*args): return vimagemodule.VImage_histcum(*args)
def histeq(*args): return vimagemodule.VImage_histeq(*args)
def histgr(*args): return vimagemodule.VImage_histgr(*args)
def histnD(*args): return vimagemodule.VImage_histnD(*args)
def histnorm(*args): return vimagemodule.VImage_histnorm(*args)
def histplot(*args): return vimagemodule.VImage_histplot(*args)
def histspec(*args): return vimagemodule.VImage_histspec(*args)
def hsp(*args): return vimagemodule.VImage_hsp(*args)
__swig_getmethods__["identity"] = lambda x: vimagemodule.VImage_identity
if _newclass:identity = staticmethod(vimagemodule.VImage_identity)
__swig_getmethods__["identity_ushort"] = lambda x: vimagemodule.VImage_identity_ushort
if _newclass:identity_ushort = staticmethod(vimagemodule.VImage_identity_ushort)
def ismonotonic(*args): return vimagemodule.VImage_ismonotonic(*args)
def lhisteq(*args): return vimagemodule.VImage_lhisteq(*args)
def lhisteq_raw(*args): return vimagemodule.VImage_lhisteq_raw(*args)
__swig_getmethods__["invertlut"] = lambda x: vimagemodule.VImage_invertlut
if _newclass:invertlut = staticmethod(vimagemodule.VImage_invertlut)
__swig_getmethods__["buildlut"] = lambda x: vimagemodule.VImage_buildlut
if _newclass:buildlut = staticmethod(vimagemodule.VImage_buildlut)
def maplut(*args): return vimagemodule.VImage_maplut(*args)
def project(*args): return vimagemodule.VImage_project(*args)
def stdif(*args): return vimagemodule.VImage_stdif(*args)
def stdif_raw(*args): return vimagemodule.VImage_stdif_raw(*args)
def tone_analyse(*args): return vimagemodule.VImage_tone_analyse(*args)
__swig_getmethods__["tone_build"] = lambda x: vimagemodule.VImage_tone_build
if _newclass:tone_build = staticmethod(vimagemodule.VImage_tone_build)
__swig_getmethods__["tone_build_range"] = lambda x: vimagemodule.VImage_tone_build_range
if _newclass:tone_build_range = staticmethod(vimagemodule.VImage_tone_build_range)
def tone_map(*args): return vimagemodule.VImage_tone_map(*args)
def circle(*args): return vimagemodule.VImage_circle(*args)
def flood_blob_copy(*args): return vimagemodule.VImage_flood_blob_copy(*args)
def insertplace(*args): return vimagemodule.VImage_insertplace(*args)
def line(*args): return vimagemodule.VImage_line(*args)
def lineset(*args): return vimagemodule.VImage_lineset(*args)
__swig_getmethods__["binfile"] = lambda x: vimagemodule.VImage_binfile
if _newclass:binfile = staticmethod(vimagemodule.VImage_binfile)
def cache(*args): return vimagemodule.VImage_cache(*args)
def header_get_type(*args): return vimagemodule.VImage_header_get_type(*args)
def header_int(*args): return vimagemodule.VImage_header_int(*args)
def header_double(*args): return vimagemodule.VImage_header_double(*args)
def header_string(*args): return vimagemodule.VImage_header_string(*args)
def cntlines(*args): return vimagemodule.VImage_cntlines(*args)
def dilate(*args): return vimagemodule.VImage_dilate(*args)
def dilate_raw(*args): return vimagemodule.VImage_dilate_raw(*args)
def erode(*args): return vimagemodule.VImage_erode(*args)
def erode_raw(*args): return vimagemodule.VImage_erode_raw(*args)
def profile(*args): return vimagemodule.VImage_profile(*args)
def affine(*args): return vimagemodule.VImage_affine(*args)
def align_bands(*args): return vimagemodule.VImage_align_bands(*args)
def correl(*args): return vimagemodule.VImage_correl(*args)
def _find_lroverlap(*args): return vimagemodule.VImage__find_lroverlap(*args)
def _find_tboverlap(*args): return vimagemodule.VImage__find_tboverlap(*args)
def global_balance(*args): return vimagemodule.VImage_global_balance(*args)
def global_balancef(*args): return vimagemodule.VImage_global_balancef(*args)
def lrmerge(*args): return vimagemodule.VImage_lrmerge(*args)
def lrmerge1(*args): return vimagemodule.VImage_lrmerge1(*args)
def lrmosaic(*args): return vimagemodule.VImage_lrmosaic(*args)
def lrmosaic1(*args): return vimagemodule.VImage_lrmosaic1(*args)
def match_linear(*args): return vimagemodule.VImage_match_linear(*args)
def match_linear_search(*args): return vimagemodule.VImage_match_linear_search(*args)
def maxpos_subpel(*args): return vimagemodule.VImage_maxpos_subpel(*args)
def remosaic(*args): return vimagemodule.VImage_remosaic(*args)
def similarity_area(*args): return vimagemodule.VImage_similarity_area(*args)
def similarity(*args): return vimagemodule.VImage_similarity(*args)
def tbmerge(*args): return vimagemodule.VImage_tbmerge(*args)
def tbmerge1(*args): return vimagemodule.VImage_tbmerge1(*args)
def tbmosaic(*args): return vimagemodule.VImage_tbmosaic(*args)
def tbmosaic1(*args): return vimagemodule.VImage_tbmosaic1(*args)
def benchmark(*args): return vimagemodule.VImage_benchmark(*args)
def benchmark2(*args): return vimagemodule.VImage_benchmark2(*args)
def benchmarkn(*args): return vimagemodule.VImage_benchmarkn(*args)
__swig_getmethods__["eye"] = lambda x: vimagemodule.VImage_eye
if _newclass:eye = staticmethod(vimagemodule.VImage_eye)
__swig_getmethods__["grey"] = lambda x: vimagemodule.VImage_grey
if _newclass:grey = staticmethod(vimagemodule.VImage_grey)
__swig_getmethods__["feye"] = lambda x: vimagemodule.VImage_feye
if _newclass:feye = staticmethod(vimagemodule.VImage_feye)
__swig_getmethods__["fgrey"] = lambda x: vimagemodule.VImage_fgrey
if _newclass:fgrey = staticmethod(vimagemodule.VImage_fgrey)
__swig_getmethods__["fzone"] = lambda x: vimagemodule.VImage_fzone
if _newclass:fzone = staticmethod(vimagemodule.VImage_fzone)
__swig_getmethods__["make_xy"] = lambda x: vimagemodule.VImage_make_xy
if _newclass:make_xy = staticmethod(vimagemodule.VImage_make_xy)
__swig_getmethods__["zone"] = lambda x: vimagemodule.VImage_zone
if _newclass:zone = staticmethod(vimagemodule.VImage_zone)
def blend(*args): return vimagemodule.VImage_blend(*args)
def equal(*args): return vimagemodule.VImage_equal(*args)
def ifthenelse(*args): return vimagemodule.VImage_ifthenelse(*args)
def less(*args): return vimagemodule.VImage_less(*args)
def lesseq(*args): return vimagemodule.VImage_lesseq(*args)
def more(*args): return vimagemodule.VImage_more(*args)
def moreeq(*args): return vimagemodule.VImage_moreeq(*args)
def notequal(*args): return vimagemodule.VImage_notequal(*args)
__swig_getmethods__["video_test"] = lambda x: vimagemodule.VImage_video_test
if _newclass:video_test = staticmethod(vimagemodule.VImage_video_test)
__swig_getmethods__["video_v4l1"] = lambda x: vimagemodule.VImage_video_v4l1
if _newclass:video_v4l1 = staticmethod(vimagemodule.VImage_video_v4l1)
def tobuffer(*args): return vimagemodule.VImage_tobuffer(*args)
__swig_getmethods__["frombuffer"] = lambda x: vimagemodule.VImage_frombuffer
if _newclass:frombuffer = staticmethod(vimagemodule.VImage_frombuffer)
def tostring(*args): return vimagemodule.VImage_tostring(*args)
__swig_getmethods__["fromstring"] = lambda x: vimagemodule.VImage_fromstring
if _newclass:fromstring = staticmethod(vimagemodule.VImage_fromstring)
VImage_swigregister = vimagemodule.VImage_swigregister
VImage_swigregister(VImage)
VImage_print_all = vimagemodule.VImage_print_all
VImage_convert2disc = vimagemodule.VImage_convert2disc
VImage_linreg = vimagemodule.VImage_linreg
VImage_black = vimagemodule.VImage_black
VImage_csv2vips = vimagemodule.VImage_csv2vips
VImage_gbandjoin = vimagemodule.VImage_gbandjoin
VImage_jpeg2vips = vimagemodule.VImage_jpeg2vips
VImage_magick2vips = vimagemodule.VImage_magick2vips
VImage_mask2vips = vimagemodule.VImage_mask2vips
VImage_png2vips = vimagemodule.VImage_png2vips
VImage_exr2vips = vimagemodule.VImage_exr2vips
VImage_ppm2vips = vimagemodule.VImage_ppm2vips
VImage_analyze2vips = vimagemodule.VImage_analyze2vips
VImage_text = vimagemodule.VImage_text
VImage_tiff2vips = vimagemodule.VImage_tiff2vips
VImage_gaussnoise = vimagemodule.VImage_gaussnoise
VImage_rank_image = vimagemodule.VImage_rank_image
VImage_maxvalue = vimagemodule.VImage_maxvalue
VImage_create_fmask = vimagemodule.VImage_create_fmask
VImage_fractsurf = vimagemodule.VImage_fractsurf
VImage_identity = vimagemodule.VImage_identity
VImage_identity_ushort = vimagemodule.VImage_identity_ushort
VImage_invertlut = vimagemodule.VImage_invertlut
VImage_buildlut = vimagemodule.VImage_buildlut
VImage_tone_build = vimagemodule.VImage_tone_build
VImage_tone_build_range = vimagemodule.VImage_tone_build_range
VImage_binfile = vimagemodule.VImage_binfile
VImage_eye = vimagemodule.VImage_eye
VImage_grey = vimagemodule.VImage_grey
VImage_feye = vimagemodule.VImage_feye
VImage_fgrey = vimagemodule.VImage_fgrey
VImage_fzone = vimagemodule.VImage_fzone
VImage_make_xy = vimagemodule.VImage_make_xy
VImage_zone = vimagemodule.VImage_zone
VImage_video_test = vimagemodule.VImage_video_test
VImage_video_v4l1 = vimagemodule.VImage_video_v4l1
VImage_frombuffer = vimagemodule.VImage_frombuffer
VImage_fromstring = vimagemodule.VImage_fromstring
im_init_world = vimagemodule.im_init_world
im__print_all = vimagemodule.im__print_all
im_col_Lab2XYZ = vimagemodule.im_col_Lab2XYZ
# try to guess a PIL mode string from a VIPS image
def PIL_mode_from_vips (vim):
    if vim.Bands
/ "correct2002",
feeddown=0,
ips=correct_ips,
ignore_missing_columns=True,
iterations=1,
)
# Testing --------------------------------------------------------------
# Check output data ---
assert len(list(tmp_path.glob("correct*"))) == 6
# Check all found correctors ---
# only octupole correctors should be present
for correction in (df_corrections_f4000, df_corrections_f2200, df_corrections_f2002):
assert len(correction.index) == 4
assert all(correction['order'] == 4)
# f4000 and f2200 should give same values for correction
assert_frame_equal(df_corrections_f4000, df_corrections_f2200)
# f4000 and f2002 should give different values for correction
with pytest.raises(AssertionError):
assert_series_equal(df_corrections_f4000[VALUE], df_corrections_f2002[VALUE])
# frames are equal apart from value, though
non_val_columns = [col for col in df_corrections_f2200.columns if col != VALUE]
assert_frame_equal(df_corrections_f4000[non_val_columns], df_corrections_f2002[non_val_columns])
def test_switched_beta(self):
"""Test using the special RDTs* where the beta-exponents are switched."""
# Parameters -----------------------------------------------------------
accel = 'hllhc'
correct_ips = (1, 3)
n_magnets = 4
n_ips = 4
n_sides = 2
# Setup ----------------------------------------------------------------
beta = 2
error_value = 2
optics = generate_pseudo_model(
accel=accel, n_ips=n_ips, n_magnets=n_magnets, betax=beta, betay=beta)
errors = generate_errortable(
index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets),
value=error_value,
)
# Correction ---------------------------------------------------------------
_, df_corrections = irnl_correct(
accel=accel,
optics=[optics, ],
errors=[errors, ],
beams=[1, ],
rdts=["f4000", ],
ips=correct_ips,
ignore_missing_columns=True,
iterations=1,
)
_, df_corrections_switched = irnl_correct(
accel=accel,
optics=[optics, ],
errors=[errors, ],
beams=[1, ],
rdts=["f0004*", ], # only for testing purposes use this RDT
ips=correct_ips,
ignore_missing_columns=True,
iterations=1,
)
# as beta cancels out:
error_strengths = n_sides * n_magnets * error_value
for ip in correct_ips:
mask = df_corrections_switched[IP] == ip
corrector_strengths_switched = sum(df_corrections_switched.loc[mask, VALUE])
assert abs(corrector_strengths_switched + error_strengths) < EPS # compensation of RDT
assert_frame_equal(df_corrections, df_corrections_switched)
class TestFeeddown:
@pytest.mark.parametrize('x', (2, 0))
@pytest.mark.parametrize('y', (1.5, 0))
def test_general_feeddown(self, tmp_path: Path, x: float, y: float):
"""Test feeddown functionality from decapoles to octupoles and sextupoles."""
# Parameters -----------------------------------------------------------
accel = 'lhc'
correct_ips = (1, 3)
error_value = 2
n_magnets = 4
n_ips = 4
n_sides = 2
# Setup ----------------------------------------------------------------
optics = generate_pseudo_model(
accel=accel, n_ips=n_ips, n_magnets=n_magnets, x=x, y=y)
errors = generate_errortable(
index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets),
)
errors["K4L"] = error_value # normal decapole errors
# Correction ---------------------------------------------------------------
rdts = "f4000", "f3001"
_, df_corrections = irnl_correct(
accel=accel,
optics=[optics],
errors=[errors],
beams=[1],
rdts=rdts,
output=tmp_path / "correct",
feeddown=0,
ips=correct_ips,
ignore_missing_columns=True,
iterations=1,
)
_, df_corrections_fd1 = irnl_correct(
accel=accel,
optics=[optics],
errors=[errors],
beams=[1],
rdts=rdts,
output=tmp_path / "correct_fd1",
feeddown=1,
ips=correct_ips,
ignore_missing_columns=True,
iterations=1,
)
errors["K4L"] = 0
errors["K5L"] = error_value # normal dodecapole errors
_, df_corrections_fd2 = irnl_correct(
accel=accel,
optics=[optics],
errors=[errors],
beams=[1],
rdts=rdts,
output=tmp_path / "correct_fd2",
feeddown=2,
ips=correct_ips,
ignore_missing_columns=True,
iterations=1,
)
# Testing ------------------------------------------------------------------
# Check output data ---
assert len(list(tmp_path.glob("correct*"))) == 6
# Check all found correctors ---
# no corrections with feed-down
assert all(df_corrections[VALUE] == 0)
if x == 0 and y == 0:
assert all(df_corrections_fd1[VALUE] == 0)
assert all(df_corrections_fd2[VALUE] == 0)
else:
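            # Expected feed-down: a normal decapole (K4L) at orbit (x, y) feeds down to
            # octupoles as b4 ~ x*K4L and a4 ~ y*K4L (first order), while a normal
            # dodecapole (K5L) feeds down as b4 ~ 0.5*(x^2 - y^2)*K5L and a4 ~ x*y*K5L
            # (second order); the correctors must compensate the summed error strengths.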
for ip in correct_ips:
normal_oct_mask = (df_corrections[STRENGTH] == "K3L") & (df_corrections[IP] == ip)
skew_oct_mask = (df_corrections[STRENGTH] == "K3SL") & (df_corrections[IP] == ip)
dodecapole_error_sum = error_value * n_magnets * n_sides
norm_oct_corr_fd1 = sum(df_corrections_fd1.loc[normal_oct_mask, VALUE])
skew_oct_corr_fd1 = sum(df_corrections_fd1.loc[skew_oct_mask, VALUE])
assert abs(norm_oct_corr_fd1 + x * dodecapole_error_sum) < EPS
assert abs(skew_oct_corr_fd1 + y * dodecapole_error_sum) < EPS
norm_oct_corr_fd2 = sum(df_corrections_fd2.loc[normal_oct_mask, VALUE])
skew_oct_corr_fd2 = sum(df_corrections_fd2.loc[skew_oct_mask, VALUE])
assert abs(norm_oct_corr_fd2 + 0.5 * (x**2 - y**2) * dodecapole_error_sum) < EPS
assert abs(skew_oct_corr_fd2 + x * y * dodecapole_error_sum) < EPS
@pytest.mark.parametrize('corrector', ("a5", "b5", "a6", "b6"))
@pytest.mark.parametrize('x', (2, 0))
@pytest.mark.parametrize('y', (2, 1.5, 0))
def test_correct_via_feeddown(self, tmp_path: Path, x: float, y: float, corrector: str):
"""Test correct RDT via feeddown from higher order corrector.
In this example: Use normal and skew deca- and dodecapole correctors
to correct for normal octupole errors (which make it easy to
just sum up over both sides).
"""
# Parameters -----------------------------------------------------------
accel = 'hllhc'
correct_ips = (1, 3)
error_value = 2
n_magnets = 4
n_ips = 4
n_sides = 2
# Setup ----------------------------------------------------------------
optics = generate_pseudo_model(
accel=accel, n_ips=n_ips, n_magnets=n_magnets, x=x, y=y)
errors = generate_errortable(
index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets),
)
errors["K3L"] = error_value # octupole errors
# Correction ---------------------------------------------------------------
rdts = {"f4000": [corrector]}
_, df_corrections = irnl_correct(
accel=accel,
optics=[optics],
errors=[errors],
beams=[1],
rdts=rdts,
output=tmp_path / "correct",
feeddown=0,
ips=correct_ips,
ignore_missing_columns=True,
iterations=1,
)
assert len(df_corrections.index) == len(correct_ips) * n_sides
assert all(df_corrections[FIELD] == corrector)
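        # Feed-down coefficient of the chosen higher-order corrector onto the normal
        # octupole (f4000) RDT at orbit (x, y): b5 -> x, a5 -> y, b6 -> 0.5*(x^2 - y^2), a6 -> x*y.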
coeff = {"a5": y, "b5": x, "a6": y*x, "b6": 0.5*(x**2 - y**2)}[corrector]
if coeff == 0:
# No Feed-down possible
assert all(df_corrections[VALUE] < EPS)
else:
# as beta cancels out (and is 1 anyway)
error_strengths = n_sides * n_magnets * error_value
for ip in correct_ips:
mask = df_corrections[IP] == ip
corrector_strengths = coeff * sum(df_corrections.loc[mask, VALUE])
assert abs(corrector_strengths + error_strengths) < EPS # compensation of RDT
class TestUnit:
"""Unit Tests for easy to test functions."""
def test_get_integral_sign(self):
for n in range(10):
assert get_integral_sign(n, "R") == (-1)**n
assert get_integral_sign(n, "L") == 1
def test_list_to_str(self):
assert ABC == "".join(list2str(list(ABC)).replace(" ", "").replace("'", "").replace('"', "").split(','))
def test_wrong_arguments(self):
with pytest.raises(AttributeError) as e:
irnl_correct(
feddown=0,
itterations=1,
)
assert "feddown" in str(e)
assert "itterations" in str(e)
@pytest.mark.parametrize('beam', (1, 2, 4))
def test_switch_signs(self, beam: int):
all_k = [f"K{order}{orientation}L" for order in range(2, MAX_N) for orientation in ("S", "")]
optics = generate_pseudo_model(n_ips=1, n_magnets=10, accel='lhc', x=10, y=5)
optics[all_k] = 1
errors = generate_errortable(index=optics.index, value=2.)
# make copies as it switches in place
optics_switch = optics.copy()
errors_switch = errors.copy()
switch_signs_for_beam4([optics_switch], [errors_switch], [beam])
if beam != 4:
assert_frame_equal(optics, optics_switch)
assert_frame_equal(errors, errors_switch)
else:
# in madx optics only X changes sign for beam 4 ...
switch_col_optics_mask = optics.columns.isin(["X"])
assert_frame_equal(optics.loc[:, switch_col_optics_mask], -optics_switch.loc[:, switch_col_optics_mask])
assert_frame_equal(optics.loc[:, ~switch_col_optics_mask], optics_switch.loc[:, ~switch_col_optics_mask])
# ... but in the errors DX and the anti-symmetric KL change sign
switch_col_errors_mask = errors.columns.isin(["DX"] + _get_opposite_sign_beam4_kl_columns(range(MAX_N)))
assert_frame_equal(errors.loc[:, switch_col_errors_mask], -errors_switch.loc[:, switch_col_errors_mask])
assert_frame_equal(errors.loc[:, ~switch_col_errors_mask], errors_switch.loc[:, ~switch_col_errors_mask])
def test_ircorrector_class(self):
# Test Corrector
a5_corrector_L1 = IRCorrector(field_component="a5", accel="lhc", ip=1, side="L")
# Test Equality
assert a5_corrector_L1 == IRCorrector(field_component="a5", accel="lhc", ip=1, side="L")
assert a5_corrector_L1 != IRCorrector(field_component="a4", accel="lhc", ip=1, side="L")
# Test > and < per order (important for feed-down!)
assert a5_corrector_L1 > IRCorrector(field_component="a4", accel="lhc", ip=1, side="L")
assert a5_corrector_L1 > IRCorrector(field_component="a4", accel="lhc", ip=2, side="R")
assert a5_corrector_L1 > IRCorrector(field_component="b4", accel="lhc", ip=1, side="L")
assert a5_corrector_L1 < IRCorrector(field_component="a6", accel="lhc", ip=1, side="L")
assert a5_corrector_L1 < IRCorrector(field_component="b6", accel="lhc", ip=1, side="L")
assert a5_corrector_L1 < IRCorrector(field_component="b6", accel="lhc", ip=8, side="R")
        # These are arbitrary, just to make sorting unique
assert a5_corrector_L1 > IRCorrector(field_component="b5", accel="lhc", ip=1, side="L")
assert a5_corrector_L1 < IRCorrector(field_component="a5", accel="lhc", ip=1, side="R")
assert a5_corrector_L1 < IRCorrector(field_component="a5", accel="lhc", ip=2, side="L")
def test_ircorrector_accel(self):
a4_corrector_L1 = IRCorrector(field_component="a4", accel="lhc", ip=1, side="L")
assert "F" not in a4_corrector_L1.name
a4_corrector_L1_hllhc = IRCorrector(field_component="a4", accel="hllhc", ip=1, side="L")
assert "F" in a4_corrector_L1_hllhc.name
assert a4_corrector_L1_hllhc.name.startswith("MCOS")
assert a4_corrector_L1 != a4_corrector_L1_hllhc
assert IRCorrector(field_component="a4", accel="lhc", ip=2, side="L") == IRCorrector(field_component="a4", accel="hllhc", ip=2, side="L")
assert IRCorrector(field_component="b2", accel="hllhc", ip=1, side="L").name.startswith("MCQ")
assert IRCorrector(field_component="a2", accel="hllhc", ip=1, side="L").name.startswith("MCQS")
assert IRCorrector(field_component="b3", accel="hllhc", ip=1, side="L").name.startswith("MCS")
assert IRCorrector(field_component="a3", accel="hllhc", ip=1, side="L").name.startswith("MCSS")
assert IRCorrector(field_component="b4", accel="hllhc", ip=1, side="L").name.startswith("MCO")
assert IRCorrector(field_component="a4", accel="hllhc", ip=1, side="L").name.startswith("MCOS")
assert IRCorrector(field_component="b5", accel="hllhc", ip=1, side="L").name.startswith("MCD")
assert IRCorrector(field_component="a5", accel="hllhc", ip=1, side="L").name.startswith("MCDS")
assert IRCorrector(field_component="b6", accel="hllhc", ip=1, side="L").name.startswith("MCT")
assert IRCorrector(field_component="a6", accel="hllhc", ip=1, side="L").name.startswith("MCTS")
def test_rdt_init(self):
jklm = (1, 2, 3, 4)
rdt = RDT(name=f"f{''.join(str(ii) for ii in jklm)}")
assert rdt.order == sum(jklm)
assert rdt.jklm == jklm
assert rdt.j == jklm[0]
assert rdt.k == jklm[1]
assert rdt.l == jklm[2]
assert rdt.m == jklm[3]
assert not rdt.swap_beta_exp
assert RDT("f1001*").swap_beta_exp
def test_rdt_equality(self):
assert RDT("f2110") == RDT("f2110")
assert RDT("f2110") != RDT("f2110*")
def test_rdt_sortable(self):
# sortable by order
assert RDT("f1001") < RDT("f2001")
assert RDT("f1003") > RDT("f2001")
# arbitrary (so sorting is unique)
assert RDT("f1001") > RDT("f2000")
assert RDT("f3002") < RDT("f2003")
assert RDT("f2110") < RDT("f2110*")
assert RDT("f1001*") > RDT("f1001")
# Helper -------------------------------------------------------------------------------------------
def read_lhc_model(beam: int) -> tfs.TfsDataFrame:
"""Read the LHC model from the input directory."""
    # The .tfs files were too big to ship with the tests, but if regenerated from the
    # `.madx` files the `.tfs` can be read directly, e.g. for debugging purposes:
# return tfs.read_tfs(LHC_MODELS_PATH / f"twiss.lhc.b{beam}.nominal.tfs", index="NAME")
return tfs_tools.read_hdf(LHC_MODELS_PATH / f"twiss.lhc.b{beam}.nominal.hd5")
def generate_pseudo_model(n_ips: int, n_magnets: int, accel: str,
betax: float = 1, betay: float = 1, x: float = 0, y: float = 0) -> pd.DataFrame:
"""Generate a Twiss-Like DataFrame with magnets as index and Beta and Orbit columns."""
df = pd.DataFrame(
index=(
get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets) +
get_lhc_corrector_names(n_ips=n_ips, accelerator=accel)
),
columns=[f"{BETA}{X}", f"{BETA}{Y}", X, Y, KEYWORD]
)
df[f"{BETA}{X}"] = betax
df[f"{BETA}{Y}"] = betay
df[X] = x
df[Y] = y
df[KEYWORD] = MULTIPOLE
return df
def generate_errortable(index: pd.Series, value: float = 0) -> pd.DataFrame:
"""Return | |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""CusFusedAbsMax1"""
from te import tik
from topi.cce import util
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
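# Register the custom op with TBE: declares the kernel binary/name, the required
# origin_shape attribute, and the single float32 input/output of the kernel below.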
cus_fused_abs_max1_op_info = TBERegOp("CusFusedAbsMax1") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("fusedabsmax1.so") \
.compute_cost(10) \
.kernel_name("CusFusedAbsMax1") \
.partial_flag(True) \
.attr("origin_shape", "required", "listInt", "all") \
.input(0, "x1", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.dtype_format(DataType.F32_Default, DataType.F32_Default) \
.get_op_info()
@op_info_register(cus_fused_abs_max1_op_info)
def CusFusedAbsMax1(input_x, output, origin_shape=None, kernel_name="fused_abs_max1"):
"""CusFusedAbsMax1"""
input_x_shape = input_x.get("shape")
output_shape = output.get("shape")
dtype = input_x.get("dtype")
if util.get_product_version() == util.VERSION_MINI:
tik_instance = tik.Tik(tik.Dprofile("v100", "mini"))
else:
tik_instance = tik.Tik(tik.Dprofile("v100", "cloud"))
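    # Only these (input shape, dtype) combinations are supported by this hand-written
    # kernel; anything else raises below.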
support_shape = [((1, 128, 128), "float32"),
((2, 128, 128), "float32"),
((4, 128, 128), "float32"),
((8, 128, 128), "float32"),
((16, 128, 128), "float32"),
((5, 128, 128), "float32"),
((9, 128, 128), "float32"),
((18, 128, 128), "float32"),
((36, 128, 128), "float32"),
((32, 128, 128), "float32"),
((1, 64, 64), "float32"),
((32, 64), "float32")
]
ori_shape = tuple(origin_shape)
input_info = (tuple(input_x_shape), dtype)
if input_info not in support_shape:
raise RuntimeError("input_shape %s is not supported" % str(input_info))
if input_info == ((1, 128, 128), "float32"):
input_x = tik_instance.Tensor("float32", input_x_shape, name="input_x", scope=tik.scope_gm)
res = tik_instance.Tensor("float32", output_shape, name="res", scope=tik.scope_gm)
total_elements = 1
for val in input_x_shape:
total_elements *= val
blocks = 32
each_block_element = total_elements // blocks
with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (each_block_element,), name="input_x_ub",
scope=tik.scope_ubuf)
broadcast_0_local_UB = tik_instance.Tensor("float32", (4096,), name="broadcast_0_local_UB",
scope=tik.scope_ubuf)
tik_instance.data_move(input_x_ub, input_x[each_block_element * block_index], 0, 1,
each_block_element // 8, 0, 0)
repeat_time = each_block_element // 64
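            # Elementwise |x|, then a pairwise vmax tree (512 -> 256 -> 128 -> 64) leaves
            # one running maximum per lane in the first 64 floats of input_x_ub.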
tik_instance.vabs(64, input_x_ub, input_x_ub, repeat_time, 1, 1, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[256], 4, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[128], 2, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[64], 1, 1, 1, 1, 8, 8, 8)
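            # Broadcast each of the 64 lane maxima into a 64x64 tile and reduce the tile
            # with another vmax tree, so every one of the 64 output floats holds the
            # block-wide maximum before it is written back to GM.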
with tik_instance.for_range(0, 64) as cc0:
data_temp = tik_instance.Scalar("float32")
data_temp.set_as(input_x_ub[cc0])
tik_instance.vector_dup(64, broadcast_0_local_UB[cc0 * 64], data_temp, 1, 1, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[2048], 32, 1, 1,
1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[1024], 16, 1, 1,
1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[512], 8, 1, 1, 1,
8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[256], 4, 1, 1, 1,
8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[128], 2, 1, 1, 1,
8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[64], 1, 1, 1, 1,
8, 8, 8)
tik_instance.data_move(res[block_index, 0], broadcast_0_local_UB, 0, 1, 8, 0, 0)
elif input_info == ((2, 128, 128), "float32"):
if ori_shape == (147, 147):
phase_1 = 16384
phase_2 = 1216
blocks = 32
each_block_element = phase_1 // blocks + 64
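            # Only part of the padded (2, 128, 128) buffer holds valid data of the original
            # 147x147 matrix: each block reduces 512 floats of the first tile (phase_1) plus
            # a 64-float chunk of the second tile selected by block_index % 19
            # (phase_2 = 19 * 64 valid floats).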
input_x = tik_instance.Tensor("float32", input_x_shape, name="input_x", scope=tik.scope_gm)
res = tik_instance.Tensor("float32", output_shape, name="res", scope=tik.scope_gm)
with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (each_block_element,), name="input_x_ub",
scope=tik.scope_ubuf)
broadcast_0_local_UB = tik_instance.Tensor("float32", (4096,), name="broadcast_0_local_UB",
scope=tik.scope_ubuf)
tik_instance.data_move(input_x_ub, input_x[512 * block_index], 0, 1, 512 // 8, 0, 0)
line_id = block_index % 19
tik_instance.data_move(input_x_ub[512], input_x[16384 + 128 * line_id], 0, 1, 8, 0, 0)
repeat_time = each_block_element // 64
tik_instance.vabs(64, input_x_ub, input_x_ub, repeat_time, 1, 1, 8, 8)
tik_instance.vmax(19, input_x_ub, input_x_ub, input_x_ub[512], 1, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[256], 4, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[128], 2, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[64], 1, 1, 1, 1, 8, 8, 8)
with tik_instance.for_range(0, 64) as cc0:
data_temp = tik_instance.Scalar("float32")
data_temp.set_as(input_x_ub[cc0])
tik_instance.vector_dup(64, broadcast_0_local_UB[cc0 * 64], data_temp, 1, 1, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[2048], 32, 1,
1, 1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[1024], 16, 1,
1, 1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[512], 8, 1,
1, 1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[256], 4, 1,
1, 1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[128], 2, 1,
1, 1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[64], 1, 1, 1,
1, 8, 8, 8)
tik_instance.data_move(res[block_index, 0], broadcast_0_local_UB, 0, 1, 8, 0, 0)
elif ori_shape in ((256, 256), None, (-1, -1)):
input_x = tik_instance.Tensor("float32", input_x_shape, name="input_x", scope=tik.scope_gm)
res = tik_instance.Tensor("float32", output_shape, name="res", scope=tik.scope_gm)
total_elements = 1
for val in input_x_shape:
total_elements *= val
blocks = 32
each_block_element = total_elements // blocks
with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (each_block_element,), name="input_x_ub",
scope=tik.scope_ubuf)
broadcast_0_local_UB = tik_instance.Tensor("float32", (4096,), name="broadcast_0_local_UB",
scope=tik.scope_ubuf)
tik_instance.data_move(input_x_ub, input_x[each_block_element * block_index], 0, 1,
each_block_element // 8, 0, 0)
repeat_time = each_block_element // 64
tik_instance.vabs(64, input_x_ub, input_x_ub, repeat_time, 1, 1, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[512], 8, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[256], 4, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[128], 2, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[64], 1, 1, 1, 1, 8, 8, 8)
with tik_instance.for_range(0, 64) as cc0:
data_temp = tik_instance.Scalar("float32")
data_temp.set_as(input_x_ub[cc0])
tik_instance.vector_dup(64, broadcast_0_local_UB[cc0 * 64], data_temp, 1, 1, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[2048], 32, 1,
1, 1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[1024], 16, 1,
1, 1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[512], 8, 1,
1, 1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[256], 4, 1,
1, 1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[128], 2, 1,
1, 1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[64], 1, 1, 1,
1, 8, 8, 8)
tik_instance.data_move(res[block_index, 0], broadcast_0_local_UB, 0, 1, 8, 0, 0)
else:
raise RuntimeError("origin shape %s is not supported" % str(ori_shape))
elif input_info == ((4, 128, 128), "float32"):
input_x = tik_instance.Tensor("float32", input_x_shape, name="input_x", scope=tik.scope_gm)
res = tik_instance.Tensor("float32", output_shape, name="res", scope=tik.scope_gm)
total_elements = 1
for val in input_x_shape:
total_elements *= val
blocks = 32
each_block_element = total_elements // blocks
with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (each_block_element,), name="input_x_ub",
scope=tik.scope_ubuf)
broadcast_0_local_UB = tik_instance.Tensor("float32", (4096,), name="broadcast_0_local_UB",
scope=tik.scope_ubuf)
tik_instance.data_move(input_x_ub, input_x[each_block_element * block_index], 0, 1,
each_block_element // 8, 0, 0)
repeat_time = each_block_element // 64
tik_instance.vabs(64, input_x_ub, input_x_ub, repeat_time, 1, 1, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[1024], 16, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[512], 8, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[256], 4, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[128], 2, 1, 1, 1, 8, 8, 8)
tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[64], 1, 1, 1, 1, 8, 8, 8)
with tik_instance.for_range(0, 64) as cc0:
data_temp = tik_instance.Scalar("float32")
data_temp.set_as(input_x_ub[cc0])
tik_instance.vector_dup(64, broadcast_0_local_UB[cc0 * 64], data_temp, 1, 1, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[2048], 32, 1, 1,
1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[1024], 16, 1, 1,
1, 8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[512], 8, 1, 1, 1,
8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[256], 4, 1, 1, 1,
8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[128], 2, 1, 1, 1,
8, 8, 8)
tik_instance.vmax(64, broadcast_0_local_UB, broadcast_0_local_UB, broadcast_0_local_UB[64], 1, 1, 1, 1,
8, 8, 8)
tik_instance.data_move(res[block_index, 0], broadcast_0_local_UB, 0, 1, 8, 0, 0)
elif input_info == ((8, 128, 128), "float32"):
if ori_shape == (1000, 1000):
input_x = tik_instance.Tensor("float32", input_x_shape, name="input_x", scope=tik.scope_gm)
res = tik_instance.Tensor("float32", output_shape, name="res", scope=tik.scope_gm)
blocks = 32
each_block_element = 7 * 128 * 128 // 32 + 4 * 128
phase_1 = 7 * 128 * 128 // 32
with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
input_x_ub = tik_instance.Tensor("float32", (each_block_element,), name="input_x_ub",
scope=tik.scope_ubuf)
broadcast_0_local_UB = tik_instance.Tensor("float32", (4096,), name="broadcast_0_local_UB",
scope=tik.scope_ubuf)
tik_instance.data_move(input_x_ub, input_x[phase_1 * block_index], 0, 1, phase_1 // 8, 0, 0)
tik_instance.data_move(input_x_ub[phase_1], input_x[114688 + block_index * 384], 0, 1, 384 // 8, 0,
0)
move_idx = block_index % 8
tik_instance.data_move(input_x_ub[phase_1 + 384], input_x[114688 + 96 * 128 + move_idx * 128], 0, 1,
128 // 8, 0, 0)
repeat_time = each_block_element // 64
tik_instance.vabs(64, input_x_ub, input_x_ub, repeat_time, 1, 1, 8, 8)
vmask = 1000 - 7 * 128 - 64
with tik_instance.for_range(0, 4) as loop_idx:
tik_instance.vmax(vmask, input_x_ub[3584 + 128 * loop_idx], input_x_ub[3584 + 128 * loop_idx],
                                  input_x_ub[3584 + 128 * loop_idx + 64], 1, 1, 1, 1,
# RuichunWang/ModelArts-Lab - tools/DeepFM-GPU/code/data_process.py
# coding:utf-8
import os
import pickle
import glob
import toml
import collections
import argparse
import tensorflow as tf
import numpy as np
import pandas as pd
import time
class DataStatsDict():
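    # Collects per-column statistics over the raw data (min/max for numeric "val" columns,
    # value counts for categorical and multi-categorical columns) and builds the
    # feature-id vocabulary (cat2id_dict) used to encode samples.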
def __init__(self, value_col_num=13, category_col_num=26, multi_category_len=()):
self.value_col_num = value_col_num
self.category_col_num = category_col_num
self.multi_category_col_num = sum(multi_category_len)
self.multi_category_len = multi_category_len
self.field_size = value_col_num + category_col_num + self.multi_category_col_num
self.val_cols = ["val_{}".format(i + 1) for i in range(value_col_num)]
self.cat_cols = ["cat_{}".format(i + 1) for i in range(category_col_num)]
self.multi_cat_cols = ["multi_cat_{}".format(i + 1) for i in range(len(multi_category_len))]
self.val_min_dict = {col: 99999 for col in self.val_cols}
self.val_max_dict = {col: -99999 for col in self.val_cols}
self.cat_count_dict = {col: collections.defaultdict(int) for col in self.cat_cols}
self.multi_cat_count_dict = {col: collections.defaultdict(int) for col in self.multi_cat_cols}
self.oov_prefix = "OOV_"
self.cat2id_dict = {}
self.cat2id_dict.update({col: i for i, col in enumerate(self.val_cols)})
self.cat2id_dict.update({self.oov_prefix + col: i + len(self.val_cols) for i, col in enumerate(self.cat_cols)})
self.cat2id_dict.update({self.oov_prefix + col: i + len(self.val_cols) + len(self.cat_cols) for i, col in enumerate(self.multi_cat_cols)})
def stats_vals(self, val_list):
assert len(val_list) == len(self.val_cols)
def map_max_min(i, val):
key = self.val_cols[i]
if val != "":
if float(val) > self.val_max_dict[key]:
self.val_max_dict[key] = float(val)
if float(val) < self.val_min_dict[key]:
self.val_min_dict[key] = float(val)
for i, val in enumerate(val_list):
map_max_min(i, val)
def stats_cats(self, cat_list):
assert len(cat_list) == len(self.cat_cols)
def map_cat_count(i, cat):
key = self.cat_cols[i]
self.cat_count_dict[key][cat] += 1
for i, cat in enumerate(cat_list):
map_cat_count(i, cat)
def stats_multi_cats(self, multi_cat_list):
assert len(multi_cat_list) == sum(self.multi_category_len)
for multi_cat in multi_cat_list:
_, multi_cat_id, multi_cat_value = multi_cat.split('_')
key = "multi_cat_%s" % (int(multi_cat_id) + 1)
self.multi_cat_count_dict[key][multi_cat_value] += 1
def save_dict(self, output_path, prefix=""):
with open(os.path.join(output_path, "{}val_max_dict.pkl".format(prefix)), "wb") as file_wrt:
pickle.dump(self.val_max_dict, file_wrt)
with open(os.path.join(output_path, "{}val_min_dict.pkl".format(prefix)), "wb") as file_wrt:
pickle.dump(self.val_min_dict, file_wrt)
with open(os.path.join(output_path, "{}cat_count_dict.pkl".format(prefix)), "wb") as file_wrt:
pickle.dump(self.cat_count_dict, file_wrt)
with open(os.path.join(output_path, "{}multi_cat_count_dict.pkl".format(prefix)), "wb") as file_wrt:
pickle.dump(self.multi_cat_count_dict, file_wrt)
def load_dict(self, dict_path, prefix=""):
with open(os.path.join(dict_path, "{}val_max_dict.pkl".format(prefix)), "rb") as file_wrt:
self.val_max_dict = pickle.load(file_wrt)
with open(os.path.join(dict_path, "{}val_min_dict.pkl".format(prefix)), "rb") as file_wrt:
self.val_min_dict = pickle.load(file_wrt)
with open(os.path.join(dict_path, "{}cat_count_dict.pkl".format(prefix)), "rb") as file_wrt:
self.cat_count_dict = pickle.load(file_wrt)
with open(os.path.join(dict_path, "{}multi_cat_count_dict.pkl".format(prefix)), "rb") as file_wrt:
self.multi_cat_count_dict = pickle.load(file_wrt)
print("val_max_dict.items()[:50]: {}".format(list(self.val_max_dict.items())))
print("val_min_dict.items()[:50]: {}".format(list(self.val_min_dict.items())))
def get_cat2id(self, threshold=100):
for key, cat_count_d in self.cat_count_dict.items():
new_cat_count_d = dict(filter(lambda x: x[1] > threshold, cat_count_d.items()))
for cat_str, count in new_cat_count_d.items():
self.cat2id_dict[key + "_" + cat_str] = len(self.cat2id_dict)
for key, multi_cat_count_d in self.multi_cat_count_dict.items():
new_multi_cat_count_d = dict(filter(lambda x: x[1] > threshold, multi_cat_count_d.items()))
for multi_cat_str, count in new_multi_cat_count_d.items():
if multi_cat_str == 'OOV':
continue
self.cat2id_dict[key + "_" + multi_cat_str] = len(self.cat2id_dict)
print("data vocab size: {}".format(len(self.cat2id_dict)))
print("data vocab size[:50]: {}".format(list(self.cat2id_dict.items())[:50]))
def map_cat2id(self, values, cats, multi_cats):
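        # Encode one sample: numeric values become (column id, min-max scaled weight),
        # categorical values become (vocabulary id, weight 1.0); unseen categories fall
        # back to the per-column OOV id.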
def minmax_scale_value(i, val):
min_v = float(self.val_min_dict["val_{}".format(i + 1)])
max_v = float(self.val_max_dict["val_{}".format(i + 1)])
if val >= max_v:
return 1.0
elif val <= min_v or max_v == min_v:
return 0.0
else:
return float(val - min_v) / (max_v - min_v)
id_list = []
weight_list = []
for i, val in enumerate(values):
if val == "":
id_list.append(i)
weight_list.append(0)
else:
key = "val_{}".format(i + 1)
id_list.append(self.cat2id_dict[key])
weight_list.append(minmax_scale_value(i, float(val)))
for i, cat_str in enumerate(cats):
key = "cat_{}".format(i + 1) + "_" + cat_str
if key in self.cat2id_dict:
id_list.append(self.cat2id_dict[key])
else:
id_list.append(self.cat2id_dict[self.oov_prefix + "cat_{}".format(i + 1)])
weight_list.append(1.0)
for i, multi_cat_str in enumerate(multi_cats):
_, multi_cat_id, multi_cat_value = multi_cat_str.split('_')
multi_cat_id = int(multi_cat_id)
if multi_cat_value == 'OOV':
key = "OOV_multi_cat_%s" % (multi_cat_id + 1)
else:
key = "multi_cat_%s_%s" % (multi_cat_id + 1, multi_cat_value)
if key in self.cat2id_dict:
id_list.append(self.cat2id_dict[key])
else:
id_list.append(self.cat2id_dict[self.oov_prefix + "multi_cat_{}".format(multi_cat_id + 1)])
weight_list.append(1.0)
return id_list, weight_list
def mkdir_path(file_path):
if not os.path.exists(file_path):
os.makedirs(file_path)
def statsdata(data_file_path, output_path, data_stats):
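    # Single pass over the raw tab-separated file: validate the column count of every
    # line and accumulate statistics into data_stats, then persist the dictionaries.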
with open(data_file_path, encoding="utf-8") as file_in:
errorline_list = []
count = 0
for line in file_in:
count += 1
line = line.strip("\n")
items = line.split("\t")
if len(items) != data_stats.field_size + 1: # feature columns; + label_col
errorline_list.append(count)
print("line: {}".format(line))
raise ValueError(
"Expect column count is {}, real column count is {}, please check "
"your value_col_num and category_col_num. "
"\nError line number: {}, Error line content: {}".format(
data_stats.field_size + 1, len(items), count - 1, line))
if count % 1000000 == 0:
print("Have handle {}w lines.".format(count // 10000))
label = items[0]
features = items[1:]
values = features[:data_stats.value_col_num]
cats = features[data_stats.value_col_num:data_stats.value_col_num + data_stats.category_col_num]
multi_cats = features[data_stats.value_col_num + data_stats.category_col_num:]
assert len(values) == data_stats.value_col_num, "values.size: {}".format(len(values))
assert len(cats) == data_stats.category_col_num, "cats.size: {}".format(len(cats))
assert len(multi_cats) == data_stats.multi_category_col_num, "multi-cats.size: {}".format(len(multi_cats))
data_stats.stats_vals(values)
data_stats.stats_cats(cats)
data_stats.stats_multi_cats(multi_cats)
data_stats.save_dict(output_path)
def add_write(file_path, wrt_str):
with open(file_path, 'a', encoding="utf-8") as file_out:
file_out.write(wrt_str + "\n")
def get_file_line_count(file_path):
line_count = 0
with open(file_path, 'r', encoding="utf-8") as file_in:
for line in file_in:
line = line.strip("\n")
if line == "":
continue
line_count += 1
return line_count
def random_split_trans2h5(in_file_path, output_path, data_stats, part_rows=2000000, test_size=0.1,
seed=2020, output_format='h5'):
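    # Shuffle line indices, split them into train/test, encode each sample via
    # data_stats.map_cat2id, and write the parts as HDF5 (or plain text) files of
    # at most `part_rows` samples each.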
value_col_num = data_stats.value_col_num
category_col_num = data_stats.category_col_num
multi_category_col_num = data_stats.multi_category_col_num
train_line_count = get_file_line_count(in_file_path)
test_size = int(train_line_count * test_size)
train_size = train_line_count - test_size
all_indices = [i for i in range(train_line_count)]
np.random.seed(seed)
np.random.shuffle(all_indices)
print("all_indices.size: {}".format(len(all_indices)))
lines_count_dict = collections.defaultdict(int)
test_indices_set = set(all_indices[: test_size])
print("test_indices_set.size: {}".format(len(test_indices_set)))
print("----------" * 10 + "\n" * 2)
train_feature_file_name = os.path.join(output_path, "train_input_part_{}.h5")
train_label_file_name = os.path.join(output_path, "train_output_part_{}.h5")
test_feature_file_name = os.path.join(output_path, "test_input_part_{}.h5")
test_label_file_name = os.path.join(output_path, "test_output_part_{}.h5")
train_feature_list = []
train_label_list = []
test_feature_list = []
test_label_list = []
ids_len = 0
filtered_train_size = 0
filtered_test_size = 0
with open(in_file_path, encoding="utf-8") as file_in:
count = 0
train_part_number = 0
test_part_number = 0
for i, line in enumerate(file_in):
count += 1
if count % 1000000 == 0:
print("Have handle {}w lines.".format(count // 10000))
line = line.strip("\n")
items = line.split("\t")
if len(items) != 1 + value_col_num + category_col_num + multi_category_col_num:
continue
label = float(items[0])
values = items[1:value_col_num+1]
cats = items[value_col_num+1:value_col_num+category_col_num+1]
multi_cats = items[value_col_num+category_col_num+1:]
assert len(values) == value_col_num, "values.size: {}".format(len(values))
assert len(cats) == category_col_num, "cats.size: {}".format(len(cats))
assert len(multi_cats) == multi_category_col_num, "multi-cats.size: {}".format(len(multi_cats))
ids, wts = data_stats.map_cat2id(values, cats, multi_cats)
ids_len = len(ids)
if i not in test_indices_set:
train_feature_list.append(ids + wts)
train_label_list.append(label)
else:
test_feature_list.append(ids + wts)
test_label_list.append(label)
if (len(train_label_list) > 0) and (len(train_label_list) % part_rows == 0):
if output_format == 'h5':
pd.DataFrame(np.asarray(train_feature_list)).to_hdf(train_feature_file_name.format(train_part_number),
key="fixed")
pd.DataFrame(np.asarray(train_label_list)).to_hdf(train_label_file_name.format(train_part_number), key="fixed")
else:
with open(os.path.join(output_path, 'train_part_{}.txt'.format(train_part_number)), 'w') as f:
for i in range(len(train_feature_list)):
train_feature = [str(s) for s in train_feature_list[i]]
train_label = str(int(train_label_list[i]))
f.write(train_label + ' ' + ' '.join(train_feature) + '\n')
filtered_train_size += len(train_feature_list)
train_feature_list = []
train_label_list = []
train_part_number += 1
if (len(test_label_list) > 0) and (len(test_label_list) % part_rows == 0):
if output_format == 'h5':
pd.DataFrame(np.asarray(test_feature_list)).to_hdf(test_feature_file_name.format(test_part_number), key="fixed")
pd.DataFrame(np.asarray(test_label_list)).to_hdf(test_label_file_name.format(test_part_number), key="fixed")
else:
with open(os.path.join(output_path, 'test_part_{}.txt'.format(test_part_number)), 'w') as f:
for i in range(len(test_feature_list)):
test_feature = [str(s) for s in test_feature_list[i]]
test_label = str(int(test_label_list[i]))
f.write(test_label + ' ' + ' '.join(test_feature) + '\n')
filtered_test_size += len(test_feature_list)
test_feature_list = []
test_label_list = []
test_part_number += 1
if len(train_label_list) > 0:
filtered_train_size += len(train_feature_list)
if output_format == 'h5':
pd.DataFrame(np.asarray(train_feature_list)).to_hdf(train_feature_file_name.format(train_part_number),
key="fixed")
pd.DataFrame(np.asarray(train_label_list)).to_hdf(train_label_file_name.format(train_part_number), key="fixed")
else:
with open(os.path.join(output_path, 'train_part_{}.txt'.format(train_part_number)),
'w') as f:
for i in range(len(train_feature_list)):
train_feature = [str(s) for s in train_feature_list[i]]
train_label = str(int(train_label_list[i]))
f.write(train_label + ' ' + ' '.join(train_feature) + '\n')
if len(test_label_list) > 0:
filtered_test_size += len(test_feature_list)
if output_format == 'h5':
pd.DataFrame(np.asarray(test_feature_list)).to_hdf(test_feature_file_name.format(test_part_number), key="fixed")
pd.DataFrame(np.asarray(test_label_list)).to_hdf(test_label_file_name.format(test_part_number), key="fixed")
else:
with open(os.path.join(output_path, 'test_part_{}.txt'.format(test_part_number)), 'w') as f:
for i in range(len(test_feature_list)):
test_feature = [str(s) for s in test_feature_list[i]]
test_label = str(int(test_label_list[i]))
f.write(test_label + ' ' + ' '.join(test_feature) + '\n')
num_features = len(data_stats.cat2id_dict)
num_inputs = ids_len
return num_features, filtered_train_size, filtered_test_size, num_inputs
def fix_multi_cat(data_file_path, multi_cat_col_num, multi_category_len, output_dir, file_pattern, feat_sep, multi_category_sep):
multi_cat_len = [0 for _ in range(multi_cat_col_num)]
import glob
if os.path.isdir(data_file_path):
data_files_list = glob.glob(os.path.join(data_file_path, file_pattern))
else:
data_files_list = [data_file_path]
for data_file in data_files_list:
with open(data_file, 'r') as f:
for line in f:
line = line.strip()
if not line:
continue
items = line.split(feat_sep)
multi_cat_items = items[len(items) - multi_cat_col_num:]
for i, multi_cat in enumerate(multi_cat_items):
multi_cat_len[i] = max(multi_cat_len[i], len(multi_cat.split(multi_category_sep)))
for i in range(len(multi_cat_len)):
if multi_category_len[i] is not None and multi_category_len[i] >= 0:
multi_cat_len[i] = multi_category_len[i]
new_data_file_path = os.path.join(output_dir, 'fixed.txt')
with open(new_data_file_path, 'w') as fw:
for data_file in data_files_list:
with open(data_file, 'r') as fr:
for line in fr:
line = line.strip()
if not line:
continue
items = line.split(feat_sep)
ok_items = items[:len(items) - multi_cat_col_num]
fw.write('\t'.join(ok_items))
multi_cat_items = items[len(items) - multi_cat_col_num:]
for i, multi_cat in enumerate(multi_cat_items):
fw.write('\t')
c_list = multi_cat.split(multi_category_sep)
c_list = c_list[:multi_cat_len[i]]
fw.write('\t'.join(['m_%d_%s' % (i, c) for c in c_list]))
padding_len = multi_cat_len[i] - len(c_list)
if padding_len > 0:
fw.write('\t')
fw.write('\t'.join(['m_%d_OOV' % i for _ in range(padding_len)]))
fw.write('\n')
return new_data_file_path, multi_cat_len
def convert_tfrecords(num_inputs, input_filename, output_filename, samples_per_line):
# label id1,id2,...,idn val1,val2,...,valn
with open(input_filename, "r")
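# NOTE: the body of convert_tfrecords above is truncated here. The following is a
# hypothetical sketch (not the original implementation) of how lines in the
# documented "label id1,...,idn val1,...,valn" format could be written to a
# TFRecord file. It assumes one sample per line (i.e. samples_per_line == 1) and a
# TensorFlow 2.x style tf.io.TFRecordWriter; names below are illustrative only.
def convert_tfrecords_sketch(num_inputs, input_filename, output_filename):
    import tensorflow as tf
    with open(input_filename, "r") as fin, tf.io.TFRecordWriter(output_filename) as writer:
        for line in fin:
            parts = line.strip().split(' ')
            if len(parts) != 3:
                continue
            label = int(float(parts[0]))
            ids = [int(v) for v in parts[1].split(',')]
            vals = [float(v) for v in parts[2].split(',')]
            if len(ids) != num_inputs or len(vals) != num_inputs:
                continue
            example = tf.train.Example(features=tf.train.Features(feature={
                'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
                'ids': tf.train.Feature(int64_list=tf.train.Int64List(value=ids)),
                'values': tf.train.Feature(float_list=tf.train.FloatList(value=vals)),
            }))
            writer.write(example.SerializeToString())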
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import spikewarp as sw
"""
Class and helpers for main clustering meta analyses
"""
class MetaClusterAnalysisHolder(object):
def __init__(self, shuffle_option_string, is_mainz=True):
self.shuffle_option_string = shuffle_option_string
self.suf = "_" + shuffle_option_string
self.is_mainz = is_mainz
self.pdds = {}
self.sdds = {}
for data_name in sw.list_of_first_stage_data_names:
self.pdds.update({data_name: []})
for data_name in sw.list_of_second_stage_data_names:
self.sdds.update({data_name: []})
self.final_angled_cluster_count = 0
self.did_contribute_atleast_one_final_angled_cluster_count = 0
self.all_both_spiking_reliabilities = []; self.all_both_spiking_reliabilities_0s_removed = []
self.all_number_of_conjunctive_trials = []; self.all_number_of_conjunctive_trials_0s_removed = []
def extend_standard_cluster_arrays(self, single_clustering):
if (single_clustering.do_use_clusters_in_analysis):
self.final_angled_cluster_count += single_clustering.final_angled_cluster_count
self.did_contribute_atleast_one_final_angled_cluster_count += single_clustering.was_first_single_clustering_to_pass_for_pair
for key in single_clustering.primary_data_dicts.keys():
if (key not in self.pdds.keys()):
self.pdds[key] = []
self.pdds[key].extend(single_clustering.primary_data_dicts[key])
for key in single_clustering.secondary_data_dicts.keys():
if (key not in self.sdds.keys()):
self.sdds[key] = []
self.sdds[key].extend(single_clustering.secondary_data_dicts[key])
def extend_standard_cluster_arrays_using_another_mcah(self, mcah):
self.final_angled_cluster_count += mcah.final_angled_cluster_count
self.did_contribute_atleast_one_final_angled_cluster_count += mcah.did_contribute_atleast_one_final_angled_cluster_count
for key in mcah.pdds.keys():
if (key not in self.pdds.keys()):
self.pdds[key] = []
self.pdds[key].extend(mcah.pdds[key])
for key in mcah.sdds.keys():
if (key not in self.sdds.keys()):
self.sdds[key] = []
self.sdds[key].extend(mcah.sdds[key])
def calculate_time_span_info_and_plots(self, directory_holder, cortical_onset, time_window_following_cortical_onset, end_of_spiking_activity):
sdds = self.sdds
pdds = self.pdds
dh = directory_holder
suf = self.suf
tex_tag_file_name = dh.collated_root_output_directory + "AnalysisOutputLatexTimeSpan.tex"
with open(tex_tag_file_name, "w") as tex_file:
print(f"", file=tex_file)
# Cluster Time Spans
sw.basic_x_y_plot([pdds['FlatClusterStats_FlatCluster_FS_Mean0']], [pdds['FlatClusterStats_FlatCluster_FS_Mean1']], dh.clus_time_spans_dir + "PrimaryClusterMeans" + suf, s=4, draw_y_equals_x=True, y_equals_x_max=100, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10])
sw.basic_x_y_plot([sdds['FlatClusterStats_FlatCluster_FS_Mean0']], [sdds['FlatClusterStats_FlatCluster_FS_Mean1']], dh.clus_time_spans_dir + "SecondaryClusterMeans" + suf, s=4, draw_y_equals_x=True, y_equals_x_max=100, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10])
sw.basic_x_y_plot([2.0*np.hstack((pdds['FlatClusterStats_FlatCluster_N0_FS_SD'], pdds['FlatClusterStats_FlatCluster_N1_FS_SD']))], [np.hstack((pdds['FlatClusterStats_FlatCluster_FS_Mean0'], pdds['FlatClusterStats_FlatCluster_FS_Mean1']))], dh.clus_time_spans_dir + "PrimaryClusterMeans_VS_2sds" + suf, s=4, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10], opt_x_and_y_max=[40.0, 100.0], y_axis_on_right=False)
sw.basic_x_y_plot([2.0*np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))], [np.hstack((sdds['FlatClusterStats_FlatCluster_FS_Mean0'], sdds['FlatClusterStats_FlatCluster_FS_Mean1']))], dh.clus_time_spans_dir + "SecondaryClusterMeans_VS_2sds" + suf, s=4, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10], opt_x_and_y_max=[40.0, 100.0], y_axis_on_right=False)
secondary_flat_cluster_means = np.hstack((sdds['FlatClusterStats_FlatCluster_FS_Mean0'], sdds['FlatClusterStats_FlatCluster_FS_Mean1']))
secondary_flat_cluster_pre_limits = secondary_flat_cluster_means - 4.0 * np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))
secondary_flat_cluster_post_limits = secondary_flat_cluster_means + 4.0 * np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))
sw.normal_histo_plot([secondary_flat_cluster_post_limits], dh.clus_time_spans_dir + "LimitsOfFlatClustersForAngledClustersOnly" + suf, bins=20, histo_range=[0.0, 100.0], x_axis_label="ms", y_axis_label="Frequency", custom_x_tick_locators=[100.0, 10.0], custom_y_tick_locators=[10.0, 10.0], alpha=0.78, add_chi_squared_text=True)
time_threshold = cortical_onset + time_window_following_cortical_onset
num_before = np.sum(secondary_flat_cluster_post_limits < time_threshold)
num_after = np.sum(secondary_flat_cluster_post_limits > time_threshold)
percent_before = 100.0 * float(num_before) / float(num_after + num_before)
percent_before_string = "{:.{}f}".format(percent_before, 1)
data_part = percent_before_string + "\\%"
cluster_time_span_string = "As " + data_part + " of Stage 2 clusters extracted over 90ms following cortical activation onset lied within " + str(int(time_window_following_cortical_onset)) + "ms following onset (Supplementary Fig. 12), analysis was constrained to spikes in the first " + str(int(time_window_following_cortical_onset)) + "ms following activation onset. "
sw.append_new_tag(data_part, "ClusterTimeSpanSummaryNum", tex_tag_file_name)
sw.append_new_tag(cluster_time_span_string, "ClusterTimeSpanSummary", tex_tag_file_name)
def plot_p_value_histos(self, directory_holder, do_extra_plots=False):
sdds = self.sdds
pdds = self.pdds
dh = directory_holder
suf = self.suf
plot_all_lag_histograms = False
if (do_extra_plots):
plot_all_lag_histograms = True
tex_tag_file_name = dh.collated_root_output_directory + suf + "AnalysisOutputLatex.tex"
with open(tex_tag_file_name, "w") as tex_file:
print(f"", file=tex_file)
specific_prim_clus_corr_dir = dh.prim_clus_corr_dir + suf + "/"; sw.makedirs(specific_prim_clus_corr_dir)
specific_sec_clus_corr_dir = dh.sec_clus_corr_dir + suf + "/"; sw.makedirs(specific_sec_clus_corr_dir)
# Cluster Correlations Primary
sw.normal_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_ZoomHist", bins=20, histo_range=[0.0, 0.1], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.1, 0.01], custom_y_tick_locators=[30, 30], alpha=0.78, add_chi_squared_text=True)
flat_cluster_correlations_chi_squared_table_strings_array = sw.cumulative_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_CumHist", bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.normal_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_LowResHist", bins=40, x_axis_label="p-value", y_axis_label="Frequency", custom_y_tick_locators=[100, 100], alpha=0.78, add_chi_squared_text=True)
sw.cumulative_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "LowRes_LowResCumHist", bins=20, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", add_chi_squared_text=True)
if ('FlatClusterStats_FlatCluster_LR_rsquared' in sdds.keys()):
# Cluster Correlations Secondary
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_LR_rsquared'], sdds['FlatClusterStats_FlatCluster_LR_rvalue']], specific_sec_clus_corr_dir + "RVal_Hist", bins=40, histo_range=[-1.0, 1.0], x_axis_left_buffer=0.01, x_axis_label="$r$, $r^2$", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[50, 10], alpha=0.78)
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_LR_rsquared']], specific_sec_clus_corr_dir + "R^2_Hist", colors=['g'], bins=20, x_axis_left_buffer=0.01, x_axis_label="r^2-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20])
cluster_p_minus_unclustered_conj_p = np.asarray(sdds['FlatClusterStats_FlatCluster_LR_pvalue']) - np.asarray(sdds['Unclustered_Conj_LR_pvalue'])
num_improved_by_clustering = np.sum(cluster_p_minus_unclustered_conj_p < 0.0)
num_not_improved_by_clustering = np.sum(cluster_p_minus_unclustered_conj_p >= 0.0)
percent_improved_by_clustering = 100.0 * float(num_improved_by_clustering) / float(num_improved_by_clustering + num_not_improved_by_clustering)
percent_improved_by_clustering_string = "{:.{}f}".format(percent_improved_by_clustering, 1)
num_non_significant_before_clustering = np.sum(np.asarray(sdds['Unclustered_Conj_LR_pvalue']) > 0.05)
num_sdd_clusters = len(sdds['Unclustered_Conj_LR_pvalue'])
percent_non_significant_before_clustering = 100.0*(num_non_significant_before_clustering/num_sdd_clusters)
percent_non_significant_before_clustering_string = "{:.{}f}".format(percent_non_significant_before_clustering, 1)
sw.basic_x_y_plot([sdds['Unclustered_Conj_LR_pvalue']], [sdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_sec_clus_corr_dir + "NonConjPVal_Vs_ClusPVal", draw_y_equals_x=True, y_equals_x_max=1.0, x_axis_label='p-value', y_axis_label='p-value', scatter_point_color_groups=['b'], custom_x_tick_locators=[1.0, 0.2], dashes=(8, 2))
sw.normal_histo_plot([sdds['Unclustered_Conj_LR_pvalue']], specific_sec_clus_corr_dir + "ConjPVal_Vs_ClusPVal", bins=20, histo_range=[0.0, 1.0], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[10, 10], alpha=0.78)
sw.normal_histo_plot([np.asarray(sdds['FlatClusterStats_FlatCluster_LR_pvalue']) - np.asarray(sdds['Unclustered_Conj_LR_pvalue'])], specific_sec_clus_corr_dir + "ClusPVal_Minus_ConjPVal_Hist", bins=21, histo_range=[-1.0, 0.05], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[10, 10], alpha=0.78)
# Cluster Differences Correlations
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_ZoomHist" + suf, bins=20, histo_range=[0.0, 0.1], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.1, 0.01], custom_y_tick_locators=[200, 200], alpha=0.78, add_chi_squared_text=True)
differences_chi_squared_table_strings_array = sw.cumulative_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_LowResHist" + suf, bins=20, x_axis_label="p-value", y_axis_label="Frequency", custom_y_tick_locators=[100, 20], alpha=0.78, add_chi_squared_text=True)
# Cluster Correlation Summary Latex
sw.append_new_tag(str(len(pdds['FlatClusterStats_FlatCluster_LR_pvalue'])) + " Stage 1 clusters were extracted", "NumStage1ClustersFullString", tex_tag_file_name)
sw.append_new_tag(str(len(pdds['FlatClusterStats_FlatCluster_LR_pvalue'])), "NumStage1ClustersData", tex_tag_file_name)
cluster_correlation_string0 = "Spike pairs within Stage 1 cluster ellipses were linearly correlated above chance levels (Fisher's method: " + flat_cluster_correlations_chi_squared_table_strings_array[0] + ")"
sw.append_new_tag(cluster_correlation_string0, "Stage1ClusterFisherFullString", tex_tag_file_name)
sw.append_new_tag(flat_cluster_correlations_chi_squared_table_strings_array[0], "Stage1ClusterFisherData", tex_tag_file_name)
cluster_correlation_string0p1 = "spike pair differences were correlated with the spike time of the first neuron in the pair for Stage 2 clusters (Fisher's method: " + differences_chi_squared_table_strings_array[0] + "; Fig. 3g), shows that correlations are not explained by a model of the form $s_1 = s_0 + d + independent\\_noise$ where $d$ is a fixed difference."
sw.append_new_tag(cluster_correlation_string0p1, "ClusterCorrelationSummary0p1", tex_tag_file_name)
num_greaterthan = np.sum(np.asarray(sdds['FlatClusterStats_FlatCluster_LR_rvalue']) > 0.0)
data_part = sw.percent_and_frac_string(num_greaterthan, self.final_angled_cluster_count)
cluster_correlation_string1 = data_part + " of Stage 2 clusters were positively correlated "
sw.append_new_tag(cluster_correlation_string1, "Stage2PositivelyCorrelatedFullString", tex_tag_file_name)
sw.append_new_tag(data_part, "Stage2PositivelyCorrelatedNum", tex_tag_file_name)
cluster_correlation_string2 = percent_improved_by_clustering_string + "\\% (" + str(num_improved_by_clustering) + "/" + str(num_improved_by_clustering + num_not_improved_by_clustering) + ") of the Stage 2 clusters had correlations of higher significance than correlations calculated for all unclustered first spike pairs in the originating response distribution (Fig. 3h). Moreover, " + percent_non_significant_before_clustering_string + "\\% (" + str(num_non_significant_before_clustering) + '/' + str(num_sdd_clusters) + ") of the original response distributions from which Stage 2 clusters were extracted were not correlated significantly (p>0.05) (Fig. 3h). "
sw.append_new_tag(cluster_correlation_string2, "ClusterCorrelationSummary2", tex_tag_file_name)
angled_clusters_unique_pairs_summary_string = "A total of " + str(self.final_angled_cluster_count) + " unique Stage 2 clusters were extracted from " + str(self.did_contribute_atleast_one_final_angled_cluster_count) + " unique response distributions." #, confirming that there were no repeated or similar clusters."
sw.append_new_tag(angled_clusters_unique_pairs_summary_string, "AngledClustersUniquePairsSummary", tex_tag_file_name)
# Angle Comparisons
sw.basic_x_y_plot([sdds["Original" + '_BS_PCA_mean_angle']], [sdds["SelectivelyDifferencedBoxJenkins" + '_FA_angle_BS_mean']], dh.angle_analysis_directory + "BS_PCA_VS_SelectivelyDifferencedBoxJenkins_FA_Angles" + suf, draw_y_equals_x=True, y_equals_x_max=90, x_axis_label='Degrees', y_axis_label='Degrees', s=4, scatter_point_color_groups=['g'], custom_x_tick_locators=[90, 10])
# Cluster Reliabilities
sw.plot_cluster_reliability_plots(sdds['PCA_ellipse_overall_reliability'], sdds['PCA_ellipse_conj_reliability'], dh.cluster_reliabilities_dir, suf)
analysis_dict_keys= ['Original', 'OriginalTestsPassed', "SelectivelyDifferenced", "SelectivelyDifferencedTestsPassedActuallyDifferenced", "SelectivelyDifferencedBoxJenkins", "SelectivelyDifferencedBoxJenkinsTestsPassed"]
if ('analysis_dict_member_keys' in sdds.keys()):
analysis_dict_member_keys = sdds['analysis_dict_member_keys']
for analysis_dict_key in analysis_dict_keys:
# Directories
specific_angle_analysis_dir = dh.angle_analysis_directory + analysis_dict_key + "/" + suf + "/"; sw.makedirs(specific_angle_analysis_dir)
specific_nonstationarity_dir = dh.clus_non_stationarity_dir + analysis_dict_key + "/" + suf + "/"; sw.makedirs(specific_nonstationarity_dir)
sharipo_normality_specific_nonstationarity_dir = specific_nonstationarity_dir + "SharipoNormality/"; sw.makedirs(sharipo_normality_specific_nonstationarity_dir)
KPSS_stationarity_specific_nonstationarity_dir = specific_nonstationarity_dir + "KPSSStationarity/"; sw.makedirs(KPSS_stationarity_specific_nonstationarity_dir)
ADF_stationarity_specific_nonstationarity_dir = specific_nonstationarity_dir + "ADFStationarity/"; sw.makedirs(ADF_stationarity_specific_nonstationarity_dir)
LR_specific_nonstationarity_dir = specific_nonstationarity_dir + "LRStationarity/"; sw.makedirs(LR_specific_nonstationarity_dir)
HZ_specific_nonstationarity_dir = specific_nonstationarity_dir + "HZStationarity/"; sw.makedirs(HZ_specific_nonstationarity_dir)
bartlett_specific_nonstationarity_dir = specific_nonstationarity_dir + "BartlettSphericity/"; sw.makedirs(bartlett_specific_nonstationarity_dir)
specific_lag_pvals_nonstationary_dir = specific_nonstationarity_dir + "LagPVals/"; sw.makedirs(specific_lag_pvals_nonstationary_dir)
LR_correlation_specific_nonstationarity_dir = specific_nonstationarity_dir + "LRCorrelation/"; sw.makedirs(LR_correlation_specific_nonstationarity_dir)
true_where_tests_not_passed_ORIGINAL = np.asarray(sdds['Original_tests_passed'])
num_tests_not_passed_ORIGINAL = np.sum(true_where_tests_not_passed_ORIGINAL == False)
if (analysis_dict_key in ["Original", "SelectivelyDifferencedBoxJenkins", "SelectivelyDifferenced"]):
num_for_type = np.sum(np.bitwise_not(np.asarray(sdds[analysis_dict_key + '_is_empty'])))
true_where_normal = np.asarray(sdds[analysis_dict_key + '_normal'])
num_normal = np.sum(true_where_normal)
where_normal = np.where(true_where_normal)
true_where_tests_passed = np.asarray(sdds[analysis_dict_key + '_tests_passed'])
num_tests_passed = np.sum(true_where_tests_passed)
where_tests_passed = np.where(true_where_tests_passed)
true_where_tests_not_passed = np.asarray(sdds[analysis_dict_key + '_tests_passed'])
num_tests_not_passed = np.sum(true_where_tests_not_passed == False)
true_where_tests_passed_and_normal = np.asarray(sdds[analysis_dict_key + '_tests_passed_and_normal'])
num_tests_passed_and_normal = np.sum(true_where_tests_passed_and_normal)
where_tests_passed_and_normal = np.where(true_where_tests_passed_and_normal)
true_where_correlated = np.asarray(sdds[analysis_dict_key + '_is_still_correlated'])
number_correlated = np.sum(true_where_correlated)
where_correlated = np.where(true_where_correlated)
true_where_tests_passed_and_correlated = np.logical_and(true_where_correlated, true_where_tests_passed)
num_tests_passed_and_correlated = np.sum(true_where_tests_passed_and_correlated)
where_tests_passed_and_correlated = np.where(true_where_tests_passed_and_correlated)
where_different_from_45 = np.logical_and(np.asarray(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_45']), np.asarray(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_0']))
num_different_from_45 = np.sum(where_different_from_45)
true_where_correlated_and_different_from_45 = np.logical_and(true_where_correlated, np.asarray(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_45']))
num_correlated_and_different_from_45 = np.sum(true_where_correlated_and_different_from_45)
where_correlated_and_different_from_45 = np.where(true_where_correlated_and_different_from_45)
true_where_correlated_and_different_from_45_tests_passed = np.logical_and(true_where_correlated_and_different_from_45, true_where_tests_passed)
num_correlated_and_different_from_45_tests_passed = np.sum(true_where_correlated_and_different_from_45_tests_passed)
where_correlated_and_different_from_45_tests_passed = np.where(true_where_correlated_and_different_from_45_tests_passed)
true_where_correlated_and_different_from_45_tests_passed_and_normal = np.logical_and(true_where_correlated_and_different_from_45, true_where_tests_passed_and_normal)
num_correlated_and_different_from_45_tests_passed_and_normal = np.sum(true_where_correlated_and_different_from_45_tests_passed_and_normal)
where_correlated_and_different_from_45_tests_passed_and_normal = np.where(true_where_correlated_and_different_from_45_tests_passed_and_normal)
true_where_correlated_and_different_from_45_and_different_from_0 = np.logical_and(true_where_correlated_and_different_from_45, np.asarray(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_0']))
num_correlated_and_different_from_45_and_different_from_0 = np.sum(true_where_correlated_and_different_from_45_and_different_from_0)
where_correlated_and_different_from_45_and_different_from_0 = np.where(true_where_correlated_and_different_from_45_and_different_from_0)
true_where_correlated_and_different_from_45_and_different_from_0_tests_passed = np.logical_and(true_where_correlated_and_different_from_45_and_different_from_0, true_where_tests_passed)
num_correlated_and_different_from_45_and_different_from_0_tests_passed = np.sum(true_where_correlated_and_different_from_45_and_different_from_0_tests_passed)
where_correlated_and_different_from_45_and_different_from_0_tests_passed = np.where(true_where_correlated_and_different_from_45_and_different_from_0_tests_passed)
true_where_correlated_and_different_from_45_and_different_from_0_tests_passed_and_normal = np.logical_and(true_where_correlated_and_different_from_45_and_different_from_0, true_where_tests_passed_and_normal)
num_correlated_and_different_from_45_and_different_from_0_tests_passed_and_normal = np.sum(true_where_correlated_and_different_from_45_and_different_from_0_tests_passed_and_normal)
where_correlated_and_different_from_45_and_different_from_0_tests_passed_and_normal = np.where(true_where_correlated_and_different_from_45_and_different_from_0_tests_passed_and_normal)
num_correlated_diferent_from_45_but_not_different_from_0 = num_correlated_and_different_from_45 - num_correlated_and_different_from_45_and_different_from_0
num_correlated_diferent_from_45_but_not_different_from_0_tests_passed = num_correlated_and_different_from_45_tests_passed - num_correlated_and_different_from_45_and_different_from_0_tests_passed
num_correlated_diferent_from_45_but_not_different_from_0_tests_passed_and_normal = num_correlated_and_different_from_45_tests_passed_and_normal - num_correlated_and_different_from_45_and_different_from_0_tests_passed_and_normal
data_part = sw.percent_and_frac_string(num_different_from_45, self.final_angled_cluster_count)
ps_0 = data_part + " of $\\theta_{45}$ angles were between and significantly different from $0^{\circ}$ and $45^{\circ}$ for " + analysis_dict_key
sw.append_new_tag(ps_0, analysis_dict_key + "AnglesDifferentFrom45FullText", tex_tag_file_name)
sw.append_new_tag(data_part, analysis_dict_key + "AnglesDifferentFrom45Num", tex_tag_file_name)
data_part = sw.percent_and_frac_string(num_correlated_and_different_from_45_and_different_from_0_tests_passed, num_tests_passed)
ps_1_1 = data_part + analysis_dict_key + " had $\\theta_{45}$ angles between and significantly different from $45^{\circ}$ (p<0.025) and $0^{\circ}$ (p<0.025). "
sw.append_new_tag(ps_1_1, analysis_dict_key + "TestsPassedAngleSummaryFullString", tex_tag_file_name)
sw.append_new_tag(data_part, analysis_dict_key + "TestsPassedAngleSummaryNum", tex_tag_file_name)
data_part = sw.percent_and_frac_string(num_for_type - num_normal, num_for_type)
ps_new_normality_str = "It is important to note that the Henze-Zirkler null hypothesis of normality was rejected (p < 0.05) for " + data_part + " " + analysis_dict_key
sw.append_new_tag(ps_new_normality_str, analysis_dict_key + "NormalityFullString", tex_tag_file_name)
sw.append_new_tag(data_part, analysis_dict_key + "NormalityNum", tex_tag_file_name)
data_part = sw.percent_and_frac_string(num_tests_passed - num_tests_passed_and_normal, num_tests_passed)
ps_new_tests_passed_normality_str = "It is important to note that the Henze-Zirkler null hypothesis (p < 0.05) of normality was rejected for " + data_part + " " + analysis_dict_key + "TestsPassed"
sw.append_new_tag(ps_new_tests_passed_normality_str, analysis_dict_key + "TestsPassedNormalityFullString", tex_tag_file_name)
sw.append_new_tag(data_part, analysis_dict_key + "TestsPassedNormalityNum", tex_tag_file_name)
if (analysis_dict_key == "Original"):
num_stage_2_clusters_string = str(self.final_angled_cluster_count) + " unique Stage 2 clusters were extracted"
sw.append_new_tag(num_stage_2_clusters_string, "NumStage2ClustersFullString", tex_tag_file_name)
sw.append_new_tag(str(self.final_angled_cluster_count), "NumStage2ClustersNum", tex_tag_file_name)
data_part = sw.percent_and_frac_string(number_correlated, self.final_angled_cluster_count)
ps_p1 = data_part + " of which remained correlated above chance (p < 0.005)."
sw.append_new_tag(ps_p1, "NumStage2ClustersCorrelatedFullString", tex_tag_file_name)
sw.append_new_tag(data_part, "NumStage2ClustersCorrelatedNum", tex_tag_file_name)
ps_p2 = str(self.final_angled_cluster_count - number_correlated) + " Stage 2 clusters were not significantly correlated (p >= 0.005) and were removed from further analysis, leaving " + str(number_correlated) + " Stage 2 clusters."
sw.append_new_tag(ps_p2, "OriginalStage2Uncorrelated", tex_tag_file_name)
data_part = sw.percent_and_frac_string(num_tests_passed, self.final_angled_cluster_count)
ps_1_0 = data_part + " of Stage 2 clusters were determined as stationary "
sw.append_new_tag(ps_1_0, "OriginalTestsPassedSummaryFullString", tex_tag_file_name)
sw.append_new_tag(data_part, "OriginalTestsPassedSummaryNum", tex_tag_file_name)
ps_1_1 = str(num_tests_passed_and_normal) + " stationary Stage 2 clusters were determined as normal (Henze-Zirkler p > 0.05), of which " + str(num_correlated_and_different_from_45_and_different_from_0_tests_passed_and_normal) + " had $\\theta_{45}$ angles between and significantly different from $45^{\circ}$ (p-value < 0.025) | |
Note that (by [Jensen's inequality](
https://en.wikipedia.org/wiki/Jensen%27s_inequality))
this is *smaller* in expectation than the true
`log p(observations[t] | observations[:t])`.
${non_markovian_specification_str}
#### Examples
**Tracking unknown position and velocity**: Let's consider tracking an object
moving in a one-dimensional space. We'll define a dynamical system
by specifying an `initial_state_prior`, a `transition_fn`,
and `observation_fn`.
The structure of the latent state space is determined by the prior
distribution. Here, we'll define a state space that includes the object's
current position and velocity:
```python
initial_state_prior = tfd.JointDistributionNamed({
'position': tfd.Normal(loc=0., scale=1.),
'velocity': tfd.Normal(loc=0., scale=0.1)})
```
The `transition_fn` specifies the evolution of the system. It should
return a distribution over latent states of the same structure as the prior.
Here, we'll assume that the position evolves according to the velocity,
with a small random drift, and the velocity also changes slowly, following
a random drift:
```python
def transition_fn(_, previous_state):
return tfd.JointDistributionNamed({
'position': tfd.Normal(
loc=previous_state['position'] + previous_state['velocity'],
scale=0.1),
'velocity': tfd.Normal(loc=previous_state['velocity'], scale=0.01)})
```
The `observation_fn` specifies the process by which the system is observed
at each time step. Let's suppose we observe only a noisy version of the
current position.
```python
def observation_fn(_, state):
return tfd.Normal(loc=state['position'], scale=0.1)
```
Now let's track our object. Suppose we've been given observations
corresponding to an initial position of `0.4` and constant velocity of `0.01`:
```python
# Generate simulated observations.
observed_positions = tfd.Normal(loc=tf.linspace(0.4, 0.8, 40),
scale=0.1).sample()
# Run particle filtering to sample plausible trajectories.
(trajectories, # {'position': [40, 1000], 'velocity': [40, 1000]}
lps) = tfp.experimental.mcmc.infer_trajectories(
observations=observed_positions,
initial_state_prior=initial_state_prior,
transition_fn=transition_fn,
observation_fn=observation_fn,
num_particles=1000)
```
For all `i`, `trajectories['position'][:, i]` is a sample from the
posterior over position sequences, given the observations:
`p(state[0:T] | observations[0:T])`. Often, the sampled trajectories
will be highly redundant in their earlier timesteps, because most
of the initial particles have been discarded through resampling
(this problem is known as 'particle degeneracy'; see section 3.5 of
[Doucet and Johansen][1]).
In such cases it may be useful to also consider the series of *filtering*
distributions `p(state[t] | observations[:t])`, in which each latent state
is inferred conditioned only on observations up to that point in time; these
may be computed using `tfp.experimental.mcmc.particle_filter`.
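For instance, a sketch of obtaining the per-step filtering results for the
model above (the shapes shown assume the same 40-step observation series; the
call mirrors the arguments used with `infer_trajectories`):
```python
(particles,       # {'position': [40, 1000], 'velocity': [40, 1000]}
 log_weights,     # [40, 1000]
 parent_indices,
 incremental_log_marginal_likelihoods) = tfp.experimental.mcmc.particle_filter(
     observations=observed_positions,
     initial_state_prior=initial_state_prior,
     transition_fn=transition_fn,
     observation_fn=observation_fn,
     num_particles=1000)
```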
#### References
[1] Arnaud Doucet and Adam M. Johansen. A tutorial on particle
filtering and smoothing: Fifteen years later.
_Handbook of nonlinear filtering_, 12(656-704), 2009.
https://www.stats.ox.ac.uk/~doucet/doucet_johansen_tutorialPF2011.pdf
"""
with tf.name_scope(name or 'infer_trajectories') as name:
seed = SeedStream(seed, 'infer_trajectories')
(particles,
log_weights,
parent_indices,
incremental_log_marginal_likelihoods) = particle_filter(
observations=observations,
initial_state_prior=initial_state_prior,
transition_fn=transition_fn,
observation_fn=observation_fn,
num_particles=num_particles,
initial_state_proposal=initial_state_proposal,
proposal_fn=proposal_fn,
resample_criterion_fn=resample_criterion_fn,
rejuvenation_kernel_fn=rejuvenation_kernel_fn,
num_transitions_per_observation=num_transitions_per_observation,
num_steps_state_history_to_pass=num_steps_state_history_to_pass,
num_steps_observation_history_to_pass=(
num_steps_observation_history_to_pass),
trace_fn=_default_trace_fn,
seed=seed,
name=name)
weighted_trajectories = reconstruct_trajectories(particles, parent_indices)
# Resample all steps of the trajectories using the final weights.
resample_indices = categorical.Categorical(
dist_util.move_dimension(
log_weights[-1, ...],
source_idx=0,
dest_idx=-1)).sample(num_particles, seed=seed)
trajectories = tf.nest.map_structure(
lambda x: _batch_gather(x, resample_indices, axis=1),
weighted_trajectories)
return trajectories, incremental_log_marginal_likelihoods
@docstring_util.expand_docstring(
particle_filter_arg_str=particle_filter_arg_str,
non_markovian_specification_str=non_markovian_specification_str)
def particle_filter(observations,
initial_state_prior,
transition_fn,
observation_fn,
num_particles,
initial_state_proposal=None,
proposal_fn=None,
resample_criterion_fn=ess_below_threshold,
rejuvenation_kernel_fn=None, # TODO(davmre): not yet supported. pylint: disable=unused-argument
num_transitions_per_observation=1,
num_steps_state_history_to_pass=None,
num_steps_observation_history_to_pass=None,
trace_fn=_default_trace_fn,
step_indices_to_trace=None,
seed=None,
name=None): # pylint: disable=g-doc-args
"""Samples a series of particles representing filtered latent states.
The particle filter samples from the sequence of "filtering" distributions
`p(state[t] | observations[:t])` over latent
states: at each point in time, this is the distribution conditioned on all
observations *up to that time*. Because particles may be resampled, a particle
at time `t` may be different from the particle with the same index at time
`t + 1`. To reconstruct trajectories by tracing back through the resampling
process, see `tfp.experimental.mcmc.reconstruct_trajectories`.
${particle_filter_arg_str}
trace_fn: Python `callable` defining the values to be traced at each step.
It takes a `ParticleFilterStepResults` tuple and returns a structure of
`Tensor`s. The default function returns
`(particles, log_weights, parent_indices, step_log_likelihood)`.
step_indices_to_trace: optional `int` `Tensor` listing, in increasing order,
the indices of steps at which to record the values traced by `trace_fn`.
If `None`, the default behavior is to trace at every timestep,
equivalent to specifying `step_indices_to_trace=tf.range(num_timesteps)`.
seed: Python `int` seed for random ops.
name: Python `str` name for ops created by this method.
Default value: `None` (i.e., `'particle_filter'`).
Returns:
particles: a (structure of) Tensor(s) matching the latent state, each
of shape
`concat([[num_timesteps, num_particles, b1, ..., bN], event_shape])`,
representing (possibly weighted) samples from the series of filtering
distributions `p(latent_states[t] | observations[:t])`.
log_weights: `float` `Tensor` of shape
`[num_timesteps, num_particles, b1, ..., bN]`, such that
`log_weights[t, :]` are the logarithms of normalized importance weights
(such that `exp(reduce_logsumexp(log_weights, axis=-1)) == 1.`) of
the particles at time `t`. These may be used in conjunction with
`particles` to compute expectations under the series of filtering
distributions.
parent_indices: `int` `Tensor` of shape
`[num_timesteps, num_particles, b1, ..., bN]`,
such that `parent_indices[t, k]` gives the index of the particle at
time `t - 1` that the `k`th particle at time `t` is immediately descended
from. See also
`tfp.experimental.mcmc.reconstruct_trajectories`.
incremental_log_marginal_likelihoods: float `Tensor` of shape
`[num_observation_steps, b1, ..., bN]`,
giving the natural logarithm of an unbiased estimate of
`p(observations[t] | observations[:t])` at each observed timestep `t`.
Note that (by [Jensen's inequality](
https://en.wikipedia.org/wiki/Jensen%27s_inequality))
this is *smaller* in expectation than the true
`log p(observations[t] | observations[:t])`.
${non_markovian_specification_str}
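#### Examples
As a minimal sketch, reusing the `initial_state_prior`, `transition_fn`, and
`observation_fn` from the `infer_trajectories` example above and tracing only
the final step's particles and weights (the model itself is illustrative):
```python
final_particles, final_log_weights = tfp.experimental.mcmc.particle_filter(
    observations=observed_positions,
    initial_state_prior=initial_state_prior,
    transition_fn=transition_fn,
    observation_fn=observation_fn,
    num_particles=1000,
    trace_fn=lambda results: (results.particles, results.log_weights),
    step_indices_to_trace=-1)  # scalar index: the time axis is dropped
```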
"""
seed = SeedStream(seed, 'particle_filter')
with tf.name_scope(name or 'particle_filter'):
num_observation_steps = ps.size0(tf.nest.flatten(observations)[0])
num_timesteps = (
1 + num_transitions_per_observation * (num_observation_steps - 1))
# If no criterion is specified, default is to resample at every step.
if not resample_criterion_fn:
resample_criterion_fn = lambda _: True
# Canonicalize the list of steps to trace as a rank-1 tensor of (sorted)
# positive integers. E.g., `3` -> `[3]`, `[-2, -1]` -> `[N - 2, N - 1]`.
if step_indices_to_trace is not None:
(step_indices_to_trace,
traced_steps_have_rank_zero) = _canonicalize_steps_to_trace(
step_indices_to_trace, num_timesteps)
# Dress up the prior and prior proposal as a fake `transition_fn` and
# `proposal_fn` respectively.
prior_fn = lambda _1, _2: SampleParticles( # pylint: disable=g-long-lambda
initial_state_prior, num_particles)
prior_proposal_fn = (
None if initial_state_proposal is None
else lambda _1, _2: SampleParticles( # pylint: disable=g-long-lambda
initial_state_proposal, num_particles))
# Initially the particles all have the same weight, `1. / num_particles`.
broadcast_batch_shape = tf.convert_to_tensor(
functools.reduce(
ps.broadcast_shape,
tf.nest.flatten(initial_state_prior.batch_shape_tensor()),
[]), dtype=tf.int32)
log_uniform_weights = ps.zeros(
ps.concat([
[num_particles],
broadcast_batch_shape], axis=0),
dtype=tf.float32) - ps.log(num_particles)
# Initialize from the prior and incorporate the first observation.
dummy_previous_step = ParticleFilterStepResults(
particles=prior_fn(0, []).sample(),
log_weights=log_uniform_weights,
parent_indices=None,
incremental_log_marginal_likelihood=0.,
accumulated_log_marginal_likelihood=0.)
initial_step_results = _filter_one_step(
step=0,
# `previous_particles` at the first step is a dummy quantity, used only
# to convey state structure and num_particles to an optional
# proposal fn.
previous_step_results=dummy_previous_step,
observation=tf.nest.map_structure(
lambda x: tf.gather(x, 0), observations),
transition_fn=prior_fn,
observation_fn=observation_fn,
proposal_fn=prior_proposal_fn,
resample_criterion_fn=resample_criterion_fn,
seed=seed)
def _loop_body(step,
previous_step_results,
accumulated_traced_results,
state_history,
num_steps_traced):
"""Take one step in dynamics and accumulate marginal likelihood."""
step_has_observation = (
# The second of these conditions subsumes the first, but both are
# useful because the first can often be evaluated statically.
ps.equal(num_transitions_per_observation, 1) |
ps.equal(step % num_transitions_per_observation, 0))
observation_idx = step // num_transitions_per_observation
current_observation = tf.nest.map_structure(
lambda x, step=step: tf.gather(x, observation_idx), observations)
history_to_pass_into_fns = {}
if num_steps_observation_history_to_pass:
history_to_pass_into_fns['observation_history'] = _gather_history(
observations,
observation_idx,
num_steps_observation_history_to_pass)
if num_steps_state_history_to_pass:
history_to_pass_into_fns['state_history'] = state_history
new_step_results = _filter_one_step(
step=step,
previous_step_results=previous_step_results,
observation=current_observation,
transition_fn=functools.partial(
transition_fn, **history_to_pass_into_fns),
observation_fn=functools.partial(
observation_fn, **history_to_pass_into_fns),
proposal_fn=(
None if proposal_fn is None else
functools.partial(proposal_fn, **history_to_pass_into_fns)),
resample_criterion_fn=resample_criterion_fn,
has_observation=step_has_observation,
seed=seed)
return _update_loop_variables(
step=step,
current_step_results=new_step_results,
accumulated_traced_results=accumulated_traced_results,
state_history=state_history,
trace_fn=trace_fn,
step_indices_to_trace=step_indices_to_trace,
num_steps_traced=num_steps_traced)
loop_results = tf.while_loop(
cond=lambda step, *_: step < num_timesteps,
body=_loop_body,
loop_vars=_initialize_loop_variables(
initial_step_results=initial_step_results,
num_timesteps=num_timesteps,
num_steps_state_history_to_pass=num_steps_state_history_to_pass,
trace_fn=trace_fn,
step_indices_to_trace=step_indices_to_trace))
results = tf.nest.map_structure(lambda ta: ta.stack(),
loop_results.accumulated_traced_results)
if step_indices_to_trace is not None:
# If we were passed a rank-0 (single scalar) step to trace, don't
# return a time axis in the returned results.
results = ps.cond(
traced_steps_have_rank_zero,
lambda: tf.nest.map_structure(lambda x: x[0, ...], results),
lambda: results)
return results
def _canonicalize_steps_to_trace(step_indices_to_trace, num_timesteps):
"""Canonicalizes `3` -> `[3]`, `[-2, -1]` -> `[N - 2, N - 1]`, etc."""
step_indices_to_trace = tf.convert_to_tensor(
step_indices_to_trace, dtype_hint=tf.int32)
traced_steps_have_rank_zero = ps.equal(
ps.rank_from_shape(ps.shape(step_indices_to_trace)), 0)
# Canonicalize negative step indices as positive.
step_indices_to_trace = ps.where(step_indices_to_trace < 0,
num_timesteps + step_indices_to_trace,
step_indices_to_trace)
# Canonicalize scalars as length-one vectors.
return (ps.reshape(step_indices_to_trace, [ps.size(step_indices_to_trace)]),
traced_steps_have_rank_zero)
def _initialize_loop_variables(initial_step_results,
num_timesteps,
num_steps_state_history_to_pass,
trace_fn,
step_indices_to_trace):
"""Initialize arrays and other quantities passed through the filter loop."""
# Create arrays to store traced values (particles, likelihoods, etc).
num_steps_to_trace = (num_timesteps
if step_indices_to_trace is None
else ps.size0(step_indices_to_trace))
traced_results = trace_fn(initial_step_results)
trace_arrays = tf.nest.map_structure(
lambda
# -*- mode: python; coding: utf-8 -*-
# Copyright 2012-2018 <NAME> <<EMAIL>> and collaborators.
# Licensed under the MIT License.
"""Model data with least-squares fitting
This module provides tools for fitting models to data using least-squares
optimization.
"""
from __future__ import absolute_import, division, print_function
__all__ = 'ModelBase Model ComposedModel PolynomialModel ScaleModel'.split()
import numpy as np
try:
# numpy 1.7
import numpy.polynomial.polynomial as npoly
except ImportError:
import numpy.polynomial as npoly
from six import get_function_code
from six.moves import range, reduce
from . import binary_type, text_type
class Parameter(object):
"""Information about a parameter in a least-squares model.
These data may only be obtained after solving least-squares problem. These
objects reference information from their parent objects, so changing the
parent will alter the apparent contents of these objects.
"""
def __init__(self, owner, index):
self._owner = owner
self._index = index
def __repr__(self):
return '<Parameter "%s" (#%d) of %s>' % (self.name, self._index, self._owner)
@property
def index(self): # make this read-only
"The parameter's index in the Model's arrays."
return self._index
@property
def name(self):
"The parameter's name."
return self._owner.pnames[self._index]
@property
def value(self):
"The parameter's value."
return self._owner.params[self._index]
@property
def uncert(self):
"The uncertainty in :attr:`value`."
return self._owner.puncerts[self._index]
@property
def uval(self):
"Accesses :attr:`value` and :attr:`uncert` as a :class:`pwkit.msmt.Uval`."
from .msmt import Uval
return Uval.from_norm(self.value, self.uncert)
class ModelBase(object):
"""An abstract base class holding data and a model for least-squares fitting.
The models implemented in this module all derive from this class and so
inherit the attributes and methods described below.
A :class:`Parameter` data structure may be obtained by indexing this
object with either the parameter's numerical index or its name. I.e.::
m = Model(...).solve(...)
p = m['slope']
print(p.name, p.value, p.uncert, p.uval)
"""
data = None
"The data to be modeled; an *n*-dimensional Numpy array."
invsigma = None
"Data weights: 1/σ for each data point."
params = None
"After fitting, a Numpy ndarray of solved model parameters."
puncerts = None
"After fitting, a Numpy ndarray of 1σ uncertainties on the model parameters."
pnames = None
"A list of textual names for the parameters."
covar = None
"""After fitting, the variance-covariance matrix representing the parameter
uncertainties.
"""
mfunc = None
"""After fitting, a callable function evaluating the model fixed at best params.
The resulting function may or may not take arguments depending on the particular
kind of model being evaluated.
"""
mdata = None
"After fitting, the modeled data at the best parameters."
chisq = None
"After fitting, the χ² of the fit."
rchisq = None
"After fitting, the reduced χ² of the fit, or None if there are no degrees of freedom."
resids = None
"After fitting, the residuals: ``resids = data - mdata``."
def __init__(self, data, invsigma=None):
self.set_data(data, invsigma)
def set_data(self, data, invsigma=None):
"""Set the data to be modeled.
Returns *self*.
"""
self.data = np.array(data, dtype=float, ndmin=1)
if invsigma is None:
self.invsigma = np.ones(self.data.shape)
else:
i = np.array(invsigma, dtype=float)
self.invsigma = np.broadcast_arrays(self.data, i)[1] # allow scalar invsigma
if self.invsigma.shape != self.data.shape:
raise ValueError('data values and inverse-sigma values must have same shape')
return self
def print_soln(self):
"""Print information about the model solution."""
lmax = reduce(max,(len(x) for x in self.pnames), len('r chi sq'))
if self.puncerts is None:
for pn, val in zip(self.pnames, self.params):
print('%s: %14g' % (pn.rjust(lmax), val))
else:
for pn, val, err in zip(self.pnames, self.params, self.puncerts):
frac = abs(100. * err / val)
print('%s: %14g +/- %14g (%.2f%%)' % (pn.rjust(lmax), val, err, frac))
if self.rchisq is not None:
print('%s: %14g' % ('r chi sq'.rjust(lmax), self.rchisq))
elif self.chisq is not None:
print('%s: %14g' % ('chi sq'.rjust(lmax), self.chisq))
else:
print('%s: unknown/undefined' % ('r chi sq'.rjust(lmax)))
return self
def make_frozen_func(self, params):
"""Return a data-generating model function frozen at the specified parameters.
As with the :attr:`mfunc` attribute, the resulting function may or may
not take arguments depending on the particular kind of model being
evaluated.
"""
raise NotImplementedError()
def __getitem__(self, key):
if isinstance(key, binary_type):
# If you're not using the unicode_literals __future__, things get
# annoying really quickly without this.
key = text_type(key)
if isinstance(key, int):
idx = key
if idx < 0 or idx >= len(self.pnames):
raise ValueError('illegal parameter number %d' % key)
elif isinstance(key, text_type):
try:
idx = self.pnames.index(key)
except ValueError:
raise ValueError('no such parameter named "%s"' % key)
else:
raise ValueError('illegal parameter key %r' % key)
return Parameter(self, idx)
def plot(self, modelx, dlines=False, xmin=None, xmax=None,
ymin=None, ymax=None, **kwargs):
"""Plot the data and model (requires `omega`).
This assumes that `data` is 1D and that `mfunc` takes one argument
that should be treated as the X variable.
"""
import omega as om
modelx = np.asarray(modelx)
if modelx.shape != self.data.shape:
raise ValueError('modelx and data arrays must have same shape')
modely = self.mfunc(modelx)
sigmas = self.invsigma**-1 # TODO: handle invsigma = 0
vb = om.layout.VBox(2)
vb.pData = om.quickXYErr(modelx, self.data, sigmas,
'Data', lines=dlines, **kwargs)
vb[0] = vb.pData
vb[0].addXY(modelx, modely, 'Model')
vb[0].setYLabel('Y')
vb[0].rebound(False, True)
vb[0].setBounds(xmin, xmax, ymin, ymax)
vb[1] = vb.pResid = om.RectPlot()
vb[1].defaultField.xaxis = vb[0].defaultField.xaxis  # share the x axis with the data panel
vb[1].addXYErr(modelx, self.resids, sigmas, None, lines=False)
vb[1].setLabels('X', 'Residuals')
vb[1].rebound(False, True)
# ignore Y values since residuals are on different scale:
vb[1].setBounds(xmin, xmax)
vb.setWeight(0, 3)
return vb
def show_cov(self):
"Show the parameter covariance matrix with `pwkit.ndshow_gtk3`."
# would be nice: labels with parameter names (hard because this is
# ndshow, not omegaplot)
from .ndshow_gtk3 import view
view(self.covar, title='Covariance Matrix')
def show_corr(self):
"Show the parameter correlation matrix with `pwkit.ndshow_gtk3`."
from .ndshow_gtk3 import view
d = np.diag(self.covar) ** -0.5
corr = self.covar * d[np.newaxis,:] * d[:,np.newaxis]
view(corr, title='Correlation Matrix')
class Model(ModelBase):
"""Models data with a generic nonlinear optimizer
Basic usage is::
def func(p1, p2, x):
simulated_data = p1 * x + p2
return simulated_data
x = [1, 2, 3]
data = [10, 14, 15.8]
mdl = Model(func, data, args=(x,)).solve(guess).print_soln()
The :class:`Model` constructor can take an optional argument ``invsigma``
after ``data``; it specifies *inverse sigmas*, **not** inverse *variances*
(the usual statistical weights), for the data points. Since most
applications deal in sigmas, take care to write::
m = Model(func, data, 1. / uncerts) # right!
not::
m = Model(func, data, uncerts) # WRONG
If you have zero uncertainty on a measurement, you must find a way to
express that constraint without including that measurement as part of the
``data`` vector.
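A fuller, hypothetical end-to-end sketch (assuming ``numpy`` is imported as
``np``; per :class:`ModelBase`, the fitted parameters and :attr:`mfunc` are
available after :meth:`solve`)::

    def func(p1, p2, x):
        return p1 * x + p2

    x = np.asarray([1., 2., 3.])
    data = np.asarray([10., 14., 15.8])
    mdl = Model(func, data, args=(x,)).solve([1., 1.]).print_soln()
    print(mdl['p1'].value, mdl['p1'].uncert)  # individual parameter info
    print(mdl.mfunc(np.asarray([4., 5.])))    # best-fit model at new x values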
"""
lm_prob = None
"""A :class:`pwkit.lmmin.Problem` instance describing the problem to be solved.
After setting up the data-generating function, you can access this item to
tune the solver.
"""
def __init__(self, simple_func, data, invsigma=None, args=()):
if simple_func is not None:
self.set_simple_func(simple_func, args)
if data is not None:
self.set_data(data, invsigma)
def set_func(self, func, pnames, args=()):
"""Set the model function to use an efficient but tedious calling convention.
The function should obey the following convention::
def func(param_vec, *args):
modeled_data = { do something using param_vec }
return modeled_data
This function creates the :class:`pwkit.lmmin.Problem` so that the
caller can futz with it before calling :meth:`solve`, if so desired.
Returns *self*.
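A hypothetical sketch of this convention for a straight-line model, assuming
``data``, ``invsigma``, and an abscissa array ``x`` are already defined::

    def line_func(pvec, x):
        return pvec[0] * x + pvec[1]

    m = Model(None, data, invsigma)
    m.set_func(line_func, ['slope', 'intercept'], args=(x,))
    m.solve([1., 0.])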
"""
from .lmmin import Problem
self.func = func
self._args = args
self.pnames = list(pnames)
self.lm_prob = Problem(len(self.pnames))
return self
def set_simple_func(self, func, args=()):
"""Set the model function to use a simple but somewhat inefficient calling
convention.
The function should obey the following convention::
def func(param0, param1, ..., paramN, *args):
modeled_data = { do something using the parameters }
return modeled_data
Returns *self*.
"""
code = get_function_code(func)
npar = code.co_argcount - len(args)
pnames = code.co_varnames[:npar]
def wrapper(params, *args):
return func(*(tuple(params) + args))
return self.set_func(wrapper, pnames, args)
def make_frozen_func(self, params):
"""Returns a model function frozen to the specified parameter values.
Any remaining arguments are left free and must be provided when the
function is called.
For this model, the returned function is the application of
:func:`functools.partial` to the :attr:`func` property of this object.
"""
params = np.array(params, dtype=float, ndmin=1)
from functools import partial
return partial(self.func, params)
def solve(self, guess):
"""Solve for the parameters, using an initial guess.
This uses the Levenberg-Marquardt optimizer described in
:mod:`pwkit.lmmin`.
Returns *self*.
"""
guess = np.array(guess, dtype=float, ndmin=1)
f = self.func
args = self._args
def lmfunc(params, vec):
vec[:] = f(params, *args).flatten()
self.lm_prob.set_residual_func(self.data.flatten(),
self.invsigma.flatten(),
lmfunc, None)
self.lm_soln
logger.info('Applying Adjustment Method: SS-S by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_S_adjustment(item.copy())
print ("SS-S: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-S' + '_alphaCup_' + 'class_' + str(className))
adjustment_name = str('SS-S' + '_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ********************************************** #
# Site Specific Simple + Filter Adjustment (SS-SF)
if method != 'SS-SF':
pass
elif method == 'SS-SF' and adjustments_metadata['SS-SF'] == False:
pass
else:
print('Applying Adjustment Method: SS-SF')
logger.info('Applying Adjustment Method: SS-SF')
# inputdata_adj, lm_adj, m, c = perform_SS_SF_adjustment(inputdata.copy())
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_SF_adjustment(inputdata.copy())
print("SS-SF: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-SF'
adjustment_name = 'SS_SF'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind' or 'ZX' in RSDtype['Selection']:
print('Applying Adjustment Method: SS-SF by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-SF by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_SF_adjustment(item[primary_idx].copy())
print("SS-SF: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-SF' + '_' + 'class_' + str(className))
adjustment_name = str('SS_SF' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-SF by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-SF by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_SF_adjustment(item.copy())
print ("SS-SF: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-SF' + '_' + 'class_' + str(className))
adjustment_name = str('SS_SF' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD,
ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-SF by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-SF by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_SF_adjustment(item.copy())
print ("SS-SF: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-SF' + '_' + 'class_' + str(className))
adjustment_name = str('SS_SF' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane,
ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ************************************ #
# Site Specific Simple Adjustment (SS-SS) combining stability classes adjusted differently
if method != 'SS-SS':
pass
elif method == 'SS-SS' and adjustments_metadata['SS-SS'] == False:
pass
elif RSDtype['Selection'][0:4] != 'Wind' and 'ZX' not in RSDtype['Selection']:
pass
else:
print('Applying Adjustment Method: SS-SS')
logger.info('Applying Adjustment Method: SS-SS')
inputdata_adj, lm_adj, m, c = perform_SS_SS_adjustment(inputdata.copy(),All_class_data,primary_idx)
print("SS-SS: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-SS'
adjustment_name = 'SS_SS'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-SS by stability class (TKE). SAME as Baseline')
logger.info('Applying Adjustment Method: SS-SS by stability class (TKE). SAME as Baseline')
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
print("SS-SS: y = " + str(m) + " * x + " + str(c))
adjustment_name = str('SS_SS' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-SS by stability class Alpha w/ RSD. SAME as Baseline')
logger.info('Applying Adjustment Method: SS-SS by stability class Alpha w/ RSD. SAME as Baseline')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
print ("SS-SS: y = " + str(m) + "* x +" + str(c))
adjustment_name = str('SS_SS' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-SS by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-SS by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
print ("SS-SS: y = " + str(m) + "* x +" + str(c))
emptyclassFlag = False
adjustment_name = str('SS_SS' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
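    # Illustrative note (not part of the original pipeline): each SS-* method above
    # reports a fitted linear relation y = m * x + c from its perform_*_adjustment call
    # and applies it to the data before the results are tabulated. With hypothetical
    # coefficients m = 0.95 and c = 0.01, an input value x = 0.12 would map to
    # y = 0.95 * 0.12 + 0.01 = 0.124.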
# ******************************************* #
# Site Specific WindSpeed Adjustment (SS-WS)
if method != 'SS-WS':
pass
elif method == 'SS-WS' and adjustments_metadata['SS-WS'] == False:
pass
else:
print('Applying Adjustment Method: SS-WS')
logger.info('Applying Adjustment Method: SS-WS')
inputdata_adj, lm_adj, m, c = perform_SS_WS_adjustment(inputdata.copy())
print("SS-WS: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-WS'
adjustment_name = 'SS_WS'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind' or 'ZX' in RSDtype['Selection']:
print('Applying Adjustment Method: SS-WS by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-WS by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c = perform_SS_WS_adjustment(item[primary_idx].copy())
print("SS-WS: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS' + '_' + 'class_' + str(className))
adjustment_name = str('SS_WS' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-WS by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-WS by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_SS_WS_adjustment(item.copy())
print ("SS-WS: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS' + '_' + 'class_' + str(className))
adjustment_name = str('SS_WS' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-WS by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-WS by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_SS_WS_adjustment(item.copy())
print ("SS-WS: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS' + '_' + 'class_' + str(className))
emptyclassFlag = False
adjustment_name = str('SS_WS' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ******************************************* #
# Site Specific Comprehensive Adjustment (SS-WS-Std)
if method != 'SS-WS-Std':
pass
elif method == 'SS-WS-Std' and adjustments_metadata['SS-WS-Std'] == False:
pass
else:
print('Applying Adjustment Method: SS-WS-Std')
logger.info('Applying Adjustment Method: SS-WS-Std')
inputdata_adj, lm_adj, m, c = perform_SS_WS_Std_adjustment(inputdata.copy())
print("SS-WS-Std: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-WS-Std'
adjustment_name = 'SS_WS_Std'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind' or 'ZX' in RSDtype['Selection']:
print('Applying Adjustment Method: SS-WS-Std by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-WS-Std by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c = perform_SS_WS_Std_adjustment(item[primary_idx].copy())
print("SS-WS-Std: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS-Std' + '_' + 'class_' + str(className))
adjustment_name = str('SS_WS_Std' + | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Hash Code 2017
"""
import os, sys
from pprint import pprint
import argparse
import time
import datetime
import pandas as pd
from pymongo import MongoClient
import numpy as np
from math import cos, sin
from collections import defaultdict, deque
import copy
import matplotlib.pyplot as plt
from sets import Set
def structure_inputs(lines):
## NETWORK
## 5 videos, 2 endpoints, 4 request descriptions, 3 caches 100MB each.
network = lines.pop(0).split(" ")
nb_videos = int(network[0])
nb_endpoints = int(network[1])
nb_requestsDesc = int(network[2])
nb_caches = int(network[3])
nb_MBperCache = int(network[4])
print "nb_videos:", nb_videos
print "nb_endpoints:", nb_endpoints
print "nb_requestsDesc:", nb_requestsDesc
print "nb_caches:", nb_caches
print "nb_MBperCache:", nb_MBperCache
## VIDEOS
video_sizes_row = lines.pop(0)
video_sizes = [int(s) for s in video_sizes_row.split(" ")]
print("\nVideos Sizes")
pprint(video_sizes)
## ENDPOINTS (with caches)
endpoints = []
endpoints_caches = np.ones((nb_endpoints, nb_caches), dtype=np.int) * -1
for e in xrange(nb_endpoints):
infosE = lines.pop(0).split(" ")
endpoints.append({})
endpoints[e]['id'] = e
endpoints[e]['Ld'] = int(infosE[0])
endpoints[e]['nb_caches'] = int(infosE[1])
endpoints[e]['caches'] = []
for c in xrange(endpoints[e]['nb_caches']):
infosEC = lines.pop(0).split(" ")
cacheID = int(infosEC[0])
cacheLatency = int(infosEC[1])
endpoints[e]['caches'].append(cacheID)
endpoints_caches[e][cacheID] = cacheLatency
print("\nEndpoints")
pprint(endpoints)
print("\nEndpoints x Caches latency matrix")
print endpoints_caches
## ENDPOINTS improved: re-order caches of endpoints from fastest to slowest
for e in xrange(nb_endpoints):
cacheIDs = np.array(copy.copy(endpoints[e]['caches']))
cacheLatencies = []
for c in cacheIDs:
cacheLatencies.append(endpoints_caches[e][c])
idx_caches_ranked = np.argsort(cacheLatencies)
endpoints[e]['caches'] = cacheIDs[idx_caches_ranked]
print("\nEndpoints with caches ranked by their latencies")
pprint(endpoints)
## CACHES (serving a list of endpoints)
caches = []
caches = [{'id':i,
#'videos': [],
'endpoints':[]} for i in xrange(nb_caches)]
for e in endpoints:
for cID in e['caches']:
caches[cID]['endpoints'].append(e['id'])
print("\nCaches (each element has its list of endpoints)")
pprint(caches)
## CACHES: re-ordered so that the most connected caches appear first
caches_reordered = []
while len(caches)>0:
mxEndpts = -1
idMxC = -1
for i,c in enumerate(caches):
if len(c['endpoints']) > mxEndpts:
idMxC = i
mxEndpts = len(c['endpoints'])
caches_reordered.append(caches[idMxC])
del caches[idMxC]
caches = np.copy(caches_reordered)
print("\nCaches re-ordered")
pprint(caches)
mostConnected_caches = [c['id'] for c in caches]
print "mostConnected_caches: ", mostConnected_caches
## REQUEST DESCRIPTIONS BY ENDPOINTS PER VIDEOS
videos = []
videos_endpoints = np.zeros((nb_videos, nb_endpoints), dtype=np.int)
for r in xrange(nb_requestsDesc):
infosR = lines.pop(0).split(" ")
videoID = int(infosR[0])
endpointID = int(infosR[1])
nb_req = int(infosR[2])
videos_endpoints[videoID][endpointID] = nb_req
print("\nVideos x Endpoints: requests matrix")
print videos_endpoints
videos_sumEndpoints = np.sum(videos_endpoints, axis=1)
print("\nVideos requests summed over endpoints: requests vector")
print videos_sumEndpoints
print('\nsort videos per "popularity" = nb. of requests over all endpoints')
print("Indices of Videos ranked per total nb. of requests")
print("=What are the videos that requires caching??")
idx_videos_ranked = np.argsort(-videos_sumEndpoints)
print idx_videos_ranked
print("\nVideos requests ranked per total nb. of requests")
videos_ranked = videos_sumEndpoints[idx_videos_ranked]
print videos_ranked
# First videos whose nb. request not null
print("\nVideos ranked per total nb. of requests whose requests are not null")
videos_ranked_not_null = videos_ranked[np.where(videos_ranked > 0)]
print videos_ranked_not_null
nb_videos_ranked_not_null = len(videos_ranked_not_null)
    print nb_videos_ranked_not_null, "videos"
# sorted videos x endpoints matrix
print("\nVideos ranked per total nb. of requests (and not null) x endpoints")
videos_endpoints_ranked = videos_endpoints[idx_videos_ranked]
idx_videos_ranked_not_null = idx_videos_ranked[:nb_videos_ranked_not_null]
videos_endpoints_ranked_not_null = videos_endpoints_ranked[:nb_videos_ranked_not_null]
pprint(videos_endpoints_ranked_not_null)
## update endpoints with a list of videoIDs ranked by requests
endpoints_videos = np.transpose(videos_endpoints_ranked[:nb_videos_ranked_not_null])
for e in xrange(nb_endpoints):
endpoints[e]['videos'] = []
#print "rqs:", endpoints_videos[e]
for iv,r in enumerate(endpoints_videos[e]):
if r != 0:
v = idx_videos_ranked[iv]
#print r, iv ,v
endpoints[e]['videos'].append(v)
print("\nEndpoints now with videos ranked by nb.requests")
pprint(endpoints)
return (nb_videos, video_sizes, nb_endpoints, nb_requestsDesc, nb_caches, nb_MBperCache,
endpoints, endpoints_caches, mostConnected_caches, videos_endpoints,
nb_videos_ranked_not_null, idx_videos_ranked)
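# A minimal, hypothetical illustration of the raw input that structure_inputs()
# consumes (the values follow the Hash Code 2017 example layout described in the
# comments above; the parse_example() helper is not part of the original script):
def parse_example():
    example_lines = [
        "5 2 4 3 100",      # videos, endpoints, request descriptions, caches, MB per cache
        "50 50 80 30 110",  # sizes of the 5 videos in MB
        "1000 3",           # endpoint 0: 1000 ms to the datacenter, connected to 3 caches
        "0 100",            # cache 0 reachable from endpoint 0 with 100 ms latency
        "2 200",
        "1 300",
        "500 0",            # endpoint 1: 500 ms to the datacenter, no caches
        "3 0 1500",         # video 3 requested 1500 times from endpoint 0
        "0 1 1000",
        "4 0 500",
        "1 0 1000",
    ]
    return structure_inputs(example_lines)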
def get_score(videoIDs_in_caches,
videos_endpoints, endpoints, endpoints_caches):
""" in the form:
[[videoID_10, videoID_22], # videos in cache #0
[videoID_03, videoID_34], # videos in cache #1
...}
"""
endpoints_videos = np.transpose(videos_endpoints)
tot_time_saved = 0
score = 0
tot_req = 0
for e, endp in enumerate(endpoints):
cacheIDs = endpoints[e]["caches"]
ld = endpoints[e]["Ld"]
lc_arr = endpoints_caches[e]
videosInDemand = copy.deepcopy(endpoints[e]['videos'])
videosRequests = endpoints_videos[e]
for v in videosInDemand:
tot_req += videosRequests[v]
## Checking if any video added to the cache are used by the endpoint (only once by the fastest cache server)
for c in cacheIDs:
for v in videoIDs_in_caches[c]:
if v in videosInDemand:
idx_v = videosInDemand.index(v)
#print "\nvideo #{v} in cache #{c} and used by endpoint #{e}".format(v=v, c=c, e=e)
#print "ld-lc=", ld-lc_arr[c]
time_saved = (ld-lc_arr[c]) * videosRequests[v]
tot_time_saved += time_saved
#print "time saved : {: 8d}".format(time_saved)
#print "total time_saved: {: 8d}".format(tot_time_saved)
#print "nb. requests: ", videosRequests[v]
del videosInDemand[idx_v]
#print "videos in demand", videosInDemand
break
#print "tot_req.=", tot_req
if tot_req == 0:
score = 0
else:
score = int(np.floor(tot_time_saved * 1000. / tot_req))
return score
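# Hedged worked example of the scoring rule implemented by get_score() (numbers
# invented for illustration): a cache at 100 ms versus a 1000 ms datacenter link
# saves 900 ms per request, and the score is the average saving per request in
# microseconds, floored.
def example_score():
    time_saved = (1000 - 100) * 1500    # 1,350,000 ms saved over 1500 cached requests
    tot_req = 1500 + 500                # all requests, cached or not
    return int(np.floor(time_saved * 1000. / tot_req))  # -> 675000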
def check_video_subset_fit_in_caches(videoIDs_in_caches, video_sizes, nb_MBperCache):
    mem_used = np.zeros(len(videoIDs_in_caches), dtype=np.int)
for cID, lst_v in enumerate(videoIDs_in_caches):
for vID in lst_v:
mem_used[cID] += video_sizes[vID]
if mem_used[cID] > nb_MBperCache:
return False
#print "\nPercentage of memory used in each cache"
#pprint(mem_used*100./nb_MBperCache)
return True
def writing_videos_in_caches(videoIDs_in_caches, outFile="test.out"):
#pprint(videoIDs_in_caches)
nb_caches = len(videoIDs_in_caches)
with open(outFile,'w') as o:
o.write(str(nb_caches)+'\n')
for c in videoIDs_in_caches:
o.write(" ".join(str(i) for i in c) +'\n')
def cut_the_crap(scoresDelta, factor):
if len(scoresDelta) < 2:
return True
elif scoresDelta[-1] > 1.0*factor*scoresDelta[1]:
return True
else:
return False
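# Small hypothetical illustration of the cut_the_crap() stopping heuristic: the
# greedy inner loop keeps going while the latest score gain is still larger than
# `factor` times the first recorded gain (scoresDelta[1]).
def example_cut_the_crap():
    gains = [100, 80, 60, 7]                # invented per-pass score deltas
    return cut_the_crap(gains, factor=0.1)  # 7 <= 0.1 * 80, so False: stop looping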
def solve_with_common_sense(endpoints, videos_endpoints, idx_videos_ranked, nb_videos_ranked_not_null,
video_sizes,nb_MBperCache, nb_caches, endpoints_caches, mostConnected_caches,
outFile=outFile):
"""
Base on the shape of the matrix videos_endpoints_ranked showing [videos,endpoints] ranked along video axis
according to the highest total number of requests.
Pseudocode:
1. from a COPY of videos_endpoints_ranked, get stats on the distribution of requests,
define a minimum size of requests to consider: smallestNbReq based on quantile
2. consider dispatching videos only if #req is larger than the minimum
3. select one video to add to one cache based on:
* the most "popular" video which has highest #tot_reqs
* select the fastest cache from the endpoints having most req for that video
4. add the video to endpoints[e]['videosInMyCache']
5. recompute the matrix "videos_endpoints_ranked_not_null" after annealing
push(v in e[fastest_cache]) IF-AND-ONLY-IF v not in e['videosAlreadyInOneOfMyCaches']
"""
    videoIDs_in_caches = [[] for c in xrange(nb_caches)]
videos_endpoints_ranked_not_null = np.copy(videos_endpoints[idx_videos_ranked][:nb_videos_ranked_not_null])
df = pd.DataFrame(videos_endpoints_ranked_not_null)
df_req_notNull = df[df>0]
mean = df_req_notNull.mean(axis=0,skipna=True).mean(axis=0,skipna=True)
std = df_req_notNull.std(axis=0,skipna=True).mean(axis=0,skipna=True)
## 68% of requests are in mean +/- 1*std
## 95% of requests are in mean +/- 2*std
## 99% of requests are in mean +/- 3*std
print "mean",mean
print "std",std
# ## Consider gradually more videos to dispatch based on the "popularity"
# ### init
# videoIDs_in_caches = []
# isFittingCacheSize = True
# quantile = 0.4
# pprint(df_req_notNull.quantile(q=quantile, axis=1, numeric_only=True))
# pprint(df_req_notNull.quantile(q=quantile, axis=0, numeric_only=True))
# smallestNbReq = max(df_req_notNull.quantile(q=quantile, axis=1, numeric_only=True))
# print "smallestNbReq: ",smallestNbReq
# sys.exit()
# for eID, e in enumerate(endpoints):
# e['vidAlreadyInOneOfMyCaches'] = []
# while isFittingCacheSize:
# isFittingCacheSize = False #check_video_subset_fit_in_caches(videoIDs_in_caches, video_sizes, nb_MBperCache)
newScore = 0
iter = -1
    while iter < nb_videos_ranked_not_null-1: # Better would be to stop early once a cache is nearly full, with no chance of accepting one more video
oldScore = -1
iter += 1
print "#"*100
print "iter={iter}/{tot_iter}".format(iter=iter,tot_iter=nb_videos_ranked_not_null)
top_video = videos_endpoints_ranked_not_null[iter]
top_video_ID = idx_videos_ranked[iter]
#print "Id for the current top video: ", top_video_ID
# get endpoints where the video ranked #1 is present
endpts_listeners_set = Set()
potential_caches_set = Set()
for eID,rqs in enumerate(top_video):
if rqs > mean: ## CAREFUL, this parameter can be tuned to consider more/less endpoints wrt #requests. Try 0 or mean, or mean-std
endpts_listeners_set.add(eID)
                for c in endpoints[eID]['caches']:
                    potential_caches_set.add(c)
potential_caches = list(potential_caches_set)
## Re-order the potential caches
potential_caches_reordered = []
for pc in mostConnected_caches:
if pc in potential_caches:
potential_caches_reordered.append(pc)
potential_caches = copy.deepcopy(potential_caches_reordered)
if len(potential_caches) == 0:
#print "Nothing to cache with this video"
continue
#print "endpts_listeners_set: ", endpts_listeners_set
#print "potential_caches:", potential_caches
# compute the score for various combination of videos in caches
test = 0
scoresDelta = []
print cut_the_crap(scoresDelta, factor=0.1)
while newScore > oldScore and cut_the_crap(scoresDelta, factor=0.1):
scoresDelta.append(newScore-oldScore)
print "newScore= ",newScore
print "oldScore= ", oldScore
print "newScore-oldScore= ",newScore-oldScore
oldScore = newScore
test += 1
# keep adding the same video to more caches. Start with one cache, see if more caches improves
# Build the possible configurations to test
test_videoIDs_in_caches = [ copy.deepcopy(videoIDs_in_caches) for c in xrange(len(potential_caches))]
#print "\nConfigurations before adding the top video to the caches"
#pprint(test_videoIDs_in_caches)
for t in xrange(len(potential_caches)):
cacheID = potential_caches[t]
#print "cacheID to add the video to: ", cacheID
if top_video_ID not in test_videoIDs_in_caches[t][cacheID]:
temp_config | |
-- an end-user error
if not self.commands:
raise DistutilsArgError, "no commands supplied"
# All is well: return true
return 1
def _get_toplevel_options(self):
"""Return the non-display options recognized at the top level.
This includes options that are recognized *only* at the top
level as well as options recognized for commands.
"""
if sys.version < '2.4':
toplevel_options = self.global_options
else:
toplevel_options = Distribution._get_toplevel_options(self)
return toplevel_options + self.toplevel_options
def finalize_options(self):
if sys.version < '2.5':
# Run the setter functions for the metadata fields that have them.
# Only those fields that have a supplied value (not None) will
# be considered.
for name, value in vars(self.metadata).items():
if value is not None:
try:
setter = getattr(self.metadata, 'set_' + name)
except AttributeError:
pass
else:
setter(value)
requires_python = self.get_requires_python()
if requires_python:
requires_python = 'Python (%s)' % ', '.join(requires_python)
requires_python = Version.VersionPredicate(requires_python)
python_version = version.StrictVersion()
python_version.version = sys.version_info[:3]
python_version.prerelease = sys.version_info[3:]
if not requires_python.satisfied_by(python_version):
raise DistutilsSetupError(
"%s requires %s" % (self.metadata.name, requires_python))
        # Initialize the container type data variables before dealing
        # with the information from the package definitions.
if self.packages is None:
self.packages = []
if self.package_dir is None:
self.package_dir = {}
if self.py_modules is None:
self.py_modules = []
if self.libraries is None:
self.libraries = []
if self.headers is None:
self.headers = []
if self.ext_modules is None:
self.ext_modules = []
if self.include_dirs is None:
self.include_dirs = []
if self.scripts is None:
self.scripts = []
if self.data_files is None:
self.data_files = []
if self.package_file is None:
self.package_file = self.script_name
if self.namespace_packages is None:
self.namespace_packages = []
# Per PEP 314, only use License and Platform if they can't be
# handled by an appropriate classifier. Or, in our case, aren't
# being handled by a classifier entry.
has_platform = has_license = False
for classifier in self.get_classifiers():
category = classifier.split('::', 1)[0]
category = category.strip().title()
if category == 'Operating System':
has_platform = True
elif category == 'License':
has_license = True
if self.metadata.license and has_license:
raise DistutilsSetupError("license keyword conflicts with"
" classifiers list")
if self.metadata.platforms and has_platform:
raise DistutilsSetupError("platforms keyword conflicts with"
" classifiers list")
# Finalize "private" variables; those that are not part of the
# setup arguments.
self._allfiles = None
Distribution.finalize_options(self)
def print_commands(self):
"""
Overridden to add the commands defined by 'command_mapping' to the
list of "standard commands".
"""
std_commands = []
is_std = {}
for command in self.standard_commands:
std_commands.append(command)
is_std[command] = True
klass = self.get_command_class(command)
for command, method in klass.sub_commands:
std_commands.append(command)
is_std[command] = True
extra_commands = []
for command in self.cmdclass:
if command not in is_std:
extra_commands.append(command)
max_length = max(map(len, (std_commands + extra_commands)))
self.print_command_list(std_commands, "Standard commands", max_length)
if extra_commands:
print
self.print_command_list(extra_commands, "Extra commands",
max_length)
return
def get_command_list(self):
"""
Overridden to add the commands defined by 'command_mapping' to the
list of (command, description) tuples.
"""
for command in self.command_mapping:
self.get_command_class(command)
return Distribution.get_command_list(self)
def print_option_list(self, options, header, max_length):
# Generate lines of help text.
line_width = Terminfo.GetColumns()
opt_width = max_length + 2 + 2 + 2 # room for indent + dashes + gutter
text_width = line_width - opt_width
big_indent = ' ' * opt_width
print header
for option in options:
long, short, help = option[:3]
if long[-1] == '=':
long = long[0:-1]
# Case 1: no short option at all
if short is None:
opt_names = long
# Case 2: we have a short option, so we have to include it
# just after the long option
else:
opt_names = "%s (-%s)" % (long, short)
text = wrap_text(help, text_width)
if text:
print " --%-*s %s" % (max_length, opt_names, text[0])
for line in text[1:]:
print big_indent + line
else:
print " --%-*s" % (max_length, opt_names)
print
return
def _show_help (self, parser, global_options=1, display_options=1,
commands=[]):
# Gather the options for the distribution
options = []
if global_options:
if display_options:
global_options = self._get_toplevel_options()
else:
global_options = self.global_options
options.extend(global_options)
if display_options:
display_options = self.display_options
options.extend(display_options)
# Gather the options for the requested commands
commands = []
for command in self.commands:
klass = self.get_command_class(command)
command_name = getattr(klass, 'command_name', klass.__name__)
command_options = klass.user_options
if hasattr(klass, 'help_options'):
command_options = command_options + klass.help_options
commands.append((command_name, command_options))
options.extend(command_options)
# Determine maximum length of option names
max_length = 0
for option in options:
long = option[0]
short = option[1]
l = len(long)
if long[-1] == '=':
l = l - 1
if short is not None:
l = l + 5 # " (-x)" where short == 'x'
if l > max_length:
max_length = l
# Now print the option tables
if global_options:
self.print_option_list(global_options, "Global options:",
max_length)
if display_options:
self.print_option_list(display_options,
"Information display options (just display"
" information, ignore any commands):",
max_length)
for name, options in commands:
self.print_option_list(options,
"Options for '%s' command:" % name,
max_length)
print gen_usage(self.script_name)
return
# -- Command class/object methods ----------------------------------
def get_command_class(self, command):
"""
Extends Distribution.get_command_class() to search 'command_mapping'
for modules that implement that requested command.
"""
# Try user defined classes first (and already loaded classes)
klass = self.cmdclass.get(command)
if klass:
return klass
if command in self.command_aliases:
command = self.command_aliases[command]
base_name = self.command_mapping.get(command)
if base_name is None:
return Distribution.get_command_class(self, command)
command_package = 'Ft.Lib.DistExt'
module_name = command_package + '.' + base_name
klass_name = base_name
try:
module = __import__(module_name, {}, {}, [klass_name])
except ImportError:
# If the module exists but is just broken, re-raise the existing
# exception as this is (most likely) a developer error.
if sys.exc_info()[-1].tb_next is not None:
raise
raise DistutilsModuleError(
"invalid command '%s' (no module named '%s')" %
(command, module_name))
try:
klass = getattr(module, klass_name)
except AttributeError:
raise DistutilsModuleError(
"invalid command '%s' (no class '%s' in module '%s')" %
(command, klass_name, module_name))
# Make sure that the command provides the proper command name
try:
if command != klass.command_name:
raise AttributeError('command_name')
except AttributeError:
raise DistutilsClassError(
"command class %s must define 'command_name' as %r" %
(klass, command))
self.cmdclass[command] = klass
return klass
# -- Methods that operate on the Distribution ----------------------
def announce (self, msg, level=1):
"""If the current verbosity level is of greater than or equal to
'level' print 'msg' to stdout.
"""
log.log(level, msg)
# -- Distribution query methods ------------------------------------
def has_l10n(self):
# Used for both build and generate
return len(self.l10n) > 0
def has_sysconf(self):
# Used for install
return len(self.sysconf_files) > 0
def has_localstate(self):
# Used for install
return len(self.localstate_files) > 0
def has_docs(self):
# Used for both build and install
# Both scripts and modules have generated documentation
return (len(self.doc_files) > 0 or
self.has_modules() or self.has_scripts())
def has_text(self):
return self.license_file is not None or len(self.doc_files) > 0
def has_devel(self):
# Used for install
return len(self.devel_files) > 0
def has_bgen(self):
# Used for both sdist and generate
return self.bgen_files and len(self.bgen_files) > 0
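# A minimal sketch (hypothetical command and mapping) of the dynamic lookup that
# get_command_class() above performs: resolve a command name to a module inside
# the command package, import it, and pull out the class of the same name.
def _example_load_command_class(command, command_mapping, command_package='Ft.Lib.DistExt'):
    """Illustrative helper only; the real build machinery does not call this."""
    base_name = command_mapping[command]            # e.g. {'mycommand': 'MyCommand'}
    module_name = command_package + '.' + base_name
    module = __import__(module_name, {}, {}, [base_name])
    return getattr(module, base_name)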
# ----------------------------------------------------------------------
# Upgrade distutils core support to 2.5+ features
import re, operator
from distutils import dist
from distutils.util import rfc822_escape
class DistributionMetadata(dist.DistributionMetadata):
_METHOD_BASENAMES = dist.DistributionMetadata._METHOD_BASENAMES + (
'requires_python', 'requires_external')
requires_python = None
requires_external = None
copyright = None
def get_requires_python(self):
return self.requires_python or []
def set_requires_python(self, value):
if not isinstance(value, list):
value = [ v.strip() for v in value.split(',') ]
for v in value:
Version.SplitComparison(v)
self.requires_python = value
def get_requires_external(self):
return self.requires_external or []
def set_requires_external(self, value):
for v in value:
Version.SplitComparison(v)
self.requires_external = value
if sys.version < '2.5':
requires = None
provides = None
obsoletes = None
_METHOD_BASENAMES += ('requires', 'provides', 'obsoletes')
def get_requires(self):
return self.requires or []
def set_requires(self, value):
for v in value:
Version.VersionPredicate(v)
self.requires = value
def get_provides(self):
return self.provides or []
def set_provides(self, value):
for v in value:
Version.SplitProvision(v)
self.provides = value
def get_obsoletes(self):
return self.obsoletes or []
def set_obsoletes(self, value):
for v in value:
Version.VersionPredicate(v)
self.obsoletes = value
def write_pkg_info(self, base_dir):
"""
Write the PKG-INFO file into the release tree.
"""
pkg_info = open(os.path.join(base_dir, 'PKG-INFO'), 'w')
self.write_pkg_file(pkg_info)
pkg_info.close()
if sys.version < '2.3':
classifiers = None
download_url = None
_METHOD_BASENAMES += ('classifiers', 'download_url')
def get_classifiers(self):
return self.classifiers or []
def get_download_url(self):
return self.download_url or "UNKNOWN"
# -- PKG-INFO and .egg-info utility methods --------------------
def from_stream(cls, stream):
headers = email.message_from_file(stream)
| |
"""
Module implements classes and functions to specify data for use in pointlike analysis.
author(s): <NAME>, <NAME>
"""
__version__ = '$Revision: 1.29 $'
#$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/data/dataman.py,v 1.29 2016/06/22 17:02:49 wallacee Exp $
import os, sys
import collections
import glob
import warnings
from cPickle import dump,load
import numpy as np
from astropy.io import fits as pyfits
import pointlike
import skymaps
from uw.utilities import keyword_options,fitstools
from uw.data import dssman
class DataManException(Exception):pass
dataman_version=__version__.split()[1]
# TODO energy sanity check on DSS keywords
# -- scheme would be to cut all data that isn't commensurate
# -- or flag bad bins
# TODO event class check against Pass6 or Pass7
# TODO check to make sure FT2 and FT1 agree (compare Gti to FT2 times?)
# -- think this is too onerous if we allow FT1/FT2 lists
# -- eew: might also cause problems for some cases where the FT1 Gti is
# -- subset of the FT2 Gti, which I think can be valid.
# TODO idea for binning -- just have it save both 4 and 8?
# TODO -- check exposure radius against ROI kwargs...
# -- eew: perhaps better done in the ROI constructor?
def get_pass(ft1):
""" Try to determine if the provided FT1 file is Pass6 or Pass7.
If the algo. can't figure it out, Pass7 is assumed."""
f = pyfits.open(ft1)
h = f[1]._header
v = h.get('PASS_VER')
if (v is not None):
if str(v).strip()[:2]=='P7': return 7
if str(v).strip()[:2]=='P8': return 8
if 'CTBCLASSLEVEL' in h.keys(): return 6
return 7
def SimpleCut(vmin,vmax,vuni,colname):
""" Specify a simple (inclusive or exclusive) cut on a single
FT1 column, e.g. ZENITH_ANGLE < 100 or 100 < ENERGY < 100000.
To specify an open-ended cut, use None for the upper or lower
bound.
This is a wrapper/factory for DSSSimpleRange."""
return dssman.make_simple_dss(colname,vuni,vmin,vmax)
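# Hedged illustration of SimpleCut(): the open-ended zenith cut below mirrors the
# default returned by get_default() further down and is otherwise arbitrary.
def _example_zenith_cut():
    """Sketch only: inclusive cut ZENITH_ANGLE < 100 deg with no lower bound."""
    return SimpleCut(None, 100, 'deg', 'ZENITH_ANGLE')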
def get_default(colname, kw):
"""return DSS object for colname
defaults wired in, except for event class, get info from kw args
"""
if colname == 'ZENITH_ANGLE':
return SimpleCut(None,100,'deg','ZENITH_ANGLE')
if colname == 'THETA':
#print 'applying thetacut, kw=', kw
return SimpleCut(None,kw.get('thetacut',66.4),'deg','THETA')
if colname == 'EVENT_CLASS':
if kw.get('data_pass')>6:
d = dict(TYP='BIT_MASK(EVENT_CLASS,%d)'%kw.get('event_class_bit',2),UNI='DIMENSIONLESS',
VAL='1:1', REF=None)
return dssman.DSSBitMask(d)
else:
return SimpleCut(3,None,'dimensionless','EVENT_CLASS')
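# A minimal sketch (the FT1 filename is hypothetical) tying get_pass() and
# get_default() together: determine the data pass from the file, then build the
# matching EVENT_CLASS cut (a DSS bit mask for Pass 7/8, a simple range cut for Pass 6).
def _example_event_class_cut(ft1='photons_ft1.fits'):
    data_pass = get_pass(ft1)
    return get_default('EVENT_CLASS', dict(data_pass=data_pass, event_class_bit=2))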
class NsideMapper(object):
"""
Manage the mapping between energy bands and pixel size, as parameterized
by nside, the number of subdivisions of the base pixels in HEALPix scheme.
Roughly, the side of a (quadrilateral) pixel is 1/Nside radians, or
60/Nside degrees.
The default scheme is hardwired based on the pre-scaling of the PSF
for Pass 6. This is a power law with slope -0.8. This prescaling
gives, approximately, r68. The default pixelization is so that, at
100 MeV, 5 pixels fit within the sigma pre-scale.
This pixelization continues until about 1 GeV, when nside goes rapidly
to 8192, the maximum value for 32-bit architecture, with a pixel size
of 0.007 deg. We do this because with pixels appropriate to the PSF
the mean pixel occupation becomes 1 at about 1 GeV, so there is nothing
to be gained by binning. Using such small pixels means the data are
essentially unbinned in position for E > a few GeV.
"""
norms = [0.0116, 0.0192, 0.0283, 0.0202, 0.0161, 0.007] # pix size at 100 MeV in radians
# front back psf0 psf1 psf2 psf3
# derived from https://confluence.slac.stanford.edu/display/SCIGRPS/2015/02/22/P8V6+irfs
slopes = [-0.8]*6 # slope for pix size with energy
cuts = [20.]*6 # "cutoff" energy, 2 GeV = 20 in E_100 units
maxnside = [8192]*6
minnside = [0]*6
@staticmethod
def nside(en,ct=0):
"""Return nside for provide energy and conversion type.
en -- energy in MeV
ct -- conversion type (0/1)
"""
en = np.asarray(en)/100.
nsm = NsideMapper
mns = nsm.maxnside[ct]
t = nsm.norms[ct]*(en)**nsm.slopes[ct]*np.exp(-(en/nsm.cuts[ct])**2)
nside = np.round(float(mns)/(1+mns*t)).astype(int)
return np.maximum(nside,nsm.minnside[ct]).tolist()
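# Hedged worked example for NsideMapper.nside(): at 100 MeV (en/100 = 1) the
# front-conversion pre-scale is t ~ norms[0] = 0.0116 rad, so
# nside ~ round(8192 / (1 + 8192 * 0.0116)) ~ 85-86; at very high energy the
# exponential cutoff drives t -> 0 and nside saturates at maxnside = 8192.
def _example_nside_values():
    """Illustrative only: evaluate the mapping at a few energies (MeV)."""
    return [NsideMapper.nside(e, ct=0) for e in (100., 1000., 10000.)]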
class DataSpec(object):
""" Class to specify the data for use in a spectral analysis.
This includes both RAW data (e.g. FT1) and CUTS.
Required cuts are on ZENITH, THETA, and EVENT_CLASS
In order of precedence, these are taken from
(1) the FT1 DSS keywords
(2) user specification
(3) defaults
An exception is raised if a set of FT1 files have differing
DSS keywords. This is done to ensure consistency with gtmktime.
Some other cuts are recognized by the pointlike machinery.
(1) PULSE_PHASE (treated as a table, a la GTI) / TODO
If a binned photon file (binfile) and livetime cube (ltcube)
do not yet exist (see keywords), they will be created. If the
destination for these files isn't specified, files with sensible
names will be created in the same directory as the base FT1 file.
DSS keywords are saved both to the binfile and the ltcube,
conventionally in the primary header (0). On loading these
data, they keywords will be compared with those saved at the
time of their creation as a sanity check.
"""
defaults = (
('ft1',None,'a file, list of files, or wildcard expression'),
('ft2','$FERMI/ft2.fits','a file, list of files, or wildcard expression'),
('binfile',None,'(a) destination for new binfile or (b) location of existing one'),
('ltcube',None,'(a) destination for new ltcube or (b) location of existing one'),
('binsperdec',4,'energy bins per decade; must be 8 or 4'),
('psf_event_types', False,'if set, use the PSFn event types instead of front/back'),
('zenith_cut',None,'a SimpleCut wrapper giving zenith cuts'),
('theta_cut',None,'a SimpleCut wrapper giving theta cuts'),
('event_class_cut',None,'a SimpleCut wrapper giving event cuts'),
('event_class_bit',2, 'an integer specifying the event class, post pass 6'),
('gti_mask',None,'a GTI mask to apply to the data (intersection); note this can be used to set tstart/tstop for the data'),
('mc_src_id',-1,'select only photons from MC source ID; default is no selection'),
('mc_energy',False,'bin on MC_ENERGY instead of ENERGY'),
('clobber',False,'if True, will attempt to produce new binfile and ltcube and replace any existing ones'),
('quiet',True,'control verbosity, ever so coarsely'),
('use_weighted_livetime',True,'if True, calculate the weighted livetime for use in livetime-dependent corrections to the effective area'),
('livetime_buffer',10,'radius in degrees by which livetime cube cone is larger than ROI cone'),
('livetime_pixelsize',1,'pixel size to use for livetime calculation'),
        ('exposure_cube', None, 'if set, file names of a pair of exposure cubes generated by gtexpcube2'\
'override use of ltcube'),
('data_name', '', 'descriptive name for the data set'),
('legacy', False, 'relax DSS requirements for legacy files'),
('data_pass',7,'the generation (Pass6, Pass7,...) of the data'),
        ('nocreate', False, 'Set True to suppress creation of files, raise exception instead'),
# keyword controlling livetimecube pixel size? and buffer cone?
)
binner = None # static variable for PhotonBinner
@keyword_options.decorate(defaults)
def __init__(self,output=None,**kwargs):
""" **NB -- if ft1 is None, binfile MUST be set to a real file
**NB -- if ft2 is None, either ltcube must be set to a real file or $FERMI/ft2.fits must exist
"""
keyword_options.process(self,kwargs)
self._set_bins()
self.dss = None # initialize
self.gti = None
def init_data():
self.ft1files = self._parse_filename(self.ft1)
if self.ft1files is not None:
# Register FT1 DSS keywords
self._get_ft1_dss()
self._make_cuts()
# Get GTI from FT1 if not already set
if self.gti is None:
self.gti = self._get_GTI()
if not self._check_binfile():
if self.nocreate: raise DataManException('need to create %s' %self.binfile)
self._make_binfile()
elif not self._check_binfile():
raise ValueError('No FT1 files or valid binned data found. (Looking for %s)' % self.binfile)
init_data()
def init_exposure():
self.ft2files = self._parse_filename(self.ft2)
if self.exposure_cube is not None:
print 'using exposure cube files: ignore FT2'
full = [os.path.join(os.path.expandvars('$FERMI/data'),f) for f in self.exposure_cube]
            assert np.all(map(os.path.exists, full)), 'Exposure cube file(s) %s not found' % full
self.exposure_cube = full #replace with full path
return
if self.ft2files is not None:
if not self._check_ltcube():
if self.nocreate: raise DataManException('need to create %s' %self.ltcube)
self._make_ltcube()
elif not self._check_ltcube():
raise ValueError('No FT2 files or valid livetime found.')
init_exposure()
# save version to allow custom processing for backwards compat.
self.version = dataman_version
if output is not None: self.dump(output)
def __str__(self):
""" Pretty print of cuts/data."""
s = collections.deque()
        s.append('Event types: ' + ('PSF' if self.psf_event_types else 'Front/back'))
s.append('Bins per decade: {0}'.format(self.binsperdec))
s.append('DSS keywords:\n{0}'.format(self.dss))
def process_ft(files):
if files is None:
s.append('\t\tNone')
return
if len(files) < 10:
s.append('\n\t'.join(files))
else:
s.append('\n\t'.join(files[:5]))
s.append('...')
s.append('\n\t'.join(files[-5:]))
s.append('FT1 files: ')
process_ft(self.ft1files)
s.append('FT2 files: ')
process_ft(self.ft2files)
s.append('Binned data: {0}'.format(self.binfile))
s.append('Livetime cube: {0}'.format(self.ltcube))
return '\n'.join(s)
def __getstate__(self):
#If you're pickling, you shouldn't be clobbering
self.clobber = False
self.binner = None
return self.__dict__
def __setstate__(self,dict):
""" Override default unpickle to perform a few sanity checks."""
for t | |
# -*- coding: utf-8 -*-
# Copyright © 2014, German Neuroinformatics Node (G-Node)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
import os
import gc
import numpy as np
from warnings import warn
try:
from sys import maxint
except ImportError:
from sys import maxsize as maxint
import h5py
from .hdf5.h5group import H5Group
from .block import Block
from .section import Section
from .container import Container, SectionContainer
from . import util
from .exceptions import InvalidFile, DuplicateName
from .util import find as finders
from . import validator
from .compression import Compression
FILE_FORMAT = "nix"
HDF_FF_VERSION = (1, 2, 1)
def can_write(nixfile):
filever = nixfile.version
if len(filever) != 3:
raise RuntimeError("Invalid version specified in file.")
if HDF_FF_VERSION == filever:
return True
else:
return False
def can_read(nixfile):
filever = nixfile.version
if len(filever) != 3:
raise RuntimeError("Invalid version specified in file.")
ver_x, ver_y, _ = HDF_FF_VERSION
file_x, file_y, _ = filever
if ver_x == file_x and ver_y >= file_y:
return True
else:
return False
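# Hedged illustration of the compatibility rules above: writing needs an exact
# match with HDF_FF_VERSION, while reading only needs the same major version and
# a file minor version no newer than the library's. _FakeFile is a stand-in
# defined solely for this sketch.
class _FakeFile(object):
    def __init__(self, version):
        self.version = version
def _example_version_checks():
    same = _FakeFile(HDF_FF_VERSION)    # can_read: True,  can_write: True
    older = _FakeFile((1, 1, 0))        # can_read: True,  can_write: False
    newer = _FakeFile((1, 3, 0))        # can_read: False, can_write: False
    return [(can_read(f), can_write(f)) for f in (same, older, newer)]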
class FileMode(object):
ReadOnly = 'r'
ReadWrite = 'a'
Overwrite = 'w'
def map_file_mode(mode):
if mode == FileMode.ReadOnly:
return h5py.h5f.ACC_RDONLY
elif mode == FileMode.ReadWrite:
return h5py.h5f.ACC_RDWR
elif mode == FileMode.Overwrite:
return h5py.h5f.ACC_TRUNC
else:
raise ValueError("Invalid file mode specified.")
def make_fapl():
return h5py.h5p.create(h5py.h5p.FILE_ACCESS)
def make_fcpl():
fcpl = h5py.h5p.create(h5py.h5p.FILE_CREATE)
flags = h5py.h5p.CRT_ORDER_TRACKED | h5py.h5p.CRT_ORDER_INDEXED
fcpl.set_link_creation_order(flags)
return fcpl
class File(object):
def __init__(self, path, mode=FileMode.ReadWrite,
compression=Compression.Auto,
auto_update_timestamps=True):
"""
Open a NIX file, or create it if it does not exist.
:param path: Path to file
:param mode: FileMode ReadOnly, ReadWrite, or Overwrite.
(default: ReadWrite)
:param compression: No, DeflateNormal, Auto (default: Auto)
:param auto_update_timestamps: Enable/disable automatic updating of
'updated_at' timestamp. (default: True)
:return: nixio.File object
"""
try:
path = path.encode("utf-8")
except (UnicodeError, LookupError):
pass
if not os.path.exists(path) and mode == FileMode.ReadOnly:
raise RuntimeError(
"Cannot open non-existent file in ReadOnly mode!"
)
if not os.path.exists(path) or mode == FileMode.Overwrite:
mode = FileMode.Overwrite
h5mode = map_file_mode(mode)
fid = h5py.h5f.create(path, flags=h5mode, fapl=make_fapl(),
fcpl=make_fcpl())
self._h5file = h5py.File(fid)
self._root = H5Group(self._h5file, "/", create=True)
self._create_header()
else:
h5mode = map_file_mode(mode)
fid = h5py.h5f.open(path, flags=h5mode, fapl=make_fapl())
self._h5file = h5py.File(fid)
self._root = H5Group(self._h5file, "/")
self._h5group = self._root # to match behaviour of other objects
self._auto_update_timestamps = auto_update_timestamps
self._check_header(mode)
self.mode = mode
self._data = self._root.open_group("data", create=True)
self._metadata = self._root.open_group("metadata", create=True)
if "created_at" not in self._h5file.attrs:
self.force_created_at()
if "updated_at" not in self._h5file.attrs:
self.force_updated_at()
if compression == Compression.Auto:
compression = Compression.No
self._compr = compression
# make container props but don't initialise
self._blocks = None
self._sections = None
@classmethod
def open(cls, path, mode=FileMode.ReadWrite, compression=Compression.Auto,
backend=None, auto_update_timestamps=True):
if backend is not None:
warn("Backend selection is deprecated. Ignoring value.")
return cls(path, mode, compression, auto_update_timestamps)
def _create_header(self):
self._set_format()
self._set_version()
self._set_id()
def _check_header(self, mode):
if self.format != FILE_FORMAT:
raise InvalidFile
if mode == FileMode.ReadWrite:
if not can_write(self):
raise RuntimeError("Cannot open file for writing. "
"Incompatible version.")
elif mode == FileMode.ReadOnly:
if not can_read(self):
raise RuntimeError("Cannot open file. "
"Incompatible version.")
if self.version >= (1, 2, 0):
if not util.is_uuid(self.id):
raise RuntimeError("Cannot open file. "
"The file does not have an ID.")
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
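    # Hedged usage sketch for the context-manager support above (the path and
    # block names are hypothetical):
    #   with File.open('example.nix', FileMode.Overwrite) as nf:
    #       blk = nf.create_block('session', 'recording')
    #   nf.is_open()  # -> False once the with-block has closed the file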
@property
def id(self):
return self._root.get_attr("id")
def _set_id(self):
# file id attribute should only be set on creation (or format
# upgrade), so do nothing if it's already set
if self._root.get_attr("id"):
return
self._root.set_attr("id", util.create_id())
@property
def version(self):
"""
The file format version.
:type: tuple
"""
return tuple(self._root.get_attr("version"))
def _set_version(self):
# file format version should only be set on creation, so do nothing
# if it's already set
if self._root.get_attr("version"):
return
# convert to np.int32 since py3 defaults to 64
file_ver = np.array(HDF_FF_VERSION, dtype=np.int32)
self._root.set_attr("version", file_ver)
@property
def format(self):
"""
The format of the file. This read only property should always have the
value 'nix'.
:type: str
"""
return self._root.get_attr("format")
def _set_format(self):
self._root.set_attr("format", FILE_FORMAT.encode("ascii"))
@property
def auto_update_timestamps(self):
"""
If enabled, automatically updates the 'updated_at' attribute when an
object's data or attributes are changed.
:type: bool
"""
return self._auto_update_timestamps
@auto_update_timestamps.setter
def auto_update_timestamps(self, enable):
"""
If enabled, automatically updates the 'updated_at' attribute when an
object's data or attributes are changed.
:type: bool
"""
self._auto_update_timestamps = enable
@property
def created_at(self):
"""
The creation time of the file. This is a read-only property.
Use `force_created_at` in order to change the creation time.
:rtype: int
"""
return util.str_to_time(self._h5file.attrs["created_at"])
def force_created_at(self, time=None):
"""
Sets the creation time `created_at` to the given time
(default: current time).
:param time: The time to set
:type time: int
"""
if time is None:
time = util.now_int()
else:
util.check_attr_type(time, int)
self._h5file.attrs["created_at"] = util.time_to_str(time)
@property
def updated_at(self):
"""
The time of the last update of the file. This is a read-only
property. Use `force_updated_at` in order to change the update
time.
:rtype: int
"""
return util.str_to_time(self._h5file.attrs["updated_at"])
def force_updated_at(self, time=None):
"""
Sets the update time `updated_at` to the given time.
(default: current time)
:param time: The time to set (default: now)
:type time: int
"""
if time is None:
time = util.now_int()
else:
util.check_attr_type(time, int)
self._h5file.attrs["updated_at"] = util.time_to_str(time)
def is_open(self):
"""
Checks whether a file is open or closed.
:returns: True if the file is open, False otherwise.
:rtype: bool
"""
try:
_ = self._h5file.mode
return True
except ValueError:
return False
def validate(self):
return validator.check_file(self)
def pprint(self, indent=2, max_length=120, extra=True, max_depth=3):
"""
Pretty Printing the Data and MetaData Tree of the whole File
:param indent: The length of one indentation space
:type indent: int
:param max_length: Maximum length of each line of output
:type max_length: int
:param extra: True to print extra information of Entities
:type extra: bool
:param max_depth: Maximum recursion being printed in MetaData tree
:type max_depth: int
"""
print("File: name = {}".format(self._h5group.group.file.filename))
if self.blocks:
for blk in self.blocks:
blk.pprint(indent=indent,
max_length=max_length, extra=extra, start_depth=1)
if self.sections:
for sec in self.sections:
sec.pprint(indent=indent, max_depth=max_depth,
max_length=max_length, current_depth=1)
# TODO: if same file, set_attr("entity_id", id_)
def copy_section(self, obj, children=True, keep_id=True, name=""):
"""
Copy a section to the file.
:param obj: The Section to be copied
:type obj: nixio.Section
:param children: Specify if the copy should be recursive
:type children: bool
:param keep_id: Specify if the id should be kept
:type keep_id: bool
:param name: Name of copied section, Default is name of source section
:type name: str
:returns: The copied section
:rtype: nixio.Section
"""
if not isinstance(obj, Section):
raise TypeError("Object to be copied is not a Section")
if obj._sec_parent:
src = "{}/{}".format("sections", obj.name)
else:
src = "{}/{}".format("metadata", obj.name)
clsname = "metadata"
if not name:
name = str(obj.name)
sec = self._h5group.open_group("sections", True)
if name in sec:
raise NameError("Name already exist. Possible solution is to "
"provide a new name when copying destination "
"is the same as the source parent")
obj._parent._h5group.copy(source=src, dest=self._h5group,
name=name, cls=clsname,
shallow=not children, keep_id=keep_id)
if not children:
for prop in obj.props:
self.sections[obj.name].create_property(copy_from=prop, keep_copy_id=keep_id)
return self.sections[obj.name]
def flush(self):
self._h5file.flush()
def close(self):
"""
Closes an open file.
"""
gc.collect() # should handle refs better instead of calling collect()
# Flush is probably unnecessary
self._h5file.flush()
self._h5file.close()
# Block
def create_block(self, name="", type_="", compression=Compression.Auto,
copy_from=None, keep_copy_id=True):
"""
Create a new block inside the file.
:param name: The name of the block to create.
:type name: str
:param type_: The type of the block.
:type type_: str
:param compression: No, DeflateNormal, Auto (default: Auto)
:param copy_from: The Block to be copied, None in normal mode
:type copy_from: nixio.Block
:param keep_copy_id: Specify if the id should be copied in copy mode
:type keep_copy_id: bool
:returns: The newly created block.
:rtype: nixio.Block
"""
if copy_from:
if not isinstance(copy_from, Block):
raise TypeError("Object to be copied is not a Block")
clsname = "data"
src = "{}/{}".format(clsname, copy_from.name)
if not name:
name = str(copy_from.name)
if name in self._data:
raise NameError("Name already exist. Possible solution is to "
"provide a new name when copying destination "
"is the same as the source parent")
blk = copy_from._parent._h5group.copy(source=src, dest=self._h5group, name=name, cls=clsname,
keep_id=keep_copy_id)
entity_id = blk.attrs["entity_id"]
return self.blocks[entity_id]
if name in self._data:
raise DuplicateName("Block with the given name already exists!")
if compression == Compression.Auto:
compression = self._compr
| |
maint_date
maintenance.maint_type = row[8]
maintenance.current_mileage = row[7]
maintenance.description = row[2]
maintenance.projected_cost = row[11]
maintenance.actual_cost = row[12]
maintenance.difference = row[13]
maintenance.status = "Good Condition"
maintenance.invoice_number = row[3]
maintenance.service_provider = row[4]
maintenance.created_by=request.user
maintenance.modified_by=request.user
maintenance.accept=True
maintenance.authorize="Aproved"
if "Service" in str(row[8]):
service = ServiceBooking.objects.filter(vehicle=maintenance.vehicle, booking_date=maintenance.maint_date, mileage=maintenance.current_mileage ).order_by('-id')[0]
maintenance.service_booking_number = service
maintenance.save()
upload_list.append(maintenance)
return upload_list
def import_incidences(request, import_file):
upload_list = []
dataReader = csv.reader(open(import_file), delimiter=',', quotechar='"')
capture_date = date.today()
for row in dataReader:
if not row[0] == "IncidentDate":
incident = Incident()
employee = row[59]
drivers = Employee.objects.filter(employee_old_id = employee)
if len(drivers) > 0:
driver = drivers.first()
incident.driver = driver
licence_plate = str(row[60])
vehicles = Vehicle.objects.filter(licence_plate = licence_plate)
print(licence_plate, ": Data Imported")
if len(vehicles) > 0:
vehicle = vehicles.first()
incident.vehicle = vehicle
inc_date = datetime.strptime(row[0], "%Y-%m-%d").date()
incident.incident_date = inc_date
inc_time = datetime.strptime(row[1], "%H:%M").time()
incident.time_of_incident = inc_time
incident.incident_type = row[2]
case_number = row[3]
if case_number == "":
case_number = "%s-%s-%s"%(driver.id,vehicle.id,row[0])
incident.case_number = case_number
incident.location = row[4]
incident.Description = row[5]
incident.recomendations = row[6]
incident.date_reported = row[7]
incident.police_station = row[8]
incident.damage_extent = row[9]
incident.right_rear_fender =row[10]
incident.right_rear_wheel = row[11]
incident.right_rear_door = row[12]
incident.right_rear_lamp = row[13]
incident.right_rear_window = row[14]
incident.right_rear_door_window = row[15]
incident.right_rear_viewmirror = row[16]
incident.right_front_door_window = row[17]
incident.right_front_door = row[18]
incident.right_front_wheel = row[19]
incident.right_front_fender = row[20]
incident.right_head_lamp = row[21]
incident.left_rear_fender =row[22]
incident.left_rear_wheel = row[23]
incident.left_rear_door = row[24]
incident.left_rear_lamp = row[25]
incident.left_rear_window = row[26]
incident.left_rear_door_window = row[27]
incident.left_rear_viewmirror = row[28]
incident.left_front_door_window = row[29]
incident.left_front_door = row[30]
incident.left_front_wheel = row[31]
incident.left_front_fender = row[32]
incident.left_head_lamp = row[33]
incident.rear_bumper = row[34]
incident.boot_door = row[35]
incident.rear_wind_screen = row[36]
incident.car_top = row[37]
incident.wind_screen = row[38]
incident.hood = row[39]
incident.grill = row[40]
incident.front_bumper = row[41]
incident.chasis = row[42]
incident.suspension = row[43]
incident.engine = row[44]
incident.gear_box = row[45]
incident.dashboard = row[46]
incident.dashboard_controls = row[47]
incident.sound_system = row[48]
incident.steering = row[49]
incident.left_front_seat = row[50]
incident.rear_seat = row[51]
incident.right_front_seat = row[52]
incident.door_panels = row[53]
incident.foot_pedals = row[54]
incident.hand_brake = row[55]
incident.capets = row[56]
incident.ceiling = row[57]
incident.current_mileage = row[58]
incident.save()
upload_list.append(incident)
return upload_list
def import_inspections(request, import_file):
upload_list = []
dataReader = csv.reader(open(import_file), delimiter=',', quotechar='"')
capture_date = date.today()
for row in dataReader:
# if not row[0] == "LogDate":
insepction = MileageLog()
log_date = datetime.strptime(row[0], "%Y-%m-%d").date()
insepction.log_date = log_date
insepction.starting_mileage = float(row[4])
insepction.current_mileage = float(row[5])
insepction.mileage = float(row[6])
insepction.fuel_balance_bf = 0
insepction.fuel_used = 0
insepction.fuel_balance = 0
insepction.doors = 1
insepction.seats = 1
insepction.body = 1
insepction.tires = 1
insepction.interior = 1
insepction.boot = 1
insepction.under_hood = 1
insepction.engine_check = 1
insepction.exhaust_check = 1
insepction.features_check = 1
insepction.sound_system = 1
insepction.steering = 1
insepction.brakes = 1
insepction.transmission = 1
insepction.overall_feel = 1
start_date = datetime.strptime(row[7], "%Y-%m-%d").date()
insepction.start_date = start_date
end_date = datetime.strptime(row[8], "%Y-%m-%d").date()
insepction.end_date = end_date
employee = row[10]
insepction.created_by=request.user
insepction.modified_by=request.user
drivers = Employee.objects.filter(employee_old_id = employee)
if len(drivers) > 0:
driver = drivers.first()
insepction.driver = driver
licence_plate = str(row[1])
vehicles = Vehicle.objects.filter(licence_plate = licence_plate)
print(licence_plate, ": Data Imported")
if len(vehicles) > 0:
vehicle = vehicles.first()
insepction.vehicle = vehicle
insepction.save()
upload_list.append(insepction)
return upload_list
def import_vehicle_allocations(request, import_file):
upload_list = []
dataReader = csv.reader(open(import_file), delimiter=',', quotechar='"')
capture_date = date.today()
for row in dataReader:
# if not row[0] == "TransactionType":
allocation = VehicleAllocation()
allocation.transaction_type = row[1]
allo_date = datetime.strptime(row[0], "%Y-%m-%d").date()
allocation.allocation_date = allo_date
allocation.cycle_limit = 5000
allocation.mileage = float(row[8])
allocation.status = row[5]
allocation.accept=True
allocation.authorize=True
allocation.created_by=request.user
allocation.modified_by=request.user
employee = row[3]
drivers = Employee.objects.filter(employee_old_id = employee)
if len(drivers) > 0:
driver = drivers.first()
allocation.driver = driver
card = row[4]
petrol_cards = FuelCard.objects.filter(card_number =card)
if len(petrol_cards) > 0:
petrol_card = petrol_cards.first()
allocation.fuel_card = petrol_card
licence_plate = row[2]
vehicles = Vehicle.objects.filter(licence_plate = licence_plate)
if len(vehicles) > 0:
vehicle = vehicles.first()
allocation.vehicle = vehicle
allocation.save()
upload_list.append(vehicle)
return upload_list
def import_vehicle(request, import_file):
upload_list = []
dataReader = csv.reader(open(import_file), delimiter=',', quotechar='"')
capture_date = date.today()
for row in dataReader:
if not row[0] == "LicencePlate":
vehicle= Vehicle()
vehicle.vehicle = row[0]
vehicle.ownership_type = row[1]
vehicle.vin_number = row[2]
vehicle.model_year = row[3]
vehicle.signing_mileage = float(row[4])
licence_date = datetime.strptime(row[5], "%Y-%m-%d").date()
vehicle.licence_disk_expiry = licence_date
vehicle.color = row[6]
vehicle.status = row[7]
vehicle.available = int(row[8])
vehicle.active = int(row[9])
re_start_date = datetime.strptime(row[10], "%Y-%m-%d").date()
vehicle.rental_start_date = re_start_date
re_end_date = datetime.strptime(row[11], "%Y-%m-%d").date()
vehicle.rental_end_date = re_end_date
purchase_date = datetime.strptime(row[12], "%Y-%m-%d").date()
vehicle.purchase_date = purchase_date
vehicle.purchase_amount = float(row[13])
vehicle.supplier = row[14]
vehicle.condition = row[15]
vehicle.invoice_number = row[16]
vehicle.warranty_expiry = row[17]
vehicle.financier = row[18]
vehicle.on_sp = int(row[19])
vehicle.on_mo = int(row[20])
vehicle.plan_provider = row[21]
vehicle.period = row[22]
start_date = datetime.strptime(row[23], "%Y-%m-%d").date()
vehicle.start_date = start_date
end_date = datetime.strptime(row[24], "%Y-%m-%d").date()
vehicle.end_date = end_date
vehicle.mileage_covered = float(row[25])
vehicle.fuel_balance = float(row[26])
vehicle.current_driver_id = row[28]
vehicle.make_n_model_id = row[29]
vehicle.save()
upload_list.append(vehicle)
return upload_list
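# Hedged sketch of a shared helper for the date-parsing pattern repeated in the
# importers above; parse_import_date is hypothetical and is not wired into them.
def parse_import_date(value, fmt="%Y-%m-%d"):
    """Return a date for a CSV cell, or None when the cell is empty or malformed."""
    try:
        return datetime.strptime(value, fmt).date()
    except (ValueError, TypeError):
        return None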
# def imports(request):
# upload_file_form = fileUploadForm(request.POST or None, request.FILES or None, prefix='doc')
# context = {
# "upload_file_form": upload_file_form,
# }
# post = request.POST
# if request.POST:
# if u'upload' in post:
# # services = ServiceBooking.objects.all()
# # for service in services:
# # service.next_service_mileage = service.mileage + 10000
# # service.save()
# # print("Saved")
# validated = upload_file_form.is_valid()
# if validated:
# uploaded = upload_file_form.save(commit=False)
# uploaded.file_name = uploaded.file.name
# uploaded.transaction = "Data Imports"
# uploaded.save()
# file = '%s/%s'% (settings.MEDIA_ROOT, uploaded.file)
# # import_vehicle(request, file)
# # import_vehicle_allocations(request, file)
# # import_fuel_cards(request, file)
# # import_inspections(request, file)
# # import_incidences(request, file)
# # import_maintenance(request, file)
# # import_service_bookings(request, file)
# # import_traffic_fines(request, file)
# # import_claims(request, file)
# employee_contacts(request, file)
# # transactions = VehicleMaintenance.objects.all()
# # for transaction in transactions:
# # if not transaction.comments == "":
# # comment = Comment()
# # comment.comments = transaction.comments
# # comment.vehicle = transaction.vehicle
# # comment.comment_type = "VehicleMaintenance"
# # comment.obj_id = transaction.id
# # comment.created_by = request.user
# # print(comment.vehicle, comment.comment_type)
# # comment.save()
# return HttpResponseRedirect(reverse('fleet:vehiclesList'))
# return render(request, "imports.html", context)
# def employee_contacts(request, import_file):
# upload_list = []
# dataReader = csv.reader(open(import_file), delimiter=',', quotechar='"')
# capture_date = date.today()
# for row in dataReader:
# if not row[0] == "FullName":
# contact = Contact()
# employee = row[11]
# drivers = Employee.objects.filter(employee_old_id = employee)
# if len(drivers) > 0:
# driver = drivers.first()
# contact.employee = driver
# contact.email = row[6]
# contact.celphone ='0%s'%(row[7])
# contact.res_address1 = row[5]
# contact.save()
# upload_list.append(contact)
# return upload_list
# def import_comments(request):
# import_list = []
# transactions = Incident.objects.all()
# for transaction in transactions:
# if not transaction.recomendations == "":
# comment = Comment()
# comment.comments = transaction.recomendations
# comment.vehicle = transaction.vehicle
# comment.comment_type = "Incidences"
# comment.obj_id = transaction.id
# comment.created_by = request.user
# print(comment.vehicle, comment.comment_type)
# comment.save()
# import_list.append(comment)
# return import_list
# def import_claims(request, import_file):
# upload_list = []
# dataReader = csv.reader(open(import_file), delimiter=',', quotechar='"')
# capture_date = date.today()
# for row in dataReader:
# if not row[0] == "SubmissionDate":
# claim = InsuranceClaim()
# employee = row[9]
# drivers = Employee.objects.filter(employee_old_id = employee)
# if len(drivers) > 0:
# driver = drivers.first()
# claim.driver = driver
# licence_plate = str(row[10])
# vehicles = Vehicle.objects.filter(vehicle = licence_plate)
# print(licence_plate, ">>>>>>>Data Imported")
# if len(vehicles) > 0:
# vehicle = vehicles.first()
# claim.vehicle = vehicle
# submission_date = datetime.strptime(row[0], "%Y-%m-%d").date()
# claim.submission_date = submission_date
# claim_number = row[1]
# if claim_number == "":
# claim_number = "%s-%s-%s"%(driver.id,vehicle.id,row[0])
# claim.claim_number = claim_number
# payout_date = datetime.strptime(row[2], "%Y-%m-%d").date()
# claim.payout_date = payout_date
# claim.payout_amount = row[3]
# sp_payout_date = datetime.strptime(row[4], "%Y-%m-%d").date()
# claim.sp_payout_date = sp_payout_date
# claim.sp_payout_amount = row[5]
# claim.excess = row[6]
# claim.comments = row[7]
# claim.claim_status = row[8]
# claim.save()
# upload_list.append(claim)
# return upload_list
# def import_traffic_fines(request, import_file):
# upload_list = []
# dataReader = csv.reader(open(import_file), delimiter=',', quotechar='"')
# capture_date = date.today()
# for row in dataReader:
# if not row[0] == "NoticeNumber":
# notice_number = row[0]
# existing = Trafficfine.objects.filter(notice_number=notice_number).count()
# if existing == 0:
# fine = Trafficfine()
# fine.notice_number = row[0]
# offence_date = datetime.strptime(row[1], "%Y-%m-%d").date()
# fine.offence_date = offence_date
# due_date = datetime.strptime(row[2], "%Y-%m-%d").date()
# fine.due_date = due_date
# fine.description = str(row[3])
# fine.location = row[4]
# fine.amount = row[5]
# court_date = datetime.strptime(row[6], "%Y-%m-%d").date()
# fine.court_date = court_date
# fine.serious_offence = int(row[7])
# fine.awaiting_summons = int(row[8])
# fine.court_appearance = int(row[9])
# fine.court_attended = int(row[10])
# fine.paid = int(row[11])
# date_paid = datetime.strptime(row[12], "%Y-%m-%d").date()
# if row[12] == "1900-01-01":
# date_paid = due_date
# fine.payment_date = date_paid
# employee = row[13]
# drivers = Employee.objects.filter(employee_old_id = employee)
# if len(drivers) > 0:
# driver = drivers.first()
# fine.driver = driver
# licence_plate = str(row[14])
# vehicles = Vehicle.objects.filter(vehicle = licence_plate)
# print("Vehicle:>>",licence_plate, notice_number, ":>>Data Imported")
# if len(vehicles) > | |
"""UmiSchedules module."""
import calendar
import collections
import hashlib
from datetime import datetime
import numpy as np
import pandas as pd
from validator_collection import validators
from archetypal.schedule import Schedule, _ScheduleParser, get_year_for_first_weekday
from archetypal.template.umi_base import UmiBase
from archetypal.utils import log
class UmiSchedule(Schedule, UmiBase):
"""Class that handles Schedules."""
__slots__ = ("_quantity",)
def __init__(self, Name, quantity=None, **kwargs):
"""Initialize object with parameters.
Args:
Name:
quantity:
**kwargs:
"""
super(UmiSchedule, self).__init__(Name, **kwargs)
self.quantity = quantity
@property
def quantity(self):
"""Get or set the schedule quantity."""
return self._quantity
@quantity.setter
def quantity(self, value):
self._quantity = value
@classmethod
def constant_schedule(cls, value=1, Name="AlwaysOn", Type="Fraction", **kwargs):
"""Create an UmiSchedule with a constant value at each timestep.
Args:
Type:
value (float):
Name:
idf:
**kwargs:
"""
value = validators.float(value)
return super(UmiSchedule, cls).constant_schedule(
value=value, Name=Name, Type=Type, **kwargs
)
@classmethod
def random(cls, Name="AlwaysOn", Type="Fraction", **kwargs):
"""Create an UmiSchedule with a randomized value (0-1) at each timestep.
Args:
Name (str): The name of the Schedule.
Type (str or ScheduleTypeLimits):
**kwargs: keywords passed to the constructor.
"""
values = np.random.rand(
8760,
)
return cls(Values=values.tolist(), Name=Name, Type=Type, **kwargs)
@classmethod
def from_values(cls, Name, Values, Type="Fraction", **kwargs):
"""Create an UmiSchedule from a list of values.
Args:
Name (str): The name of the Schedule.
Values (list):
Type:
**kwargs:
"""
return super(UmiSchedule, cls).from_values(
Name=Name, Values=Values, Type=Type, **kwargs
)
def combine(self, other, weights=None, quantity=None):
"""Combine two UmiSchedule objects together.
Args:
other (UmiSchedule): The other Schedule object to combine with.
weights (list, dict or string): Attribute of self and other containing the
weight factor. If a list is passed, it must have len = 2; the first
element is applied to self and the second element is applied to other.
If a dict is passed, the self.Name and other.Name are the keys. If a
                str is passed, the attribute of that name is read from both self and other and used as the weights.
quantity (list or dict or bool): Scalar value that will be multiplied by
self before the averaging occurs. This ensures that the resulting
schedule returns the correct integrated value. If a dict is passed,
keys are schedules Names and values are quantities.
Returns:
(UmiSchedule): the combined UmiSchedule object.
Raises:
TypeError: if Quantity is not of type list, tuple, dict or a callable.
"""
# Check if other is None. Simply return self
if not other:
return self
if not self:
return other
if not isinstance(other, UmiSchedule):
msg = "Cannot combine %s with %s" % (
self.__class__.__name__,
other.__class__.__name__,
)
raise NotImplementedError(msg)
# check if the schedule is the same
if self == other:
if self.quantity and other.quantity:
self.quantity += other.quantity
return self
# check if self is only zeros. Should not affect other.
if not np.any(self.all_values):
return other
# check if other is only zeros. Should not affect self.
if not np.any(other.all_values):
return self
if not weights:
log(
'using 1 as weighting factor in "{}" '
"combine.".format(self.__class__.__name__)
)
weights = [1, 1]
elif isinstance(weights, str):
# get the attribute from self and other
weights = [getattr(self, weights), getattr(other, weights)]
elif isinstance(weights, (list, tuple)):
# check if length is 2.
length = len(weights)
if length != 2:
                raise ValueError(
                    "Using a list or tuple, the weights attribute must "
                    "have a length of 2, not {}.".format(length)
)
elif isinstance(weights, dict):
weights = [weights[self.Name], weights[other.Name]]
if quantity is None:
new_values = np.average(
[self.all_values, other.all_values], axis=0, weights=weights
)
elif isinstance(quantity, dict):
# Multiplying the schedule values by the quantity for both self and other
# and then using a weighted average. Finally, new values are normalized.
new_values = np.average(
[
self.all_values * quantity[self.Name],
other.all_values * quantity[other.Name],
],
axis=0,
weights=weights,
)
new_values /= quantity[self.Name] + quantity[other.Name]
elif callable(quantity):
new_values = np.average(
np.stack((self.all_values, other.all_values), axis=1),
axis=1,
weights=[
quantity(self.predecessors.data),
quantity(other.predecessors.data),
],
)
elif isinstance(quantity, (list, tuple)):
# Multiplying the schedule values by the quantity for both self and other
# and then using a weighted average. Finally, new values are normalized.
self_quantity, other_quantity = quantity
new_values = (
self.all_values * self_quantity + other.all_values * other_quantity
) / sum(quantity)
elif isinstance(quantity, bool):
new_values = np.average(
[self.all_values, other.all_values],
axis=0,
weights=[self.quantity * weights[0], other.quantity * weights[1]],
)
else:
raise TypeError("Quantity is not of type list, tuple, dict or a callable")
# the new object's name
meta = self._get_predecessors_meta(other)
# Overriding meta Name
hasher = hashlib.md5()
hasher.update(new_values)
meta["Name"] = f"Combined_UmiSchedule_{hasher.hexdigest()}"
quantity = np.nansum(
[self.quantity or float("nan"), other.quantity or float("nan")]
)
new_obj = UmiSchedule.from_values(
Values=new_values, Type="Fraction", quantity=quantity, **meta
)
new_obj.predecessors.update(self.predecessors + other.predecessors)
new_obj.weights = sum(weights)
return new_obj
def develop(self):
"""Develop the UmiSchedule into a Year-Week-Day schedule structure."""
year, weeks, days = self.to_year_week_day()
lines = ["- {}".format(obj) for obj in self.predecessors]
_from = "\n".join(lines)
        year.Comments = (
            f"Year Week Day schedules created from: \n{_from}" + str(id(self))
        )
return year
def get_unique(self):
"""Return the first of all the created objects that is equivalent to self."""
return super(UmiSchedule, self.develop()).get_unique()
def to_dict(self):
"""Return UmiSchedule dictionary representation.
Hint:
UmiSchedule does not implement the to_dict method because it is not used
when generating the json file. Only Year-Week- and DaySchedule classes
are used.
"""
return self.to_ref()
def to_ref(self):
"""Return a ref pointer to self."""
return {"$ref": str(self.id)}
def validate(self):
"""Validate object and fill in missing values."""
return self
def mapping(self, validate=True):
"""Get a dict based on the object properties, useful for dict repr.
Args:
validate (bool): If True, try to validate object before returning the
mapping.
"""
if validate:
self.validate()
return dict(
Category=self.Category,
Type=self.Type,
Comments=self.Comments,
DataSource=self.DataSource,
Name=self.Name,
)
def get_ref(self, ref):
"""Get item matching reference id.
Args:
ref:
"""
return next(
iter(
[
value
for value in UmiSchedule.CREATED_OBJECTS
if value.id == ref["$ref"]
]
),
None,
)
def duplicate(self):
"""Get copy of self."""
return self.__copy__()
def __add__(self, other):
"""Return new object that is the combination of self and other."""
return UmiSchedule.combine(self, other)
def __repr__(self):
"""Return a representation of self."""
name = self.Name
resample = self.series.resample("D")
min = resample.min().mean()
mean = resample.mean().mean()
max = resample.max().mean()
return (
name
+ ": "
+ "mean daily min:{:.2f} mean:{:.2f} max:{:.2f} ".format(min, mean, max)
+ (f"quantity {self.quantity}" if self.quantity is not None else "")
)
def __str__(self):
"""Return the string representation of self."""
return repr(self)
def __hash__(self):
"""Return the hash value of self."""
return hash((self.__class__.__name__, getattr(self, "Name", None)))
def __eq__(self, other):
"""Assert self is equivalent to other."""
if not isinstance(other, UmiSchedule):
return NotImplemented
if self.all_values.size != other.all_values.size:
return NotImplemented
else:
return all(
[
self.strict == other.strict,
self.Type == other.Type,
self.quantity == other.quantity,
np.allclose(self.all_values, other.all_values, rtol=1e-02),
]
)
def __copy__(self):
"""Create a copy of self."""
return self.__class__(
Name=self.Name,
quantity=self.quantity,
Values=self.all_values.tolist(),
strict=self.strict,
Type=self.Type,
)
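# Hedged usage sketch (not part of the original module): combining two schedules with
# explicit weights. The schedule names and values below are illustrative only.
def _example_combine_umi_schedules():
    """Illustrative only: weighted combination of two constant UmiSchedules."""
    always_on = UmiSchedule.constant_schedule(value=1.0, Name="AlwaysOn")
    half_on = UmiSchedule.constant_schedule(value=0.5, Name="HalfOn")
    # Weighted average of the two hourly profiles; the result's weights attribute is sum(weights).
    return always_on.combine(half_on, weights=[2, 1])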
class YearSchedulePart:
"""Helper Class for YearSchedules defined with FromDay FromMonth ToDay ToMonth."""
__slots__ = ("_from_day", "_from_month", "_to_day", "_to_month", "_schedule")
def __init__(
self,
FromDay=None,
FromMonth=None,
ToDay=None,
ToMonth=None,
Schedule=None,
**kwargs,
):
"""Initialize YearSchedulePart.
Args:
FromDay (int): This numeric field is the starting day for the
schedule time period.
FromMonth (int): This numeric field is the starting month for the
schedule time period.
ToDay (int): This numeric field is the ending day for the schedule
time period.
ToMonth (int): This numeric field is the ending month for the
schedule time period.
Schedule (UmiSchedule): The associated UmiSchedule related to this
object.
kwargs (dict): Other Keyword arguments.
"""
self.FromDay = FromDay
self.FromMonth = FromMonth
self.ToDay = ToDay
self.ToMonth = ToMonth
self.Schedule = Schedule
@property
def FromDay(self):
"""Get or set the start day-of-month number [int]."""
return self._from_day
@FromDay.setter
def FromDay(self, value):
self._from_day = validators.integer(value, minimum=1, maximum=31)
@property
def FromMonth(self):
"""Get or set the start month-number [int]."""
return self._from_month
@FromMonth.setter
def FromMonth(self, value):
self._from_month = validators.integer(value, minimum=1, maximum=12)
@property
def ToDay(self):
"""Get or set the end day-of-month number [int]."""
return self._to_day
@ToDay.setter
def ToDay(self, value):
self._to_day = validators.integer(value, minimum=1, maximum=31)
@property
def ToMonth(self):
"""Get or set the end month-number [int]."""
return self._to_month
@ToMonth.setter
def ToMonth(self, value):
self._to_month = validators.integer(value, minimum=1, maximum=12)
@property
def Schedule(self):
"""Get or set the WeekSchedule object."""
return self._schedule
@Schedule.setter
def Schedule(self, value):
assert isinstance(value, WeekSchedule), "schedule must be of type WeekSchedule"
self._schedule = value
@classmethod
def from_dict(cls, data, schedules, **kwargs):
"""Create a YearSchedulePart object from a dictionary.
Args:
data (dict): The python dictionary.
| |
is '<CatName>_HTM'
    Output :- A list of N_trixels dictionaries containing the 2D matrix info
example:
By : <NAME> (original Matlab function by <NAME>) August 2018"""
#print('I am looking for the data in',catalogs_dir + '/' + CatDir + '/' +Filename)
Data=class_HDF5.HDF5(catalogs_dir + '/' + CatDir + '/' +Filename).load(VarName,numpy_array=True)#as many columns as trixels, 13 lines with:
# [index,Father index,son1 index,son2 index,son3 index,son4 index, Pole1 long, Pole1 lat,Pole2 long, Pole2 lat,Pole3 long, Pole3 lat, either Nan or the data]
N_trixels=np.shape(Data)[1]
#print('there are {0} trixels'.format(N_trixels))
    #load this data into a list of dictionaries
    #each trixel is a dictionary
    HTM_list=[]#will end up being a list of N_trixels dictionaries
for i in range(N_trixels):
trixel = dict()
trixel['level']=Data[0,i]#line 1 of column 0
if np.isnan(np.array(Data[1,i])).all() == True:
trixel['father']=[]
else:
trixel['father']=Data[1,i]
if np.isnan(np.array(Data[2,i])).all() == True:
trixel['son']=[]
else:
trixel['son']=Data[2:6,i]
trixel['PolesCoo'] = np.zeros((3, 2))
trixel['PolesCoo'][0, 0] = Data[6,i]
trixel['PolesCoo'][0, 1] = Data[7,i]
trixel['PolesCoo'][1, 0] = Data[8,i]
trixel['PolesCoo'][1, 1] = Data[9,i]
trixel['PolesCoo'][2, 0] = Data[10,i]
trixel['PolesCoo'][2, 1] = Data[11,i]
trixel['Nsrc']=Data[12,i]
HTM_list.append(trixel)
return HTM_list,Data
def load_colcell(CatDir,CatName):
ColCelFile = CatDir+'/'+CatName + '_htmColCell.mat'
test = sio.loadmat(ColCelFile)
if np.shape(test['ColCell'])[1] < np.shape(test['ColCell'])[0]:
# test=test.transpose()
Ncol = np.shape(test['ColCell'])[0]
else:
Ncol = np.shape(test['ColCell'])[1]
ColCell = np.empty((Ncol), dtype=object)
ColUnits = np.empty((Ncol), dtype=object)
if np.shape(test['ColCell'])[1] < np.shape(test['ColCell'])[0]:
# test=test.transpose()
Ncol = np.shape(test['ColCell'])[0]
for i, j in enumerate(test['ColCell'][:, 0]):
# print(str(test['ColCell'][i][0][0]))
ColCell[i] = str(test['ColCell'][i][0][0])
for i, j in enumerate(test['ColUnits'][0, :]):
if len(test['ColUnits'][0, i]) > 0:
ColUnits[i] = str(test['ColUnits'][0, i][0])
else:
ColUnits[i] = ' '
else:
Ncol = np.shape(test['ColCell'])[1]
for i, j in enumerate(test['ColCell'][0, :]):
# print(test['ColCell'][0,i][0])
ColCell[i] = str(test['ColCell'][0, i][0])
for i, j in enumerate(test['ColUnits'][0, :]):
if len(test['ColUnits'][0, i]) > 0:
ColUnits[i] = str(test['ColUnits'][0, i][0])
else:
ColUnits[i] = ' '
return ColCell, ColUnits
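# Hedged usage sketch (directory and catalog name are illustrative only): reading the
# column names and units stored in the catalog's CatName + '_htmColCell.mat' side file.
#   ColCell, ColUnits = load_colcell('./data/GAIADR2', 'GAIADR2')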
def load_trix_by_ind(CatName,index,SearchParValue=None,num=100,catalogs_dir='./data',Ncol=None,Verbose=True):#load_cat in Eran's library
"""Description: given a catalog basename and the index of a trixel, load the content of the corresponding trixel dataset to a numpy array
Input :- CatName
              - trixel index, or a dataset name
- A two element vector of lower and upper value. Only lines in which the sorted parameter is between the low and high value will be retrieved.
If empty, retrieve all lines. Default is empty.
-number of columns in the catalog.
    Output :- a numpy array with the content of the trixel, and Ind, the index of the first matching line within the full dataset (1 when no search range is given, None when the trixel is empty)
example:
By : <NAME> (original Matlab function by <NAME>) August 2018"""
if isinstance(index,str)==False:
names=get_file_dataset_from_trixel_id(CatName,index,NfilesinHDF=num,Verbose=Verbose)
Filename=names[0]
Data_set_name=names[1]
CatDir=get_CatDir(CatName)
if SearchParValue is None:
trixel_data=class_HDF5.HDF5(catalogs_dir + '/'+ CatDir + '/' + Filename).load(Data_set_name, numpy_array=True).T
Ind=1
else:
#load the index file
VarIndStr=Data_set_name+'_Ind' #the name of the index file
if Verbose==True:
print('Filename is',Filename)
DataInd=class_HDF5.HDF5(catalogs_dir+'/'+CatDir+'/'+Filename).load(VarIndStr,numpy_array=True,Verbose=Verbose).T#the content f the index file
if len(DataInd)>0:
Ndi=np.shape(DataInd)[0]
I1=bin_sear(DataInd[:,1],SearchParValue[0])
I2=bin_sear(DataInd[:,1],SearchParValue[1])
#print('before the if, I1 is {0} and I2 is {1}'.format(I1,I2))
Ind=DataInd[I1,0] #the
Offset=np.append(DataInd[I1,0]-1,0)
if I1==I2:
I2=I2+1
I2=min(I2,Ndi-1)
Block=[1+DataInd[I2,0]-DataInd[I1,0],Ncol]
#print('Block is',Block)
trixel_data=class_HDF5.HDF5(catalogs_dir+'/'+CatDir+'/'+Filename).load(Data_set_name,Offset=Offset,Block=Block,numpy_array=True,Verbose=Verbose).T
            #search the indexes of the
else:
trixel_data=np.array([])
Ind=None
return trixel_data,Ind
def bin_sear(X,Val): #Util.find.of eran
"""Description:
Input :- sorted vector (ascending)
- Value to search
Output :- Index of closest value
example:
By : <NAME> (original Matlab function by <NAME>) August 2018"""
N=len(X)
if N==1:
IndVal=1
else:
Ind1=0
Ind2=N-1
IndM=math.floor(0.5*N)
Y1=X[Ind1]
Y2=X[Ind2]
Ym=X[IndM]
Found=0
while Found==0:
if Val>Ym:
Ind1=IndM
Y1=X[Ind1]
if Ind2-Ind1>=2:
IndM= math.floor(0.5*(Ind2+Ind1))
else:
Found=1
if abs(Val-Y1)<abs(Val-Y2):
IndVal=Ind1
else:
IndVal=Ind2
Ym=X[IndM]
elif Val<Ym:
Ind2=IndM
Y2=X[Ind2]
if Ind2-Ind1>=2:
IndM=math.floor(0.5*(Ind1+Ind2))
else:
Found=1
if abs(Val-Y1)<abs(Val-Y2):
IndVal=Ind1
else:
IndVal=Ind2
Ym=X[IndM]
else:
Found=1
IndVal=IndM
return IndVal
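# Hedged worked example (not in the original source): with the sorted vector below, the
# entry closest to 4.2 is 5.0, so the returned index is 3.
#   >>> bin_sear(np.array([0.0, 1.0, 2.0, 5.0, 9.0]), 4.2)
#   3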
def mfind_bin(X,Vals):
    """Description: Binary search on a vector, running simultaneously on
                    multiple values. Note that you may need to add 1 to the
                    returned index in order to make sure the found value is
                    larger than the searched value.
Input :- Sorted column vector.
- Row vector of values to search.
Output :- Indices of nearest values.
example:
By : <NAME> (original Matlab function by <NAME>) August 2018"""
Nvals=len(Vals)
N=len(X)
I1=np.ones(Nvals)
I2=N*np.ones(Nvals)
Im=np.floor(0.5*(I1+I2)).astype(int)
#print('Im is',Im)
PrevIm=np.zeros(np.shape(Im)[0]).astype(int)
#print('PrevIm is', PrevIm)
#pdb.set_trace()
if np.shape(X)[0]<2:
if X.size==0:
Im=[]
else:
Im=np.ones(Nvals).astype(int)
else:
while np.all(Im==PrevIm)==False:
#print(np.all(Im==PrevIm))
#print('X[Im-1] is',X[Im-1])
FlagU=Vals>X[Im-1]
#print('FlagU is',FlagU)
FlagD=np.invert(FlagU)
#print('FlagD is',FlagD)
I1[FlagU]=Im[FlagU]
I2[FlagD]=Im[FlagD]
PrevIm=Im
Im=np.floor(0.5*(I1+I2)).astype(int)
#print('Im is',Im)
#print('PrevIm is',PrevIm)
return Im
def get_file_dataset_from_trixel_id(CatName,index,NfilesinHDF,Verbose=True):#get_file_var_from_htmid in Eran's library
"""Description: given a catalog basename and the index of a trixel and the number of trixels in an HDF5 file,
create the trixel dataset name
Input :- CatName
- index
- NfilesinHDF: number of datasets in an HDF5 files (default is 100)
Output :- Filename: name of the HDF5 file where the trixel_dataset is stored
- Datasetname: name of the trixel_dataset
example:
By : <NAME> (original Matlab function by <NAME>) August 2018"""
if Verbose==True:
print('index is',index)
num_file=math.floor(index/NfilesinHDF)*NfilesinHDF #equivalent to index//Nfiles*Nfiles
Filename='%s_htm_%06d.hdf5' % (CatName, num_file)
DatasetName='htm_%06d' % index
return Filename,DatasetName
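# Hedged worked example (the catalog name is illustrative): with the default of 100
# datasets per file, trixel 12345 maps to file number floor(12345 / 100) * 100 = 12300.
#   >>> get_file_dataset_from_trixel_id('GAIADR2', 12345, NfilesinHDF=100, Verbose=False)
#   ('GAIADR2_htm_012300.hdf5', 'htm_012345')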
def Number_of_trixels(Catname,catalogs_dir='./data',CatDir=None):
    """Description: finds the number of trixels for a given catalog
Input :- catalog basename
Output :- number of trixels for this catalog
example:
By : <NAME> (original Matlab function by <NAME>) August 2018"""
IndexFileName = get_index_filename(Catname)[0] # name of the index file associated with Catname
IndexVarName=get_index_filename(Catname)[1] # name of the data set containing the index filename content
List_of_dict=load_HTM_ind(IndexFileName,IndexVarName,catalogs_dir=catalogs_dir,CatDir=CatDir)[0]
Number_of_trixels_in_cat=len(List_of_dict)
return Number_of_trixels_in_cat
def simplify_list(val):
if isinstance(val, list) == False:
return val
else:
if len(val) > 1:
return val
else:
return simplify_list(val[0])
def simplify2(x):
IDc=[]
for i in x:
if isinstance(i, (list, tuple, np.ndarray)) == True:
for j in i:
IDc.append(j)
else:
IDc.append(i)
return IDc
#return simplify2(IDc)
def simplify3(x):
if isinstance(x[0],(list, tuple, np.ndarray)) == False:
return x
else:
y=simplify2(x)
#print(y)
return simplify3(y)
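# Hedged worked example (not in the original source): nesting is flattened recursively
# until the first element is no longer a sequence.
#   >>> simplify3([[1, 2], [3, 4]])
#   [1, 2, 3, 4]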
def match_cats(Cat,Refcat,Radius=2,RadiusUnits='arcsec'):
    """Description: translation of Eran's VO.search.match_cats. Given two spherical-coordinate catalogs, for each entry
    in the reference catalog (second input argument), search for all nearby sources in the catalog (first input).
Input :- A catalog sorted by declination. Ra and Dec in Rad
- A reference catalog. Ra and Dec in rad
- 'Radius' - Search radius. This is either a scalar or a vector which length is identical to that of the reference
catalog (second input). If a vector than each source in the reference catalog may have a different search radius.
Default is 2 (arcsec).
- 'RadiusUnits' - Search radius units. See convert.angular for options. Default is 'arcsec'.
    Output :- Vec: a dictionary with the following keys:
             Vec['Nfound']: a vector, the size of RefCat, with the number of sources in the catalog Cat that are within the search radius of the reference source with the same index.
             Vec['MinDist']: a vector, the size of RefCat, with the minimum distance (radians) of matched sources in Cat to the reference source with the same index. NaN if none found.
            - Res: a list of dictionaries, one item per *matched* reference source (its length is the number of
            reference sources that have at least one cross-matched object in Cat):
             Res['IndRef']: index of the source in the reference catalog.
             Res['IndCat']: list of indices in the catalog that are matched to the 'IndRef' source of the reference catalog.
             Res['Dist']: vector of angular distances (radians) for each one of the sources indicated in 'IndCat'.
             Res['Num']: number of sources within the search radius.
            - IndCatMinDist: a vector, the size of RefCat, with the index of the nearest catalog source to each reference source. NaN if no source was found.
example:
By : <NAME> (original Matlab function by <NAME>) August 2018"""
if RadiusUnits=='rad':
Radius=Radius
if RadiusUnits=='arcsec':
Radius=math.pi*Radius/(180.*3600.)
Ncat=np.shape(Cat)[0]
#print('Ncat is',Ncat)#ok
#print('Refcat is',Refcat)
Nref=np.shape(Refcat)[0]
#print('Nref is', Nref)#ok
Radius=Radius*np.ones(Nref)
Res=[]
Iuppx=mfind_bin(Cat[:,1],Refcat[:,1]+Radius) #only if second column is dec!
Ilowx=mfind_bin(Cat[:,1],Refcat[:,1]-Radius) #only if second column is dec!
#print('Iupx is',Iuppx)#ok
#print('Ilowx is',Ilowx)#ok
Ilow=np.zeros(np.shape(Ilowx)[0])
for r,s in enumerate(Ilowx):
Ilow[r]=max(1,Ilowx[r])
#Ilow=np.max(1,Ilowx)
Iupp=np.zeros(np.shape(Iuppx)[0])
for r,s in enumerate(Iuppx):
Iupp[r]=min(Ncat,Iuppx[r]+1)
#print('Iup is',Iupp)#ok
#print('Ilow is',Ilow)#ok
Ncand=Iupp-Ilow
Ic=np.array(np.where(Ncand>=1))[0]
#print('Ic is',Ic)
#print(np.shape(Ic))
#print('Ic is',Ic)#index where condition verified, same as matlab one -1
Nc=np.shape(Ic)[0]
#print('Nc is',Nc)
#pdb.set_trace()
Vec=dict()
Vec['Nfound']=np.zeros(Nref)
#vectornan=np.empty(Nref)
#vectornan[:]=np.nan
Vec['MinDist']=np.full(Nref, np.nan)#vectornan
Vec['MinPa']=np.full(Nref, np.nan)#vectornan
K=0
IndCatMinDist=np.full(Nref, np.nan)#vectornan
for Icr in range(Nc):
#print("Vec['MinDist']5 is", Vec['MinDist'])
#print('Nc is',Nc)
Iref=Ic[Icr]
#print('Iref is',Iref)#ok
#pdb.set_trace()
Icat=np.linspace(Ilow[Iref],Iupp[Iref],Iupp[Iref]-Ilow[Iref]+1).astype(int)
#print('Icat is',Icat)#ok
#print('Cat[Icat-1,0] is',Cat[Icat-1,0])#ok
#print('Cat[Icat-1,1] is',Cat[Icat-1,1])#ok
#print('Refcat[Iref,0]',Refcat[Iref,0])#ok
#print( 'Refcat[Iref,1]) is',Refcat[Iref,1])#ok
Dist=celestial.sphere_dist_fast(Cat[Icat-1,0],Cat[Icat-1,1],Refcat[Iref,0],Refcat[Iref,1])[0]
#print('Dist is',Dist)
#print('Radius[Iref] is',Radius[Iref])
IndRelative=np.where(Dist<=Radius[Iref])[0]
IndCat=Ilow[Icr]-1+IndRelative
#print('IndRelative is',IndRelative)#ok
#print('IndCat is',IndCat)#ok
Vec['Nfound'][Iref]=np.shape(IndCat)[0]#ok
#print("Vec['Nfound'][Iref] is",Vec['Nfound'][Iref])#ok
#pdb.set_trace()
if | |
import numpy as np
import matplotlib
import platform
if platform.system() == 'Darwin':
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib import rcParams
import torch
from datetime import datetime
import time
import pickle
import os
import seaborn as sns
import matplotlib.pylab as plt
from scipy.special import softmax
import json
from double_well_model import *
from metropolis import MetropolisHastings
from utils import *
from nflib.MADE import *
from nflib.flows import *
from nflib.spline_flows import NSF_AR, NSF_CL
import itertools
import os
cwd = os.getcwd()
print('current directory', cwd)
def main(params):
# setting device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
params['device'] = device
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# timing the entire run.
start_time = time.time()
if params['random_seed'] == 0:
params['random_seed'] = np.random.randint(1,100)
# setting the random seeds
torch.manual_seed(params['random_seed'])
np.random.seed(params['random_seed'])
# Creating new directory to save all run outputs in
date_time = str(datetime.now()).replace(' ', '_').replace(':', '_') # ensures there aren't any issues saving this as a file name.
experiment_name = params['exp_base_name']+"_rand_seed-%s_ML_epochs-%s_KL_epochs-%s_learning_rate-%s_MLweight-%s_KLweight-%s_explore%s_temperature-%s_s_time-%s" % (
params['random_seed'], params['MLepochs'], params['KLepochs'],
params['lr'], params['MLweight'], params['KLweight'],
params['explore'], params['temperature'], date_time )
os.mkdir('experiments/'+experiment_name)
experiment_dir = 'experiments/'+ experiment_name+'/'
# write out all of the parameters used into a text file:
with open(experiment_dir+ 'params_used.txt', 'w') as file:
file.write(json.dumps(params, cls=NumpyEncoder))
# loading in the environment class, used to score the evolutionary hamiltonians
well_params = DoubleWell.params_default.copy()
well_params['dim'] = 2
gen_model = DoubleWell(params=well_params)
if params['MCMC'] == True:
nsteps = 20000
x0_left = np.array([[-1.8, 0.0]])
x0_right = np.array([[1.8, 0.0]])
sampler = MetropolisHastings(gen_model, x0=x0_left, noise=0.1,
stride=10, mapper=None,
is_discrete=False)
data1 = sampler.run(nsteps)
sampler = MetropolisHastings(gen_model, x0=x0_right, noise=0.1,
stride=10, mapper=None,
is_discrete=False)
data2 = sampler.run(nsteps)
data = np.concatenate([data1, data2 ], axis=0)
print('amount of concat data', data.shape)
print('the size of all data to be used (train and val)', data.shape)
# make data a torch tensor
data = torch.from_numpy(data).float().to(device)
# prepare transition state
x_ts = np.vstack([np.zeros(1000), (1.0/gen_model.params['k']) * np.random.randn(1000)]).T
# make train test split
rand_inds = np.random.choice(np.arange(data.shape[0]), params['tda'], replace=False)
train_set = rand_inds[: (params['tda']//2) ]
test_set = rand_inds[ (params['tda']//2): ]
x = data[train_set, :]
xval = data[test_set, :]
print('shape of data used for training', x.shape)
# plotting the training and Xval dataset energy histograms:
for dset, name in zip([x, xval], ['Train', 'XVal']):
plt.figure()
scores = gen_model.energy(dset.cpu().detach().numpy())
plt.hist(scores, bins=100)
plt.gcf().savefig(experiment_dir+'Expectation_'+name+'_Data_Hist.png', dpi=100)
plt.close()
# ======= setting up the normalizing flows:
# logistic distribution
# base = TransformedDistribution(Uniform(torch.zeros(gen_model.dim), torch.ones(gen_model.dim)), SigmoidTransform().inv)
base = torch.distributions.multivariate_normal.MultivariateNormal(torch.zeros(gen_model.dim), torch.eye(gen_model.dim))
if params['model_type'] == 'realNVP':
# RealNVP
# used to have 9 layers
flows = [AffineHalfFlow(dim=gen_model.dim, parity=i%2, nh=params['hidden_dim'], block_mask=params['block_mask']) for i in range(params['num_layers'])]
if params['model_type'] == 'NICE':
# NICE
# 4 layers
flows = [AffineHalfFlow(dim=gen_model.dim, parity=i%2, nh=params['hidden_dim'] ,scale=False, block_mask=params['block_mask']) for i in range(params['num_layers'])]
flows.append(AffineConstantFlow(dim=gen_model.dim, shift=False))
if params['model_type'] == 'slowMAF':
#SlowMAF (MAF, but without any parameter sharing for each dimension's scale/shift)
#4 layers
flows = [SlowMAF(dim=gen_model.dim, parity=i%2, nh=params['hidden_dim']) for i in range(params['num_layers'])]
if params['model_type'] == 'MAF':
# MAF (with MADE net, so we get very fast density estimation)
# 4 layers
flows = [MAF(dim=gen_model.dim, parity=i%2, nh=params['hidden_dim']) for i in range(params['num_layers'])]
# Neural splines, coupling
if params['model_type'] == 'neuralSpline':
nfs_flow = NSF_CL if True else NSF_AR
# MAY WANT TO CHANGE THIS HIDDEN_DIM SIZE!
# 3 layers
flows = [nfs_flow(dim=gen_model.dim, K=8, B=3, hidden_dim=params['hidden_dim']) for _ in range(params['num_layers'])]
convs = [Invertible1x1Conv(dim=gen_model.dim) for _ in flows]
# PREVIOUSLY WAS ACTNORM BUT THIS CLEVER INIT DOESNT WORK FOR ONEHOTS
norms = [AffineConstantFlow(dim=gen_model.dim) for _ in flows]
flows = list(itertools.chain(*zip(norms, convs, flows)))
network = NormalizingFlowModel(base, flows, gen_model)
network.flow.to(device)
print('data', data1.shape)
# printing out where the samples are from
plt.figure()
plt.scatter(data1[:,0], data1[:,1], color='blue')
plt.scatter(data2[:,0], data2[:,1], color='red')
plt.gcf().savefig(experiment_dir+'training_data.png', dpi=100)
plt.close()
plt.figure()
plt.hist(data1[:,0], color='blue')
plt.hist(data2[:,0], color='red')
plt.gcf().savefig(experiment_dir+'training_data_hist.png', dpi=100)
plt.close()
if params['MLepochs']>0:
# only ML training.
ML_losses = network.train_flexible(x, xval=xval, lr=params['lr'], std=params['latent_std'], epochs=params['MLepochs'], batch_size=params['MLbatch'],
verbose=params['verbose'], clipnorm=params['gradient_clip'], weight_KL=0.0,
save_partway_inter=params['save_partway_inter'], experiment_dir=experiment_dir)
ML_losses = ML_losses['total_loss']
print('done with ML training')
# TODO: Add in temperature for sampling: temperature=params['temperature']
#exp_energy_x, hard_energy_x = network.sample_energy(num_samples=5000, temperature=params['temperature'] )
plt.figure()
fig, axes = plot_network(network, gen_model, data1, data2, x_ts, weight_cutoff=1e-2)
fig.savefig(experiment_dir+'ML_only_network_plot.png', dpi=100)
plt.close()
plt.figure()
plt.plot(ML_losses, label='training')
#plt.plot(network1.history['val_loss'], label='validation')
plt.legend()
plt.gcf().savefig(experiment_dir+'Post_ML_LossCurves.png', dpi=100)
plt.close()
torch.save(network.flow.state_dict(), experiment_dir+'Model_Post_ML_Training.torch')
pickle.dump(ML_losses, open(experiment_dir+'ML_only_losses_dict.pickle','wb'))
if params['KL_only']:
KL_losses = network.train_flexible(x, weight_ML=0.0, weight_entropy = params['Entropyweight'],
epochs=params['KLepochs'], lr=params['lr'],
batch_size=params['KLbatch'], temperature=params['temperature'],
explore=params['explore'], verbose=params['verbose'],
save_partway_inter=params['save_partway_inter'],
experiment_dir=experiment_dir, clipnorm=params['gradient_clip'])
KL_losses = KL_losses['total_loss']
plt.figure()
plt.plot(KL_losses, label='training')
#plt.plot(KL_losses, label='validation')
plt.legend()
plt.gcf().savefig(experiment_dir+'Post_KL_LossCurves.png', dpi=100)
plt.close()
torch.save(network.flow.state_dict(), experiment_dir+'Model_Post_KL_Training.torch')
pickle.dump(KL_losses, open(experiment_dir+'KL_only_losses_dict.pickle','wb'))
else:
ML_KL_losses = network.train_flexible(x, xval=xval, lr=params['lr'], std=params['latent_std'], epochs=params['KLepochs'], batch_size=params['KLbatch'],
weight_ML=params['MLweight'], weight_KL=params['KLweight'],
temperature=params['temperature'], explore=params['explore'], verbose=params['verbose'],
save_partway_inter=params['save_partway_inter'], clipnorm=params['gradient_clip'],
experiment_dir=experiment_dir, weight_entropy = params['Entropyweight'])
for loss_to_plot in ['total_loss', 'ld_loss', 'kl_loss', 'ml_loss']:
print('to plot', loss_to_plot, len(ML_KL_losses[loss_to_plot]))
plt.figure()
plt.plot(ML_KL_losses[loss_to_plot])
plt.gcf().savefig(experiment_dir+'Post_KL_'+loss_to_plot+'_LossCurve.png', dpi=100)
plt.close()
pickle.dump(ML_KL_losses, open(experiment_dir+'ML_KL_losses_dict.pickle','wb'))
plt.figure()
fig, axes = plot_network(network, gen_model, data1, data2, x_ts, weight_cutoff=1e-2)
fig.savefig(experiment_dir+'MLandKL_network_plot.png', dpi=100)
plt.close()
total_time = time.time() - start_time
print('======== total time for this run in minutes', total_time/60)
with open(experiment_dir+ 'time_taken.txt', 'w') as file:
file.write('Total time taken was: ' + str(total_time))
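# Hedged sketch (not part of the original script): a minimal params dictionary covering
# the keys main() reads above. Every value below is an illustrative assumption, not a
# recommended setting.
EXAMPLE_PARAMS = {
    'exp_base_name': 'double_well',
    'random_seed': 0,            # 0 means "draw a random seed" in main()
    'MCMC': True,                # generate training data with Metropolis-Hastings
    'tda': 2000,                 # total amount of data split between train and validation
    'model_type': 'realNVP',     # one of: realNVP, NICE, slowMAF, MAF, neuralSpline
    'num_layers': 8,
    'hidden_dim': 64,
    'block_mask': False,
    'lr': 1e-3,
    'latent_std': 1.0,
    'MLepochs': 100,
    'MLbatch': 256,
    'MLweight': 1.0,
    'KL_only': False,
    'KLepochs': 100,
    'KLbatch': 256,
    'KLweight': 1.0,
    'Entropyweight': 0.0,
    'temperature': 1.0,
    'explore': 1.0,
    'verbose': True,
    'gradient_clip': 1.0,
    'save_partway_inter': None,
}
# main(EXAMPLE_PARAMS)  # would create an experiments/... directory and start training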
def plot_network(network, gen_model, traj_left, traj_right, x_ts,
weight_cutoff=1e-2,):
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16, 3.5))
plt.subplots_adjust(wspace=0.25)
# Plot X distribution
axis = axes[0]
axis.plot(traj_left[:, 0], traj_left[:, 1], linewidth=0, marker='.', markersize=3, color='blue')
axis.plot(x_ts[:, 0], x_ts[:, 1], linewidth=0, marker='.', markersize=3, color='orange')
axis.plot(traj_right[:, 0], traj_right[:, 1], linewidth=0, marker='.', markersize=3, color='red')
axis.set_xlabel('$x_1$')
axis.set_xlim(-3, 3)
axis.set_ylabel('$x_2$', labelpad=-12)
axis.set_ylim(-4, 4)
axis.set_yticks([-4, -2, 0, 2, 4])
# Plot Z distribution
axis = axes[1]
with torch.no_grad():
z_left, _, _ = network.forward(torch.from_numpy(traj_left).float())
z_ts, _, _ = network.forward( torch.from_numpy(x_ts).float())
z_right, _, _ = network.forward( torch.from_numpy(traj_right).float())
axis.plot(z_left[:, 0], z_left[:, 1], linewidth=0, marker='.', markersize=3, color='blue')
axis.plot(z_ts[:, 0], z_ts[:, 1], linewidth=0, marker='.', markersize=3, color='orange')
axis.plot(z_right[:, 0], z_right[:, 1], linewidth=0, marker='.', markersize=3, color='red')
circle = plt.Circle((0, 0), radius=1.0, color='black', alpha=0.4, fill=True)
axis.add_artist(circle)
circle = plt.Circle((0, 0), radius=2.0, color='black', alpha=0.25, fill=True)
axis.add_artist(circle)
circle = plt.Circle((0, 0), radius=3.0, color='black', alpha=0.1, fill=True)
axis.add_artist(circle)
axis.set_xlabel('$z_1$')
axis.set_xlim(-4, 4)
axis.set_ylabel('$z_2$', labelpad=-12)
axis.set_ylim(-4, 4)
axis.set_yticks([-4, -2, 0, 2, 4])
# Plot proposal distribution
# getting samples and histograms.
X1, Y1 = test_sample(network, temperature=1.0, plot=False) # bin means and then negative log of empirical x0 frequencies.
_, W1 = hist_weights(network)
axis = axes[2]
# this is a grid of energies that are plotted as a line. ground truth.
_, E = gen_model.plot_dimer_energy(axis=axis, temperature=1.0)
Y1 = Y1 - Y1.min() + E.min()
Inan = np.where(W1 < weight_cutoff)
Y1[Inan] = np.nan
#Y2 = Y2 - Y2.min() + E.min()
#axis.plot(X2, Y2, color='#FF6600', linewidth=2, label='ML+KL+RC')
axis.plot(X1, Y1, color='orange', linewidth=2, label='ML+KL')
axis.set_xlim(-3, 3)
axis.set_ylim(-12, 5.5)
axis.set_yticks([])
axis.set_xlabel('$x_1$')
axis.set_ylabel('Energy / kT')
#plt.legend(ncol=1, loc=9, fontsize=12, frameon=False)
# Plot reweighted distribution
RX1, RY1, DR1 = test_sample_rew(network, gen_model, temperature=1.0, plot=False)
axis = axes[3]
Ex, E = gen_model.plot_dimer_energy(axis=axis, temperature=1.0)
RY1 = RY1 - RY1[np.isfinite(RY1)].min() + E.min()
RY1[Inan] = np.nan
#RY1[RY1 > -4] = np.nan
#RY2 = RY2 - RY2[np.isfinite(RY2)].min() + E.min()
#axis.errorbar(RX2, RY2, DR2, color='#FF6600', linewidth=2, label='ML+KL+RC')
axis.errorbar(RX1, RY1, DR1, color='orange', linewidth=2, label='ML+KL')
axis.set_xlim(-3, 3)
axis.set_ylim(-12, 5.5)
axis.set_yticks([-12, -10, -8, -6, -4, -2, 0, 2, 4])
axis.set_xlabel('$x_1$')
axis.set_ylabel('')
return fig, axes
def test_sample(network, temperature=1.0, nsample=100000, plot=True):
    if nsample <= 100000:
        sample_x = network.sample_xs(temperature=temperature, num_samples=nsample)
        sample_x = sample_x.detach().numpy()
    else:
        # sample in batches of 100000 to keep memory bounded, then stack.
        batches = []
        for i in range(int(nsample / 100000)):
            batch = network.sample_xs(temperature=temperature, num_samples=100000)
            batches.append(batch.detach().numpy())
        sample_x = np.vstack(batches)
# xgen = network.Tzx.predict(np.sqrt(temperature) * np.random.randn(100000, 2))
params = DoubleWell.params_default.copy()
params['dim'] = 2
double_well = DoubleWell(params=params)
plt.figure(figsize=(4, 4))
h, b = np.histogram(sample_x[:, 0], bins=100)
# h is the numbers in each bin.
bin_means = (b[:-1] + b[1:])/2
    Eh = -np.log(h) / temperature  # negative log of the bin counts: an (unnormalized) energy profile inverted from the Boltzmann weights.
if plot:
Ex, E = double_well.plot_dimer_energy(temperature=temperature)
Eh = Eh - Eh.min() + E.min() # from the lowest real energy E, have the increase in energy on a log scale.
plt.plot(bin_means, Eh, color='green', linewidth=2)
return bin_means, Eh
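# Hedged sketch (not part of the original script): the histogram-to-energy step used in
# test_sample above, in isolation. It mirrors the -log(counts)/temperature convention
# used there; empty bins produce inf values, which is acceptable for a sketch.
def _energy_profile_from_samples(samples_1d, temperature=1.0, bins=100):
    h, b = np.histogram(samples_1d, bins=bins)
    bin_means = (b[:-1] + b[1:]) / 2
    energies = -np.log(h) / temperature
    return bin_means, energies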
def hist_weights(network):
sample_x, log_w = network.sample_log_w(temperature=1.0, num_samples=100000)
log_w -= log_w.max()
bins = np.linspace(-2.5, 2.5, 100)
bin_means = (bins[:-1] + bins[1:]) /2
sample_x_index = np.digitize(sample_x[:, 0], bins)
whist = np.zeros(len(bins) + 1)
for i in range(len(log_w)):
whist[sample_x_index[i]] += np.exp(log_w[i])
return bin_means, whist[1:-1]
# reweighting
def test_sample_rew(network, gen_model, temperature=1.0, plot=True):
sample_x, log_w = network.sample_log_w(temperature=1.0, num_samples=100000)
log_w -= log_w.max()
bin_means, Es = free_energy_bootstrap(sample_x[:, 0], bins=100, nbootstrap=100, log_weights=log_w)
plt.figure(figsize=(4, 4))
Emean = mean_finite(Es, axis=0)-10.7
Estd = std_finite(Es, axis=0)
var = mean_finite(std_finite(Es, axis=0) ** 2)
if plot:
gen_model.plot_dimer_energy()
plt.errorbar(bin_means, Emean, Estd, linewidth=2, color='green')
# variance
print('Estimator Standard Error: | |
direct children.
children = sorted(directory_node.Query(filter_string=filter_string,
limit=100000))
# Filter the children according to types.
if self.visible_types:
children = [x for x in children
if x.__class__.__name__ in self.visible_types]
self.content_cache.Put(key, children)
try:
self.message = "Directory Listing '%s' was taken on %s" % (
aff4_path, directory_node.Get(directory_node.Schema.TYPE.age))
except AttributeError:
pass
except IOError:
children = []
children.sort(reverse=reverse_sort)
row_index = start_row
# Make sure the table knows how large it is for paging.
self.size = len(children)
self.columns[1].base_path = urn
for fd in children[start_row:end_row]:
# We use the timestamp on the TYPE as a proxy for the last update time
# of this object - its only an estimate.
fd_type = fd.Get(fd.Schema.TYPE)
if fd_type:
self.AddCell(row_index, "Age", rdfvalue.RDFDatetime(fd_type.age))
self.AddCell(row_index, "Name", fd.urn)
# Add the fd to all the columns
for column in self.columns:
# This sets AttributeColumns directly from their fd.
if isinstance(column, semantic.AttributeColumn):
column.AddRowFromFd(row_index, fd)
if "Container" in fd.behaviours:
self.AddCell(row_index, "Icon", dict(icon="directory",
description="Directory"))
else:
self.AddCell(row_index, "Icon", dict(icon="file",
description="File Like Object"))
row_index += 1
if row_index > end_row:
return
class FileTable(AbstractFileTable):
"""A table that displays the content of a directory.
Listening Javascript Events:
- tree_select(aff4_path) - A selection event on the tree informing us of the
tree path. We re-layout the entire table on this event to show the
directory listing of aff4_path.
Generated Javascript Events:
- file_select(aff4_path, age) - The full AFF4 path for the file in the
directory which is selected. Age is the latest age we wish to see.
Internal State:
- client_id.
"""
root_path = None # The root will be dynamically set to the client path.
toolbar = "Toolbar"
context_help_url = "user_manual.html#_listing_the_virtual_filesystem"
def __init__(self, **kwargs):
super(FileTable, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn(
"Icon", renderer=semantic.IconRenderer, width="40px"))
self.AddColumn(semantic.RDFValueColumn(
"Name", renderer=semantic.SubjectRenderer, sortable=True, width="20%"))
self.AddColumn(semantic.AttributeColumn("type", width="10%"))
self.AddColumn(semantic.AttributeColumn("size", width="10%"))
self.AddColumn(semantic.AttributeColumn("stat.st_size", width="15%"))
self.AddColumn(semantic.AttributeColumn("stat.st_mtime", width="15%"))
self.AddColumn(semantic.AttributeColumn("stat.st_ctime", width="15%"))
self.AddColumn(semantic.RDFValueColumn(
"Age", renderer=AgeSelector, width="15%"))
def Layout(self, request, response):
"""Populate the table state with the request."""
self.state["client_id"] = client_id = request.REQ.get("client_id")
self.root_path = client_id
return super(FileTable, self).Layout(request, response)
def BuildTable(self, start_row, end_row, request):
client_id = request.REQ.get("client_id")
self.root_path = client_id
return super(FileTable, self).BuildTable(start_row, end_row, request)
class FileSystemTree(renderers.TreeRenderer):
"""A FileSystem navigation Tree.
Generated Javascript Events:
- tree_select(aff4_path) - The full aff4 path for the branch which the user
selected.
Internal State:
- client_id: The client this tree is showing.
- aff4_root: The aff4 node which forms the root of this tree.
"""
# Flows are special children which confuse users when seen, so we remove them
# from the tree. Note that they are still visible in the table.
hidden_branches = ["/flows"]
def Layout(self, request, response):
self.state["client_id"] = client_id = request.REQ.get("client_id")
self.state["aff4_root"] = request.REQ.get("aff4_root", client_id)
response = super(FileSystemTree, self).Layout(request, response)
return self.CallJavascript(response, "FileSystemTree.Layout")
def RenderBranch(self, path, request):
"""Renders tree leafs for filesystem path."""
client_id = request.REQ["client_id"]
aff4_root = rdfvalue.RDFURN(request.REQ.get("aff4_root", client_id))
# Path is relative to the aff4 root specified.
urn = aff4_root.Add(path)
try:
# Open the client
directory = aff4.FACTORY.Open(urn, token=request.token).Upgrade(
"VFSDirectory")
children = [ch for ch in directory.OpenChildren(limit=100000)
if "Container" in ch.behaviours]
try:
self.message = "Directory %s Last retrieved %s" % (
urn, directory.Get(directory.Schema.TYPE).age)
except AttributeError:
pass
for child in sorted(children):
self.AddElement(child.urn.RelativeName(urn))
except IOError as e:
self.message = "Error fetching %s: %s" % (urn, e)
class RecursiveRefreshDialog(renderers.ConfirmationDialogRenderer):
"""Dialog that allows user to recursively update directories."""
post_parameters = ["aff4_path"]
header = "Recursive Refresh"
proceed_button_title = "Refresh!"
content_template = renderers.Template("""
{{this.recursive_refresh_form|safe}}
""")
ajax_template = renderers.Template("""
<p class="text-info">Refresh started successfully!</p>
""")
def Layout(self, request, response):
args = rdfvalue.RecursiveListDirectoryArgs()
self.recursive_refresh_form = forms.SemanticProtoFormRenderer(
args, supressions=["pathspec"]).RawHTML(request)
return super(RecursiveRefreshDialog, self).Layout(request, response)
def RenderAjax(self, request, response):
aff4_path = rdfvalue.RDFURN(request.REQ.get("aff4_path"))
args = forms.SemanticProtoFormRenderer(
rdfvalue.RecursiveListDirectoryArgs()).ParseArgs(request)
fd = aff4.FACTORY.Open(aff4_path, aff4_type="AFF4Volume",
token=request.token)
args.pathspec = fd.real_pathspec
flow.GRRFlow.StartFlow(client_id=aff4_path.Split()[0],
flow_name="RecursiveListDirectory",
args=args,
notify_to_user=True,
token=request.token)
return self.RenderFromTemplate(self.ajax_template, response)
class Toolbar(renderers.TemplateRenderer):
"""A navigation enhancing toolbar.
Listening Javascript Events:
- AttributeUpdated(aff4_path, attribute): This event is fired then the
aff4_path has updated. If the content of this event have changed, we emit
the tree_select and file_select events to force the table to redraw.
Generated Javascript Events:
- file_select(aff4_path), tree_select(aff4_path) are fired when the buttons
are clicked.
Internal State:
- aff4_path: The path we are viewing now in the table.
"""
layout_template = renderers.Template("""
<div class="navbar navbar-default">
<div class="navbar-inner">
<div class="navbar-form pull-right">
<button class="btn btn-default" id='refresh_{{unique|escape}}'
name="Refresh" title='Refresh this directory listing.'>
<img src='/static/images/stock_refresh.png' class="toolbar_icon" />
</button>
<button class="btn btn-default" id='recursive_refresh_{{unique|escape}}'
title='Refresh this directory listing.' style='position: relative'
name="RecursiveRefresh" data-toggle="modal"
data-target="#recursive_refresh_dialog_{{unique|escape}}">
<img src='/static/images/stock_refresh.png' class="toolbar_icon" />
<span style='position: absolute; left: 23px; top: 5px; font-weight: bold;
font-size: 18px; -webkit-text-stroke: 1px #000; color: #fff'>R</span>
</button>
<button class="btn btn-default" id='rweowned'
title='Is this machine pwned?'>
<img src='/static/images/stock_dialog_question.png'
class="toolbar_icon" />
</button>
</div>
<ul class="breadcrumb">
{% for path, fullpath, fullpath_id, i, last in this.paths %}
<li {% if forloop.last %}class="active"{% endif %}>
{% if forloop.last %}
{{path|escape}}
{% else %}
<a id="path_{{i|escape}}">{{path|escape}}</a>
{% endif %}
</li>
{% endfor %}
<div class="clearfix"></div>
</ul>
</div>
</div>
<div id="refresh_action" class="hide"></div>
<div id="rweowned_dialog" class="modal"></div>
<div id="recursive_refresh_dialog_{{unique|escape}}"
class="modal" tabindex="-1" role="dialog" aria-hidden="true">
</div>
""")
def Layout(self, request, response):
"""Render the toolbar."""
self.state["client_id"] = client_id = request.REQ.get("client_id")
self.state["aff4_path"] = aff4_path = request.REQ.get(
"aff4_path", client_id)
client_urn = rdfvalue.ClientURN(client_id)
self.paths = [("/", client_urn, "_", 0)]
for path in rdfvalue.RDFURN(aff4_path).Split()[1:]:
previous = self.paths[-1]
fullpath = previous[1].Add(path)
self.paths.append((path, fullpath,
renderers.DeriveIDFromPath(
fullpath.RelativeName(client_urn)),
previous[3] + 1))
response = super(Toolbar, self).Layout(request, response)
return self.CallJavascript(response, "Toolbar.Layout",
aff4_path=utils.SmartUnicode(aff4_path),
paths=self.paths)
class UpdateAttribute(renderers.TemplateRenderer):
"""Reloads a directory listing from client.
The renderer will launch the flow in the layout method, and then call its
render method every few seconds to check if the flow is complete.
Post Parameters:
- aff4_path: The aff4 path to update the attribute for.
- aff4_type: If provided, the aff4 object will be upgraded to this type
before updating.
- attribute: The attribute name to update.
Generated Javascript Events:
- AttributeUpdated(aff4_path, attribute) - When the flow is complete we emit
this event.
"""
# Number of ms to wait
poll_time = 1000
def ParseRequest(self, request):
"""Parses parameters from the request."""
self.aff4_path = request.REQ.get("aff4_path")
self.flow_urn = request.REQ.get("flow_urn")
# Refresh the contains attribute
self.attribute_to_refresh = request.REQ.get("attribute", "CONTAINS")
def Layout(self, request, response):
"""Render the toolbar."""
self.ParseRequest(request)
try:
client_id = rdfvalue.RDFURN(self.aff4_path).Split(2)[0]
update_flow_urn = flow.GRRFlow.StartFlow(
client_id=client_id, flow_name="UpdateVFSFile",
token=request.token, vfs_file_urn=rdfvalue.RDFURN(self.aff4_path),
attribute=self.attribute_to_refresh)
update_flow = aff4.FACTORY.Open(
update_flow_urn, aff4_type="UpdateVFSFile", token=request.token)
self.flow_urn = str(update_flow.state.get_file_flow_urn)
except IOError as e:
raise IOError("Sorry. This path cannot be refreshed due to %s" % e)
if self.flow_urn:
response = super(UpdateAttribute, self).Layout(request, response)
return self.CallJavascript(response,
"UpdateAttribute.Layout",
aff4_path=self.aff4_path,
flow_urn=self.flow_urn,
attribute_to_refresh=self.attribute_to_refresh,
poll_time=self.poll_time)
def RenderAjax(self, request, response):
"""Continue polling as long as the flow is in flight."""
super(UpdateAttribute, self).RenderAjax(request, response)
self.ParseRequest(request)
# Check if the flow is still in flight.
try:
flow_obj = aff4.FACTORY.Open(self.flow_urn, token=request.token)
complete = not flow_obj.GetRunner().IsRunning()
except IOError:
# Something went wrong, stop polling.
complete = True
if complete:
return renderers.JsonResponse("1")
class AFF4ReaderMixin(object):
"""A helper which reads a buffer from an AFF4 object.
This is meant to be mixed in with the HexView and TextView renderers.
"""
def ReadBuffer(self, request, offset, length):
"""Renders the HexTable."""
# Allow derived classes to just set the urn directly
self.aff4_path = request.REQ.get("aff4_path")
self.age = request.REQ.get("age")
if not self.aff4_path: return
try:
fd = aff4.FACTORY.Open(self.aff4_path, token=request.token,
age=rdfvalue.RDFDatetime(self.age))
self.total_size = int(fd.Get(fd.Schema.SIZE))
except (IOError, TypeError, AttributeError):
self.total_size = 0
return ""
fd.Seek(offset)
return fd.Read(length)
class FileHexViewer(AFF4ReaderMixin, fileview_widgets.HexView):
"""A HexView renderer."""
class FileTextViewer(AFF4ReaderMixin, fileview_widgets.TextView):
"""A TextView renderer."""
class VirtualFileSystemView(renderers.Splitter):
"""This is the main view to browse files."""
behaviours = frozenset(["Host"])
order = 10
description = "Browse Virtual Filesystem"
left_renderer = "FileSystemTree"
top_right_renderer = "FileTable"
bottom_right_renderer = "AFF4ObjectRenderer"
class DownloadView(renderers.TemplateRenderer):
"""Renders a download page."""
# We allow a longer execution time here to be able to download large files.
max_execution_time = 60 * 15
layout_template = renderers.Template("""
<h3>{{ this.path|escape }}</h3>
<div id="{{ unique|escape }}_action" class="hide"></div>
{% if this.hash %}
Hash was {{ this.hash|escape }}.
{% endif %}
{% if this.file_exists %}
As downloaded on {{ this.age|escape }}.<br>
<p>
<button id="{{ unique|escape }}_2" class="btn btn-default">
Download ({{this.size|escape}} bytes)
</button>
</p>
<p>or download using command line export tool:</p>
<pre>
{{ this.export_command_str|escape }}
</pre>
<hr/>
{% endif %}
<button id="{{ unique|escape }}" class="btn btn-default">
Get a new Version
</button>
</div>
""")
error_template = renderers.Template("""
<div class="alert alert-danger alert-block">
<h4>Error!</h4> {{this.path|escape}} does not appear to be a file object.
<p><em>{{this.error_message|escape}}</em></p>
</div>
""")
bad_extensions = [".bat", ".cmd", ".exe", ".com", ".pif", ".py", ".pl",
".scr", ".vbs"]
def Layout(self, request, response):
"""Present a download form."""
self.age = rdfvalue.RDFDatetime(request.REQ.get("age"))
client_id = request.REQ.get("client_id")
aff4_path = request.REQ.get("aff4_path", client_id)
try:
fd = aff4.FACTORY.Open(aff4_path, token=request.token, age=self.age)
self.path | |
Optional[pulumi.Input[bool]]):
pulumi.set(self, "tls", value)
@property
@pulumi.getter(name="userDisabledBitMask")
def user_disabled_bit_mask(self) -> Optional[pulumi.Input[int]]:
"""
User disabled bit mask (int)
"""
return pulumi.get(self, "user_disabled_bit_mask")
@user_disabled_bit_mask.setter
def user_disabled_bit_mask(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "user_disabled_bit_mask", value)
@property
@pulumi.getter(name="userEnabledAttribute")
def user_enabled_attribute(self) -> Optional[pulumi.Input[str]]:
"""
User enable attribute (string)
"""
return pulumi.get(self, "user_enabled_attribute")
@user_enabled_attribute.setter
def user_enabled_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_enabled_attribute", value)
@property
@pulumi.getter(name="userLoginAttribute")
def user_login_attribute(self) -> Optional[pulumi.Input[str]]:
"""
User login attribute. Default `uid` (string)
"""
return pulumi.get(self, "user_login_attribute")
@user_login_attribute.setter
def user_login_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_login_attribute", value)
@property
@pulumi.getter(name="userMemberAttribute")
def user_member_attribute(self) -> Optional[pulumi.Input[str]]:
"""
User member attribute. Default `memberOf` (string)
"""
return pulumi.get(self, "user_member_attribute")
@user_member_attribute.setter
def user_member_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_member_attribute", value)
@property
@pulumi.getter(name="userNameAttribute")
def user_name_attribute(self) -> Optional[pulumi.Input[str]]:
"""
User name attribute. Default `givenName` (string)
"""
return pulumi.get(self, "user_name_attribute")
@user_name_attribute.setter
def user_name_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name_attribute", value)
@property
@pulumi.getter(name="userObjectClass")
def user_object_class(self) -> Optional[pulumi.Input[str]]:
"""
User object class. Default `inetorgperson` (string)
"""
return pulumi.get(self, "user_object_class")
@user_object_class.setter
def user_object_class(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_object_class", value)
@property
@pulumi.getter(name="userSearchAttribute")
def user_search_attribute(self) -> Optional[pulumi.Input[str]]:
"""
User search attribute. Default `uid|sn|givenName` (string)
"""
return pulumi.get(self, "user_search_attribute")
@user_search_attribute.setter
def user_search_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_search_attribute", value)
@pulumi.input_type
class _AuthConfigOpenLdapState:
def __init__(__self__, *,
access_mode: Optional[pulumi.Input[str]] = None,
allowed_principal_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
certificate: Optional[pulumi.Input[str]] = None,
connection_timeout: Optional[pulumi.Input[int]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
group_dn_attribute: Optional[pulumi.Input[str]] = None,
group_member_mapping_attribute: Optional[pulumi.Input[str]] = None,
group_member_user_attribute: Optional[pulumi.Input[str]] = None,
group_name_attribute: Optional[pulumi.Input[str]] = None,
group_object_class: Optional[pulumi.Input[str]] = None,
group_search_attribute: Optional[pulumi.Input[str]] = None,
group_search_base: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
nested_group_membership_enabled: Optional[pulumi.Input[bool]] = None,
port: Optional[pulumi.Input[int]] = None,
servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service_account_distinguished_name: Optional[pulumi.Input[str]] = None,
service_account_password: Optional[pulumi.Input[str]] = None,
test_password: Optional[pulumi.Input[str]] = None,
test_username: Optional[pulumi.Input[str]] = None,
tls: Optional[pulumi.Input[bool]] = None,
type: Optional[pulumi.Input[str]] = None,
user_disabled_bit_mask: Optional[pulumi.Input[int]] = None,
user_enabled_attribute: Optional[pulumi.Input[str]] = None,
user_login_attribute: Optional[pulumi.Input[str]] = None,
user_member_attribute: Optional[pulumi.Input[str]] = None,
user_name_attribute: Optional[pulumi.Input[str]] = None,
user_object_class: Optional[pulumi.Input[str]] = None,
user_search_attribute: Optional[pulumi.Input[str]] = None,
user_search_base: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering AuthConfigOpenLdap resources.
:param pulumi.Input[str] access_mode: Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_principal_ids: Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>` `openldap_group://<DN>` (list)
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations of the resource (map)
:param pulumi.Input[str] certificate: Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
:param pulumi.Input[int] connection_timeout: OpenLdap connection timeout. Default `5000` (int)
:param pulumi.Input[bool] enabled: Enable auth config provider. Default `true` (bool)
:param pulumi.Input[str] group_dn_attribute: Group DN attribute. Default `entryDN` (string)
:param pulumi.Input[str] group_member_mapping_attribute: Group member mapping attribute. Default `member` (string)
:param pulumi.Input[str] group_member_user_attribute: Group member user attribute. Default `entryDN` (string)
:param pulumi.Input[str] group_name_attribute: Group name attribute. Default `cn` (string)
:param pulumi.Input[str] group_object_class: Group object class. Default `groupOfNames` (string)
:param pulumi.Input[str] group_search_attribute: Group search attribute. Default `cn` (string)
:param pulumi.Input[str] group_search_base: Group search base (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels of the resource (map)
:param pulumi.Input[str] name: (Computed) The name of the resource (string)
:param pulumi.Input[bool] nested_group_membership_enabled: Nested group membership enable. Default `false` (bool)
:param pulumi.Input[int] port: OpenLdap port. Default `389` (int)
:param pulumi.Input[Sequence[pulumi.Input[str]]] servers: OpenLdap servers list (list)
:param pulumi.Input[str] service_account_distinguished_name: Service account DN for access OpenLdap service (string)
:param pulumi.Input[str] service_account_password: Service account password for access OpenLdap service (string)
:param pulumi.Input[str] test_password: Password for test access to OpenLdap service (string)
:param pulumi.Input[str] test_username: Username for test access to OpenLdap service (string)
:param pulumi.Input[bool] tls: Enable TLS connection (bool)
:param pulumi.Input[str] type: (Computed) The type of the resource (string)
:param pulumi.Input[int] user_disabled_bit_mask: User disabled bit mask (int)
:param pulumi.Input[str] user_enabled_attribute: User enable attribute (string)
:param pulumi.Input[str] user_login_attribute: User login attribute. Default `uid` (string)
:param pulumi.Input[str] user_member_attribute: User member attribute. Default `memberOf` (string)
:param pulumi.Input[str] user_name_attribute: User name attribute. Default `givenName` (string)
:param pulumi.Input[str] user_object_class: User object class. Default `inetorgperson` (string)
:param pulumi.Input[str] user_search_attribute: User search attribute. Default `uid|sn|givenName` (string)
:param pulumi.Input[str] user_search_base: User search base DN (string)
"""
if access_mode is not None:
pulumi.set(__self__, "access_mode", access_mode)
if allowed_principal_ids is not None:
pulumi.set(__self__, "allowed_principal_ids", allowed_principal_ids)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if certificate is not None:
pulumi.set(__self__, "certificate", certificate)
if connection_timeout is not None:
pulumi.set(__self__, "connection_timeout", connection_timeout)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if group_dn_attribute is not None:
pulumi.set(__self__, "group_dn_attribute", group_dn_attribute)
if group_member_mapping_attribute is not None:
pulumi.set(__self__, "group_member_mapping_attribute", group_member_mapping_attribute)
if group_member_user_attribute is not None:
pulumi.set(__self__, "group_member_user_attribute", group_member_user_attribute)
if group_name_attribute is not None:
pulumi.set(__self__, "group_name_attribute", group_name_attribute)
if group_object_class is not None:
pulumi.set(__self__, "group_object_class", group_object_class)
if group_search_attribute is not None:
pulumi.set(__self__, "group_search_attribute", group_search_attribute)
if group_search_base is not None:
pulumi.set(__self__, "group_search_base", group_search_base)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if nested_group_membership_enabled is not None:
pulumi.set(__self__, "nested_group_membership_enabled", nested_group_membership_enabled)
if port is not None:
pulumi.set(__self__, "port", port)
if servers is not None:
pulumi.set(__self__, "servers", servers)
if service_account_distinguished_name is not None:
pulumi.set(__self__, "service_account_distinguished_name", service_account_distinguished_name)
if service_account_password is not None:
pulumi.set(__self__, "service_account_password", service_account_password)
if test_password is not None:
pulumi.set(__self__, "test_password", test_password)
if test_username is not None:
pulumi.set(__self__, "test_username", test_username)
if tls is not None:
pulumi.set(__self__, "tls", tls)
if type is not None:
pulumi.set(__self__, "type", type)
if user_disabled_bit_mask is not None:
pulumi.set(__self__, "user_disabled_bit_mask", user_disabled_bit_mask)
if user_enabled_attribute is not None:
pulumi.set(__self__, "user_enabled_attribute", user_enabled_attribute)
if user_login_attribute is not None:
pulumi.set(__self__, "user_login_attribute", user_login_attribute)
if user_member_attribute is not None:
pulumi.set(__self__, "user_member_attribute", user_member_attribute)
if user_name_attribute is not None:
pulumi.set(__self__, "user_name_attribute", user_name_attribute)
if user_object_class is not None:
pulumi.set(__self__, "user_object_class", user_object_class)
if user_search_attribute is not None:
pulumi.set(__self__, "user_search_attribute", user_search_attribute)
if user_search_base is not None:
pulumi.set(__self__, "user_search_base", user_search_base)
@property
@pulumi.getter(name="accessMode")
def access_mode(self) -> Optional[pulumi.Input[str]]:
"""
Access mode for auth. `required`, `restricted`, `unrestricted` are supported. Default `unrestricted` (string)
"""
return pulumi.get(self, "access_mode")
@access_mode.setter
def access_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_mode", value)
@property
@pulumi.getter(name="allowedPrincipalIds")
def allowed_principal_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Allowed principal ids for auth. Required if `access_mode` is `required` or `restricted`. Ex: `openldap_user://<DN>` `openldap_group://<DN>` (list)
"""
return pulumi.get(self, "allowed_principal_ids")
@allowed_principal_ids.setter
def allowed_principal_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_principal_ids", value)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Annotations of the resource (map)
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter
def certificate(self) -> Optional[pulumi.Input[str]]:
"""
Base64 encoded CA certificate for TLS if self-signed. Use filebase64(<FILE>) for encoding file (string)
"""
return pulumi.get(self, "certificate")
@certificate.setter
def certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate", value)
@property
@pulumi.getter(name="connectionTimeout")
def connection_timeout(self) -> Optional[pulumi.Input[int]]:
"""
OpenLdap connection timeout. Default `5000` (int)
"""
return pulumi.get(self, "connection_timeout")
@connection_timeout.setter
def connection_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "connection_timeout", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Enable auth config provider. Default `true` (bool)
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="groupDnAttribute")
def group_dn_attribute(self) -> Optional[pulumi.Input[str]]:
"""
Group DN attribute. Default `entryDN` (string)
"""
return pulumi.get(self, "group_dn_attribute")
@group_dn_attribute.setter
def group_dn_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_dn_attribute", value)
@property
@pulumi.getter(name="groupMemberMappingAttribute")
def group_member_mapping_attribute(self) -> Optional[pulumi.Input[str]]:
"""
Group member mapping attribute. Default `member` (string)
"""
return pulumi.get(self, "group_member_mapping_attribute")
@group_member_mapping_attribute.setter
def group_member_mapping_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_member_mapping_attribute", value)
@property
@pulumi.getter(name="groupMemberUserAttribute")
def group_member_user_attribute(self) -> Optional[pulumi.Input[str]]:
"""
Group member user attribute. Default `entryDN` (string)
"""
return pulumi.get(self, "group_member_user_attribute")
@group_member_user_attribute.setter
def group_member_user_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_member_user_attribute", value)
@property
@pulumi.getter(name="groupNameAttribute")
def group_name_attribute(self) -> Optional[pulumi.Input[str]]:
"""
Group name attribute. Default `cn` (string)
"""
return pulumi.get(self, "group_name_attribute")
@group_name_attribute.setter
def group_name_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_name_attribute", value)
@property
@pulumi.getter(name="groupObjectClass")
def group_object_class(self) -> Optional[pulumi.Input[str]]:
"""
Group object class. Default `groupOfNames` (string)
"""
return pulumi.get(self, "group_object_class")
@group_object_class.setter
def group_object_class(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_object_class", value)
@property
@pulumi.getter(name="groupSearchAttribute")
def group_search_attribute(self) -> Optional[pulumi.Input[str]]:
"""
Group search attribute. Default `cn` (string)
"""
return pulumi.get(self, "group_search_attribute")
@group_search_attribute.setter
def group_search_attribute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_search_attribute", value)
@property
@pulumi.getter(name="groupSearchBase")
def group_search_base(self) -> Optional[pulumi.Input[str]]:
"""
Group search base (string)
"""
return pulumi.get(self, "group_search_base")
@group_search_base.setter
def group_search_base(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_search_base", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Labels of the resource (map)
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
(Computed) The name of the resource (string)
"""
return pulumi.get(self, "name")
'</td>' + \
'<td>' + r10c3 + '</td>' + \
'<td>' + r10c4 + '</td>' + \
'<td>' + r10c5 + '</td>' + \
'<td>' + r10c6 + '</td>' + \
'<td>' + r10c7 + '</td>' + \
'<td>' + r10c8 + '</td>' + \
'<td>' + r10c9 + '</td>' + \
'<td>' + r10c10 + '</td>' + \
'<td>' + r10c11 + '</td>' + \
'<td>' + r10c12 + '</td>' + \
'<td>' + r10c13 + '</td>' + \
'<td>' + r10c14 + '</td>' + \
'<td>' + r10c15 + '</td>' + \
'<td>' + r10c16 + '</td>' + \
'<td>' + r10c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Gifts actual income</td>' + \
'<td>' + r11c1 + '</td>' + \
'<td>' + r11c2 + '</td>' + \
'<td>' + r11c3 + '</td>' + \
'<td>' + r11c4 + '</td>' + \
'<td>' + r11c5 + '</td>' + \
'<td>' + r11c6 + '</td>' + \
'<td>' + r11c7 + '</td>' + \
'<td>' + r11c8 + '</td>' + \
'<td>' + r11c9 + '</td>' + \
'<td>' + r11c10 + '</td>' + \
'<td>' + r11c11 + '</td>' + \
'<td>' + r11c12 + '</td>' + \
'<td>' + r11c13 + '</td>' + \
'<td>' + r11c14 + '</td>' + \
'<td>' + r11c15 + '</td>' + \
'<td>' + r11c16 + '</td>' + \
'<td>' + r11c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Gifts variance income</td>' + \
'<td>' + r12c1 + '</td>' + \
'<td>' + r12c2 + '</td>' + \
'<td>' + r12c3 + '</td>' + \
'<td>' + r12c4 + '</td>' + \
'<td>' + r12c5 + '</td>' + \
'<td>' + r12c6 + '</td>' + \
'<td>' + r12c7 + '</td>' + \
'<td>' + r12c8 + '</td>' + \
'<td>' + r12c9 + '</td>' + \
'<td>' + r12c10 + '</td>' + \
'<td>' + r12c11 + '</td>' + \
'<td>' + r12c12 + '</td>' + \
'<td>' + r12c13 + '</td>' + \
'<td>' + r12c14 + '</td>' + \
'<td>' + r12c15 + '</td>' + \
'<td>' + r12c16 + '</td>' + \
'<td>' + r12c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Interest budget income</td>' + \
'<td>' + r13c1 + '</td>' + \
'<td>' + r13c2 + '</td>' + \
'<td>' + r13c3 + '</td>' + \
'<td>' + r13c4 + '</td>' + \
'<td>' + r13c5 + '</td>' + \
'<td>' + r13c6 + '</td>' + \
'<td>' + r13c7 + '</td>' + \
'<td>' + r13c8 + '</td>' + \
'<td>' + r13c9 + '</td>' + \
'<td>' + r13c10 + '</td>' + \
'<td>' + r13c11 + '</td>' + \
'<td>' + r13c12 + '</td>' + \
'<td>' + r13c13 + '</td>' + \
'<td>' + r13c14 + '</td>' + \
'<td>' + r13c15 + '</td>' + \
'<td>' + r13c16 + '</td>' + \
'<td>' + r13c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Interest actual income</td>' + \
'<td>' + r14c1 + '</td>' + \
'<td>' + r14c2 + '</td>' + \
'<td>' + r14c3 + '</td>' + \
'<td>' + r14c4 + '</td>' + \
'<td>' + r14c5 + '</td>' + \
'<td>' + r14c6 + '</td>' + \
'<td>' + r14c7 + '</td>' + \
'<td>' + r14c8 + '</td>' + \
'<td>' + r14c9 + '</td>' + \
'<td>' + r14c10 + '</td>' + \
'<td>' + r14c11 + '</td>' + \
'<td>' + r14c12 + '</td>' + \
'<td>' + r14c13 + '</td>' + \
'<td>' + r14c14 + '</td>' + \
'<td>' + r14c15 + '</td>' + \
'<td>' + r14c16 + '</td>' + \
'<td>' + r14c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Interest variance income</td>' + \
'<td>' + r15c1 + '</td>' + \
'<td>' + r15c2 + '</td>' + \
'<td>' + r15c3 + '</td>' + \
'<td>' + r15c4 + '</td>' + \
'<td>' + r15c5 + '</td>' + \
'<td>' + r15c6 + '</td>' + \
'<td>' + r15c7 + '</td>' + \
'<td>' + r15c8 + '</td>' + \
'<td>' + r15c9 + '</td>' + \
'<td>' + r15c10 + '</td>' + \
'<td>' + r15c11 + '</td>' + \
'<td>' + r15c12 + '</td>' + \
'<td>' + r15c13 + '</td>' + \
'<td>' + r15c14 + '</td>' + \
'<td>' + r15c15 + '</td>' + \
'<td>' + r15c16 + '</td>' + \
'<td>' + r15c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Rent budget income</td>' + \
'<td>' + r16c1 + '</td>' + \
'<td>' + r16c2 + '</td>' + \
'<td>' + r16c3 + '</td>' + \
'<td>' + r16c4 + '</td>' + \
'<td>' + r16c5 + '</td>' + \
'<td>' + r16c6 + '</td>' + \
'<td>' + r16c7 + '</td>' + \
'<td>' + r16c8 + '</td>' + \
'<td>' + r16c9 + '</td>' + \
'<td>' + r16c10 + '</td>' + \
'<td>' + r16c11 + '</td>' + \
'<td>' + r16c12 + '</td>' + \
'<td>' + r16c13 + '</td>' + \
'<td>' + r16c14 + '</td>' + \
'<td>' + r16c15 + '</td>' + \
'<td>' + r16c16 + '</td>' + \
'<td>' + r16c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Rent actual income</td>' + \
'<td>' + r17c1 + '</td>' + \
'<td>' + r17c2 + '</td>' + \
'<td>' + r17c3 + '</td>' + \
'<td>' + r17c4 + '</td>' + \
'<td>' + r17c5 + '</td>' + \
'<td>' + r17c6 + '</td>' + \
'<td>' + r17c7 + '</td>' + \
'<td>' + r17c8 + '</td>' + \
'<td>' + r17c9 + '</td>' + \
'<td>' + r17c10 + '</td>' + \
'<td>' + r17c11 + '</td>' + \
'<td>' + r17c12 + '</td>' + \
'<td>' + r17c13 + '</td>' + \
'<td>' + r17c14 + '</td>' + \
'<td>' + r17c15 + '</td>' + \
'<td>' + r17c16 + '</td>' + \
'<td>' + r17c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Rent variance income</td>' + \
'<td>' + r18c1 + '</td>' + \
'<td>' + r18c2 + '</td>' + \
'<td>' + r18c3 + '</td>' + \
'<td>' + r18c4 + '</td>' + \
'<td>' + r18c5 + '</td>' + \
'<td>' + r18c6 + '</td>' + \
'<td>' + r18c7 + '</td>' + \
'<td>' + r18c8 + '</td>' + \
'<td>' + r18c9 + '</td>' + \
'<td>' + r18c10 + '</td>' + \
'<td>' + r18c11 + '</td>' + \
'<td>' + r18c12 + '</td>' + \
'<td>' + r18c13 + '</td>' + \
'<td>' + r18c14 + '</td>' + \
'<td>' + r18c15 + '</td>' + \
'<td>' + r18c16 + '</td>' + \
'<td>' + r18c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Other budget income</td>' + \
'<td>' + r19c1 + '</td>' + \
'<td>' + r19c2 + '</td>' + \
'<td>' + r19c3 + '</td>' + \
'<td>' + r19c4 + '</td>' + \
'<td>' + r19c5 + '</td>' + \
'<td>' + r19c6 + '</td>' + \
'<td>' + r19c7 + '</td>' + \
'<td>' + r19c8 + '</td>' + \
'<td>' + r19c9 + '</td>' + \
'<td>' + r19c10 + '</td>' + \
'<td>' + r19c11 + '</td>' + \
'<td>' + r19c12 + '</td>' + \
'<td>' + r19c13 + '</td>' + \
'<td>' + r19c14 + '</td>' + \
'<td>' + r19c15 + '</td>' + \
'<td>' + r19c16 + '</td>' + \
'<td>' + r19c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Other actual income</td>' + \
'<td>' + r20c1 + '</td>' + \
'<td>' + r20c2 + '</td>' + \
'<td>' + r20c3 + '</td>' + \
'<td>' + r20c4 + '</td>' + \
'<td>' + r20c5 + '</td>' + \
'<td>' + r20c6 + '</td>' + \
'<td>' + r20c7 + '</td>' + \
'<td>' + r20c8 + '</td>' + \
'<td>' + r20c9 + '</td>' + \
'<td>' + r20c10 + '</td>' + \
'<td>' + r20c11 + '</td>' + \
'<td>' + r20c12 + '</td>' + \
'<td>' + r20c13 + '</td>' + \
'<td>' + r20c14 + '</td>' + \
'<td>' + r20c15 + '</td>' + \
'<td>' + r20c16 + '</td>' + \
'<td>' + r20c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Other variance income</td>' + \
'<td>' + r21c1 + '</td>' + \
'<td>' + r21c2 + '</td>' + \
'<td>' + r21c3 + '</td>' + \
'<td>' + r21c4 + '</td>' + \
'<td>' + r21c5 + '</td>' + \
'<td>' + r21c6 + '</td>' + \
'<td>' + r21c7 + '</td>' + \
'<td>' + r21c8 + '</td>' + \
'<td>' + r21c9 + '</td>' + \
'<td>' + r21c10 + '</td>' + \
'<td>' + r21c11 + '</td>' + \
'<td>' + r21c12 + '</td>' + \
'<td>' + r21c13 + '</td>' + \
'<td>' +
'''
Created on Jul 07, 2015
@author: alexandru-m-g
'''
import datetime
import json
import logging
from flask import request
from sqlalchemy import or_
import ckan.lib.dictization.model_save as model_save
import ckan.lib.munge as munge
import ckan.lib.plugins as lib_plugins
import ckan.lib.uploader as uploader
import ckan.logic.action.update as core_update
import ckan.plugins as plugins
import ckan.plugins.toolkit as tk
import ckanext.hdx_package.helpers.geopreview as geopreview
import ckanext.hdx_package.helpers.helpers as helpers
from ckan.common import _
from ckanext.hdx_org_group.helpers.org_batch import get_batch_or_generate
from ckanext.hdx_package.helpers.analytics import QACompletedAnalyticsSender
from ckanext.hdx_package.helpers.constants import FILE_WAS_UPLOADED, \
BATCH_MODE, BATCH_MODE_DONT_GROUP, BATCH_MODE_KEEP_OLD
from ckanext.hdx_package.helpers.file_removal import file_remove, find_filename_in_url
_check_access = tk.check_access
_get_action = tk.get_action
_get_or_bust = tk.get_or_bust
get_or_bust = tk.get_or_bust
NotFound = tk.ObjectNotFound
ValidationError = tk.ValidationError
log = logging.getLogger(__name__)
SKIP_VALIDATION = 'skip_validation'
@geopreview.geopreview_4_resources
def resource_update(context, data_dict):
'''
This runs the 'resource_update' action from core ckan's update.py.
It allows us to do some minor changes and wrap it.
'''
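# Usage note (illustrative, not from the original file): like other CKAN actions, this
# wrapper is normally reached through the action layer rather than imported directly, e.g.
#   tk.get_action('resource_update')(context, {'id': resource_id, 'url': new_url})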
id = _get_or_bust(data_dict, "id")
process_batch_mode(context, data_dict)
# flag_if_file_uploaded(context, data_dict)
process_skip_validation(context, data_dict)
# make the update faster (less computation in the custom package_show)
context['no_compute_extra_hdx_show_properties'] = True
# prev_resource_dict = _fetch_prev_resource_info(context['model'], id)
# new_file_uploaded = bool(data_dict.get('upload'))
if data_dict.get('resource_type', '') != 'file.upload':
#If this isn't an upload, it is a link so make sure we update
#the url_type otherwise solr will screw everything up
data_dict['url_type'] = 'api'
# we need to overwrite size field (not just setting it to None or pop) otherwise
# ckan.lib.dictization.model_save.resource_dict_save() keeps the old value
data_dict['size'] = 0
else:
try:
if len(request.files) > 0:
data_dict['size'] = request.content_length
data_dict['mimetype'] = request.files['upload'].mimetype
except RuntimeError as re:
log.debug('This usually happens for tests when there is no HTTP request: ' + unicode(re))
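# Normalise 'datastore_active', which may arrive as the strings 'true'/'false' rather
# than a boolean, before handing the dict to core resource_update.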
if data_dict.get('datastore_active', 'false') in ('false', 'False'):
data_dict['datastore_active'] = False
else:
if data_dict.get('datastore_active', 'true') in ('true', 'True'):
data_dict['datastore_active'] = True
result_dict = core_update.resource_update(context, data_dict)
# if new_file_uploaded:
# _delete_old_file_if_necessary(prev_resource_dict, result_dict)
return result_dict
def _delete_old_file_if_necessary(prev_resource_dict, resource_dict):
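# The previously uploaded file is removed when the resource switched from an upload to an
# external link ('api' url_type) or when the newly uploaded file has a different (munged)
# filename than the old one; otherwise the old file is kept.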
prev_resource_is_upload = prev_resource_dict.get('url_type') == 'upload'
new_resource_is_api = resource_dict.get('url_type') == 'api'
filename = find_filename_in_url(resource_dict.get('url', ''))
munged_current_filename = munge.munge_filename(filename)
munged_prev_filename = munge.munge_filename(prev_resource_dict['url'])
new_file_has_same_name = munged_current_filename == munged_prev_filename
if prev_resource_is_upload and (new_resource_is_api or not new_file_has_same_name):
log.info('Deleting resource {}/{}'.format(prev_resource_dict['id'], prev_resource_dict['name']))
file_remove(prev_resource_dict['id'], prev_resource_dict['url'], prev_resource_dict['url_type'])
else:
log.info('Not deleting resource: prev_resource_is_upload {} '
'/ new_file_has_same_name {} / new_resource_is_api {}'
.format(prev_resource_is_upload, new_file_has_same_name, new_resource_is_api))
# def _fetch_prev_resource_info(model, resource_id):
# id_to_resource_map = _fetch_prev_resources_info(model, [resource_id])
# return id_to_resource_map.get(resource_id)
def _fetch_prev_resources_info(model, resource_ids):
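# Looks up the given resources by id or name and returns a map of resource id to the
# minimal set of fields needed later to decide whether an old uploaded file must be removed.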
q = model.Session.query(model.Resource).filter(
or_(
model.Resource.id.in_(resource_ids), model.Resource.name.in_(resource_ids)
)
)
resources = q.all()
id_to_resource_map = {}
for res in resources:
id_to_resource_map[res.id] = {
'id': res.id,
'name': res.name,
'url_type': res.url_type,
'url': res.url,
}
return id_to_resource_map
@geopreview.geopreview_4_packages
def package_update(context, data_dict):
'''Update a dataset (package).
You must be authorized to edit the dataset and the groups that it belongs
to.
It is recommended to call
:py:func:`ckan.logic.action.get.package_show`, make the desired changes to
the result, and then call ``package_update()`` with it.
Plugins may change the parameters of this function depending on the value
of the dataset's ``type`` attribute, see the
:py:class:`~ckan.plugins.interfaces.IDatasetForm` plugin interface.
For further parameters see
:py:func:`~ckan.logic.action.create.package_create`.
:param id: the name or id of the dataset to update
:type id: string
:returns: the updated dataset (if ``'return_package_dict'`` is ``True`` in
the context, which is the default. Otherwise returns just the
dataset id)
:rtype: dictionary
'''
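# Usage sketch (illustrative, not part of the original file): as the docstring above
# recommends, callers typically fetch the dataset, modify the returned dict, and pass it
# back in full:
#   pkg = _get_action('package_show')(context, {'id': 'some-dataset'})
#   pkg['notes'] = 'Updated description'
#   _get_action('package_update')(context, pkg)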
process_batch_mode(context, data_dict)
process_skip_validation(context, data_dict)
model = context['model']
session = context['session']
name_or_id = data_dict.get('id') or data_dict.get('name')
if name_or_id is None:
raise ValidationError({'id': _('Missing value')})
pkg = model.Package.get(name_or_id)
if pkg is None:
raise NotFound(_('Package was not found.'))
context["package"] = pkg
prev_last_modified = pkg.metadata_modified
# immutable fields
data_dict["id"] = pkg.id
data_dict['type'] = pkg.type
if 'groups' in data_dict:
data_dict['solr_additions'] = helpers.build_additions(data_dict['groups'])
if 'dataset_confirm_freshness' in data_dict and data_dict['dataset_confirm_freshness'] == 'on':
data_dict['review_date'] = datetime.datetime.utcnow()
_check_access('package_update', context, data_dict)
user = context['user']
# get the schema
package_plugin = lib_plugins.lookup_package_plugin(pkg.type)
if 'schema' in context:
schema = context['schema']
else:
schema = package_plugin.update_package_schema()
if 'api_version' not in context:
# check_data_dict() is deprecated. If the package_plugin has a
# check_data_dict() we'll call it, if it doesn't have the method we'll
# do nothing.
check_data_dict = getattr(package_plugin, 'check_data_dict', None)
if check_data_dict:
try:
package_plugin.check_data_dict(data_dict, schema)
except TypeError:
# Old plugins do not support passing the schema so we need
# to ensure they still work.
package_plugin.check_data_dict(data_dict)
# Inject the existing package_creator as it should not be modifiable
if hasattr(pkg, 'extras'):
data_dict['package_creator'] = pkg.extras.get('package_creator', data_dict.get('package_creator'))
# Get previous version of QA COMPLETED
prev_qa_completed = pkg.extras.get('qa_completed') == 'true'
# Inject a code representing the batch within which this dataset was modified
if pkg.type == 'dataset':
if context.get(BATCH_MODE) == BATCH_MODE_KEEP_OLD:
try:
batch_extras = pkg._extras.get('batch')
if batch_extras and batch_extras.state == 'active':
data_dict['batch'] = batch_extras.value
except Exception as e:
log.info(str(e))
elif context.get(BATCH_MODE) != BATCH_MODE_DONT_GROUP:
data_dict['batch'] = get_batch_or_generate(data_dict.get('owner_org'))
resource_upload_ids = []
resource_uploads = []
for resource in data_dict.get('resources', []):
# I believe that unless a resource has either an upload field or is marked to be deleted
# we don't need to create an uploader object which is expensive
if 'clear_upload' in resource or resource.get('upload'):
#this needs to be run while the upload field still exists
flag_if_file_uploaded(context, resource)
# file uploads/clearing
upload = uploader.get_resource_uploader(resource)
resource_upload_ids.append(resource.get('id') or resource.get('name'))
if 'mimetype' not in resource:
if hasattr(upload, 'mimetype'):
resource['mimetype'] = upload.mimetype
resource['size'] = upload.filesize
else:
upload = None
resource_uploads.append(upload)
ids_to_prev_resource_dict = _fetch_prev_resources_info(model, resource_upload_ids)
data, errors = lib_plugins.plugin_validate(
package_plugin, context, data_dict, schema, 'package_update')
log.debug('package_update validate_errs=%r user=%s package=%s data=%r',
errors, context.get('user'),
context.get('package').name if context.get('package') else '',
data)
if errors:
model.Session.rollback()
raise ValidationError(errors)
#avoid revisioning by updating directly
model.Session.query(model.Package).filter_by(id=pkg.id).update(
{"metadata_modified": datetime.datetime.utcnow()})
model.Session.refresh(pkg)
if 'tags' in data:
data['tags'] = helpers.get_tag_vocabulary(data['tags'])
pkg = modified_save(context, data)
# pkg = model_save.package_dict_save(data, context)
context_org_update = context.copy()
context_org_update['ignore_auth'] = True
context_org_update['defer_commit'] = True
_get_action('package_owner_org_update')(context_org_update,
{'id': pkg.id,
'organization_id': pkg.owner_org})
# Needed to let extensions know the new resource ids
model.Session.flush()
for index, (resource, upload) in enumerate(
zip(data.get('resources', []), resource_uploads)):
resource['id'] = pkg.resources[index].id
if upload:
log.info('There\'s a resource in package_update() which is marked for: {}'
.format('clear' if upload.clear else 'upload'))
upload.upload(resource['id'], uploader.get_max_resource_size())
for item in plugins.PluginImplementations(plugins.IPackageController):
item.edit(pkg)
item.after_update(context, data)
# Create activity
if not pkg.private and pkg.type == 'dataset':
user_obj = model.User.by_name(user)
if user_obj:
user_id = user_obj.id
else:
user_id = 'not logged in'
activity = pkg.activity_stream_item('changed', user_id)
session.add(activity)
if not context.get('defer_commit'):
model.repo.commit()
log.debug('Updated object %s' % pkg.name)
return_id_only = context.get('return_id_only', False)
# Make sure that a user provided schema is not used on package_show
context.pop('schema', None)
# we could update the dataset so we should still be able to read it.
context['ignore_auth'] = True
new_data_dict = _get_action('package_show')(context, {'id': data_dict['id']})
# HDX - delete previous files if needed
for resource_dict in new_data_dict.get('resources'):
prev_resource_dict = ids_to_prev_resource_dict.get(resource_dict['id'])
if prev_resource_dict:
_delete_old_file_if_necessary(prev_resource_dict, resource_dict)
new_qa_completed = new_data_dict.get('qa_completed')
if new_qa_completed != prev_qa_completed and new_data_dict.get('type') == 'dataset':
QACompletedAnalyticsSender(new_data_dict, prev_last_modified,
mark_as_set=new_qa_completed).send_to_queue()
log.debug('new QA COMPLETED value: {}'.format(new_qa_completed))
return data_dict['id'] if return_id_only else new_data_dict
def package_resource_reorder(context, data_dict):
'''
This runs the 'package_resource_reorder' action from core ckan's update.py.
It allows us to do some minor changes and wrap it.
'''
process_batch_mode(context, data_dict)
result_dict = core_update.package_resource_reorder(context, data_dict)
return result_dict
def process_batch_mode(context, data_dict):
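# Callers may include the BATCH_MODE key (e.g. BATCH_MODE_KEEP_OLD or BATCH_MODE_DONT_GROUP)
# in data_dict; it is moved into the context here, presumably so the flag is not validated
# or stored with the dataset, and is read back later (see package_update above).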
if BATCH_MODE in data_dict:
context[BATCH_MODE] = data_dict[BATCH_MODE]
del data_dict[BATCH_MODE]
def flag_if_file_uploaded(context, resource_dict):
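# Records in the shared context which resource ids arrived with an actual file upload;
# 'NEW' is used as a placeholder when the resource has no id yet.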
if resource_dict.get('upload'):
if FILE_WAS_UPLOADED not in context:
context[FILE_WAS_UPLOADED] = set()
context[FILE_WAS_UPLOADED].add(resource_dict.get('id', 'NEW'))
def process_skip_validation(context, data_dict):
if SKIP_VALIDATION in data_dict:
context[SKIP_VALIDATION] = data_dict[SKIP_VALIDATION]
del data_dict[SKIP_VALIDATION]
def modified_save(context, data):
"""
Wrapper around lib.dictization.model_save.package_dict_save
"""
groups_key = 'groups'
if groups_key in data:
temp_groups = data[groups_key]
data[groups_key] = None
pkg = model_save.package_dict_save(data, context)
data[groups_key] = temp_groups
else:
pkg = model_save.package_dict_save(data, context)
package_membership_list_save(data.get("groups"), pkg, context)
return pkg
def package_membership_list_save(group_dicts, package, context):
"""
Overrides lib.dictization.model_save.package_membership_list_save
"""
allow_partial_update = context.get("allow_partial_update", False)
if group_dicts is None and allow_partial_update:
return
capacity = 'public'
model = context["model"]
session = context["session"]
pending = context.get('pending')
user = context.get('user')
members = session.query(model.Member) \
.filter(model.Member.table_id == package.id) \
.filter(model.Member.capacity != 'organization')
group_member = dict((member.group, member)
for member in
members)
groups = set()
for group_dict in group_dicts or []:
id = group_dict.get("id")
name = group_dict.get("name")
capacity = group_dict.get("capacity", "public")
if capacity == 'organization':
continue
if id:
group = session.query(model.Group).get(id)
else:
group = session.query(model.Group).filter_by(name=name).first()
if group:
groups.add(group)
# need to flush so we can get out the package id
model.Session.flush()
# Remove any groups we are no longer in
for group in set(group_member.keys()) - groups:
member_obj = group_member[group]
if member_obj and member_obj.state == 'deleted':
continue
member_obj.capacity = capacity
member_obj.state = 'deleted'
session.add(member_obj)
# Add any new groups
for group in groups:
member_obj = group_member.get(group)
if member_obj and member_obj.state == 'active':
continue
member_obj = group_member.get(group)
if member_obj:
member_obj.capacity = capacity
|